kopipasta 0.21.0-py3-none-any.whl → 0.23.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of kopipasta might be problematic.
- kopipasta/main.py +346 -99
- kopipasta-0.23.0.dist-info/METADATA +171 -0
- kopipasta-0.23.0.dist-info/RECORD +8 -0
- {kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/WHEEL +1 -1
- kopipasta-0.21.0.dist-info/METADATA +0 -94
- kopipasta-0.21.0.dist-info/RECORD +0 -8
- {kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/LICENSE +0 -0
- {kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/entry_points.txt +0 -0
- {kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/top_level.txt +0 -0
kopipasta/main.py CHANGED
@@ -4,6 +4,7 @@ import io
 import json
 import os
 import argparse
+import sys
 import re
 import subprocess
 import tempfile
@@ -17,8 +18,103 @@ import pygments.util
 
 import requests
 
+from pydantic import BaseModel, Field
+import traceback
+from google import genai
+from google.genai.types import GenerateContentConfig
+from prompt_toolkit import prompt # Added for multiline input
+
 FileTuple = Tuple[str, bool, Optional[List[str]], str]
 
+class SimplePatchItem(BaseModel):
+    """A single change described by reasoning, file path, original text, and new text."""
+    reasoning: str = Field(..., description="Explanation for why this specific change is proposed.")
+    file_path: str = Field(..., description="Relative path to the file to be modified.")
+    original_text: str = Field(..., description="The exact, unique block of text to be replaced.")
+    new_text: str = Field(..., description="The text to replace the original_text with.")
+
+class SimplePatchArgs(BaseModel):
+    """A list of proposed code changes."""
+    patches: List[SimplePatchItem] = Field(..., description="A list of patches to apply.")
+
+def apply_simple_patch(patch_item: SimplePatchItem) -> bool:
+    """
+    Applies a single patch defined by replacing original_text with new_text in file_path.
+
+    Validates that the file exists and the original_text is unique.
+    """
+    print(f"\nApplying patch to: {patch_item.file_path}")
+    print(f"Reasoning: {patch_item.reasoning}")
+    print("-" * 20)
+
+    file_path = patch_item.file_path
+    original_text = patch_item.original_text
+    new_text = patch_item.new_text
+
+    # --- Validation ---
+    if not os.path.exists(file_path):
+        print(f"❌ Error: File not found: {file_path}")
+        print("-" * 20)
+        return False
+
+    try:
+        # Read the file content, attempting to preserve line endings implicitly
+        with open(file_path, 'r', encoding='utf-8', newline='') as f:
+            content = f.read()
+
+        # Check for unique occurrence of original_text
+        occurrences = content.count(original_text)
+        if occurrences == 0:
+            print(f"❌ Error: Original text not found in {file_path}.")
+            # Optional: print a snippet of the expected text for debugging
+            # print(f"  Expected original text snippet: '{original_text[:100]}...'")
+            print("-" * 20)
+            return False
+        elif occurrences > 1:
+            print(f"❌ Error: Original text is not unique in {file_path} (found {occurrences} times).")
+            print(f"   Patch cannot be applied automatically due to ambiguity.")
+            # print(f"  Ambiguous original text snippet: '{original_text[:100]}...'")
+            print("-" * 20)
+            return False
+
+        # --- Application ---
+        # Replace the single unique occurrence
+        new_content = content.replace(original_text, new_text, 1)
+
+        # Heuristic to check if a newline might be needed at the end
+        original_ends_with_newline = content.endswith(('\n', '\r'))
+        new_ends_with_newline = new_content.endswith(('\n', '\r'))
+
+        if original_ends_with_newline and not new_ends_with_newline and new_content:
+            # Try to determine the original newline type
+            if content.endswith('\r\n'):
+                new_content += '\r\n'
+            else: # Assume '\n' otherwise
+                new_content += '\n'
+        elif not original_ends_with_newline and new_ends_with_newline:
+            # If original didn't end with newline, remove the one added by replacement
+            # This is less common but possible if new_text ends with \n and original_text didn't
+            new_content = new_content.rstrip('\r\n')
+
+
+        # Write the modified content back
+        with open(file_path, 'w', encoding='utf-8', newline='') as f:
+            f.write(new_content)
+
+        print(f"✅ Patch applied successfully to {file_path}.")
+        print("-" * 20)
+        return True
+
+    except IOError as e:
+        print(f"❌ Error reading or writing file {file_path}: {e}")
+        print("-" * 20)
+        return False
+    except Exception as e:
+        print(f"❌ An unexpected error occurred during patch application: {e}")
+        traceback.print_exc()
+        print("-" * 20)
+        return False
+
 def get_colored_code(file_path, code):
     try:
         lexer = get_lexer_for_filename(file_path)
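The validation rules above (the target file must exist, and `original_text` must occur exactly once) are the heart of the new patching workflow. Below is a minimal usage sketch, not part of the release: the file contents and reasoning string are hypothetical, and it assumes the 0.23.0 module is importable as `kopipasta.main`.

```python
# Hypothetical sketch exercising apply_simple_patch; nothing here ships with kopipasta.
import os
import tempfile

from kopipasta.main import SimplePatchItem, apply_simple_patch

# Create a throwaway file to patch (an absolute path also satisfies os.path.exists).
fd, path = tempfile.mkstemp(suffix=".py")
with os.fdopen(fd, "w", encoding="utf-8") as f:
    f.write("def greet():\n    print('hello')\n")

patch = SimplePatchItem(
    reasoning="Make the greeting configurable.",  # hypothetical reasoning
    file_path=path,
    original_text="    print('hello')\n",
    new_text="    print(greeting)\n",
)

assert apply_simple_patch(patch) is True   # unique match, so the patch is applied
assert apply_simple_patch(patch) is False  # original_text is gone now, so it is rejected
os.unlink(path)
```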
@@ -170,7 +266,7 @@ def split_python_file(file_content):
         code = get_code(prev_end, chunk_start)
         if code.strip():
             chunks.append((code, prev_end, chunk_start))
-
+
         # Add the merged chunk
         code = get_code(chunk_start, chunk_end)
         chunks.append((code, chunk_start, chunk_end))

@@ -574,78 +670,19 @@ def fetch_web_content(url: str) -> Tuple[Optional[FileTuple], Optional[str], Optional[str]]:
         content_type = response.headers.get('content-type', '').lower()
         full_content = response.text
         snippet = full_content[:10000] + "..." if len(full_content) > 10000 else full_content
-
+
         if 'json' in content_type:
             content_type = 'json'
         elif 'csv' in content_type:
             content_type = 'csv'
         else:
             content_type = 'text'
-
+
         return (url, False, None, content_type), full_content, snippet
     except requests.RequestException as e:
         print(f"Error fetching content from {url}: {e}")
         return None, None, None
 
-def read_file_content(file_path):
-    _, ext = os.path.splitext(file_path)
-    if ext.lower() == '.json':
-        with open(file_path, 'r') as f:
-            return json.load(f), 'json'
-    elif ext.lower() == '.csv':
-        with open(file_path, 'r') as f:
-            return f.read(), 'csv'
-    else:
-        with open(file_path, 'r') as f:
-            return f.read(), 'text'
-
-def get_content_snippet(content, content_type, max_lines=50, max_chars=4096):
-    if content_type == 'json':
-        return json.dumps(content, indent=2)[:max_chars]
-    elif content_type == 'csv':
-        csv_content = content if isinstance(content, str) else content.getvalue()
-        csv_reader = csv.reader(io.StringIO(csv_content))
-        rows = list(csv_reader)[:max_lines]
-        output = io.StringIO()
-        csv.writer(output).writerows(rows)
-        return output.getvalue()[:max_chars]
-    else:
-        return '\n'.join(content.split('\n')[:max_lines])[:max_chars]
-
-def handle_content(content, content_type, file_or_url):
-    is_large = len(json.dumps(content)) > 102400 if content_type == 'json' else len(content) > 102400
-
-    if is_large:
-        while True:
-            choice = input(f"{file_or_url} is large. View (f)ull content, (s)nippet, or (p)review? ").lower()
-            if choice in ['f', 's', 'p']:
-                break
-            print("Invalid choice. Please enter 'f', 's', or 'p'.")
-
-        if choice == 'f':
-            return content, False
-        elif choice == 's':
-            return get_content_snippet(content, content_type), True
-        else: # preview
-            preview = get_content_preview(content, content_type)
-            print(f"\nPreview of {file_or_url}:\n{preview}\n")
-            return handle_content(content, content_type, file_or_url)
-    else:
-        return content, False
-
-
-def get_content_preview(content, content_type):
-    if content_type == 'json':
-        return json.dumps(content, indent=2)[:1000] + "\n..."
-    elif content_type == 'csv':
-        csv_content = content if isinstance(content, str) else content.getvalue()
-        csv_reader = csv.reader(io.StringIO(csv_content))
-        rows = list(csv_reader)[:10]
-        output = io.StringIO()
-        csv.writer(output).writerows(rows)
-        return output.getvalue() + "\n..."
-    else:
-        return '\n'.join(content.split('\n')[:20]) + "\n..."
 
 def read_env_file():
     env_vars = {}

@@ -673,14 +710,14 @@ def handle_env_variables(content, env_vars):
     print("Detected environment variables:")
     for key, value in detected_vars:
         print(f"- {key}={value}")
-
+
     for key, value in detected_vars:
         while True:
             choice = input(f"How would you like to handle {key}? (m)ask / (s)kip / (k)eep: ").lower()
             if choice in ['m', 's', 'k']:
                 break
             print("Invalid choice. Please enter 'm', 's', or 'k'.")
-
+
         if choice == 'm':
             content = content.replace(value, '*' * len(value))
         elif choice == 's':

@@ -712,7 +749,7 @@ def generate_prompt_template(files_to_include: List[FileTuple], ignore_patterns:
         file_content = read_file_contents(file)
         file_content = handle_env_variables(file_content, env_vars)
         prompt += f"### {relative_path}\n\n```{language}\n{file_content}\n```\n\n"
-
+
     if web_contents:
         prompt += "## Web Content\n\n"
         for url, (file_tuple, content) in web_contents.items():

@@ -720,7 +757,7 @@ def generate_prompt_template(files_to_include: List[FileTuple], ignore_patterns:
             content = handle_env_variables(content, env_vars)
             language = content_type if content_type in ['json', 'csv'] else ''
             prompt += f"### {url}{' (snippet)' if is_snippet else ''}\n\n```{language}\n{content}\n```\n\n"
-
+
     prompt += "## Task Instructions\n\n"
     cursor_position = len(prompt)
     prompt += "\n\n"
@@ -779,10 +816,196 @@ def open_editor_for_input(template: str, cursor_position: int) -> str:
     finally:
         os.unlink(temp_file_path)
 
+def start_chat_session(initial_prompt: str):
+    """Starts an interactive chat session with the Gemini API using google-genai."""
+    if not genai:
+        # Error message already printed during import if it failed
+        sys.exit(1)
+
+    # The google-genai library automatically uses GOOGLE_API_KEY env var if set
+    # We still check if it's set to provide a clearer error message upfront
+    if not os.environ.get('GOOGLE_API_KEY'):
+        print("Error: GOOGLE_API_KEY environment variable not set.")
+        print("Please set the GOOGLE_API_KEY environment variable with your API key.")
+        sys.exit(1)
+
+    try:
+        # Create the client - it will use the env var automatically
+        client = genai.Client()
+        print("Google GenAI Client created (using GOOGLE_API_KEY).")
+        # You could add a check here like listing models to verify the key early
+        # print("Available models:", [m.name for m in client.models.list()])
+    except Exception as e:
+        print(f"Error creating Google GenAI client: {e}")
+        print("Please ensure your GOOGLE_API_KEY is valid and has permissions.")
+        sys.exit(1)
+
+    model_name = 'gemini-2.5-pro-exp-03-25'
+    print(f"Using model: {model_name}")
+
+    try:
+        # Create a chat session using the client
+        chat = client.chats.create(model=model_name)
+        # Note: History is managed by the chat object itself
+
+        print("\n--- Starting Interactive Chat with Gemini ---")
+        print("Type /q to quit, /help or /? for help, /patch to request a diff patch.")
+
+        # Send the initial prompt using send_message_stream
+        print("\n🤖 Gemini:")
+        full_response_text = ""
+        # Use send_message_stream for streaming responses
+        response_stream = chat.send_message_stream(initial_prompt)
+        for chunk in response_stream:
+            print(chunk.text, end="", flush=True)
+            full_response_text += chunk.text
+        print("\n" + "-"*20)
+
+        while True:
+            try:
+                # Replace standard input with prompt_toolkit for multiline support
+                user_input = prompt("👤 You (Submit with Esc+Enter or Alt+Enter): ", multiline=True)
+                # prompt_toolkit raises EOFError on Ctrl+D, so this handler remains correct.
+            except EOFError:
+                print("\nExiting...")
+                break
+            except KeyboardInterrupt: # Handle Ctrl+C
+                print("\nExiting...")
+                break
+
+            if user_input.lower() == '/q':
+                break
+            elif user_input.strip() == '/patch':
+                print("\n🤖 Gemini: Thinking... (requesting code changes)")
+                # Prompt instructing the model to use the new JSON format
+                patch_request_prompt = (
+                    "Based on our conversation, generate the necessary code changes "
+                    "to fulfill my request. Provide the changes as a JSON list, where each item "
+                    "is an object with the following keys:\n"
+                    "- 'reasoning': Explain why this specific change is needed.\n"
+                    "- 'file_path': The relative path to the file to modify.\n"
+                    "- 'original_text': The exact, unique block of text to replace.\n"
+                    "- 'new_text': The text to replace original_text with.\n"
+                    "Ensure 'original_text' is unique within the specified 'file_path'. "
+                    "Format the response strictly as: { \"patches\": [ { patch_item_1 }, { patch_item_2 }, ... ] }"
+                )
+
+                try:
+                    # Request the response using the new schema
+                    response = chat.send_message(
+                        patch_request_prompt,
+                        config=GenerateContentConfig(
+                            response_schema=SimplePatchArgs.model_json_schema(),
+                            response_mime_type='application/json'
+                        )
+                    )
+
+                    print("🤖 Gemini: Received potential patches.")
+                    try:
+                        # Validate and parse args using the Pydantic model
+                        # Explicitly validate the dictionary returned by response.parsed
+                        if isinstance(response.parsed, dict):
+                            patch_args = SimplePatchArgs.model_validate(response.parsed)
+                        else:
+                            # Handle unexpected type if response.parsed isn't a dict
+                            print(f"❌ Error: Expected a dictionary for patches, but got type {type(response.parsed)}")
+                            print(f"   Content: {response.parsed}")
+                            continue # Skip further processing for this response
+
+                        if not patch_args or not patch_args.patches:
+                            print("🤖 Gemini: No patches were proposed in the response.")
+                            print("-" * 20)
+                            continue
+
+                        print("\nProposed Patches:")
+                        print("=" * 30)
+                        for i, patch_item in enumerate(patch_args.patches):
+                            print(f"Patch {i+1}/{len(patch_args.patches)}:")
+                            print(f"  File: {patch_item.file_path}")
+                            print(f"  Reasoning: {patch_item.reasoning}")
+                            # Optionally show snippets of original/new text for review
+                            print(f"  Original (snippet): '{patch_item.original_text[:80].strip()}...'")
+                            print(f"  New (snippet): '{patch_item.new_text[:80].strip()}...'")
+                            print("-" * 20)
+
+                        confirm = input(f"Apply these {len(patch_args.patches)} patches? (y/N): ").lower()
+                        if confirm == 'y':
+                            applied_count = 0
+                            failed_count = 0
+                            for patch_item in patch_args.patches:
+                                # Call the new apply function for each patch
+                                success = apply_simple_patch(patch_item)
+                                if success:
+                                    applied_count += 1
+                                else:
+                                    failed_count += 1
+
+                            print("\nPatch Application Summary:")
+                            if applied_count > 0:
+                                print(f"✅ Successfully applied {applied_count} patches.")
+                            if failed_count > 0:
+                                print(f"❌ Failed to apply {failed_count} patches.")
+                            if applied_count == 0 and failed_count == 0: # Should not happen if list wasn't empty
+                                print("⚪ No patches were applied.")
+                            print("=" * 30)
+                        else:
+                            print("🤖 Gemini: Patches not applied by user.")
+                            print("-" * 20)
+
+                    except Exception as e: # Catch Pydantic validation errors or other issues
+                        print(f"❌ Error processing patch response: {e}")
+                        # Attempt to show the raw response text if parsing failed
+                        raw_text = ""
+                        try:
+                            if response.parts:
+                                raw_text = "".join(part.text for part in response.parts if hasattr(part, 'text'))
+                            elif hasattr(response, 'text'):
+                                raw_text = response.text
+                        except Exception:
+                            pass # Ignore errors getting raw text
+                        if raw_text:
+                            print(f"   Received response text:\n{raw_text}")
+                        else:
+                            print(f"   Received response content: {response}") # Fallback representation
+
+                except Exception as e:
+                    print(f"\n❌ An error occurred while requesting patches from Gemini: {e}")
+                    print("   Please check your connection, API key, and model permissions/capabilities.")
+                    print("-" * 20)
+
+                continue # Go to next loop iteration after handling /patch
+            elif user_input.strip() in ['/help', '/?']:
+                print("🤖 Gemini: Available commands:")
+                print("  /q - Quit the chat session.")
+                print("  /patch - Request a diff patch (not fully implemented yet).")
+                print("  /help or /? - Show this help message.")
+                print("-" * 20)
+                continue
+            elif not user_input.strip(): # Ignore empty input
+                continue
+
+            print("\n🤖 Gemini:")
+            full_response_text = ""
+            try:
+                # Use send_message_stream for subsequent messages
+                response_stream = chat.send_message_stream(user_input)
+                for chunk in response_stream:
+                    print(chunk.text, end="", flush=True)
+                    full_response_text += chunk.text
+                print("\n" + "-"*20)
+            except Exception as e:
+                print(f"\nAn unexpected error occurred: {e}")
+                print("Try again or type 'exit'.")
+
+    except Exception as e:
+        # Catch other potential errors
+        print(f"\nAn error occurred setting up the chat session: {e}")
+
 def main():
     parser = argparse.ArgumentParser(description="Generate a prompt with project structure, file contents, and web content.")
     parser.add_argument('inputs', nargs='+', help='Files, directories, or URLs to include in the prompt')
     parser.add_argument('-t', '--task', help='Task description for the AI prompt')
+    parser.add_argument('-I', '--interactive', action='store_true', help='Start an interactive chat session after generating the prompt.')
     args = parser.parse_args()
 
     ignore_patterns = read_gitignore()
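The `/patch` branch above chains three pieces: a Pydantic schema, Gemini's JSON-constrained output, and local validation before anything touches disk. Condensed to its essentials, and assuming the `google-genai` 1.8.0 API pinned by this release, the round trip looks like this sketch (the message text is a stand-in for the full `patch_request_prompt`):

```python
# Condensed sketch of the /patch round trip implemented above.
from google import genai
from google.genai.types import GenerateContentConfig

from kopipasta.main import SimplePatchArgs, apply_simple_patch

client = genai.Client()  # reads GOOGLE_API_KEY from the environment
chat = client.chats.create(model='gemini-2.5-pro-exp-03-25')

response = chat.send_message(
    "Propose code changes as JSON patches.",  # stand-in for the full prompt
    config=GenerateContentConfig(
        response_schema=SimplePatchArgs.model_json_schema(),
        response_mime_type='application/json',
    ),
)

# response.parsed is a dict when the model honors the schema; validate it anyway.
patch_args = SimplePatchArgs.model_validate(response.parsed)
for patch_item in patch_args.patches:
    apply_simple_patch(patch_item)
```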
@@ -801,15 +1024,15 @@ def main():
             is_large = len(full_content) > 10000
             if is_large:
                 print(f"\nContent from {input_path} is large. Here's a snippet:\n")
-                print(snippet)
+                print(get_colored_code(input_path, snippet))
                 print("\n" + "-"*40 + "\n")
-
+
                 while True:
                     choice = input("Use (f)ull content or (s)nippet? ").lower()
                     if choice in ['f', 's']:
                         break
                     print("Invalid choice. Please enter 'f' or 's'.")
-
+
                 if choice == 'f':
                     content = full_content
                     is_snippet = False

@@ -822,7 +1045,7 @@ def main():
                 content = full_content
                 is_snippet = False
                 print(f"Content from {input_path} is not large. Using full content.")
-
+
             file_tuple = (file_tuple[0], is_snippet, file_tuple[2], file_tuple[3])
             web_contents[input_path] = (file_tuple, content)
             current_char_count += len(content)
@@ -901,38 +1124,62 @@ def main():
     print_char_count(current_char_count) # Print final count before prompt generation
     print(f"Summary: Added {len(files_to_include)} files and {len(web_contents)} web sources.")
 
+    added_files_count = len(files_to_include)
+    added_dirs_count = len(processed_dirs) # Count unique processed directories
+    added_web_count = len(web_contents)
+    print(f"Summary: Added {added_files_count} files/patches from {added_dirs_count} directories and {added_web_count} web sources.")
+
     prompt_template, cursor_position = generate_prompt_template(files_to_include, ignore_patterns, web_contents, env_vars)
 
-    [old lines 906-915 removed; their content is not captured in this diff view]
+    # Logic branching for interactive mode vs. clipboard mode
+    if args.interactive:
+        print("\nPreparing initial prompt for editing...")
+        # Determine the initial content for the editor
+        if args.task:
+            # Pre-populate the task section if --task was provided
+            editor_initial_content = prompt_template[:cursor_position] + args.task + prompt_template[cursor_position:]
+            print("Pre-populating editor with task provided via --task argument.")
+        else:
+            # Use the template as is (user will add task in the editor)
+            editor_initial_content = prompt_template
+            print("Opening editor for you to add the task instructions.")
+
+        # Always open the editor in interactive mode
+        initial_chat_prompt = open_editor_for_input(editor_initial_content, cursor_position)
+        print("Editor closed. Starting interactive chat session...")
+        start_chat_session(initial_chat_prompt) # Start the chat with the edited prompt
     else:
-    [old lines 917-935 removed; only a few truncated print( fragments survive in this diff view]
+        # Original non-interactive behavior
+        if args.task:
+            task_description = args.task
+            # Insert task description before "## Task Instructions"
+            task_marker = "## Task Instructions\n\n"
+            insertion_point = prompt_template.find(task_marker)
+            if insertion_point != -1:
+                final_prompt = prompt_template[:insertion_point + len(task_marker)] + task_description + "\n\n" + prompt_template[insertion_point + len(task_marker):]
+            else: # Fallback if marker not found
+                final_prompt = prompt_template[:cursor_position] + task_description + prompt_template[cursor_position:]
+            print("\nUsing task description from -t argument.")
+        else:
+            print("\nOpening editor for task instructions...")
+            final_prompt = open_editor_for_input(prompt_template, cursor_position)
+
+        print("\n\nGenerated prompt:")
+        print("-" * 80)
+        print(final_prompt)
+        print("-" * 80)
+
+        # Copy the prompt to clipboard
+        try:
+            pyperclip.copy(final_prompt)
+            separator = "\n" + "=" * 40 + "\n☕🍝 Kopipasta Complete! 🍝☕\n" + "=" * 40 + "\n"
+            print(separator)
+            final_char_count = len(final_prompt)
+            final_token_estimate = final_char_count // 4
+            print(f"Prompt has been copied to clipboard. Final size: {final_char_count} characters (~ {final_token_estimate} tokens)")
+        except pyperclip.PyperclipException as e:
+            print(f"\nWarning: Failed to copy to clipboard: {e}")
+            print("You can manually copy the prompt above.")
 
 if __name__ == "__main__":
     main()
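For reference, the payload shape that `patch_request_prompt` asks the model to return, and that `SimplePatchArgs` accepts, can be checked directly; every field value below is an illustrative placeholder:

```python
# Illustrative payload matching the format requested by patch_request_prompt.
from kopipasta.main import SimplePatchArgs

payload = {
    "patches": [
        {
            "reasoning": "Why this change is needed.",
            "file_path": "path/to/file.py",
            "original_text": "exact unique block to replace",
            "new_text": "replacement block",
        }
    ]
}
SimplePatchArgs.model_validate(payload)  # parses into SimplePatchItem objects
```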

kopipasta-0.23.0.dist-info/METADATA ADDED
@@ -0,0 +1,171 @@
+Metadata-Version: 2.1
+Name: kopipasta
+Version: 0.23.0
+Summary: A CLI tool to generate prompts with project structure and file contents
+Home-page: https://github.com/mkorpela/kopipasta
+Author: Mikko Korpela
+Author-email: mikko.korpela@gmail.com
+License: MIT
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: pyperclip==1.9.0
+Requires-Dist: requests==2.32.3
+Requires-Dist: Pygments==2.18.0
+Requires-Dist: google-genai==1.8.0
+Requires-Dist: prompt-toolkit==3.0.50
+
+# kopipasta
+
+[PyPI](https://pypi.python.org/pypi/kopipasta)
+[Downloads](http://pepy.tech/project/kopipasta)
+
+Streamline your interaction with LLMs for coding tasks. `kopipasta` helps you provide comprehensive context (project structure, file contents, web content) and facilitates an interactive, patch-based workflow. Go beyond TAB TAB TAB and take control of your LLM context.
+
+<img src="kopipasta.jpg" alt="kopipasta" width="300">
+
+- An LLM told me that kopi means coffee in some languages... and a Diffusion model then made this delicious soup.
+
+## Installation
+
+You can install kopipasta using pipx (recommended) or pip:
+
+```bash
+# Using pipx (recommended)
+pipx install kopipasta
+
+# Or using pip
+pip install kopipasta
+```
+
+## Usage
+
+```bash
+kopipasta [options] [files_or_directories_or_urls...]
+```
+
+**Arguments:**
+
+* `[files_or_directories_or_urls...]`: Paths to files, directories, or web URLs to include as context.
+
+**Options:**
+
+* `-t TASK`, `--task TASK`: Provide the task description directly via the command line. If omitted (and not using `-I`), an editor will open for you to write the task.
+* `-I`, `--interactive`: Start an interactive chat session with Google's Gemini model after preparing the context. Requires the `GOOGLE_API_KEY` environment variable.
+
+**Examples:**
+
+1. **Generate prompt and copy to clipboard (classic mode):**
+   ```bash
+   # Interactively select files from src/, include config.json, fetch web content,
+   # then open editor for task input. Copy final prompt to clipboard.
+   kopipasta src/ config.json https://example.com/api-docs
+
+   # Provide task directly, include specific files, copy final prompt.
+   kopipasta -t "Refactor setup.py to read deps from requirements.txt" setup.py requirements.txt
+   ```
+
+2. **Start an interactive chat session:**
+   ```bash
+   # Interactively select files, provide task directly, then start chat.
+   kopipasta -I -t "Implement the apply_simple_patch function" kopipasta/main.py
+
+   # Interactively select files, open editor for initial task, then start chat.
+   kopipasta -I kopipasta/ tests/
+   ```
+
+## Workflow
+
+`kopipasta` is designed to support the following workflow when working with LLMs (like Gemini, ChatGPT, Claude, etc.) for coding tasks:
+
+1. **Gather Context:** Run `kopipasta` with the relevant files, directories, and URLs. Interactively select exactly what content (full files, snippets, or specific code chunks/patches) should be included.
+2. **Define Task:** Provide your coding task instructions, either via the `-t` flag or through your default editor.
+3. **Interact (if using `-I`):**
+   * `kopipasta` prepares the context and your task as an initial prompt.
+   * An interactive chat session starts (currently using Google Gemini via `google-genai`).
+   * Discuss the task, clarify requirements, and ask the LLM to generate code.
+   * The initial prompt includes instructions guiding the LLM to provide incremental changes and clear explanations.
+4. **Request Patches (`-I` mode):**
+   * During the chat, use the `/patch` command to ask the LLM to provide the proposed changes in a structured format.
+   * `kopipasta` will prompt you to review the proposed patches (file, reasoning, code change).
+5. **Apply Patches (`-I` mode):**
+   * If you approve, `kopipasta` will attempt to automatically apply the patches to your local files. It validates that the original code exists and is unique before applying.
+6. **Test & Iterate:** Test the changes locally. If further changes are needed, continue the chat, request new patches, or make manual edits.
+7. **Commit:** Once satisfied, commit the changes.
+
+For non-interactive mode, `kopipasta` generates the complete prompt (context + task) and copies it to your clipboard (Steps 1 & 2). You can then paste this into your preferred LLM interface and proceed manually from Step 3 onwards.
+
+## Features
+
+* **Comprehensive Context Generation:** Creates structured prompts including:
+  * Project directory tree overview.
+  * Selected file contents.
+  * Content fetched from web URLs.
+  * Your specific task instructions.
+* **Interactive File Selection:**
+  * Guides you through selecting files and directories.
+  * Option to include full file content, a snippet (first lines/bytes), or **select specific code chunks/patches** for large or complex files.
+  * Syntax highlighting during chunk selection for supported languages.
+  * Ignores files based on common `.gitignore` patterns and detects binary files.
+  * Displays estimated character/token counts during selection.
+* **Web Content Fetching:** Includes content directly from URLs. Handles JSON/CSV content types.
+* **Editor Integration:** Opens your preferred editor (`$EDITOR`) to input task instructions (if not using `-t`).
+* **Environment Variable Handling:** Detects potential secrets from a `.env` file in included content and prompts you to mask, skip, or keep them.
+* **Clipboard Integration:** Automatically copies the generated prompt to the clipboard (non-interactive mode).
+* **Interactive Chat Mode (`-I`, `--interactive`):**
+  * Starts a chat session directly after context generation.
+  * Uses the `google-genai` library to interact with Google's Gemini models.
+  * Requires the `GOOGLE_API_KEY` environment variable to be set.
+  * Includes built-in instructions for the LLM to encourage clear, iterative responses.
+* **Patch Management (`-I` mode):**
+  * `/patch` command to request structured code changes from the LLM.
+  * Prompts user to review proposed patches (reasoning, file, original/new code snippets).
+  * **Automatic patch application** to local files upon confirmation.
+
+## Configuration
+
+* **Editor:** Set the `EDITOR` environment variable to your preferred command-line editor (e.g., `vim`, `nvim`, `nano`, `emacs`, `code --wait`).
+* **API Key (for `-I` mode):** Set the `GOOGLE_API_KEY` environment variable with your Google AI Studio API key to use the interactive chat feature.
+
+## Real life example (Non-Interactive)
+
+Context: I had a bug where `setup.py` didn't include all dependencies listed in `requirements.txt`.
+
+1. `kopipasta -t "Update setup.py to read dependencies dynamically from requirements.txt" setup.py requirements.txt`
+2. Paste the generated prompt (copied to clipboard) into my preferred LLM chat interface.
+3. Review the LLM's proposed code.
+4. Copy the code and update `setup.py` manually.
+5. Test the changes.
+
+## Real life example (Interactive)
+
+Context: I want to refactor a function in `main.py`.
+
+1. `export GOOGLE_API_KEY="YOUR_API_KEY_HERE"` (ensure key is set)
+2. `kopipasta -I -t "Refactor the handle_content function in main.py to be more modular" module/main.py`
+3. The tool gathers context, shows the file size, and confirms inclusion.
+4. An interactive chat session starts with the context and task sent to Gemini.
+5. Chat with the LLM:
+   * *User:* "Proceed"
+   * *LLM:* "Okay, I understand. My plan is to..."
+   * *User:* "Looks good."
+   * *LLM:* "Here's the first part of the refactoring..." (shows code)
+6. Use the `/patch` command:
+   * *User:* `/patch`
+   * `kopipasta` asks the LLM for structured patches.
+   * `kopipasta` displays proposed patches: "Apply 1 patch to module/main.py? (y/N):"
+7. Apply the patch:
+   * *User:* `y`
+   * `kopipasta` applies the change to `module/main.py`.
+8. Test locally. If it works, commit. If not, continue chatting, request more patches, or debug.
+

kopipasta-0.23.0.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kopipasta/main.py,sha256=dcgcuIlPEjFoRdFwyBLQFRkFzFaIyhJEBiCeeZFFRR0,52209
+kopipasta-0.23.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
+kopipasta-0.23.0.dist-info/METADATA,sha256=2knuAlALtbnaEbxpXJuC3GgnAAOCOPQfowV6GIjT5ts,8610
+kopipasta-0.23.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+kopipasta-0.23.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
+kopipasta-0.23.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
+kopipasta-0.23.0.dist-info/RECORD,,

kopipasta-0.21.0.dist-info/METADATA DELETED
@@ -1,94 +0,0 @@
-Metadata-Version: 2.1
-Name: kopipasta
-Version: 0.21.0
-Summary: A CLI tool to generate prompts with project structure and file contents
-Home-page: https://github.com/mkorpela/kopipasta
-Author: Mikko Korpela
-Author-email: mikko.korpela@gmail.com
-License: MIT
-Classifier: Development Status :: 3 - Alpha
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: 3.11
-Classifier: Programming Language :: Python :: 3.12
-Requires-Python: >=3.8
-Description-Content-Type: text/markdown
-License-File: LICENSE
-Requires-Dist: pyperclip ==1.9.0
-Requires-Dist: requests ==2.32.3
-Requires-Dist: Pygments ==2.18.0
-
-# kopipasta
-
-[PyPI](https://pypi.python.org/pypi/kopipasta)
-[Downloads](http://pepy.tech/project/kopipasta)
-
-Beyond TAB TAB TAB. Giving you full control of the context.
-
-A CLI tool for generating code task prompts with project structure and file contents, using an interactive editor-based workflow. OR a very easy way to give a large context to an LLM.
-
-<img src="kopipasta.jpg" alt="kopipasta" width="300">
-
-- An LLM told me that kopi means Coffee in some languages.. and a Diffusion model then made this delicious soup.
-
-## Installation
-
-You can install kopipasta using pipx (or pip):
-
-```bash
-pipx install kopipasta
-```
-
-## Usage
-
-To use kopipasta, run the following command in your terminal:
-
-```bash
-kopipasta [files_or_directories_or_urls]
-```
-
-Replace `[files_or_directories_or_urls]` with the paths to the files or directories you want to include in the prompt, as well as any web URLs you want to fetch content from.
-
-Example input:
-```bash
-kopipasta src/ config.json https://example.com/api-docs
-```
-
-This will guide you through an interactive process to:
-1. Select files and directories to include in the prompt
-2. Choose between full content, snippets, or patches for large files
-3. Fetch and include content from provided URLs
-4. Open an editor for you to input the specific task or code generation instructions
-5. Generate a comprehensive prompt that includes project structure, selected file contents, and your task instructions
-
-The generated prompt will be displayed in the console and automatically copied to your clipboard, ready to be used with an AI code generation tool.
-
-## Features
-
-- Generates structured prompts with project overview, file contents, web content, and task instructions
-- Interactive file selection process with options for full content, snippets, or specific patches
-- Fetches and includes content from web URLs
-- Opens your preferred editor (configurable via EDITOR environment variable) for task input
-- Detects and securely handles environment variables from a `.env` file
-- Ignores files and directories based on common .gitignore patterns
-- Allows interactive selection of files to include
-- Supports various file types with syntax highlighting in the selection process
-- Automatically copies the generated prompt to the clipboard
-
-## Real life example
-
-Context:
-I had a bug that setup.py did not have all the dependencies. I wanted to make things easier:
-
-1. `kopipasta -t "setup.py should take requirements from requirements.txt" requirements.txt setup.py`
-2. Opened the service that provides the best LLM currently.
-3. Pasted the prompt to their chat.
-4. Reviewed the first message and typed "Proceed".
-5. Got back the code that fixed the issue.
-
-

kopipasta-0.21.0.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
-kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kopipasta/main.py,sha256=69m3sVRwqYfEsqk_IukhfrC_ZIx-bjvNc1z_hZQ2zrM,39318
-kopipasta-0.21.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
-kopipasta-0.21.0.dist-info/METADATA,sha256=NNROfNVFYI-1tllgg2_O-Cfua4cNKHXKlDN8GPhYMu4,3716
-kopipasta-0.21.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-kopipasta-0.21.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
-kopipasta-0.21.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
-kopipasta-0.21.0.dist-info/RECORD,,

{kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/LICENSE (file without changes)
{kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/entry_points.txt (file without changes)
{kopipasta-0.21.0.dist-info → kopipasta-0.23.0.dist-info}/top_level.txt (file without changes)