kopipasta-0.21.0-py3-none-any.whl → kopipasta-0.22.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of kopipasta might be problematic.

kopipasta/main.py CHANGED
@@ -4,6 +4,7 @@ import io
  import json
  import os
  import argparse
+ import sys
  import re
  import subprocess
  import tempfile
@@ -17,8 +18,102 @@ import pygments.util

  import requests

+ from pydantic import BaseModel, Field
+ import traceback
+ from google import genai
+ from google.genai.types import GenerateContentConfig
+
  FileTuple = Tuple[str, bool, Optional[List[str]], str]

+ class SimplePatchItem(BaseModel):
+     """A single change described by reasoning, file path, original text, and new text."""
+     reasoning: str = Field(..., description="Explanation for why this specific change is proposed.")
+     file_path: str = Field(..., description="Relative path to the file to be modified.")
+     original_text: str = Field(..., description="The exact, unique block of text to be replaced.")
+     new_text: str = Field(..., description="The text to replace the original_text with.")
+
+ class SimplePatchArgs(BaseModel):
+     """A list of proposed code changes."""
+     patches: List[SimplePatchItem] = Field(..., description="A list of patches to apply.")
+
+ def apply_simple_patch(patch_item: SimplePatchItem) -> bool:
+     """
+     Applies a single patch defined by replacing original_text with new_text in file_path.
+
+     Validates that the file exists and the original_text is unique.
+     """
+     print(f"\nApplying patch to: {patch_item.file_path}")
+     print(f"Reasoning: {patch_item.reasoning}")
+     print("-" * 20)
+
+     file_path = patch_item.file_path
+     original_text = patch_item.original_text
+     new_text = patch_item.new_text
+
+     # --- Validation ---
+     if not os.path.exists(file_path):
+         print(f"❌ Error: File not found: {file_path}")
+         print("-" * 20)
+         return False
+
+     try:
+         # Read the file content, attempting to preserve line endings implicitly
+         with open(file_path, 'r', encoding='utf-8', newline='') as f:
+             content = f.read()
+
+         # Check for unique occurrence of original_text
+         occurrences = content.count(original_text)
+         if occurrences == 0:
+             print(f"❌ Error: Original text not found in {file_path}.")
+             # Optional: print a snippet of the expected text for debugging
+             # print(f"   Expected original text snippet: '{original_text[:100]}...'")
+             print("-" * 20)
+             return False
+         elif occurrences > 1:
+             print(f"❌ Error: Original text is not unique in {file_path} (found {occurrences} times).")
+             print(f"   Patch cannot be applied automatically due to ambiguity.")
+             # print(f"   Ambiguous original text snippet: '{original_text[:100]}...'")
+             print("-" * 20)
+             return False
+
+         # --- Application ---
+         # Replace the single unique occurrence
+         new_content = content.replace(original_text, new_text, 1)
+
+         # Heuristic to check if a newline might be needed at the end
+         original_ends_with_newline = content.endswith(('\n', '\r'))
+         new_ends_with_newline = new_content.endswith(('\n', '\r'))
+
+         if original_ends_with_newline and not new_ends_with_newline and new_content:
+             # Try to determine the original newline type
+             if content.endswith('\r\n'):
+                 new_content += '\r\n'
+             else: # Assume '\n' otherwise
+                 new_content += '\n'
+         elif not original_ends_with_newline and new_ends_with_newline:
+             # If original didn't end with newline, remove the one added by replacement
+             # This is less common but possible if new_text ends with \n and original_text didn't
+             new_content = new_content.rstrip('\r\n')
+
+
+         # Write the modified content back
+         with open(file_path, 'w', encoding='utf-8', newline='') as f:
+             f.write(new_content)
+
+         print(f"✅ Patch applied successfully to {file_path}.")
+         print("-" * 20)
+         return True
+
+     except IOError as e:
+         print(f"❌ Error reading or writing file {file_path}: {e}")
+         print("-" * 20)
+         return False
+     except Exception as e:
+         print(f"❌ An unexpected error occurred during patch application: {e}")
+         traceback.print_exc()
+         print("-" * 20)
+         return False
+
  def get_colored_code(file_path, code):
      try:
          lexer = get_lexer_for_filename(file_path)
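For illustration only (not part of the diff): given the `SimplePatchItem` model and `apply_simple_patch` function added above, applying one patch programmatically looks like the minimal sketch below. The file path and replacement texts are hypothetical, and the import assumes the `kopipasta/main.py` module layout shown in the RECORD.

```python
from kopipasta.main import SimplePatchItem, apply_simple_patch

# Hypothetical patch: rename a counter variable in an example file.
patch = SimplePatchItem(
    reasoning="Rename 'x' to 'count' for readability.",  # shown to the user before applying
    file_path="example/app.py",                          # hypothetical relative path
    original_text="x = 0",                               # must occur exactly once in the file
    new_text="count = 0",
)

# Returns True on success; returns False (with a printed error) if the
# file is missing or original_text is absent or occurs more than once.
ok = apply_simple_patch(patch)
```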
@@ -170,7 +265,7 @@ def split_python_file(file_content):
          code = get_code(prev_end, chunk_start)
          if code.strip():
              chunks.append((code, prev_end, chunk_start))
-
+
      # Add the merged chunk
      code = get_code(chunk_start, chunk_end)
      chunks.append((code, chunk_start, chunk_end))
@@ -574,14 +669,14 @@ def fetch_web_content(url: str) -> Tuple[Optional[FileTuple], Optional[str], Opt
          content_type = response.headers.get('content-type', '').lower()
          full_content = response.text
          snippet = full_content[:10000] + "..." if len(full_content) > 10000 else full_content
-
+
          if 'json' in content_type:
              content_type = 'json'
          elif 'csv' in content_type:
              content_type = 'csv'
          else:
              content_type = 'text'
-
+
          return (url, False, None, content_type), full_content, snippet
      except requests.RequestException as e:
          print(f"Error fetching content from {url}: {e}")
@@ -673,14 +768,14 @@ def handle_env_variables(content, env_vars):
      print("Detected environment variables:")
      for key, value in detected_vars:
          print(f"- {key}={value}")
-
+
      for key, value in detected_vars:
          while True:
              choice = input(f"How would you like to handle {key}? (m)ask / (s)kip / (k)eep: ").lower()
              if choice in ['m', 's', 'k']:
                  break
              print("Invalid choice. Please enter 'm', 's', or 'k'.")
-
+
          if choice == 'm':
              content = content.replace(value, '*' * len(value))
          elif choice == 's':
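For clarity (editor's sketch): the (m)ask option above replaces each detected secret value with asterisks of equal length. The `.env` value here is hypothetical:

```python
# Hypothetical .env value detected in included content.
content = "API_KEY = 'abc123'"
value = "abc123"

# Masking, as handle_env_variables does for choice 'm'.
masked = content.replace(value, '*' * len(value))
print(masked)  # -> API_KEY = '******'
```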
@@ -712,7 +807,7 @@ def generate_prompt_template(files_to_include: List[FileTuple], ignore_patterns:
          file_content = read_file_contents(file)
          file_content = handle_env_variables(file_content, env_vars)
          prompt += f"### {relative_path}\n\n```{language}\n{file_content}\n```\n\n"
-
+
      if web_contents:
          prompt += "## Web Content\n\n"
          for url, (file_tuple, content) in web_contents.items():
@@ -720,7 +815,7 @@ def generate_prompt_template(files_to_include: List[FileTuple], ignore_patterns:
              content = handle_env_variables(content, env_vars)
              language = content_type if content_type in ['json', 'csv'] else ''
              prompt += f"### {url}{' (snippet)' if is_snippet else ''}\n\n```{language}\n{content}\n```\n\n"
-
+
      prompt += "## Task Instructions\n\n"
      cursor_position = len(prompt)
      prompt += "\n\n"
@@ -779,10 +874,194 @@ def open_editor_for_input(template: str, cursor_position: int) -> str:
      finally:
          os.unlink(temp_file_path)

+ def start_chat_session(initial_prompt: str):
+     """Starts an interactive chat session with the Gemini API using google-genai."""
+     if not genai:
+         # Error message already printed during import if it failed
+         sys.exit(1)
+
+     # The google-genai library automatically uses GOOGLE_API_KEY env var if set
+     # We still check if it's set to provide a clearer error message upfront
+     if not os.environ.get('GOOGLE_API_KEY'):
+         print("Error: GOOGLE_API_KEY environment variable not set.")
+         print("Please set the GOOGLE_API_KEY environment variable with your API key.")
+         sys.exit(1)
+
+     try:
+         # Create the client - it will use the env var automatically
+         client = genai.Client()
+         print("Google GenAI Client created (using GOOGLE_API_KEY).")
+         # You could add a check here like listing models to verify the key early
+         # print("Available models:", [m.name for m in client.models.list()])
+     except Exception as e:
+         print(f"Error creating Google GenAI client: {e}")
+         print("Please ensure your GOOGLE_API_KEY is valid and has permissions.")
+         sys.exit(1)
+
+     model_name = 'gemini-2.5-pro-exp-03-25'
+     print(f"Using model: {model_name}")
+
+     try:
+         # Create a chat session using the client
+         chat = client.chats.create(model=model_name)
+         # Note: History is managed by the chat object itself
+
+         print("\n--- Starting Interactive Chat with Gemini ---")
+         print("Type /q to quit, /help or /? for help, /patch to request a diff patch.")
+
+         # Send the initial prompt using send_message_stream
+         print("\n🤖 Gemini:")
+         full_response_text = ""
+         # Use send_message_stream for streaming responses
+         response_stream = chat.send_message_stream(initial_prompt)
+         for chunk in response_stream:
+             print(chunk.text, end="", flush=True)
+             full_response_text += chunk.text
+         print("\n" + "-"*20)
+
+         while True:
+             try:
+                 user_input = input("👤 You: ")
+             except EOFError: # Handle Ctrl+D
+                 print("\nExiting...")
+                 break
+             except KeyboardInterrupt: # Handle Ctrl+C
+                 print("\nExiting...")
+                 break
+
+             if user_input.lower() == '/q':
+                 break
+             elif user_input.strip() == '/patch':
+                 print("\n🤖 Gemini: Thinking... (requesting code changes)")
+                 # Prompt instructing the model to use the new JSON format
+                 patch_request_prompt = (
+                     "Based on our conversation, generate the necessary code changes "
+                     "to fulfill my request. Provide the changes as a JSON list, where each item "
+                     "is an object with the following keys:\n"
+                     "- 'reasoning': Explain why this specific change is needed.\n"
+                     "- 'file_path': The relative path to the file to modify.\n"
+                     "- 'original_text': The exact, unique block of text to replace.\n"
+                     "- 'new_text': The text to replace original_text with.\n"
+                     "Ensure 'original_text' is unique within the specified 'file_path'. "
+                     "Format the response strictly as: { \"patches\": [ { patch_item_1 }, { patch_item_2 }, ... ] }"
+                 )
+
+                 try:
+                     # Request the response using the new schema
+                     response = chat.send_message(
+                         patch_request_prompt,
+                         config=GenerateContentConfig(
+                             response_schema=SimplePatchArgs.model_json_schema(),
+                             response_mime_type='application/json'
+                         )
+                     )
+
+                     print("🤖 Gemini: Received potential patches.")
+                     try:
+                         # Validate and parse args using the Pydantic model
+                         # Explicitly validate the dictionary returned by response.parsed
+                         if isinstance(response.parsed, dict):
+                             patch_args = SimplePatchArgs.model_validate(response.parsed)
+                         else:
+                             # Handle unexpected type if response.parsed isn't a dict
+                             print(f"❌ Error: Expected a dictionary for patches, but got type {type(response.parsed)}")
+                             print(f"   Content: {response.parsed}")
+                             continue # Skip further processing for this response
+
+                         if not patch_args or not patch_args.patches:
+                             print("🤖 Gemini: No patches were proposed in the response.")
+                             print("-" * 20)
+                             continue
+
+                         print("\nProposed Patches:")
+                         print("=" * 30)
+                         for i, patch_item in enumerate(patch_args.patches):
+                             print(f"Patch {i+1}/{len(patch_args.patches)}:")
+                             print(f"  File: {patch_item.file_path}")
+                             print(f"  Reasoning: {patch_item.reasoning}")
+                             # Optionally show snippets of original/new text for review
+                             print(f"  Original (snippet): '{patch_item.original_text[:80].strip()}...'")
+                             print(f"  New (snippet): '{patch_item.new_text[:80].strip()}...'")
+                             print("-" * 20)
+
+                         confirm = input(f"Apply these {len(patch_args.patches)} patches? (y/N): ").lower()
+                         if confirm == 'y':
+                             applied_count = 0
+                             failed_count = 0
+                             for patch_item in patch_args.patches:
+                                 # Call the new apply function for each patch
+                                 success = apply_simple_patch(patch_item)
+                                 if success:
+                                     applied_count += 1
+                                 else:
+                                     failed_count += 1
+
+                             print("\nPatch Application Summary:")
+                             if applied_count > 0:
+                                 print(f"✅ Successfully applied {applied_count} patches.")
+                             if failed_count > 0:
+                                 print(f"❌ Failed to apply {failed_count} patches.")
+                             if applied_count == 0 and failed_count == 0: # Should not happen if list wasn't empty
+                                 print("⚪ No patches were applied.")
+                             print("=" * 30)
+                         else:
+                             print("🤖 Gemini: Patches not applied by user.")
+                             print("-" * 20)
+
+                     except Exception as e: # Catch Pydantic validation errors or other issues
+                         print(f"❌ Error processing patch response: {e}")
+                         # Attempt to show the raw response text if parsing failed
+                         raw_text = ""
+                         try:
+                             if response.parts:
+                                 raw_text = "".join(part.text for part in response.parts if hasattr(part, 'text'))
+                             elif hasattr(response, 'text'):
+                                 raw_text = response.text
+                         except Exception:
+                             pass # Ignore errors getting raw text
+                         if raw_text:
+                             print(f"   Received response text:\n{raw_text}")
+                         else:
+                             print(f"   Received response content: {response}") # Fallback representation
+
+                 except Exception as e:
+                     print(f"\n❌ An error occurred while requesting patches from Gemini: {e}")
+                     print("   Please check your connection, API key, and model permissions/capabilities.")
+                     print("-" * 20)
+
+                 continue # Go to next loop iteration after handling /patch
+             elif user_input.strip() in ['/help', '/?']:
+                 print("🤖 Gemini: Available commands:")
+                 print("  /q          - Quit the chat session.")
+                 print("  /patch      - Request a diff patch (not fully implemented yet).")
+                 print("  /help or /? - Show this help message.")
+                 print("-" * 20)
+                 continue
+             elif not user_input.strip(): # Ignore empty input
+                 continue
+
+             print("\n🤖 Gemini:")
+             full_response_text = ""
+             try:
+                 # Use send_message_stream for subsequent messages
+                 response_stream = chat.send_message_stream(user_input)
+                 for chunk in response_stream:
+                     print(chunk.text, end="", flush=True)
+                     full_response_text += chunk.text
+                 print("\n" + "-"*20)
+             except Exception as e:
+                 print(f"\nAn unexpected error occurred: {e}")
+                 print("Try again or type 'exit'.")
+
+     except Exception as e:
+         # Catch other potential errors
+         print(f"\nAn error occurred setting up the chat session: {e}")
+
  def main():
      parser = argparse.ArgumentParser(description="Generate a prompt with project structure, file contents, and web content.")
      parser.add_argument('inputs', nargs='+', help='Files, directories, or URLs to include in the prompt')
      parser.add_argument('-t', '--task', help='Task description for the AI prompt')
+     parser.add_argument('-I', '--interactive', action='store_true', help='Start an interactive chat session after generating the prompt.')
      args = parser.parse_args()

      ignore_patterns = read_gitignore()
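A note on the `/patch` flow above (an editor's sketch, not package code): the model is asked for JSON matching `SimplePatchArgs.model_json_schema()`, and `response.parsed` is then validated with Pydantic. A payload that passes that validation has the following shape; the values here are hypothetical.

```python
from kopipasta.main import SimplePatchArgs

# The shape requested by patch_request_prompt: {"patches": [...]}
payload = {
    "patches": [
        {
            "reasoning": "Guard against division by zero.",    # hypothetical
            "file_path": "example/app.py",                      # hypothetical
            "original_text": "ratio = hits / total",            # must be unique in the file
            "new_text": "ratio = hits / total if total else 0.0",
        }
    ]
}

# Raises pydantic.ValidationError if a key is missing or mistyped.
args = SimplePatchArgs.model_validate(payload)
assert len(args.patches) == 1
```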
@@ -801,15 +1080,15 @@ def main():
          is_large = len(full_content) > 10000
          if is_large:
              print(f"\nContent from {input_path} is large. Here's a snippet:\n")
-             print(snippet)
+             print(get_colored_code(input_path, snippet))
              print("\n" + "-"*40 + "\n")
-
+
              while True:
                  choice = input("Use (f)ull content or (s)nippet? ").lower()
                  if choice in ['f', 's']:
                      break
                  print("Invalid choice. Please enter 'f' or 's'.")
-
+
              if choice == 'f':
                  content = full_content
                  is_snippet = False
@@ -822,7 +1101,7 @@
              content = full_content
              is_snippet = False
              print(f"Content from {input_path} is not large. Using full content.")
-
+
          file_tuple = (file_tuple[0], is_snippet, file_tuple[2], file_tuple[3])
          web_contents[input_path] = (file_tuple, content)
          current_char_count += len(content)
@@ -901,38 +1180,62 @@ def main():
      print_char_count(current_char_count) # Print final count before prompt generation
      print(f"Summary: Added {len(files_to_include)} files and {len(web_contents)} web sources.")

+     added_files_count = len(files_to_include)
+     added_dirs_count = len(processed_dirs) # Count unique processed directories
+     added_web_count = len(web_contents)
+     print(f"Summary: Added {added_files_count} files/patches from {added_dirs_count} directories and {added_web_count} web sources.")
+
      prompt_template, cursor_position = generate_prompt_template(files_to_include, ignore_patterns, web_contents, env_vars)

-     if args.task:
-         task_description = args.task
-         # Insert task description before "## Task Instructions"
-         task_marker = "## Task Instructions\n\n"
-         insertion_point = prompt_template.find(task_marker)
-         if insertion_point != -1:
-             final_prompt = prompt_template[:insertion_point + len(task_marker)] + task_description + "\n\n" + prompt_template[insertion_point + len(task_marker):]
-         else: # Fallback if marker not found
-             final_prompt = prompt_template[:cursor_position] + task_description + prompt_template[cursor_position:]
-         print("\nUsing task description from -t argument.")
+     # Logic branching for interactive mode vs. clipboard mode
+     if args.interactive:
+         print("\nPreparing initial prompt for editing...")
+         # Determine the initial content for the editor
+         if args.task:
+             # Pre-populate the task section if --task was provided
+             editor_initial_content = prompt_template[:cursor_position] + args.task + prompt_template[cursor_position:]
+             print("Pre-populating editor with task provided via --task argument.")
+         else:
+             # Use the template as is (user will add task in the editor)
+             editor_initial_content = prompt_template
+             print("Opening editor for you to add the task instructions.")
+
+         # Always open the editor in interactive mode
+         initial_chat_prompt = open_editor_for_input(editor_initial_content, cursor_position)
+         print("Editor closed. Starting interactive chat session...")
+         start_chat_session(initial_chat_prompt) # Start the chat with the edited prompt
      else:
-         print("\nOpening editor for task instructions...")
-         final_prompt = open_editor_for_input(prompt_template, cursor_position)
-
-     print("\n\nGenerated prompt:")
-     print("-" * 80)
-     print(final_prompt)
-     print("-" * 80)
-
-     # Copy the prompt to clipboard
-     try:
-         pyperclip.copy(final_prompt)
-         separator = "\n" + "=" * 40 + "\n☕🍝 Kopipasta Complete! 🍝☕\n" + "=" * 40 + "\n"
-         print(separator)
-         final_char_count = len(final_prompt)
-         final_token_estimate = final_char_count // 4
-         print(f"Prompt has been copied to clipboard. Final size: {final_char_count} characters (~ {final_token_estimate} tokens)")
-     except pyperclip.PyperclipException as e:
-         print(f"\nWarning: Failed to copy to clipboard: {e}")
-         print("You can manually copy the prompt above.")
+         # Original non-interactive behavior
+         if args.task:
+             task_description = args.task
+             # Insert task description before "## Task Instructions"
+             task_marker = "## Task Instructions\n\n"
+             insertion_point = prompt_template.find(task_marker)
+             if insertion_point != -1:
+                 final_prompt = prompt_template[:insertion_point + len(task_marker)] + task_description + "\n\n" + prompt_template[insertion_point + len(task_marker):]
+             else: # Fallback if marker not found
+                 final_prompt = prompt_template[:cursor_position] + task_description + prompt_template[cursor_position:]
+             print("\nUsing task description from -t argument.")
+         else:
+             print("\nOpening editor for task instructions...")
+             final_prompt = open_editor_for_input(prompt_template, cursor_position)
+
+         print("\n\nGenerated prompt:")
+         print("-" * 80)
+         print(final_prompt)
+         print("-" * 80)
+
+         # Copy the prompt to clipboard
+         try:
+             pyperclip.copy(final_prompt)
+             separator = "\n" + "=" * 40 + "\n☕🍝 Kopipasta Complete! 🍝☕\n" + "=" * 40 + "\n"
+             print(separator)
+             final_char_count = len(final_prompt)
+             final_token_estimate = final_char_count // 4
+             print(f"Prompt has been copied to clipboard. Final size: {final_char_count} characters (~ {final_token_estimate} tokens)")
+         except pyperclip.PyperclipException as e:
+             print(f"\nWarning: Failed to copy to clipboard: {e}")
+             print("You can manually copy the prompt above.")

  if __name__ == "__main__":
      main()
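As a toy illustration of the task-insertion and size-estimate logic in `main()` above (template and task text hypothetical):

```python
prompt_template = "## Project Structure\n...\n## Task Instructions\n\n"
task_marker = "## Task Instructions\n\n"

# Insert the -t task text right after the marker, as main() does above.
insertion_point = prompt_template.find(task_marker)
final_prompt = (
    prompt_template[:insertion_point + len(task_marker)]
    + "Refactor setup.py to read deps from requirements.txt"
    + "\n\n"
    + prompt_template[insertion_point + len(task_marker):]
)

# The reported token count is a simple chars-divided-by-4 heuristic.
print(len(final_prompt) // 4)
```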
kopipasta-0.22.0.dist-info/METADATA ADDED
@@ -0,0 +1,170 @@
+ Metadata-Version: 2.1
+ Name: kopipasta
+ Version: 0.22.0
+ Summary: A CLI tool to generate prompts with project structure and file contents
+ Home-page: https://github.com/mkorpela/kopipasta
+ Author: Mikko Korpela
+ Author-email: mikko.korpela@gmail.com
+ License: MIT
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: pyperclip ==1.9.0
+ Requires-Dist: requests ==2.32.3
+ Requires-Dist: Pygments ==2.18.0
+ Requires-Dist: google-genai ==1.8.0
+
+ # kopipasta
+
+ [![Version](https://img.shields.io/pypi/v/kopipasta.svg)](https://pypi.python.org/pypi/kopipasta)
+ [![Downloads](http://pepy.tech/badge/kopipasta)](http://pepy.tech/project/kopipasta)
+
+ Streamline your interaction with LLMs for coding tasks. `kopipasta` helps you provide comprehensive context (project structure, file contents, web content) and facilitates an interactive, patch-based workflow. Go beyond TAB TAB TAB and take control of your LLM context.
+
+ <img src="kopipasta.jpg" alt="kopipasta" width="300">
+
+ - An LLM told me that kopi means Coffee in some languages... and a Diffusion model then made this delicious soup.
+
+ ## Installation
+
+ You can install kopipasta using pipx (recommended) or pip:
+
+ ```bash
+ # Using pipx (recommended)
+ pipx install kopipasta
+
+ # Or using pip
+ pip install kopipasta
+ ```
+
+ ## Usage
+
+ ```bash
+ kopipasta [options] [files_or_directories_or_urls...]
+ ```
+
+ **Arguments:**
+
+ * `[files_or_directories_or_urls...]`: Paths to files, directories, or web URLs to include as context.
+
+ **Options:**
+
+ * `-t TASK`, `--task TASK`: Provide the task description directly via the command line. If omitted (and not using `-I`), an editor will open for you to write the task.
+ * `-I`, `--interactive`: Start an interactive chat session with Google's Gemini model after preparing the context. Requires the `GOOGLE_API_KEY` environment variable.
+
+ **Examples:**
+
+ 1. **Generate prompt and copy to clipboard (classic mode):**
+    ```bash
+    # Interactively select files from src/, include config.json, fetch web content,
+    # then open editor for task input. Copy final prompt to clipboard.
+    kopipasta src/ config.json https://example.com/api-docs
+
+    # Provide task directly, include specific files, copy final prompt.
+    kopipasta -t "Refactor setup.py to read deps from requirements.txt" setup.py requirements.txt
+    ```
+
+ 2. **Start an interactive chat session:**
+    ```bash
+    # Interactively select files, provide task directly, then start chat.
+    kopipasta -I -t "Implement the apply_simple_patch function" kopipasta/main.py
+
+    # Interactively select files, open editor for initial task, then start chat.
+    kopipasta -I kopipasta/ tests/
+    ```
+
+ ## Workflow
+
+ `kopipasta` is designed to support the following workflow when working with LLMs (like Gemini, ChatGPT, Claude, etc.) for coding tasks:
+
+ 1. **Gather Context:** Run `kopipasta` with the relevant files, directories, and URLs. Interactively select exactly what content (full files, snippets, or specific code chunks/patches) should be included.
+ 2. **Define Task:** Provide your coding task instructions, either via the `-t` flag or through your default editor.
+ 3. **Interact (if using `-I`):**
+    * `kopipasta` prepares the context and your task as an initial prompt.
+    * An interactive chat session starts (currently using Google Gemini via `google-genai`).
+    * Discuss the task, clarify requirements, and ask the LLM to generate code.
+    * The initial prompt includes instructions guiding the LLM to provide incremental changes and clear explanations.
+ 4. **Request Patches (`-I` mode):**
+    * During the chat, use the `/patch` command to ask the LLM to provide the proposed changes in a structured format.
+    * `kopipasta` will prompt you to review the proposed patches (file, reasoning, code change).
+ 5. **Apply Patches (`-I` mode):**
+    * If you approve, `kopipasta` will attempt to automatically apply the patches to your local files. It validates that the original code exists and is unique before applying.
+ 6. **Test & Iterate:** Test the changes locally. If further changes are needed, continue the chat, request new patches, or make manual edits.
+ 7. **Commit:** Once satisfied, commit the changes.
+
+ For non-interactive mode, `kopipasta` generates the complete prompt (context + task) and copies it to your clipboard (Steps 1 and 2). You can then paste this into your preferred LLM interface and proceed manually from Step 3 onwards.
+
+ ## Features
+
+ * **Comprehensive Context Generation:** Creates structured prompts including:
+   * Project directory tree overview.
+   * Selected file contents.
+   * Content fetched from web URLs.
+   * Your specific task instructions.
+ * **Interactive File Selection:**
+   * Guides you through selecting files and directories.
+   * Option to include full file content, a snippet (first lines/bytes), or **select specific code chunks/patches** for large or complex files.
+   * Syntax highlighting during chunk selection for supported languages.
+   * Ignores files based on common `.gitignore` patterns and detects binary files.
+   * Displays estimated character/token counts during selection.
+ * **Web Content Fetching:** Includes content directly from URLs. Handles JSON/CSV content types.
+ * **Editor Integration:** Opens your preferred editor (`$EDITOR`) to input task instructions (if not using `-t`).
+ * **Environment Variable Handling:** Detects potential secrets from a `.env` file in included content and prompts you to mask, skip, or keep them.
+ * **Clipboard Integration:** Automatically copies the generated prompt to the clipboard (non-interactive mode).
+ * **Interactive Chat Mode (`-I`, `--interactive`):**
+   * Starts a chat session directly after context generation.
+   * Uses the `google-genai` library to interact with Google's Gemini models.
+   * Requires the `GOOGLE_API_KEY` environment variable to be set.
+   * Includes built-in instructions for the LLM to encourage clear, iterative responses.
+ * **Patch Management (`-I` mode):**
+   * `/patch` command to request structured code changes from the LLM.
+   * Prompts user to review proposed patches (reasoning, file, original/new code snippets).
+   * **Automatic patch application** to local files upon confirmation.
+
+ ## Configuration
+
+ * **Editor:** Set the `EDITOR` environment variable to your preferred command-line editor (e.g., `vim`, `nvim`, `nano`, `emacs`, `code --wait`).
+ * **API Key (for `-I` mode):** Set the `GOOGLE_API_KEY` environment variable with your Google AI Studio API key to use the interactive chat feature.
+
+ ## Real life example (Non-Interactive)
+
+ Context: I had a bug where `setup.py` didn't include all dependencies listed in `requirements.txt`.
+
+ 1. `kopipasta -t "Update setup.py to read dependencies dynamically from requirements.txt" setup.py requirements.txt`
+ 2. Paste the generated prompt (copied to clipboard) into my preferred LLM chat interface.
+ 3. Review the LLM's proposed code.
+ 4. Copy the code and update `setup.py` manually.
+ 5. Test the changes.
+
+ ## Real life example (Interactive)
+
+ Context: I want to refactor a function in `main.py`.
+
+ 1. `export GOOGLE_API_KEY="YOUR_API_KEY_HERE"` (ensure key is set)
+ 2. `kopipasta -I -t "Refactor the handle_content function in main.py to be more modular" module/main.py`
+ 3. The tool gathers context, shows the file size, and confirms inclusion.
+ 4. An interactive chat session starts with the context and task sent to Gemini.
+ 5. Chat with the LLM:
+    * *User:* "Proceed"
+    * *LLM:* "Okay, I understand. My plan is to..."
+    * *User:* "Looks good."
+    * *LLM:* "Here's the first part of the refactoring..." (shows code)
+ 6. Use the `/patch` command:
+    * *User:* `/patch`
+    * `kopipasta` asks the LLM for structured patches.
+    * `kopipasta` displays proposed patches: "Apply 1 patch to module/main.py? (y/N):"
+ 7. Apply the patch:
+    * *User:* `y`
+    * `kopipasta` applies the change to `module/main.py`.
+ 8. Test locally. If it works, commit. If not, continue chatting, request more patches, or debug.
+
kopipasta-0.22.0.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
+ kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ kopipasta/main.py,sha256=RSoKARL9jnyikiXB1DvZz_lpWLhhQUovQuT6YUj7RGY,54292
+ kopipasta-0.22.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
+ kopipasta-0.22.0.dist-info/METADATA,sha256=V2XrhJR8QIuCIa4iCDFuo5gyW1iJgihWK_Y6H3mVjMI,8576
+ kopipasta-0.22.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ kopipasta-0.22.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
+ kopipasta-0.22.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
+ kopipasta-0.22.0.dist-info/RECORD,,
kopipasta-0.21.0.dist-info/METADATA DELETED
@@ -1,94 +0,0 @@
- Metadata-Version: 2.1
- Name: kopipasta
- Version: 0.21.0
- Summary: A CLI tool to generate prompts with project structure and file contents
- Home-page: https://github.com/mkorpela/kopipasta
- Author: Mikko Korpela
- Author-email: mikko.korpela@gmail.com
- License: MIT
- Classifier: Development Status :: 3 - Alpha
- Classifier: Intended Audience :: Developers
- Classifier: License :: OSI Approved :: MIT License
- Classifier: Operating System :: OS Independent
- Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
- Classifier: Programming Language :: Python :: 3.9
- Classifier: Programming Language :: Python :: 3.10
- Classifier: Programming Language :: Python :: 3.11
- Classifier: Programming Language :: Python :: 3.12
- Requires-Python: >=3.8
- Description-Content-Type: text/markdown
- License-File: LICENSE
- Requires-Dist: pyperclip ==1.9.0
- Requires-Dist: requests ==2.32.3
- Requires-Dist: Pygments ==2.18.0
-
- # kopipasta
-
- [![Version](https://img.shields.io/pypi/v/kopipasta.svg)](https://pypi.python.org/pypi/kopipasta)
- [![Downloads](http://pepy.tech/badge/kopipasta)](http://pepy.tech/project/kopipasta)
-
- Beyond TAB TAB TAB. Giving you full control of the context.
-
- A CLI tool for generating code task prompts with project structure and file contents, using an interactive editor-based workflow. OR a very easy way to give a large context to an LLM.
-
- <img src="kopipasta.jpg" alt="kopipasta" width="300">
-
- - An LLM told me that kopi means Coffee in some languages.. and a Diffusion model then made this delicious soup.
-
- ## Installation
-
- You can install kopipasta using pipx (or pip):
-
- ```bash
- pipx install kopipasta
- ```
-
- ## Usage
-
- To use kopipasta, run the following command in your terminal:
-
- ```bash
- kopipasta [files_or_directories_or_urls]
- ```
-
- Replace `[files_or_directories_or_urls]` with the paths to the files or directories you want to include in the prompt, as well as any web URLs you want to fetch content from.
-
- Example input:
- ```bash
- kopipasta src/ config.json https://example.com/api-docs
- ```
-
- This will guide you through an interactive process to:
- 1. Select files and directories to include in the prompt
- 2. Choose between full content, snippets, or patches for large files
- 3. Fetch and include content from provided URLs
- 4. Open an editor for you to input the specific task or code generation instructions
- 5. Generate a comprehensive prompt that includes project structure, selected file contents, and your task instructions
-
- The generated prompt will be displayed in the console and automatically copied to your clipboard, ready to be used with an AI code generation tool.
-
- ## Features
-
- - Generates structured prompts with project overview, file contents, web content, and task instructions
- - Interactive file selection process with options for full content, snippets, or specific patches
- - Fetches and includes content from web URLs
- - Opens your preferred editor (configurable via EDITOR environment variable) for task input
- - Detects and securely handles environment variables from a `.env` file
- - Ignores files and directories based on common .gitignore patterns
- - Allows interactive selection of files to include
- - Supports various file types with syntax highlighting in the selection process
- - Automatically copies the generated prompt to the clipboard
-
- ## Real life example
-
- Context:
- I had a bug that setup.py did not have all the dependencies. I wanted to make things easier:
-
- 1. `kopipasta -t "setup.py should take requirements from requirements.txt" requirements.txt setup.py`
- 2. Opened the service that provides the best LLM currently.
- 3. Pasted the prompt to their chat.
- 4. Reviewed the first message and typed "Proceed".
- 5. Got back the code that fixed the issue.
-
-
kopipasta-0.21.0.dist-info/RECORD DELETED
@@ -1,8 +0,0 @@
- kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- kopipasta/main.py,sha256=69m3sVRwqYfEsqk_IukhfrC_ZIx-bjvNc1z_hZQ2zrM,39318
- kopipasta-0.21.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
- kopipasta-0.21.0.dist-info/METADATA,sha256=NNROfNVFYI-1tllgg2_O-Cfua4cNKHXKlDN8GPhYMu4,3716
- kopipasta-0.21.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- kopipasta-0.21.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
- kopipasta-0.21.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
- kopipasta-0.21.0.dist-info/RECORD,,