kopipasta-0.23.0-py3-none-any.whl → kopipasta-0.25.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of kopipasta might be problematic.

kopipasta/main.py CHANGED

@@ -841,30 +841,34 @@ def start_chat_session(initial_prompt: str):
         sys.exit(1)

     model_name = 'gemini-2.5-pro-exp-03-25'
+    config = GenerateContentConfig(temperature=0.0)
     print(f"Using model: {model_name}")

     try:
         # Create a chat session using the client
-        chat = client.chats.create(model=model_name)
+        chat = client.chats.create(model=model_name, config=config)
         # Note: History is managed by the chat object itself

         print("\n--- Starting Interactive Chat with Gemini ---")
-        print("Type /q to quit, /help or /? for help, /patch to request a diff patch.")
+        print("Type /q to quit, /help or /? for help, /review to make clear summary, /patch to request a diff patch.")

         # Send the initial prompt using send_message_stream
         print("\n🤖 Gemini:")
         full_response_text = ""
         # Use send_message_stream for streaming responses
-        response_stream = chat.send_message_stream(initial_prompt)
+        response_stream = chat.send_message_stream(initial_prompt, config=config)
         for chunk in response_stream:
             print(chunk.text, end="", flush=True)
             full_response_text += chunk.text
         print("\n" + "-"*20)

         while True:
+            is_patch_request = False
             try:
-                # Replace standard input with prompt_toolkit for multiline support
-                user_input = prompt("👤 You (Submit with Esc+Enter or Alt+Enter): ", multiline=True)
+                # Print the header on a separate line
+                print("👤 You (Submit with Esc+Enter):")
+                # Get input using prompt_toolkit with a minimal indicator
+                user_input = prompt(">> ", multiline=True)
                 # prompt_toolkit raises EOFError on Ctrl+D, so this handler remains correct.
             except EOFError:
                 print("\nExiting...")
@@ -875,19 +879,35 @@ def start_chat_session(initial_prompt: str):

             if user_input.lower() == '/q':
                 break
-            elif user_input.strip() == '/patch':
-                print("\n🤖 Gemini: Thinking... (requesting code changes)")
-                # Prompt instructing the model to use the new JSON format
+            elif user_input.endswith('/patch'):
+                is_patch_request = True
+                # Extract message before /patch
+                user_message = user_input[:-len('/patch')].strip()
+                print(f"\n🛠️ Requesting patches... (Context: '{user_message}' if provided)")
+            elif user_input.lower() == '/review':
+                user_message = user_input = "Review and reflect on the solution. Summarize and write a minimal, complete set of changes needed for the solution. Do not use + and - style diff. Instead use comments to point where to place the code. Make it easy to copy and paste the solution."
+            elif not user_input:
+                continue  # Ignore empty input
+            else:
+                user_message = user_input  # Regular message
+
+
+            # --- Handle Patch Request ---
+            if is_patch_request:
+                print("🤖 Gemini: Thinking... (generating code changes)")
+                # Include user message part if it exists
+                patch_context = f"Based on our conversation and specifically: \"{user_message}\"\n\n" if user_message else "Based on our conversation,\n\n"
+
                 patch_request_prompt = (
-                    "Based on our conversation, generate the necessary code changes "
-                    "to fulfill my request. Provide the changes as a JSON list, where each item "
+                    patch_context +
+                    "Generate the necessary code changes to fulfill the request. Provide the changes as a JSON list, where each item "
                     "is an object with the following keys:\n"
                     "- 'reasoning': Explain why this specific change is needed.\n"
                     "- 'file_path': The relative path to the file to modify.\n"
                     "- 'original_text': The exact, unique block of text to replace.\n"
-                    "- 'new_text': The text to replace original_text with.\n"
+                    "- 'new_text': The text to replace original_text with. Do not include any temporary comments like '// CHANGE BEGINS' or '/* PATCH START */'.\n"
                     "Ensure 'original_text' is unique within the specified 'file_path'. "
-                    "Format the response strictly as: { \"patches\": [ { patch_item_1 }, { patch_item_2 }, ... ] }"
+                    "Respond ONLY with the JSON object conforming to this structure: { \"patches\": [ { patch_item_1 }, { patch_item_2 }, ... ] }"
                 )

                 try:
@@ -896,7 +916,8 @@ def start_chat_session(initial_prompt: str):
                        patch_request_prompt,
                        config=GenerateContentConfig(
                            response_schema=SimplePatchArgs.model_json_schema(),
-                            response_mime_type='application/json'
+                            response_mime_type='application/json',
+                            temperature=0.0
                        )
                    )

@@ -978,6 +999,7 @@ def start_chat_session(initial_prompt: str):
                 print("🤖 Gemini: Available commands:")
                 print(" /q - Quit the chat session.")
                 print(" /patch - Request a diff patch (not fully implemented yet).")
+                print(" /review - Pre-fill input with a review/summary prompt template.")
                 print(" /help or /? - Show this help message.")
                 print("-" * 20)
                 continue
@@ -988,7 +1010,7 @@ def start_chat_session(initial_prompt: str):
             full_response_text = ""
             try:
                 # Use send_message_stream for subsequent messages
-                response_stream = chat.send_message_stream(user_input)
+                response_stream = chat.send_message_stream(user_input, config=config)
                 for chunk in response_stream:
                     print(chunk.text, end="", flush=True)
                     full_response_text += chunk.text
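
Taken together, the main.py changes pin sampling to temperature 0.0 for every Gemini call and route the /patch flow through schema-constrained JSON output. Below is a minimal, self-contained sketch of that pattern, assuming the google-genai SDK and pydantic; the real SimplePatchArgs model is defined elsewhere in main.py and is not part of this diff, so the stand-in here is hypothetical, with fields taken from the keys listed in the patch prompt:

    # Sketch only: mirrors the pattern shown in the diff, not the packaged code verbatim.
    import os
    from pydantic import BaseModel
    from google import genai
    from google.genai.types import GenerateContentConfig

    class PatchItem(BaseModel):  # hypothetical stand-in for the model in main.py
        reasoning: str
        file_path: str
        original_text: str
        new_text: str

    class SimplePatchArgs(BaseModel):  # hypothetical stand-in
        patches: list[PatchItem]

    client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
    config = GenerateContentConfig(temperature=0.0)  # greedy, repeatable sampling

    # config at creation sets the session default; the diff also passes it per call.
    chat = client.chats.create(model='gemini-2.5-pro-exp-03-25', config=config)

    # Free-form turns stream text back chunk by chunk.
    for chunk in chat.send_message_stream("Hello", config=config):
        print(chunk.text, end="", flush=True)

    # The /patch flow asks the same chat for machine-readable output instead,
    # constraining the response to the pydantic model's JSON schema.
    response = chat.send_message(
        "Generate the necessary code changes as JSON.",
        config=GenerateContentConfig(
            response_schema=SimplePatchArgs.model_json_schema(),
            response_mime_type='application/json',
            temperature=0.0,
        ),
    )
    patches = SimplePatchArgs.model_validate_json(response.text)

Round-tripping the response through the same pydantic model is one way to catch malformed output before any patch is applied.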
kopipasta-0.25.0.dist-info/METADATA CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: kopipasta
-Version: 0.23.0
+Version: 0.25.0
 Summary: A CLI tool to generate prompts with project structure and file contents
 Home-page: https://github.com/mkorpela/kopipasta
 Author: Mikko Korpela
kopipasta-0.25.0.dist-info/RECORD ADDED

@@ -0,0 +1,8 @@
+kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+kopipasta/main.py,sha256=U4_31xXY2xzYZ-Exm1B6TYzofHfH3NU4iEq6XBHyBj0,53647
+kopipasta-0.25.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
+kopipasta-0.25.0.dist-info/METADATA,sha256=03L8Zbl0k7pgrrSxL-_DsWbIT-zTRFkENabTcC1MHmo,8610
+kopipasta-0.25.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+kopipasta-0.25.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
+kopipasta-0.25.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
+kopipasta-0.25.0.dist-info/RECORD,,
kopipasta-0.23.0.dist-info/RECORD DELETED

@@ -1,8 +0,0 @@
-kopipasta/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-kopipasta/main.py,sha256=dcgcuIlPEjFoRdFwyBLQFRkFzFaIyhJEBiCeeZFFRR0,52209
-kopipasta-0.23.0.dist-info/LICENSE,sha256=xw4C9TAU7LFu4r_MwSbky90uzkzNtRwAo3c51IWR8lk,1091
-kopipasta-0.23.0.dist-info/METADATA,sha256=2knuAlALtbnaEbxpXJuC3GgnAAOCOPQfowV6GIjT5ts,8610
-kopipasta-0.23.0.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-kopipasta-0.23.0.dist-info/entry_points.txt,sha256=but54qDNz1-F8fVvGstq_QID5tHjczP7bO7rWLFkc6Y,50
-kopipasta-0.23.0.dist-info/top_level.txt,sha256=iXohixMuCdw8UjGDUp0ouICLYBDrx207sgZIJ9lxn0o,10
-kopipasta-0.23.0.dist-info/RECORD,,
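
Each RECORD row follows the wheel format: a relative path, a sha256= digest encoded as URL-safe base64 with the trailing padding stripped, and a file size in bytes (the RECORD file lists itself with empty hash and size fields). A small sketch of checking such entries against an unpacked wheel, assuming Python 3.9+:

    import base64
    import csv
    import hashlib
    from pathlib import Path

    def verify_record(wheel_dir: str, record_path: str) -> None:
        # Recompute each file's digest and size and compare with its RECORD entry.
        with open(record_path, newline="") as fh:
            for path, digest, size in csv.reader(fh):
                if not digest:  # RECORD's own row has empty hash/size fields
                    continue
                data = (Path(wheel_dir) / path).read_bytes()
                expected = digest.removeprefix("sha256=")
                actual = base64.urlsafe_b64encode(
                    hashlib.sha256(data).digest()
                ).rstrip(b"=").decode()
                ok = actual == expected and int(size) == len(data)
                print(f"{'OK' if ok else 'MISMATCH'}  {path}")

    # e.g. verify_record("unpacked", "unpacked/kopipasta-0.25.0.dist-info/RECORD")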