pdd-cli 0.0.20__py3-none-any.whl → 0.0.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pdd-cli might be problematic; consult the registry's advisory page for more details.

pdd/edit_file.py ADDED
@@ -0,0 +1,783 @@
1
+ """
2
+ File editor module that uses Claude 3.7 to edit files based on natural language instructions.
3
+
4
+ This module enables editing files through natural language instructions by:
5
+ 1. Using a LangGraph workflow to manage the editing process
6
+ 2. Leveraging Claude 3.7 Sonnet to understand instructions and plan edits
7
+ 3. Using MCP (Model Control Protocol) tools to read and modify file contents
8
+ 4. Tracking file state with hashes to ensure safe editing
9
+
10
+ Requirements:
11
+ - ANTHROPIC_API_KEY environment variable set with a valid Anthropic API key
12
+ - mcp-text-editor package installed (installed via 'pip install mcp-text-editor')
13
+ - A valid mcp_config.json file configured with an editor server
14
+
15
+ Example usage:
16
+ success, error_msg = await edit_file("path/to/file.txt", "Replace all occurrences of 'foo' with 'bar'")
17
+ """
18
+
19
+ import asyncio
20
+ import json
21
+ import os
22
+ import hashlib
23
+ import logging
24
+ import subprocess
25
+ import sys
26
+ from typing import TypedDict, Annotated, Optional, List, Tuple, Union, Sequence, Literal
27
+ import aiofiles
28
+ from pathlib import Path
29
+ import importlib.resources
30
+
31
+ # LangGraph imports
32
+ from langgraph.graph import StateGraph, END, START
33
+ from langgraph.graph.message import add_messages
34
+ from langgraph.prebuilt import ToolNode
35
+
36
+ # LangChain imports (assuming a model is needed for planning)
37
+ from langchain_core.messages import BaseMessage, HumanMessage, AIMessage, ToolMessage
38
+ # Replace with your preferred LLM provider if needed, e.g., langchain_anthropic
39
+ # from langchain_openai import ChatOpenAI
40
+ from pydantic import BaseModel, Field
41
+ from langchain_core.tools import BaseTool # Import BaseTool
42
+
43
+ # Anthropic imports for Claude 3.7
44
+ from langchain_anthropic import ChatAnthropic
45
+ from langchain_core.prompts import ChatPromptTemplate
46
+
47
+ # Import LangChain caching - use community package
48
+ from langchain import globals as langchain_globals
49
+ from langchain_community.cache import SQLiteCache
50
+
51
+ # MCP Adapter imports
52
+ from langchain_mcp_adapters.client import MultiServerMCPClient
53
+
54
+ # Setup logging
55
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
56
+ logger = logging.getLogger(__name__)
57
+
58
+ # Configure the SQLite cache
59
+ cache_path = ".langchain.db"
60
+ llm_cache = SQLiteCache(database_path=cache_path)
61
+ langchain_globals.set_llm_cache(llm_cache)
62
+
63
+ # --- State Definition ---
64
class EditFileState(TypedDict):
    """State carried through the LangGraph file-editing workflow.

    Attributes:
        file_path: Path of the file being edited.
        original_content: File content captured before any edit.
        current_content: File content after the most recent edit.
        original_hash: SHA256 hex digest of the original content.
        current_hash: SHA256 hex digest of the current content.
        edit_instructions: The user's natural-language edit request.
        messages: Conversation history for the planning agent; the
            ``add_messages`` reducer appends rather than replaces.
        available_tools: MCP tools discovered at runtime (List[BaseTool]).
        error_message: Set by a failing node; None while healthy.
        last_tool_call_successful: Whether the latest edit attempt succeeded.
    """
    file_path: str
    original_content: str
    current_content: str
    original_hash: str
    current_hash: str
    edit_instructions: str
    messages: Annotated[Sequence[BaseMessage], add_messages]
    available_tools: list  # List[BaseTool] once loaded
    error_message: Optional[str]
    last_tool_call_successful: bool
90
+
91
+
92
+ # --- Utility Functions ---
93
+
94
def calculate_hash(content: str) -> str:
    """Return the hex-encoded SHA256 digest of *content* (UTF-8 encoded)."""
    digest = hashlib.sha256(content.encode('utf-8'))
    return digest.hexdigest()
97
+
98
async def read_file_content(file_path: str) -> tuple[Optional[str], Optional[str]]:
    """Read a file asynchronously and return ``(content, sha256_hex)``.

    Any read failure is logged and reported as ``(None, None)`` rather than
    raised, so callers can branch on the result.
    """
    try:
        async with aiofiles.open(file_path, mode='r', encoding='utf-8') as fh:
            content = await fh.read()
        content_hash = calculate_hash(content)
        return content, content_hash
    except FileNotFoundError:
        logger.error(f"File not found: {file_path}")
        return None, None
    except IOError as e:
        logger.error(f"Error reading file {file_path}: {e}")
        return None, None
    except Exception as e:
        logger.error(f"Unexpected error reading file {file_path}: {e}")
        return None, None
114
+
115
async def write_file_content(file_path: str, content: str) -> bool:
    """Write *content* to *file_path* asynchronously; return True on success.

    Failures are logged and reported as ``False`` instead of being raised.
    """
    try:
        async with aiofiles.open(file_path, mode='w', encoding='utf-8') as fh:
            await fh.write(content)
        return True
    except IOError as e:
        logger.error(f"Error writing file {file_path}: {e}")
        return False
    except Exception as e:
        logger.error(f"Unexpected error writing file {file_path}: {e}")
        return False
127
+
128
+ # --- LangGraph Nodes ---
129
+
130
async def start_editing(state: EditFileState) -> EditFileState:
    """Seed the state with the file's content, hashes, and opening message.

    On a read failure only ``error_message`` is set, which routes the graph
    straight to the error handler.
    """
    logger.info(f"Starting edit process for: {state['file_path']}")
    content, content_hash = await read_file_content(state['file_path'])
    if content is None:
        return {
            **state,
            "error_message": f"Failed to read initial file content from {state['file_path']}.",
        }

    # The conversation opens with the user's instructions plus a snapshot of
    # the current file content for the planner to reason over.
    initial_messages = [
        HumanMessage(
            content=f"Please edit the file at '{state['file_path']}'. "
                    f"Here are the instructions: {state['edit_instructions']}\n\n"
                    f"Current file content:\n```\n{content}\n```"
        )
    ]

    return {
        **state,
        "original_content": content,
        "current_content": content,
        "original_hash": content_hash,
        "current_hash": content_hash,
        "messages": initial_messages,
        "error_message": None,
        "last_tool_call_successful": True,  # Start optimistically
    }
159
+
160
def format_tools_for_claude(tools):
    """Build a plain-text summary of the available tools for the LLM prompt.

    Args:
        tools: Iterable of tool objects exposing ``name`` and ``description``
            attributes and, optionally, a non-None ``args_schema``.

    Returns:
        A newline-separated string with one entry per tool describing its
        name, description, and (when defined) its required arguments.
    """
    tool_descriptions = []

    for tool in tools:
        description = f"Tool: {tool.name}\nDescription: {tool.description}\n"
        # Only advertise an argument schema when one is actually defined.
        # LangChain's BaseTool declares ``args_schema`` with a default of
        # None, so a bare hasattr() check would emit a misleading
        # "Required Arguments: None" line for schema-less tools.
        schema = getattr(tool, 'args_schema', None)
        if schema is not None:
            description += f"Required Arguments: {str(schema)}\n"
        tool_descriptions.append(description)

    return "\n".join(tool_descriptions)
174
+
175
async def plan_edits(state: EditFileState) -> EditFileState:
    """Ask Claude 3.7 to plan the next edit from the instructions and history.

    Returns a partial state update holding the AIMessage produced by the
    model (possibly carrying tool calls). If a prior error is present,
    planning is skipped and no messages are added. A planning failure is
    reported back to the graph as a plain AIMessage with no tool calls,
    which ends the workflow cleanly.
    """
    logger.info("Planning next edit with Claude 3.7...")
    if state.get("error_message"):
        logger.warning("Skipping planning due to previous error.")
        return {"messages": []}  # No new messages if error occurred

    try:
        # The API key is read from the ANTHROPIC_API_KEY environment variable.
        # LLM response caching is configured globally (SQLiteCache) at import.
        llm = ChatAnthropic(
            model="claude-3-7-sonnet-20250219",
            temperature=1,
            max_tokens=64000,
            thinking={"type": "enabled", "budget_tokens": 1024},
        )

        tools = state['available_tools']
        tool_descriptions = format_tools_for_claude(tools)

        # On the very first planning pass, append tool documentation and
        # usage hints to the user's opening message.
        if len(state['messages']) == 1:
            first_message = state['messages'][0]
            enhanced_content = (
                f"{first_message.content}\n\n"
                f"Available tools:\n{tool_descriptions}\n\n"
                f"IMPORTANT: Always use absolute file paths. When editing, you need to first get the file contents "
                f"to obtain the range_hash before you can edit it."
            )
            enhanced_messages = [HumanMessage(content=enhanced_content)]
        else:
            enhanced_messages = state['messages']

        llm_with_tools = llm.bind_tools(tools)
        response = await llm_with_tools.ainvoke(enhanced_messages)
        logger.info("Claude 3.7 planning complete.")
        return {"messages": [response]}

    except Exception as e:
        logger.error(f"Error during Claude 3.7 planning: {e}")
        error_message = AIMessage(content=f"I encountered an error while planning the edit: {str(e)}")
        return {"messages": [error_message]}
221
+
222
+
223
+ # ToolNode handles execution. We might need a wrapper for pre/post checks.
224
+ # Let's create a custom node for more control over hash checking and state updates.
225
async def execute_edit(state: EditFileState) -> EditFileState:
    """Execute the planned tool call(s), verify hashes, and refresh state.

    Sequence:
      1. Bail out (successfully) if the last AI message carries no tool calls.
      2. Re-read the file and compare hashes to detect external modification.
      3. Run the tool calls through a ToolNode, capturing tool-level errors.
      4. Re-read the file to pick up the new content and hash.
    """
    logger.info("Attempting to execute edit...")
    ai_message = state['messages'][-1]
    if not isinstance(ai_message, AIMessage) or not ai_message.tool_calls:
        # The planner produced no tool calls; treat as a no-op, not a failure
        # (this usually means planning decided the work is done).
        logger.warning("No tool calls found in the last AI message. Skipping execution.")
        return {**state, "last_tool_call_successful": True}

    # Guard against the file changing between planning and execution.
    logger.debug("Verifying file hash before edit...")
    pre_edit_content, pre_edit_hash = await read_file_content(state['file_path'])
    if pre_edit_content is None:
        return {
            **state,
            "error_message": f"Failed to read file {state['file_path']} before edit.",
            "last_tool_call_successful": False,
        }
    if pre_edit_hash != state['current_hash']:
        logger.error(f"Hash mismatch for {state['file_path']}! Expected {state['current_hash']}, found {pre_edit_hash}. File may have been modified externally.")
        return {
            **state,
            "error_message": "File content changed unexpectedly before edit.",
            "last_tool_call_successful": False,
        }
    logger.debug("File hash verified.")

    # Run the tool call(s) via ToolNode; we keep execution here (instead of a
    # bare ToolNode node) so hash checks and state updates stay under our control.
    tool_node = ToolNode(state['available_tools'])
    try:
        tool_result_state = await tool_node.ainvoke(state)
        tool_messages = tool_result_state.get("messages", [])
        logger.info(f"Tool execution completed. Result messages: {tool_messages}")

        # ToolNode flags failures inside the ToolMessages it returns.
        for msg in tool_messages:
            if isinstance(msg, ToolMessage) and msg.additional_kwargs.get("is_error", False):
                error_content = msg.content
                logger.error(f"MCP Tool execution failed: {error_content}")
                return {
                    **state,
                    "messages": tool_messages,  # keep the error in history
                    "error_message": f"MCP Tool execution failed: {error_content}",
                    "last_tool_call_successful": False,
                }

    except Exception as e:
        logger.exception("Error during MCP tool execution.")
        error_msg = f"Failed to execute MCP tool: {e}"
        # Synthesize error ToolMessages so the LLM can see what went wrong.
        tool_messages = []
        for tool_call in ai_message.tool_calls:
            tool_messages.append(ToolMessage(
                content=f"Error executing tool {tool_call['name']}: {e}",
                tool_call_id=tool_call['id'],
                additional_kwargs={"is_error": True}  # flag the failure
            ))
        return {
            **state,
            "messages": tool_messages,
            "error_message": error_msg,
            "last_tool_call_successful": False,
        }

    # The MCP tool is assumed to have written the file; re-read it to capture
    # the new content and hash (some tools return content, but re-reading is
    # the common denominator).
    logger.info("Reading file content after successful edit...")
    post_edit_content, post_edit_hash = await read_file_content(state['file_path'])

    if post_edit_content is None:
        # The tool claimed success but the file is now unreadable.
        error_msg = f"Tool execution seemed successful, but failed to read file {state['file_path']} afterwards."
        logger.error(error_msg)
        return {
            **state,
            "messages": tool_messages,
            "error_message": error_msg,
            "last_tool_call_successful": False,  # verification failed
        }

    logger.info(f"File content updated. New hash: {post_edit_hash}")
    return {
        **state,
        "messages": tool_messages,
        "current_content": post_edit_content,
        "current_hash": post_edit_hash,
        "error_message": None,  # clear any previous error
        "last_tool_call_successful": True,
    }
320
+
321
def handle_error(state: EditFileState) -> EditFileState:
    """Terminal node for failures: log the recorded error and pass state through.

    The node that failed already stored ``error_message``; nothing is mutated
    here — this node simply gives the graph a terminal point for errors.
    """
    logger.error(f"Entering error handling state. Error: {state.get('error_message', 'Unknown error')}")
    return state
327
+
328
+ # --- Conditional Edges ---
329
+
330
def decide_next_step(state: EditFileState) -> Literal["execute_edit", "handle_error", END]:
    """Route after planning: run the planned tools, surface an error, or finish.

    Returns:
        "handle_error" if a previous node recorded an error,
        "execute_edit" if the planner's last message carries tool calls,
        END otherwise (no further edits planned).
    """
    if state.get("error_message"):
        return "handle_error"
    last_message = state['messages'][-1]
    # Use isinstance rather than comparing __class__.__name__ to "AIMessage":
    # the string comparison silently fails for AIMessage subclasses (e.g.
    # AIMessageChunk from streaming), dropping valid tool calls.
    if isinstance(last_message, AIMessage) and getattr(last_message, "tool_calls", None):
        logger.info("Decision: Execute tool call.")
        return "execute_edit"
    logger.info("Decision: No more tool calls planned, ending process.")
    return END
341
+
342
def check_edit_result(state: EditFileState) -> Literal["plan_edits", "handle_error"]:
    """Route after an edit attempt: back to planning on success, else fail out."""
    edit_failed = not state.get("last_tool_call_successful", False)
    if edit_failed or state.get("error_message"):
        logger.warning("Edit failed or error occurred. Routing to error handler.")
        return "handle_error"
    # A successful edit loops back to planning so the model can decide whether
    # the original instructions require further changes.
    logger.info("Edit successful. Routing back to planning.")
    return "plan_edits"
352
+
353
+ # --- Create the graph at module level for LangGraph to discover ---
354
+ # Build the graph definition
355
graph_builder = StateGraph(EditFileState)
for _node_name, _node_fn in (
    ("start_editing", start_editing),
    ("plan_edits", plan_edits),
    ("execute_edit", execute_edit),
    ("handle_error", handle_error),
):
    graph_builder.add_node(_node_name, _node_fn)

graph_builder.add_edge(START, "start_editing")
graph_builder.add_edge("start_editing", "plan_edits")
graph_builder.add_conditional_edges(
    "plan_edits",
    decide_next_step,
    {"execute_edit": "execute_edit", "handle_error": "handle_error", END: END},
)
graph_builder.add_conditional_edges(
    "execute_edit",
    check_edit_result,
    {"plan_edits": "plan_edits", "handle_error": "handle_error"},
)
graph_builder.add_edge("handle_error", END)

# Compiled at import time so tooling can discover ``edit_file.graph``.
graph = graph_builder.compile()
376
+
377
+ # --- Main Function ---
378
+
379
async def edit_file(file_path: str, edit_instructions: str, mcp_config_path: Optional[str] = None) -> tuple[bool, Optional[str]]:
    """
    Asynchronously edit a file based on natural-language instructions.

    Orchestrates the module-level LangGraph ``graph``: connects to the MCP
    servers described by the config file, discovers their tools, and runs the
    plan/execute loop until the instructions are satisfied or an error occurs.

    Args:
        file_path: The path to the file to edit.
        edit_instructions: A description of the changes to make.
        mcp_config_path: Optional path to MCP config. If None, looks for
            'pdd/mcp_config.json' inside the installed package, falling back
            to the current working directory.

    Returns:
        A tuple containing:
        - success (bool): Whether the file was edited successfully.
        - error_message (Optional[str]): An error message if unsuccessful,
          None otherwise.
    """
    current_dir = os.getcwd()
    logger.info(f"Current working directory: {current_dir}")

    # 1. Initial file validation
    if not os.path.exists(file_path):
        return False, f"File not found: {file_path}"
    if not os.access(file_path, os.R_OK) or not os.access(file_path, os.W_OK):
        return False, f"File not accessible (read/write permissions required): {file_path}"

    # 2. Resolve the MCP configuration path
    if mcp_config_path is None:
        try:
            # Prefer the config bundled inside the installed 'pdd' package.
            mcp_config_path_obj = importlib.resources.files('pdd').joinpath('mcp_config.json')
            if mcp_config_path_obj.is_file():
                mcp_config_path = str(mcp_config_path_obj)
                logger.info(f"Using default MCP config from package: {mcp_config_path}")
            else:
                # Fall back to a CWD-relative path (development checkouts).
                fallback_path = os.path.join(os.getcwd(), 'pdd', 'mcp_config.json')
                if os.path.exists(fallback_path):
                    mcp_config_path = fallback_path
                    logger.warning(f"Default MCP config not found in package, using fallback: {fallback_path}")
                else:
                    return False, "Default MCP configuration 'pdd/mcp_config.json' not found in package or current directory."
        except Exception as e:
            # Previously `except (ImportError, FileNotFoundError, Exception)`:
            # the narrower types were redundant since Exception covers both.
            logger.warning(f"Could not find default MCP config using importlib.resources: {e}. Trying CWD.")
            fallback_path = os.path.join(os.getcwd(), 'pdd', 'mcp_config.json')
            if os.path.exists(fallback_path):
                mcp_config_path = fallback_path
                logger.warning(f"Using fallback MCP config: {fallback_path}")
            else:
                return False, "MCP configuration 'pdd/mcp_config.json' not found using importlib.resources or in current directory."

    if mcp_config_path:
        mcp_config_path = os.path.abspath(mcp_config_path)
    else:
        # Unreachable if resolution above succeeded; kept as a safety net.
        return False, "MCP configuration path could not be determined."

    # 3. Load and sanity-check the MCP configuration
    mcp_servers_config = {}
    try:
        logger.info(f"Attempting to load MCP config from: {mcp_config_path}")
        with open(mcp_config_path, 'r') as f:
            mcp_servers_config = json.load(f)
        logger.info(f"Successfully loaded MCP configuration from {mcp_config_path}")
        if not isinstance(mcp_servers_config, dict) or not mcp_servers_config:
            raise ValueError("MCP config must be a non-empty dictionary.")
        # Warn (don't fail) when the expected editor server entry is absent.
        if 'my_editor_server' not in mcp_servers_config:
            logger.warning("MCP config doesn't contain a 'my_editor_server' server definition. Ensure your config is correct.")
    except FileNotFoundError:
        return False, f"MCP configuration file not found: {mcp_config_path}"
    except json.JSONDecodeError:
        return False, f"Error decoding JSON from {mcp_config_path}"
    except ValueError as e:
        return False, f"Invalid MCP configuration: {e}"
    except Exception as e:
        return False, f"Unexpected error loading MCP config: {e}"

    # 4. Connect to the MCP servers and discover their tools
    available_tools = []
    try:
        async with MultiServerMCPClient(mcp_servers_config) as mcp_client:
            logger.info("MCP Client connected.")
            try:
                # get_tools() may be sync or async depending on adapter version.
                tools_result = mcp_client.get_tools()
                if hasattr(tools_result, "__await__"):
                    available_tools = await tools_result
                else:
                    available_tools = tools_result

                logger.info(f"Discovered {len(available_tools)} MCP tools.")
                if not available_tools:
                    logger.warning("No MCP tools discovered. Editing will likely fail.")
            except Exception as e:
                logger.exception("Failed to load MCP tools.")
                return False, f"Failed to load MCP tools: {e}"

            # 5. Run the module-level compiled graph
            initial_state: EditFileState = {
                "file_path": file_path,
                "edit_instructions": edit_instructions,
                "available_tools": available_tools,
                # Remaining fields are populated by the start_editing node.
                "original_content": "",
                "current_content": "",
                "original_hash": "",
                "current_hash": "",
                "messages": [],
                "error_message": None,
                "last_tool_call_successful": True,
            }

            try:
                logger.info("Invoking LangGraph...")
                final_state = await graph.ainvoke(initial_state)
                logger.info("LangGraph invocation complete.")

                # 6. Interpret the final state
                if final_state.get("error_message"):
                    logger.error(f"Graph finished with error: {final_state['error_message']}")
                    return False, final_state["error_message"]
                if final_state["current_hash"] != final_state["original_hash"]:
                    # The execute_edit node already persisted changes through
                    # the MCP tool, so no extra write-back is needed here.
                    logger.info("File content changed, writing final version.")
                    logger.info(f"File '{file_path}' edited successfully.")
                    return True, None
                # Success, but the instructions required no changes.
                logger.info("No changes made to the file content.")
                return True, None

            except Exception as e:
                logger.exception("Error during LangGraph invocation.")
                return False, f"Error during graph execution: {e}"

    except Exception as e:
        logger.exception("Failed to connect or interact with MCP client.")
        return False, f"MCP Client error: {e}"
541
+
542
+
543
+ # --- Example Usage and Testing ---
544
+
545
async def main():
    """Demo driver: create a test file, edit it via edit_file, verify results.

    Requires ANTHROPIC_API_KEY and a valid MCP config; creates a dummy config
    pointing at 'mcp-server-text-editor' if none exists.
    """
    if "ANTHROPIC_API_KEY" not in os.environ:
        logger.error("ANTHROPIC_API_KEY environment variable is not set. Please set it before running this script.")
        return

    logger.info(f"Using SQLite cache at: {cache_path}")

    test_file_path = os.path.abspath("output/test_edit_file.txt")  # Use absolute path

    # --- Determine MCP Config Path for Example ---
    # Prefer the packaged resource; fall back to a project-root relative path
    # (assumes this script lives in pdd/ during development).
    project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    mcp_config_path_example = os.path.join(project_root, 'pdd', 'mcp_config.json')
    try:
        mcp_config_path_obj = importlib.resources.files('pdd').joinpath('mcp_config.json')
        if mcp_config_path_obj.is_file():
            mcp_config_path_example = str(mcp_config_path_obj)
    except Exception:
        pass  # Keep the project root relative path if importlib fails
    mcp_config_path_example = os.path.abspath(mcp_config_path_example)

    initial_content = "This is the initial content.\nIt has multiple lines."
    edit_instructions = "Replace the word 'initial' with 'edited' and add a new line at the end saying 'Edit complete.'"

    # 1. Create a dummy MCP config if one does not exist yet.
    if not os.path.exists(mcp_config_path_example):
        logger.info(f"MCP config not found. Creating dummy config at: {mcp_config_path_example}")
        os.makedirs(os.path.dirname(mcp_config_path_example), exist_ok=True)
        # IMPORTANT: replace with your actual MCP server configuration. This
        # dummy assumes a stdio server named 'my_editor_server'.
        dummy_config = {
            "my_editor_server": {
                "command": "npx",
                "args": ["-y", "mcp-server-text-editor"],
                "transport": "stdio"
            }
        }
        try:
            with open(mcp_config_path_example, 'w') as f:
                json.dump(dummy_config, f, indent=2)
            logger.info(f"Dummy MCP config created at: {mcp_config_path_example}")
        except Exception as e:
            logger.error(f"Could not create dummy MCP config: {e}")
            return

    # 2. Create the test file. Ensure the 'output/' directory exists first —
    # opening for write otherwise raises FileNotFoundError on a fresh checkout.
    os.makedirs(os.path.dirname(test_file_path), exist_ok=True)
    logger.info(f"Creating test file: {test_file_path}")
    async with aiofiles.open(test_file_path, mode='w', encoding='utf-8') as f:
        await f.write(initial_content)
    logger.info("Test file created with initial content.")

    # 3. Run the edit function. The configured MCP server must expose real
    # text-editing tools (replace_text, insert_line, ...) for this to work.
    logger.info("Calling edit_file function...")
    success, error_msg = await edit_file(test_file_path, edit_instructions, mcp_config_path=mcp_config_path_example)

    # 4. Verify the result.
    if success:
        logger.info("edit_file completed successfully.")
        async with aiofiles.open(test_file_path, mode='r', encoding='utf-8') as f:
            final_content = await f.read()
        logger.info(f"Final file content:\n---\n{final_content}\n---")
        expected_content = "This is the edited content.\nIt has multiple lines.\nEdit complete."
        if final_content.strip() == expected_content.strip():
            logger.info("File content matches expected output.")
        else:
            logger.warning("File content does NOT match expected output.")
            logger.warning(f"Expected:\n{expected_content}")
            logger.warning(f"Got:\n{final_content}")
    else:
        logger.error(f"edit_file failed: {error_msg}")
636
+
637
def run_edit_in_subprocess(file_path: str, edit_instructions: str) -> Tuple[bool, Optional[str]]:
    """
    Bridge sync callers to the async edit_file by running it in a child process.

    A small Python script is generated, executed with the current interpreter
    (inheriting this process's environment, including ANTHROPIC_API_KEY), and
    its JSON result — printed to stdout — is parsed back into a tuple.

    Args:
        file_path: The path to the file to edit
        edit_instructions: Instructions for editing the file

    Returns:
        A tuple containing:
        - success (bool): Whether the edit was successful
        - error_message (Optional[str]): Error message if unsuccessful, None otherwise
    """
    logger.info(f"Running edit_file in subprocess for: {file_path}")

    # Resolve the MCP config the same way edit_file does: packaged resource
    # first, CWD-relative fallback second.
    mcp_config_abs_path = None
    try:
        mcp_config_path_obj = importlib.resources.files('pdd').joinpath('mcp_config.json')
        if mcp_config_path_obj.is_file():
            mcp_config_abs_path = str(mcp_config_path_obj)
            logger.info(f"Subprocess using default MCP config from package: {mcp_config_abs_path}")
    except Exception as e:
        logger.warning(f"Could not find default MCP config for subprocess via importlib: {e}. Trying CWD relative path.")
        fallback_path = os.path.join(os.getcwd(), 'pdd', 'mcp_config.json')
        if os.path.exists(fallback_path):
            mcp_config_abs_path = fallback_path
            logger.warning(f"Subprocess using fallback MCP config: {fallback_path}")

    if mcp_config_abs_path:
        mcp_config_abs_path = os.path.abspath(mcp_config_abs_path)
    else:
        logger.error("MCP config path could not be determined for subprocess.")
        return False, "MCP config path could not be determined for subprocess."

    # The child process needs this module's directory on sys.path.
    module_dir = os.path.dirname(os.path.abspath(__file__))

    # The script the child interpreter will execute via `python -c`.
    script = f"""
import asyncio
import json
import os
import sys
import logging

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Add the module directory to sys.path to import the edit_file module
module_dir = {repr(module_dir)}
sys.path.append(module_dir)
from edit_file import edit_file

async def main():
    # Print current working directory in subprocess
    current_dir = os.getcwd()
    print(f"Subprocess working directory: {{current_dir}}")

    # Use the absolute path for MCP config passed from parent process
    mcp_config_path = {repr(mcp_config_abs_path)}
    print(f"Subprocess using MCP config at: {{mcp_config_path}}")
    print(f"MCP config exists: {{os.path.exists(mcp_config_path)}}")

    file_path = {repr(file_path)}
    edit_instructions = {repr(edit_instructions)}

    # Run the async edit_file function with explicit MCP config path
    success, error_msg = await edit_file(file_path, edit_instructions, mcp_config_path=mcp_config_path)

    # Return the result as JSON to stdout
    result = {{"success": success, "error_message": error_msg}}
    print(json.dumps(result))

if __name__ == "__main__":
    asyncio.run(main())
"""

    try:
        # Forward the full parent environment to the child process.
        env = os.environ.copy()
        result = subprocess.run(
            [sys.executable, "-c", script],
            capture_output=True,
            text=True,
            env=env
        )

        if result.returncode != 0:
            logger.error(f"Subprocess failed with return code {result.returncode}")
            logger.error(f"Stderr: {result.stderr}")
            return False, f"Subprocess error: {result.stderr}"

        # The child may interleave logging with its JSON result, so scan the
        # output bottom-up for the first parseable line.
        try:
            output = result.stdout.strip()
            for line in reversed(output.split('\n')):
                try:
                    result_data = json.loads(line)
                    return result_data["success"], result_data["error_message"]
                except json.JSONDecodeError:
                    continue

            logger.error(f"Could not parse subprocess output as JSON: {output}")
            return False, f"Could not parse subprocess output: {output}"

        except json.JSONDecodeError as e:
            logger.error(f"Failed to parse subprocess JSON output: {e}")
            logger.error(f"Output was: {result.stdout}")
            return False, f"Failed to parse subprocess result: {e}"

    except Exception as e:
        logger.exception(f"Error running subprocess: {e}")
        return False, f"Error running subprocess: {e}"
764
+
765
if __name__ == "__main__":
    # Run the demo. In environments that already own an event loop (e.g.
    # Jupyter), asyncio.run() fails; patch with nest_asyncio and retry.
    try:
        asyncio.run(main())
    except RuntimeError as e:
        if "Cannot run the event loop while another loop is running" in str(e):
            logger.info("Event loop already running, executing main directly.")
            import nest_asyncio
            nest_asyncio.apply()
            asyncio.run(main())
        else:
            raise e