todo-agent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,173 @@
1
+ """
2
+ LLM client for OpenRouter API communication.
3
+ """
4
+
5
+ import json
6
+ import time
7
+ from typing import Any, Dict, List
8
+
9
+ import requests
10
+
11
+ try:
12
+ from todo_agent.infrastructure.config import Config
13
+ from todo_agent.infrastructure.logger import Logger
14
+ from todo_agent.infrastructure.token_counter import get_token_counter
15
+ from todo_agent.infrastructure.llm_client import LLMClient
16
+ except ImportError:
17
+ from infrastructure.config import Config
18
+ from infrastructure.logger import Logger
19
+ from infrastructure.token_counter import get_token_counter
20
+ from infrastructure.llm_client import LLMClient
21
+
22
+
23
class OpenRouterClient(LLMClient):
    """OpenRouter chat-completions client with token accounting and request/response logging."""

    # requests.post() has no default timeout; without one a stalled connection
    # would block the agent indefinitely.
    REQUEST_TIMEOUT_SECONDS = 120

    def __init__(self, config: Config) -> None:
        """
        Initialize the client from application configuration.

        Args:
            config: Configuration object providing the OpenRouter API key and model name.
        """
        self.config = config
        self.api_key = config.openrouter_api_key
        self.model = config.model
        self.base_url = "https://openrouter.ai/api/v1"
        self.logger = Logger("openrouter_client")
        self.token_counter = get_token_counter(self.model)

    def _estimate_tokens(self, text: str) -> int:
        """
        Estimate token count for text using accurate tokenization.

        Args:
            text: Text to count tokens for

        Returns:
            Number of tokens
        """
        return self.token_counter.count_tokens(text)

    def _log_request_details(self, payload: Dict[str, Any], start_time: float) -> None:
        """Log request details including an accurate token count for messages + tools."""
        messages = payload.get("messages", [])
        tools = payload.get("tools", [])

        total_tokens = self.token_counter.count_request_tokens(messages, tools)

        self.logger.info(f"Request sent - Token count: {total_tokens}")

    def _log_response_details(self, response: Dict[str, Any], start_time: float) -> None:
        """Log response latency, reported token usage, and a summary of the model output."""
        latency_ms = (time.time() - start_time) * 1000

        # Token usage is reported by the API itself; fall back to "unknown"
        # when the provider omits the usage section.
        usage = response.get("usage", {})
        prompt_tokens = usage.get("prompt_tokens", "unknown")
        completion_tokens = usage.get("completion_tokens", "unknown")
        total_tokens = usage.get("total_tokens", "unknown")

        self.logger.info(f"Response received - Latency: {latency_ms:.2f}ms")
        self.logger.info(f"Token usage - Prompt: {prompt_tokens}, Completion: {completion_tokens}, Total: {total_tokens}")

        # Summarize tool calls or plain content, whichever the model produced.
        if "choices" in response and response["choices"]:
            message = response["choices"][0].get("message", {})
            if "tool_calls" in message:
                tool_calls = message["tool_calls"]
                self.logger.info(f"Response contains {len(tool_calls)} tool calls")
                for i, tool_call in enumerate(tool_calls):
                    tool_name = tool_call.get("function", {}).get("name", "unknown")
                    self.logger.info(f"  Tool call {i+1}: {tool_name}")
            elif "content" in message:
                content = message["content"]
                # "content" may be JSON null; guard before slicing/len().
                if content:
                    self.logger.debug(f"Response contains content: {content[:100]}{'...' if len(content) > 100 else ''}")

        self.logger.debug(f"Raw response: {json.dumps(response, indent=2)}")

    def chat_with_tools(
        self, messages: List[Dict[str, str]], tools: List[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Send chat message with function calling enabled.

        Args:
            messages: List of message dictionaries
            tools: List of tool definitions

        Returns:
            API response dictionary

        Raises:
            Exception: If the API returns a non-200 status code.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }

        payload = {
            "model": self.model,
            "messages": messages,
            "tools": tools,
            "tool_choice": "auto",
        }

        start_time = time.time()
        self._log_request_details(payload, start_time)

        # Bounded timeout so a hung connection cannot stall the agent forever.
        response = requests.post(
            f"{self.base_url}/chat/completions",
            headers=headers,
            json=payload,
            timeout=self.REQUEST_TIMEOUT_SECONDS,
        )

        if response.status_code != 200:
            self.logger.error(f"OpenRouter API error: {response.text}")
            raise Exception(f"OpenRouter API error: {response.text}")

        response_data = response.json()
        self._log_response_details(response_data, start_time)

        return response_data

    def continue_with_tool_result(self, tool_result: Dict[str, Any]) -> Dict[str, Any]:
        """
        Continue conversation with tool execution result.

        Args:
            tool_result: Tool execution result

        Returns:
            API response dictionary
        """
        # TODO: Implement continuation logic
        return {}

    def extract_tool_calls(self, response: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Extract tool calls from API response; returns an empty list when none exist."""
        tool_calls = []
        if "choices" in response and response["choices"]:
            choice = response["choices"][0]
            if "message" in choice and "tool_calls" in choice["message"]:
                tool_calls = choice["message"]["tool_calls"]
                self.logger.debug(f"Extracted {len(tool_calls)} tool calls from response")
                for i, tool_call in enumerate(tool_calls):
                    tool_name = tool_call.get("function", {}).get("name", "unknown")
                    tool_call_id = tool_call.get("id", "unknown")
                    self.logger.debug(f"Tool call {i+1}: {tool_name} (ID: {tool_call_id})")
            else:
                self.logger.debug("No tool calls found in response")
        else:
            self.logger.debug("No choices found in response")
        return tool_calls

    def extract_content(self, response: Dict[str, Any]) -> str:
        """Extract assistant message content from API response; empty string when absent."""
        if "choices" in response and response["choices"]:
            choice = response["choices"][0]
            if "message" in choice and "content" in choice["message"]:
                return choice["message"]["content"]
        return ""

    def get_model_name(self) -> str:
        """
        Get the model name being used by this client.

        Returns:
            Model name string
        """
        return self.model
@@ -0,0 +1,51 @@
1
+ You are a todo.sh assistant managing tasks in standard todo.txt format.
2
+
3
+ CURRENT DATE/TIME: {current_datetime}
4
+
5
+ CORE PRINCIPLES:
6
+ 1. **Strategic Tool Usage**: Batch discovery tools (list_tasks, list_completed_tasks, list_projects, list_contexts) to minimize API calls
7
+ 2. **Conversational**: Respond naturally without mentioning tools or technical details
8
+ 3. **Data Integrity**: Only reference tasks/projects/contexts returned by actual tool calls - NEVER hallucinate
9
+ 4. **Safety**: Always verify current state before modifications using list_tasks() and list_completed_tasks()
10
+ 5. **Todo.txt Compliance**: Use standard format and ordering
11
+
12
+ TODO.TXT FORMAT:
13
+ - Priority: (A), (B), (C) • Completion: "x YYYY-MM-DD" • Creation: YYYY-MM-DD
14
+ - Projects: +project • Contexts: @context • Due dates: due:YYYY-MM-DD
15
+ - Example: "(A) 2024-01-15 Call dentist +health @phone due:2024-01-20"
16
+
17
+ WORKFLOW:
18
+ **Discovery First**: Gather context with batched tool calls before any action
19
+ **Verify Before Action**: Check for duplicates, conflicts, or existing completions
20
+ **Sequential Processing**: Tools execute in order within batches
21
+
22
+ CONTEXT INFERENCE:
23
+ - Extract temporal urgency from due dates and creation dates
24
+ - Identify task relationships through shared projects/contexts
25
+ - Determine scope boundaries from natural language (work vs personal tasks)
26
+ - Recognize priority patterns and dependencies
27
+
28
+ TASK ADVICE:
29
+ Think deeply and critically to categorize tasks and suggest actions:
30
+ - Consider real-life implications and importance to the user's responsibilities regardless of explicit priority
31
+ - When users request prioritization help, use Eisenhower Matrix:
32
+ Q1 (Urgent+Important: DO), Q2 (Important: SCHEDULE), Q3 (Urgent: DELEGATE), Q4 (Neither: ELIMINATE) [assign SPARINGLY].
33
+
34
+ COMPLETED TASKS:
35
+ When users mention past accomplishments ("I did XXX today"):
36
+ 1. add_task() with description
37
+ 2. complete_task() with same ID using "x YYYY-MM-DD" format
38
+
39
+ ERROR HANDLING:
40
+ - Empty results: Suggest next steps
41
+ - Ambiguous requests: Show numbered options
42
+ - Large lists: Use filtering/summaries for 10+ items
43
+ - Failed operations: Explain clearly with alternatives
44
+
45
+ CRITICAL RULES:
46
+ - **Anti-hallucination**: If no tool data exists, say "I need to check your tasks first"
47
+ - Use appropriate discovery tools extensively
48
+ - Never assume task existence without verification
49
+ - Maintain todo.txt standard compliance
50
+
51
+ AVAILABLE TOOLS: {tools_section}
@@ -0,0 +1,151 @@
1
+ """
2
+ Subprocess wrapper for todo.sh operations.
3
+ """
4
+
5
+ import os
6
+ import subprocess
7
+ from typing import List, Optional
8
+
9
+ try:
10
+ from todo_agent.core.exceptions import TodoShellError
11
+ except ImportError:
12
+ from core.exceptions import TodoShellError
13
+
14
+
15
class TodoShell:
    """Subprocess execution wrapper around the ``todo.sh`` CLI with error management."""

    def __init__(self, todo_file_path: str, logger=None):
        """
        Args:
            todo_file_path: Path to the todo.txt file; its directory becomes the
                default working directory for commands.
            logger: Optional logger exposing debug()/error(); no logging when None.
        """
        self.todo_file_path = todo_file_path
        # Fall back to the current directory when the path has no directory part.
        self.todo_dir = os.path.dirname(todo_file_path) or os.getcwd()
        self.logger = logger

    def execute(self, command: List[str], cwd: Optional[str] = None) -> str:
        """
        Execute todo.sh command.

        Args:
            command: List of command arguments
            cwd: Working directory (defaults to todo.sh directory)

        Returns:
            Command output as string (stdout, stripped)

        Raises:
            TodoShellError: If command execution fails
        """
        raw_command = " ".join(command)
        working_dir = cwd or self.todo_dir

        if self.logger:
            self.logger.debug("=== RAW COMMAND EXECUTION ===")
            self.logger.debug(f"Raw command: {raw_command}")
            self.logger.debug(f"Working directory: {working_dir}")

        try:
            result = subprocess.run(
                command, cwd=working_dir, capture_output=True, text=True, check=True
            )

            if self.logger:
                self.logger.debug("=== RAW COMMAND OUTPUT ===")
                self.logger.debug(f"Raw command: {raw_command}")
                self.logger.debug(f"Raw stdout: {result.stdout}")
                self.logger.debug(f"Raw stderr: {result.stderr}")
                self.logger.debug(f"Return code: {result.returncode}")

            return result.stdout.strip()
        except subprocess.CalledProcessError as e:
            if self.logger:
                self.logger.error("=== COMMAND EXECUTION FAILED ===")
                self.logger.error(f"Raw command: {raw_command}")
                self.logger.error(f"Error stderr: {e.stderr}")
                self.logger.error(f"Error return code: {e.returncode}")
            # Chain the original error so the subprocess context survives.
            raise TodoShellError(f"Todo.sh command failed: {e.stderr}") from e
        except Exception as e:
            # Covers OS-level failures such as the binary not being found.
            if self.logger:
                self.logger.error("=== COMMAND EXECUTION EXCEPTION ===")
                self.logger.error(f"Raw command: {raw_command}")
                self.logger.error(f"Exception: {str(e)}")
            raise TodoShellError(f"Todo.sh command failed: {e}") from e

    def add(self, description: str) -> str:
        """Add new task."""
        return self.execute(["todo.sh", "add", description])

    def list_tasks(self, filter_str: Optional[str] = None) -> str:
        """List tasks with optional filtering."""
        command = ["todo.sh", "ls"]
        if filter_str:
            command.append(filter_str)
        return self.execute(command)

    def complete(self, task_number: int) -> str:
        """Mark task complete."""
        return self.execute(["todo.sh", "do", str(task_number)])

    def replace(self, task_number: int, new_description: str) -> str:
        """Replace task content."""
        return self.execute(["todo.sh", "replace", str(task_number), new_description])

    def append(self, task_number: int, text: str) -> str:
        """Append text to task."""
        return self.execute(["todo.sh", "append", str(task_number), text])

    def prepend(self, task_number: int, text: str) -> str:
        """Prepend text to task."""
        return self.execute(["todo.sh", "prepend", str(task_number), text])

    def delete(self, task_number: int, term: Optional[str] = None) -> str:
        """Delete a whole task, or only *term* from it when given (-f skips confirmation)."""
        command = ["todo.sh", "-f", "del", str(task_number)]
        if term:
            command.append(term)
        return self.execute(command)

    def move(self, task_number: int, destination: str, source: Optional[str] = None) -> str:
        """Move task from source to destination file."""
        command = ["todo.sh", "-f", "move", str(task_number), destination]
        if source:
            command.append(source)
        return self.execute(command)

    def set_priority(self, task_number: int, priority: str) -> str:
        """Set task priority (single letter A-Z)."""
        return self.execute(["todo.sh", "pri", str(task_number), priority])

    def remove_priority(self, task_number: int) -> str:
        """Remove task priority."""
        return self.execute(["todo.sh", "depri", str(task_number)])

    def list_projects(self) -> str:
        """List projects (+project tags)."""
        return self.execute(["todo.sh", "lsp"])

    def list_contexts(self) -> str:
        """List contexts (@context tags)."""
        return self.execute(["todo.sh", "lsc"])

    def list_completed(self, filter_str: Optional[str] = None) -> str:
        """List completed tasks (done.txt) with optional filtering."""
        command = ["todo.sh", "listfile", "done.txt"]
        if filter_str:
            command.append(filter_str)
        return self.execute(command)

    def archive(self) -> str:
        """Archive completed tasks."""
        return self.execute(["todo.sh", "-f", "archive"])

    def deduplicate(self) -> str:
        """Remove duplicate tasks."""
        try:
            return self.execute(["todo.sh", "-f", "deduplicate"])
        except TodoShellError as e:
            # "No duplicates" exits non-zero in todo.sh but is not a real error.
            if "No duplicate tasks found" in str(e):
                return "No duplicate tasks found"
            raise
@@ -0,0 +1,184 @@
1
+ """
2
+ Token counting utilities for accurate LLM token estimation.
3
+ """
4
+
5
+ import json
6
+ from typing import Any, Dict, List, Optional
7
+
8
+ try:
9
+ import tiktoken
10
+ except ImportError:
11
+ tiktoken = None
12
+
13
+
14
class TokenCounter:
    """Accurate token counting using the tiktoken library."""

    def __init__(self, model: str = "gpt-4"):
        """
        Initialize token counter for a specific model.

        Args:
            model: Model name to use for tokenization (default: gpt-4)
        """
        self.model = model
        self._encoder = None
        self._initialize_encoder()

    def _initialize_encoder(self) -> None:
        """
        Initialize the tiktoken encoder for the configured model.

        Raises:
            ImportError: If tiktoken is not installed.
        """
        if tiktoken is None:
            raise ImportError(
                "tiktoken library is required for accurate token counting. "
                "Install it with: pip install tiktoken"
            )

        try:
            # Use the encoding registered for the exact model name when known.
            self._encoder = tiktoken.encoding_for_model(self.model)
        except KeyError:
            # Unknown model (e.g. OpenRouter-prefixed names): fall back to
            # cl100k_base, the encoding used by recent OpenAI chat models.
            self._encoder = tiktoken.get_encoding("cl100k_base")

    def count_tokens(self, text: str) -> int:
        """
        Count tokens in text using accurate tokenization.

        Args:
            text: Text to count tokens for

        Returns:
            Number of tokens (0 for empty/None text)
        """
        if not text:
            return 0

        return len(self._encoder.encode(text))

    def count_message_tokens(self, message: Dict[str, Any]) -> int:
        """
        Count tokens in a single message (including role, content, and tool calls).

        Args:
            message: Message dictionary with role, content, etc.

        Returns:
            Number of tokens
        """
        tokens = 0

        # Role string (typically 1-2 tokens).
        tokens += self.count_tokens(message.get("role", ""))

        # Message content, if any.
        content = message.get("content", "")
        if content:
            tokens += self.count_tokens(content)

        # Any attached tool calls.
        for tool_call in message.get("tool_calls", []):
            tokens += self.count_tool_call_tokens(tool_call)

        # Tool-result messages carry the originating call's ID.
        tool_call_id = message.get("tool_call_id", "")
        if tool_call_id:
            tokens += self.count_tokens(tool_call_id)

        return tokens

    def count_tool_call_tokens(self, tool_call: Dict[str, Any]) -> int:
        """
        Count tokens in a tool call.

        Args:
            tool_call: Tool call dictionary

        Returns:
            Number of tokens
        """
        tokens = 0

        tokens += self.count_tokens(tool_call.get("id", ""))

        function = tool_call.get("function", {})
        if function:
            tokens += self.count_tokens(function.get("name", ""))

            # Arguments are a JSON-encoded string per the OpenAI schema.
            arguments = function.get("arguments", "")
            if arguments:
                tokens += self.count_tokens(arguments)

        return tokens

    def count_messages_tokens(self, messages: List[Dict[str, Any]]) -> int:
        """
        Count total tokens in a list of messages.

        Args:
            messages: List of message dictionaries

        Returns:
            Total number of tokens
        """
        return sum(self.count_message_tokens(message) for message in messages)

    def count_tools_tokens(self, tools: List[Dict[str, Any]]) -> int:
        """
        Count tokens in tool definitions.

        Args:
            tools: List of tool definition dictionaries

        Returns:
            Number of tokens (0 when no tools are given)
        """
        if not tools:
            return 0

        # Tool schemas are sent as JSON, so count the compact serialization.
        tools_json = json.dumps(tools, separators=(',', ':'))
        return self.count_tokens(tools_json)

    def count_request_tokens(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None
    ) -> int:
        """
        Count total tokens in a complete request (messages + tools).

        Args:
            messages: List of message dictionaries
            tools: Optional list of tool definitions

        Returns:
            Total number of tokens
        """
        total_tokens = self.count_messages_tokens(messages)

        if tools:
            total_tokens += self.count_tools_tokens(tools)

        return total_tokens
172
+
173
+
174
def get_token_counter(model: str = "gpt-4") -> TokenCounter:
    """Create a :class:`TokenCounter` for the given model.

    Args:
        model: Model name used to pick the tokenizer.

    Returns:
        A freshly constructed ``TokenCounter``.
    """
    counter = TokenCounter(model)
    return counter
@@ -0,0 +1,10 @@
1
+ """
2
+ Interface layer for todo.sh LLM agent.
3
+
4
+ This module contains user interfaces and presentation logic.
5
+ """
6
+
7
+ from .cli import CLI
8
+ from .tools import ToolCallHandler
9
+
10
+ __all__ = ["CLI", "ToolCallHandler"]