todo_agent-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
todo_agent/__init__.py ADDED
@@ -0,0 +1,14 @@
+ """
+ Todo Agent - A natural language interface for todo.sh task management.
+ """
+
+ try:
+     from ._version import version as __version__
+ except ImportError:
+     __version__ = "unknown"
+
+ __author__ = "codeprimate"
+
+ from .main import main
+
+ __all__ = ["main"]
todo_agent/_version.py ADDED
@@ -0,0 +1,34 @@
+ # file generated by setuptools-scm
+ # don't change, don't track in version control
+
+ __all__ = [
+     "__version__",
+     "__version_tuple__",
+     "version",
+     "version_tuple",
+     "__commit_id__",
+     "commit_id",
+ ]
+
+ TYPE_CHECKING = False
+ if TYPE_CHECKING:
+     from typing import Tuple
+     from typing import Union
+
+     VERSION_TUPLE = Tuple[Union[int, str], ...]
+     COMMIT_ID = Union[str, None]
+ else:
+     VERSION_TUPLE = object
+     COMMIT_ID = object
+
+ version: str
+ __version__: str
+ __version_tuple__: VERSION_TUPLE
+ version_tuple: VERSION_TUPLE
+ commit_id: COMMIT_ID
+ __commit_id__: COMMIT_ID
+
+ __version__ = version = '0.1.0'
+ __version_tuple__ = version_tuple = (0, 1, 0)
+
+ __commit_id__ = commit_id = None
@@ -0,0 +1,16 @@
+ """
+ Core domain layer for todo.sh LLM agent.
+
+ This module contains the business logic and domain entities
+ for managing todo.sh operations.
+ """
+
+ from .exceptions import InvalidTaskFormatError, TaskNotFoundError, TodoError
+ from .todo_manager import TodoManager
+
+ __all__ = [
+     "TodoManager",
+     "TodoError",
+     "TaskNotFoundError",
+     "InvalidTaskFormatError",
+ ]
@@ -0,0 +1,310 @@
+ """
+ Conversation management for todo.sh LLM agent.
+ """
+
+ import time
+ from typing import Any, Dict, List, Optional
+ from dataclasses import dataclass
+ from enum import Enum
+
+ try:
+     from todo_agent.infrastructure.token_counter import get_token_counter
+ except ImportError:
+     from infrastructure.token_counter import get_token_counter
+
+
+ class MessageRole(Enum):
+     USER = "user"
+     ASSISTANT = "assistant"
+     TOOL = "tool"
+     SYSTEM = "system"
+
+
+ @dataclass
+ class ConversationMessage:
+     role: MessageRole
+     content: str
+     tool_call_id: Optional[str] = None
+     timestamp: Optional[float] = None
+     tool_calls: Optional[List[Dict]] = None
+     thinking_time: Optional[float] = None
+     token_count: Optional[int] = None  # Cache token count for efficiency
+
+
+ class ConversationManager:
+     """Manages conversation state and memory for LLM interactions."""
+
+     def __init__(
+         self, max_tokens: int = 4000, max_messages: int = 50, model: str = "gpt-4"
+     ):
+         self.history: List[ConversationMessage] = []
+         self.max_tokens = max_tokens
+         self.max_messages = max_messages
+         self.system_prompt: Optional[str] = None
+         self.token_counter = get_token_counter(model)
+         self._total_tokens = 0  # Running total of tokens in conversation
+
+     def add_message(
+         self,
+         role: MessageRole,
+         content: str,
+         tool_call_id: Optional[str] = None,
+         thinking_time: Optional[float] = None,
+     ) -> None:
+         """
+         Add a message to conversation history.
+
+         Args:
+             role: Message role (user, assistant, tool, system)
+             content: Message content
+             tool_call_id: Optional tool call identifier for tool messages
+             thinking_time: Optional thinking time in seconds for assistant messages
+         """
+         # Calculate token count for this message
+         token_count = self._estimate_tokens(content)
+
+         message = ConversationMessage(
+             role=role,
+             content=content,
+             tool_call_id=tool_call_id,
+             timestamp=time.time(),
+             thinking_time=thinking_time,
+             token_count=token_count,
+         )
+
+         self.history.append(message)
+         self._total_tokens += token_count
+         self._trim_if_needed()
+
+     def get_messages(self, include_tool_calls: bool = True) -> List[Dict[str, Any]]:
+         """
+         Get conversation messages in OpenRouter API format.
+
+         Args:
+             include_tool_calls: Whether to include tool call messages
+
+         Returns:
+             List of message dictionaries for API consumption
+         """
+         messages = []
+
+         # Add conversation messages (system prompt is already in history)
+         for msg in self.history:
+             if msg.role == MessageRole.TOOL and not include_tool_calls:
+                 continue
+
+             message_dict = {"role": msg.role.value, "content": msg.content}
+
+             # Handle tool calls in assistant messages
+             if msg.role == MessageRole.ASSISTANT and msg.tool_calls:
+                 message_dict["tool_calls"] = msg.tool_calls
+
+             # Handle tool call IDs
+             if msg.tool_call_id:
+                 message_dict["tool_call_id"] = msg.tool_call_id
+
+             messages.append(message_dict)
+
+         return messages
+
+     def _estimate_tokens(self, text: str) -> int:
+         """
+         Count tokens in text using accurate tokenization.
+
+         Args:
+             text: Text to count tokens for
+
+         Returns:
+             Number of tokens
+         """
+         return self.token_counter.count_tokens(text)
+
+     def _get_conversation_tokens(self) -> int:
+         """Get total tokens in conversation using cached count."""
+         return self._total_tokens
+
+     def _remove_message_at_index(self, index: int) -> None:
+         """
+         Remove a message at the specified index and update token count.
+
+         Args:
+             index: Index of message to remove
+         """
+         if 0 <= index < len(self.history):
+             message = self.history[index]
+             # Decrement total token count by this message's token count
+             if message.token_count is not None:
+                 self._total_tokens -= message.token_count
+             self.history.pop(index)
+
+     def _trim_if_needed(self) -> None:
+         """
+         Trim conversation history if it exceeds token or message limits.
+         Preserves most recent messages and system prompt.
+         """
+         # Check message count limit
+         if len(self.history) > self.max_messages:
+             # Keep system prompt and most recent messages
+             system_messages = [
+                 msg for msg in self.history if msg.role == MessageRole.SYSTEM
+             ]
+             recent_messages = self.history[-self.max_messages:]
+             self.history = system_messages + recent_messages[-self.max_messages + len(system_messages):]
+
+             # Recalculate total tokens after message count trimming
+             self._recalculate_total_tokens()
+
+         # Check token limit - remove oldest non-system messages until under limit
+         while self._total_tokens > self.max_tokens and len(self.history) > 2:
+             # Find oldest non-system message to remove
+             for i, msg in enumerate(self.history):
+                 if msg.role != MessageRole.SYSTEM:
+                     self._remove_message_at_index(i)
+                     break
+             else:
+                 # No non-system messages found, break to avoid infinite loop
+                 break
+
+     def _recalculate_total_tokens(self) -> None:
+         """Recalculate total token count from scratch (used after major restructuring)."""
+         self._total_tokens = 0
+         for msg in self.history:
+             if msg.token_count is not None:
+                 self._total_tokens += msg.token_count
+             else:
+                 # Recalculate token count if not cached
+                 token_count = self._estimate_tokens(msg.content)
+                 msg.token_count = token_count
+                 self._total_tokens += token_count
+
+     def clear_conversation(self, keep_system: bool = True) -> None:
+         """
+         Clear conversation history.
+
+         Args:
+             keep_system: Whether to preserve system prompt
+         """
+         if keep_system:
+             system_messages = [
+                 msg for msg in self.history if msg.role == MessageRole.SYSTEM
+             ]
+             self.history = system_messages
+             # Recalculate total tokens for remaining system messages
+             self._recalculate_total_tokens()
+         else:
+             self.history = []
+             self._total_tokens = 0
+
+     def set_system_prompt(self, prompt: str) -> None:
+         """Set or update the system prompt."""
+         # Set the system prompt attribute
+         self.system_prompt = prompt
+
+         # Remove existing system messages and update token count
+         for i in range(len(self.history) - 1, -1, -1):
+             if self.history[i].role == MessageRole.SYSTEM:
+                 self._remove_message_at_index(i)
+
+         # Add new system prompt at beginning
+         token_count = self._estimate_tokens(prompt)
+         system_message = ConversationMessage(
+             role=MessageRole.SYSTEM, content=prompt, token_count=token_count
+         )
+         self.history.insert(0, system_message)
+         self._total_tokens += token_count
+
+     def get_conversation_summary(self) -> Dict[str, Any]:
+         """
+         Get conversation statistics and summary.
+
+         Returns:
+             Dictionary with conversation metrics
+         """
+         # Filter out messages with None timestamps for min/max calculations
+         messages_with_timestamps = [
+             msg for msg in self.history if msg.timestamp is not None
+         ]
+
+         # Calculate thinking time statistics
+         assistant_messages_with_time = [
+             msg
+             for msg in self.history
+             if msg.role == MessageRole.ASSISTANT and msg.thinking_time is not None
+         ]
+         thinking_times = [msg.thinking_time for msg in assistant_messages_with_time]
+
+         thinking_stats = {}
+         if thinking_times:
+             thinking_stats = {
+                 "total_thinking_time": sum(thinking_times),
+                 "average_thinking_time": sum(thinking_times) / len(thinking_times),
+                 "min_thinking_time": min(thinking_times),
+                 "max_thinking_time": max(thinking_times),
+                 "thinking_time_count": len(thinking_times),
+             }
+
+         return {
+             "total_messages": len(self.history),
+             "estimated_tokens": self._total_tokens,
+             "user_messages": len(
+                 [msg for msg in self.history if msg.role == MessageRole.USER]
+             ),
+             "assistant_messages": len(
+                 [msg for msg in self.history if msg.role == MessageRole.ASSISTANT]
+             ),
+             "tool_messages": len(
+                 [msg for msg in self.history if msg.role == MessageRole.TOOL]
+             ),
+             "oldest_message": (
+                 min([msg.timestamp for msg in messages_with_timestamps])
+                 if messages_with_timestamps
+                 else None
+             ),
+             "newest_message": (
+                 max([msg.timestamp for msg in messages_with_timestamps])
+                 if messages_with_timestamps
+                 else None
+             ),
+             **thinking_stats,
+         }
+
+     def add_tool_call_sequence(
+         self, tool_calls: List[Dict], tool_results: List[Dict]
+     ) -> None:
+         """
+         Add a complete tool call sequence to conversation history.
+
+         Args:
+             tool_calls: List of tool call requests from LLM
+             tool_results: List of tool execution results
+         """
+         # Add tool calls as assistant message with tool_calls field
+         if tool_calls:
+             # Create a single assistant message with all tool calls
+             self.add_message(
+                 role=MessageRole.ASSISTANT,
+                 content="",  # Tool calls don't have content
+                 tool_call_id=None,  # This will be handled specially in get_messages
+             )
+             # Store tool calls for this assistant message
+             self.history[-1].tool_calls = tool_calls
+
+         # Add tool results
+         for result in tool_results:
+             self.add_message(
+                 role=MessageRole.TOOL,
+                 content=result.get("output", ""),
+                 tool_call_id=result.get("tool_call_id"),
+             )
+
+     def get_recent_context(self, num_messages: int = 5) -> List[ConversationMessage]:
+         """
+         Get recent conversation context for analysis.
+
+         Args:
+             num_messages: Number of recent messages to return
+
+         Returns:
+             List of recent conversation messages
+         """
+         return self.history[-num_messages:] if self.history else []
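The ConversationManager above caches a token count per message and trims the oldest non-system messages once max_tokens or max_messages is exceeded. A minimal usage sketch follows; the module path and the token-counter backend are assumptions, since neither is shown in this diff.

    from todo_agent.core.conversation_manager import ConversationManager, MessageRole  # path assumed

    conv = ConversationManager(max_tokens=4000, max_messages=50, model="gpt-4")
    conv.set_system_prompt("You are a todo.sh assistant.")
    conv.add_message(MessageRole.USER, "add 'buy milk' to my list")

    payload = conv.get_messages()            # OpenRouter-style message dicts
    stats = conv.get_conversation_summary()  # message counts, estimated_tokens, thinking-time stats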
@@ -0,0 +1,27 @@
+ """
+ Domain-specific exceptions for todo.sh operations.
+ """
+
+
+ class TodoError(Exception):
+     """Base exception for todo operations."""
+
+     pass
+
+
+ class TaskNotFoundError(TodoError):
+     """Task not found in todo file."""
+
+     pass
+
+
+ class InvalidTaskFormatError(TodoError):
+     """Invalid task format."""
+
+     pass
+
+
+ class TodoShellError(TodoError):
+     """Subprocess execution error."""
+
+     pass
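Because TaskNotFoundError, InvalidTaskFormatError, and TodoShellError all inherit from TodoError, callers can catch the base class for uniform error handling. A brief sketch, assuming a hypothetical manager object that raises these exceptions:

    try:
        manager.complete_task(42)
    except TaskNotFoundError:
        print("No task with that number.")
    except TodoError as exc:  # also covers InvalidTaskFormatError and TodoShellError
        print(f"todo.sh operation failed: {exc}")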
@@ -0,0 +1,194 @@
+ """
+ Todo.sh operations orchestration and business logic.
+ """
+
+ from typing import Optional
+ from datetime import datetime
+
+
+ class TodoManager:
+     """Orchestrates todo.sh operations with business logic."""
+
+     def __init__(self, todo_shell):
+         self.todo_shell = todo_shell
+
+     def add_task(
+         self,
+         description: str,
+         priority: Optional[str] = None,
+         project: Optional[str] = None,
+         context: Optional[str] = None,
+         due: Optional[str] = None,
+     ) -> str:
+         """Add new task with explicit project/context parameters."""
+         # Build the full task description with priority, project, and context
+         full_description = description
+
+         if priority:
+             full_description = f"({priority}) {full_description}"
+
+         if project:
+             full_description = f"{full_description} +{project}"
+
+         if context:
+             full_description = f"{full_description} @{context}"
+
+         if due:
+             full_description = f"{full_description} due:{due}"
+
+         result = self.todo_shell.add(full_description)
+         return f"Added task: {full_description}"
+
+     def list_tasks(self, filter: Optional[str] = None) -> str:
+         """List tasks with optional filtering."""
+         result = self.todo_shell.list_tasks(filter)
+         if not result.strip():
+             return "No tasks found."
+
+         # Return the raw todo.txt format for the LLM to format conversationally
+         # The LLM will convert this into natural language in its response
+         return result
+
+     def complete_task(self, task_number: int) -> str:
+         """Mark task complete by line number."""
+         result = self.todo_shell.complete(task_number)
+         return f"Completed task {task_number}: {result}"
+
+     def get_overview(self, **kwargs) -> str:
+         """Show current task statistics."""
+         tasks = self.todo_shell.list_tasks()
+         completed = self.todo_shell.list_completed()
+
+         task_count = len([line for line in tasks.split('\n') if line.strip()]) if tasks.strip() else 0
+         completed_count = len([line for line in completed.split('\n') if line.strip()]) if completed.strip() else 0
+
+         return f"Task Overview:\n- Active tasks: {task_count}\n- Completed tasks: {completed_count}"
+
+     def replace_task(self, task_number: int, new_description: str) -> str:
+         """Replace entire task content."""
+         result = self.todo_shell.replace(task_number, new_description)
+         return f"Replaced task {task_number}: {result}"
+
+     def append_to_task(self, task_number: int, text: str) -> str:
+         """Add text to end of existing task."""
+         result = self.todo_shell.append(task_number, text)
+         return f"Appended to task {task_number}: {result}"
+
+     def prepend_to_task(self, task_number: int, text: str) -> str:
+         """Add text to beginning of existing task."""
+         result = self.todo_shell.prepend(task_number, text)
+         return f"Prepended to task {task_number}: {result}"
+
+     def delete_task(self, task_number: int, term: Optional[str] = None) -> str:
+         """Delete entire task or specific term from task."""
+         result = self.todo_shell.delete(task_number, term)
+         if term:
+             return f"Removed '{term}' from task {task_number}: {result}"
+         else:
+             return f"Deleted task {task_number}: {result}"
+
+     def set_priority(self, task_number: int, priority: str) -> str:
+         """Set or change task priority (A-Z)."""
+         result = self.todo_shell.set_priority(task_number, priority)
+         return f"Set priority {priority} for task {task_number}: {result}"
+
+     def remove_priority(self, task_number: int) -> str:
+         """Remove priority from task."""
+         result = self.todo_shell.remove_priority(task_number)
+         return f"Removed priority from task {task_number}: {result}"
+
+     def list_projects(self, **kwargs) -> str:
+         """List all available projects in todo.txt."""
+         result = self.todo_shell.list_projects()
+         if not result.strip():
+             return "No projects found."
+         return result
+
+     def list_contexts(self, **kwargs) -> str:
+         """List all available contexts in todo.txt."""
+         result = self.todo_shell.list_contexts()
+         if not result.strip():
+             return "No contexts found."
+         return result
+
+     def list_completed_tasks(
+         self,
+         filter: Optional[str] = None,
+         project: Optional[str] = None,
+         context: Optional[str] = None,
+         text_search: Optional[str] = None,
+         date_from: Optional[str] = None,
+         date_to: Optional[str] = None,
+         **kwargs
+     ) -> str:
+         """List completed tasks with optional filtering.
+
+         Args:
+             filter: Raw filter string (e.g., '+work', '@office')
+             project: Filter by project (without + symbol)
+             context: Filter by context (without @ symbol)
+             text_search: Search for text in task descriptions
+             date_from: Filter tasks completed from this date (YYYY-MM-DD)
+             date_to: Filter tasks completed until this date (YYYY-MM-DD)
+         """
+         # Build filter string from individual parameters
+         filter_parts = []
+
+         if filter:
+             filter_parts.append(filter)
+
+         if project:
+             filter_parts.append(f"+{project}")
+
+         if context:
+             filter_parts.append(f"@{context}")
+
+         if text_search:
+             filter_parts.append(text_search)
+
+         # Handle date filtering - todo.sh supports direct date pattern matching
+         # LIMITATIONS: Due to todo.sh constraints, complex date ranges are not supported.
+         # The filtering behavior is:
+         # - date_from + date_to: Uses year-month pattern (YYYY-MM) from date_from for month-based filtering
+         # - date_from only: Uses exact date pattern (YYYY-MM-DD) for precise date matching
+         # - date_to only: Uses year-month pattern (YYYY-MM) from date_to for month-based filtering
+         # - Complex ranges spanning multiple months are not supported by todo.sh
+         if date_from and date_to:
+             # For a date range, we'll use the year-month pattern from date_from
+             # This will match all tasks in that month
+             filter_parts.append(date_from[:7])  # YYYY-MM format
+         elif date_from:
+             # For single date, use the full date pattern
+             filter_parts.append(date_from)
+         elif date_to:
+             # For end date only, we'll use the year-month pattern
+             # This will match all tasks in that month
+             filter_parts.append(date_to[:7])  # YYYY-MM format
+
+         # Combine all filters
+         combined_filter = " ".join(filter_parts) if filter_parts else None
+
+         result = self.todo_shell.list_completed(combined_filter)
+         if not result.strip():
+             return "No completed tasks found matching the criteria."
+         return result
+
+     def move_task(self, task_number: int, destination: str, source: Optional[str] = None) -> str:
+         """Move task from source to destination file."""
+         result = self.todo_shell.move(task_number, destination, source)
+         return f"Moved task {task_number} to {destination}: {result}"
+
+     def archive_tasks(self, **kwargs) -> str:
+         """Archive completed tasks."""
+         result = self.todo_shell.archive()
+         return f"Archived tasks: {result}"
+
+     def deduplicate_tasks(self, **kwargs) -> str:
+         """Remove duplicate tasks."""
+         result = self.todo_shell.deduplicate()
+         return f"Deduplicated tasks: {result}"
+
+     def get_current_datetime(self, **kwargs) -> str:
+         """Get the current date and time."""
+         now = datetime.now()
+         return f"Current date and time: {now.strftime('%Y-%m-%d %H:%M:%S')} ({now.strftime('%A, %B %d, %Y at %I:%M %p')})"
@@ -0,0 +1,11 @@
+ """
+ Infrastructure layer for todo.sh LLM agent.
+
+ This module contains external integrations and system operations.
+ """
+
+ from .config import Config
+ from .openrouter_client import OpenRouterClient
+ from .todo_shell import TodoShell
+
+ __all__ = ["OpenRouterClient", "TodoShell", "Config"]
@@ -0,0 +1,59 @@
+ """
+ Configuration management for todo.sh LLM agent.
+ """
+
+ import os
+
+
+ class Config:
+     """Environment and configuration management."""
+
+     DEFAULT_MODEL = "openai/gpt-4o-mini"
+     # DEFAULT_MODEL = "mistralai/mistral-small-3.1-24b-instruct"
+
+     def __init__(self):
+         # Provider selection
+         self.provider = os.getenv("LLM_PROVIDER", "openrouter")
+
+         # OpenRouter configuration
+         self.openrouter_api_key = os.getenv("OPENROUTER_API_KEY")
+         self.openrouter_model = os.getenv("OPENROUTER_MODEL", self.DEFAULT_MODEL)
+
+         # Ollama configuration
+         self.ollama_base_url = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+         self.ollama_model = os.getenv("OLLAMA_MODEL", "mistral-small3.1")
+
+         # Common configuration
+         self.model = self._get_model_for_provider()
+         self.log_level = os.getenv("LOG_LEVEL", "INFO")
+         self.todo_file_path = os.getenv("TODO_FILE", "todo.txt")
+
+     def _get_model_for_provider(self) -> str:
+         """Get model name for current provider."""
+         if self.provider == "openrouter":
+             return self.openrouter_model
+         elif self.provider == "ollama":
+             return self.ollama_model
+         return self.openrouter_model  # fallback
+
+     def validate(self) -> bool:
+         """Validate required configuration."""
+         if self.provider == "openrouter":
+             if not self.openrouter_api_key:
+                 raise ValueError("OPENROUTER_API_KEY environment variable is required for OpenRouter provider")
+         elif self.provider == "ollama":
+             # Ollama doesn't require API key, but we could validate the base URL is reachable
+             pass
+         else:
+             raise ValueError(f"Unsupported LLM provider: {self.provider}")
+         return True
+
+     @property
+     def todo_dir(self) -> str:
+         """Get todo.sh directory path."""
+         return os.path.dirname(self.todo_file_path)
+
+     @property
+     def done_file_path(self) -> str:
+         """Get done.txt file path."""
+         return os.path.join(self.todo_dir, "done.txt")
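Config is driven entirely by environment variables (LLM_PROVIDER, OPENROUTER_API_KEY, OPENROUTER_MODEL, OLLAMA_BASE_URL, OLLAMA_MODEL, LOG_LEVEL, TODO_FILE), and validate() raises ValueError when the OpenRouter key is missing or the provider is unknown. A short sketch of the expected behavior, using the Config re-export from the infrastructure package shown above:

    import os
    from todo_agent.infrastructure import Config

    os.environ["LLM_PROVIDER"] = "openrouter"
    os.environ["OPENROUTER_API_KEY"] = "your-api-key"     # placeholder value
    os.environ["TODO_FILE"] = "/home/user/todo/todo.txt"  # example path

    cfg = Config()
    cfg.validate()              # raises ValueError if the key is missing
    print(cfg.model)            # "openai/gpt-4o-mini" unless OPENROUTER_MODEL overrides it
    print(cfg.done_file_path)   # "/home/user/todo/done.txt"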