cognautic-cli 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,196 @@
1
+ """
2
+ Auto-continuation module for Cognautic CLI
3
+ Ensures AI automatically continues after tool execution without manual intervention
4
+ """
5
+
6
+ import asyncio
7
+ from typing import AsyncGenerator, Dict, Any, List
8
+
9
+
10
class AutoContinuationManager:
    """Manages automatic continuation of AI responses after tool execution.

    Tracks how many auto-continuation rounds have run in the current
    conversation turn and builds follow-up prompts that push the model to
    finish multi-step tasks without manual intervention.
    """

    # Maximum number of characters of command output embedded in a
    # continuation prompt before truncation.
    _MAX_OUTPUT_CHARS = 500

    def __init__(self, max_iterations: int = 50):
        """
        Initialize auto-continuation manager.

        Args:
            max_iterations: Maximum number of auto-continuation iterations
                per conversation turn, to prevent infinite loops.
        """
        self.max_iterations = max_iterations
        self.iteration_count = 0

    def reset(self):
        """Reset iteration counter for a new conversation turn."""
        self.iteration_count = 0

    def should_continue(self, tool_results: List[Dict[str, Any]], has_end_response: bool) -> bool:
        """
        Determine whether the AI should automatically continue.

        Note: this method has a side effect — when it returns True it
        consumes one iteration from the budget by incrementing
        ``iteration_count``.

        Args:
            tool_results: List of tool execution results.
            has_end_response: Whether the end_response tool was called.

        Returns:
            True if the AI should continue, False otherwise.
        """
        # end_response is the model's explicit stop signal.
        if has_end_response:
            return False

        # Safety valve against infinite continuation loops.
        if self.iteration_count >= self.max_iterations:
            return False

        # Continue only if tools actually executed; consume one iteration.
        if tool_results:
            self.iteration_count += 1
            return True

        return False

    def _summarize_tool_results(self, tool_results: List[Dict[str, Any]]) -> str:
        """Build a newline-joined, human-readable summary of tool results.

        Args:
            tool_results: List of tool execution results.

        Returns:
            One summary line per recognized result; unrecognized result
            types are silently skipped.
        """
        context_parts = []
        for result in tool_results:
            result_type = result.get('type', 'unknown')

            if result_type == 'command':
                cmd = result.get('command', 'unknown')
                output = result.get('output', '')
                # Truncate long outputs so the prompt stays compact.
                if len(output) > self._MAX_OUTPUT_CHARS:
                    output = output[:self._MAX_OUTPUT_CHARS] + "... [truncated]"
                context_parts.append(f"Command '{cmd}' executed with output: {output}")

            elif result_type in ['file_op', 'file_write']:
                context_parts.append("File operation completed successfully")

            elif result_type == 'file_read':
                file_path = result.get('file_path', 'unknown')
                context_parts.append(f"Read file: {file_path}")

            elif result_type == 'web_search':
                query = result.get('query', 'unknown')
                results_count = len(result.get('results', []))
                context_parts.append(f"Web search for '{query}' returned {results_count} results")

            elif result_type == 'web_fetch':
                # Previously web_fetch results influenced prompt selection but
                # were omitted from the context summary; include them now.
                url = result.get('url', 'unknown')
                context_parts.append(f"Fetched content from: {url}")

        return "\n".join(context_parts)

    def build_continuation_prompt(self, tool_results: List[Dict[str, Any]]) -> str:
        """
        Build an appropriate continuation prompt based on tool results.

        Args:
            tool_results: List of tool execution results.

        Returns:
            Continuation prompt string tailored to the kinds of tools
            that ran (web search > file ops > commands > generic).
        """
        # Categorize tool results to pick the most relevant template.
        has_file_ops = any(r.get('type') in ['file_op', 'file_write', 'file_read'] for r in tool_results)
        has_commands = any(r.get('type') == 'command' for r in tool_results)
        has_web_search = any(r.get('type') in ['web_search', 'web_fetch'] for r in tool_results)

        context = self._summarize_tool_results(tool_results)

        # Build appropriate prompt based on tool types.
        if has_web_search:
            return f"""The web search has been completed. Based on the results:

{context}

Continue with the next steps:
1. If you need to create files, create them now with the information from the search
2. If you need to run commands, execute them now
3. Continue until the task is FULLY complete
4. When EVERYTHING is done, call the end_response tool

Continue now:"""

        elif has_file_ops and not has_commands:
            return f"""Files have been created/modified:

{context}

Continue with the next steps:
1. If you need to create MORE files, create them now
2. If you need to install dependencies (npm install, pip install, etc.), run the commands now
3. If you need to configure anything else, do it now
4. When EVERYTHING is done, call the end_response tool

Continue now:"""

        elif has_commands:
            return f"""Commands have been executed:

{context}

Continue with the next steps:
1. If there are errors, fix them
2. If more setup is needed, do it now
3. If everything is working, provide final instructions
4. When EVERYTHING is done, call the end_response tool

Continue now:"""

        else:
            return f"""Tool execution completed:

{context}

Continue with any remaining work, then call end_response when fully done.

Continue now:"""

    async def generate_continuation(
        self,
        ai_provider,
        messages: List[Dict[str, str]],
        tool_results: List[Dict[str, Any]],
        model: str,
        config: Dict[str, Any]
    ) -> str:
        """
        Generate continuation response from AI.

        Args:
            ai_provider: AI provider instance exposing an async
                ``generate_response`` method.
            messages: Conversation history.
            tool_results: Tool execution results.
            model: Model name.
            config: Configuration dict (reads "max_tokens", "temperature").

        Returns:
            AI's continuation response, or a fallback message on empty
            output or any provider error (best-effort; never raises).
        """
        try:
            # Build continuation prompt from the tool results.
            continuation_prompt = self.build_continuation_prompt(tool_results)

            # Extend the conversation with a continuation exchange.
            continuation_messages = messages + [
                {"role": "assistant", "content": "I'll continue with the task."},
                {"role": "user", "content": continuation_prompt}
            ]

            # Normalize max_tokens: unset or any non-positive value
            # (0/-1 conventionally mean "unlimited") falls back to the
            # default cap. Previously only exactly 0 and -1 were handled,
            # so e.g. -5 leaked through to the provider.
            max_tokens = config.get("max_tokens")
            if max_tokens is None or max_tokens <= 0:
                max_tokens = 16384

            response = await ai_provider.generate_response(
                messages=continuation_messages,
                model=model,
                max_tokens=max_tokens,
                temperature=config.get("temperature", 0.7)
            )

            # Guard against an empty/whitespace-only model response.
            if not response or not response.strip():
                return "Continuing with the task..."

            return response
        except Exception as e:
            # Best-effort by design: log the error and keep the loop alive
            # with a fallback message rather than crashing the CLI.
            print(f"[Auto-continuation error: {e}]")
            return "Continuing with the task..."