chatterer 0.1.25__py3-none-any.whl → 0.1.27__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in that registry.
Files changed (45)
  1. chatterer/__init__.py +87 -97
  2. chatterer/common_types/__init__.py +21 -21
  3. chatterer/common_types/io.py +19 -19
  4. chatterer/constants.py +5 -0
  5. chatterer/examples/__main__.py +75 -75
  6. chatterer/examples/any2md.py +83 -85
  7. chatterer/examples/pdf2md.py +231 -338
  8. chatterer/examples/pdf2txt.py +52 -54
  9. chatterer/examples/ppt.py +487 -486
  10. chatterer/examples/pw.py +141 -143
  11. chatterer/examples/snippet.py +54 -56
  12. chatterer/examples/transcribe.py +192 -192
  13. chatterer/examples/upstage.py +87 -89
  14. chatterer/examples/web2md.py +80 -80
  15. chatterer/interactive.py +422 -354
  16. chatterer/language_model.py +530 -536
  17. chatterer/messages.py +21 -21
  18. chatterer/tools/__init__.py +46 -46
  19. chatterer/tools/caption_markdown_images.py +388 -384
  20. chatterer/tools/citation_chunking/__init__.py +3 -3
  21. chatterer/tools/citation_chunking/chunks.py +51 -53
  22. chatterer/tools/citation_chunking/citation_chunker.py +117 -118
  23. chatterer/tools/citation_chunking/citations.py +284 -285
  24. chatterer/tools/citation_chunking/prompt.py +157 -157
  25. chatterer/tools/citation_chunking/reference.py +26 -26
  26. chatterer/tools/citation_chunking/utils.py +138 -138
  27. chatterer/tools/convert_pdf_to_markdown.py +636 -645
  28. chatterer/tools/convert_to_text.py +446 -446
  29. chatterer/tools/upstage_document_parser.py +704 -705
  30. chatterer/tools/webpage_to_markdown.py +739 -739
  31. chatterer/tools/youtube.py +146 -147
  32. chatterer/utils/__init__.py +15 -15
  33. chatterer/utils/base64_image.py +349 -293
  34. chatterer/utils/bytesio.py +59 -59
  35. chatterer/utils/code_agent.py +237 -237
  36. chatterer/utils/imghdr.py +145 -148
  37. {chatterer-0.1.25.dist-info → chatterer-0.1.27.dist-info}/METADATA +377 -390
  38. chatterer-0.1.27.dist-info/RECORD +43 -0
  39. chatterer/strategies/__init__.py +0 -13
  40. chatterer/strategies/atom_of_thoughts.py +0 -975
  41. chatterer/strategies/base.py +0 -14
  42. chatterer-0.1.25.dist-info/RECORD +0 -45
  43. {chatterer-0.1.25.dist-info → chatterer-0.1.27.dist-info}/WHEEL +0 -0
  44. {chatterer-0.1.25.dist-info → chatterer-0.1.27.dist-info}/entry_points.txt +0 -0
  45. {chatterer-0.1.25.dist-info → chatterer-0.1.27.dist-info}/top_level.txt +0 -0
chatterer/interactive.py CHANGED
@@ -1,354 +1,422 @@
- from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, TypeVar
-
- from langchain_core.messages import (
-     AIMessage,
-     BaseMessage,
-     HumanMessage,
-     SystemMessage,
- )
- from langchain_core.runnables import RunnableConfig
- from pydantic import BaseModel, Field
- from rich.console import Console
- from rich.panel import Panel
- from rich.prompt import Prompt
-
- from .language_model import Chatterer
- from .utils.code_agent import (
-     DEFAULT_CODE_GENERATION_PROMPT,
-     DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
-     DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
-     CodeExecutionResult,
-     FunctionSignature,
-     augment_prompt_for_toolcall,
-     get_default_repl_tool,
- )
-
- if TYPE_CHECKING:
-     # Import only for type hinting to avoid circular dependencies if necessary
-     from langchain_experimental.tools.python.tool import PythonAstREPLTool
-
- T = TypeVar("T", bound=BaseModel)
-
- # --- Pydantic Models ---
-
-
- class ThinkBeforeSpeak(BaseModel):
-     """
-     Analyze the user's request and formulate an initial plan.
-     This involves understanding the core task and breaking it down into logical steps.
-     """
-
-     task: str = Field(description="A concise summary of the user's overall goal or question.")
-     plans: list[str] = Field(
-         description="A sequence of actionable steps required to address the user's task. "
-         "Each step should be clear and logical. Indicate if a step likely requires code execution."
-     )
-
-
- class IsToolCallNeeded(BaseModel):
-     """
-     Determine if executing Python code is the necessary *next* action.
-     Carefully review the most recent messages, especially the last code execution output and review (if any).
-     """
-
-     is_tool_call_needed: bool = Field(
-         description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
-     )
-
-
- class ReviewOnToolcall(BaseModel):
-     """
-     Evaluate the outcome of the Python code execution and decide the subsequent action.
-     Critically assess if the execution achieved the intended goal and if the output is usable.
-     """
-
-     review_on_code_execution: str = Field(
-         description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
-     )
-     next_action: str = Field(
-         description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
-     )
-     is_task_completed: bool = Field(
-         description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
-     )
-
-
- class Think(BaseModel):
-     """
-     Engage in reasoning when code execution is not the immediate next step.
-     This could involve synthesizing information, preparing the final answer, or identifying missing information.
-     """
-
-     my_thinking: str = Field(
-         description="Explain your reasoning process. Why is code execution not needed now? "
-         "What information are you using from the context? How are you planning to formulate the response or proceed?"
-     )
-     next_action: str = Field(
-         description="Describe the *immediate next action* resulting from this thinking process. "
-         "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
-         "'Summarize the findings so far'."
-     )
-     # --- MODIFIED DESCRIPTION ---
-     is_task_completed: bool = Field(
-         description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
-     )
-     # --- END OF MODIFICATION ---
-
-
- # --- Interactive Shell Function ---
-
-
- def interactive_shell(
-     chatterer: Chatterer,
-     system_instruction: BaseMessage | Iterable[BaseMessage] = ([
-         SystemMessage(
-             "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
-         ),
-     ]),
-     repl_tool: Optional["PythonAstREPLTool"] = None,
-     prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
-     additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
-     function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
-     function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
-     config: Optional[RunnableConfig] = None,
-     stop: Optional[list[str]] = None,
-     **kwargs: Any,
- ) -> None:
-     try:
-         console = Console()
-         # Style settings
-         AI_STYLE = "bold bright_blue"
-         EXECUTED_CODE_STYLE = "bold bright_yellow"
-         OUTPUT_STYLE = "bold bright_cyan"
-         THINKING_STYLE = "dim white"
-     except ImportError:
-         raise ImportError("Rich library not found. Please install it: pip install rich")
-
-     # --- Shell Initialization and Main Loop ---
-     if repl_tool is None:
-         repl_tool = get_default_repl_tool()
-
-     def set_locals(**kwargs: object) -> None:
-         """Set local variables for the REPL tool."""
-         if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
-             repl_tool.locals = {}
-         for key, value in kwargs.items():
-             repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
-
-     def respond(messages: list[BaseMessage]) -> str:
-         response = ""
-         with console.status("[bold yellow]AI is thinking..."):
-             response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
-             current_content = ""
-             for chunk in chatterer.generate_stream(messages=messages):
-                 current_content += chunk
-                 # Update renderable (might not display smoothly without Live)
-                 response_panel.renderable = current_content
-             response = current_content
-         console.print(Panel(response, title="AI Response", style=AI_STYLE))
-         return response.strip()
-
-     def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
-         task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
-             think_before_speak.plans
-         )
-         console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
-         session_messages: list[BaseMessage] = [
-             AIMessage(
-                 content=f"Okay, I understand the task. Here's my plan:\n"
-                 f"- Task Summary: {think_before_speak.task}\n"
-                 f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
-             )
-         ]
-
-         while True:
-             current_context = context + session_messages
-             is_tool_call_needed: IsToolCallNeeded = chatterer(
-                 augment_prompt_for_toolcall(
-                     function_signatures=function_signatures,
-                     messages=current_context,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                 ),
-                 IsToolCallNeeded,
-                 config=config,
-                 stop=stop,
-                 **kwargs,
-             )
-
-             if is_tool_call_needed.is_tool_call_needed:
-                 # --- Code Execution Path ---
-                 set_locals(__context__=context, __session__=session_messages)
-                 code_execution: CodeExecutionResult = chatterer.exec(
-                     messages=current_context,
-                     repl_tool=repl_tool,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_signatures=function_signatures,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 code_block_display = (
-                     f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
-                     f"[bold]Output:[/bold]\n{code_execution.output}"
-                 )
-                 console.print(
-                     Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
-                 )
-                 tool_call_message = AIMessage(
-                     content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
-                 )
-                 session_messages.append(tool_call_message)
-
-                 # --- Review Code Execution ---
-                 current_context_after_exec = context + session_messages
-                 decision = chatterer(
-                     augment_prompt_for_toolcall(
-                         function_signatures=function_signatures,
-                         messages=current_context_after_exec,
-                         prompt_for_code_invoke=prompt_for_code_invoke,
-                         function_reference_prefix=function_reference_prefix,
-                         function_reference_seperator=function_reference_seperator,
-                     ),
-                     ReviewOnToolcall,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 review_text = (
-                     f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
-                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
-                 )
-                 console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
-                 review_message = AIMessage(
-                     content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
-                     f"**Next Action:** {decision.next_action.strip()}"
-                 )
-                 session_messages.append(review_message)
-
-                 # --- Check Completion after Review ---
-                 if decision.is_task_completed:
-                     console.print(
-                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
-                     )
-                     break # Exit loop
-             else:
-                 # --- Thinking Path (No Code Needed) ---
-                 current_context_before_think = context + session_messages
-                 decision = chatterer(
-                     augment_prompt_for_toolcall(
-                         function_signatures=function_signatures,
-                         messages=current_context_before_think,
-                         prompt_for_code_invoke=prompt_for_code_invoke,
-                         function_reference_prefix=function_reference_prefix,
-                         function_reference_seperator=function_reference_seperator,
-                     ),
-                     Think,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 thinking_text = (
-                     f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
-                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
-                 )
-                 console.print(
-                     Panel(
-                         thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
-                     )
-                 )
-                 thinking_message = AIMessage(
-                     content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
-                     f"**Next Action:** {decision.next_action.strip()}"
-                 )
-                 session_messages.append(thinking_message)
-
-                 # --- Check Completion after Thinking ---
-                 # This check now relies on the LLM correctly interpreting the updated
-                 # description for Think.is_task_completed
-                 if decision.is_task_completed:
-                     console.print(
-                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
-                     )
-                     break # Exit loop
-
-         # --- End of Loop ---
-         # Generate and display the final response based on the *entire* session history
-         final_response_messages = context + session_messages
-         response: str = respond(final_response_messages)
-         # Add the final AI response to the main context
-         context.append(AIMessage(content=response))
-
-     if additional_callables:
-         if callable(additional_callables):
-             additional_callables = [additional_callables]
-
-         function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
-     else:
-         function_signatures: list[FunctionSignature] = []
-
-     context: list[BaseMessage] = []
-     if system_instruction:
-         if isinstance(system_instruction, BaseMessage):
-             context.append(system_instruction)
-         elif isinstance(system_instruction, str):
-             context.append(SystemMessage(content=system_instruction))
-         else:
-             context.extend(list(system_instruction))
-
-     console.print(
-         Panel(
-             "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
-             title="Welcome",
-             style=AI_STYLE,
-             border_style="blue",
-         )
-     )
-
-     while True:
-         try:
-             user_input = Prompt.ask("[bold green]You[/bold green]")
-         except EOFError:
-             user_input = "exit"
-
-         if user_input.strip().lower() in ["quit", "exit"]:
-             console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
-             break
-
-         context.append(HumanMessage(content=user_input.strip()))
-
-         try:
-             # Initial planning step
-             initial_plan_decision = chatterer(
-                 augment_prompt_for_toolcall(
-                     function_signatures=function_signatures,
-                     messages=context,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                 ),
-                 ThinkBeforeSpeak,
-                 config=config,
-                 stop=stop,
-                 **kwargs,
-             )
-             # Execute the task completion loop
-             complete_task(initial_plan_decision)
-
-         except Exception as e:
-             import traceback
-
-             console.print(
-                 Panel(
-                     f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
-                     title="Error",
-                     border_style="red",
-                 )
-             )
-
-
- if __name__ == "__main__":
-     interactive_shell(chatterer=Chatterer.openai())
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Callable,
+     Iterable,
+     Optional,
+     TypeVar,
+ )
+
+ from langchain_core.messages import (
+     AIMessage,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+ )
+ from langchain_core.runnables import (
+     RunnableConfig,
+ )
+ from pydantic import BaseModel, Field
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.prompt import Prompt
+
+ from .language_model import Chatterer
+ from .utils.code_agent import (
+     DEFAULT_CODE_GENERATION_PROMPT,
+     DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+     DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+     CodeExecutionResult,
+     FunctionSignature,
+     augment_prompt_for_toolcall,
+     get_default_repl_tool,
+ )
+
+ if TYPE_CHECKING:
+     # Import only for type hinting to avoid circular dependencies if necessary
+     from langchain_experimental.tools.python.tool import (
+         PythonAstREPLTool,
+     )
+
+ T = TypeVar("T", bound=BaseModel)
+
+ # --- Pydantic Models ---
+
+
+ class ThinkBeforeSpeak(BaseModel):
+     """
+     Analyze the user's request and formulate an initial plan.
+     This involves understanding the core task and breaking it down into logical steps.
+     """
+
+     task: str = Field(description="A concise summary of the user's overall goal or question.")
+     plans: list[str] = Field(
+         description="A sequence of actionable steps required to address the user's task. "
+         "Each step should be clear and logical. Indicate if a step likely requires code execution."
+     )
+
+
+ class IsToolCallNeeded(BaseModel):
+     """
+     Determine if executing Python code is the necessary *next* action.
+     Carefully review the most recent messages, especially the last code execution output and review (if any).
+     """
+
+     is_tool_call_needed: bool = Field(
+         description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
+     )
+
+
+ class ReviewOnToolcall(BaseModel):
+     """
+     Evaluate the outcome of the Python code execution and decide the subsequent action.
+     Critically assess if the execution achieved the intended goal and if the output is usable.
+     """
+
+     review_on_code_execution: str = Field(
+         description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
+     )
+     next_action: str = Field(
+         description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
+     )
+     is_task_completed: bool = Field(
+         description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
+     )
+
+
+ class Think(BaseModel):
+     """
+     Engage in reasoning when code execution is not the immediate next step.
+     This could involve synthesizing information, preparing the final answer, or identifying missing information.
+     """
+
+     my_thinking: str = Field(
+         description="Explain your reasoning process. Why is code execution not needed now? "
+         "What information are you using from the context? How are you planning to formulate the response or proceed?"
+     )
+     next_action: str = Field(
+         description="Describe the *immediate next action* resulting from this thinking process. "
+         "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
+         "'Summarize the findings so far'."
+     )
+     # --- MODIFIED DESCRIPTION ---
+     is_task_completed: bool = Field(
+         description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
+     )
+     # --- END OF MODIFICATION ---
+
+
+ # --- Interactive Shell Function ---
+
+
+ def interactive_shell(
+     chatterer: Chatterer,
+     system_instruction: BaseMessage | Iterable[BaseMessage] = ([
+         SystemMessage(
+             "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
+         ),
+     ]),
+     repl_tool: Optional["PythonAstREPLTool"] = None,
+     prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
+     additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
+     function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+     function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+     config: Optional[RunnableConfig] = None,
+     stop: Optional[list[str]] = None,
+     **kwargs: Any,
+ ) -> None:
+     try:
+         console = Console()
+         # Style settings
+         AI_STYLE = "bold bright_blue"
+         EXECUTED_CODE_STYLE = "bold bright_yellow"
+         OUTPUT_STYLE = "bold bright_cyan"
+         THINKING_STYLE = "dim white"
+     except ImportError:
+         raise ImportError("Rich library not found. Please install it: pip install rich")
+
+     # --- Shell Initialization and Main Loop ---
+     if repl_tool is None:
+         repl_tool = get_default_repl_tool()
+
+     def set_locals(**kwargs: object) -> None:
+         """Set local variables for the REPL tool."""
+         if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
+             repl_tool.locals = {}
+         for key, value in kwargs.items():
+             repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
+
+     def respond(
+         messages: list[BaseMessage],
+     ) -> str:
+         response = ""
+         with console.status("[bold yellow]AI is thinking..."):
+             response_panel = Panel(
+                 "",
+                 title="AI Response",
+                 style=AI_STYLE,
+                 border_style="blue",
+             )
+             current_content = ""
+             for chunk in chatterer.generate_stream(messages=messages):
+                 current_content += chunk
+                 # Update renderable (might not display smoothly without Live)
+                 response_panel.renderable = current_content
+             response = current_content
+         console.print(
+             Panel(
+                 response,
+                 title="AI Response",
+                 style=AI_STYLE,
+             )
+         )
+         return response.strip()
+
+     def complete_task(
+         think_before_speak: ThinkBeforeSpeak,
+     ) -> None:
+         task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
+             think_before_speak.plans
+         )
+         console.print(
+             Panel(
+                 task_info,
+                 title="Task Analysis & Plan",
+                 style="magenta",
+             )
+         )
+         session_messages: list[BaseMessage] = [
+             AIMessage(
+                 content=f"Okay, I understand the task. Here's my plan:\n"
+                 f"- Task Summary: {think_before_speak.task}\n"
+                 f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
+             )
+         ]
+
+         while True:
+             current_context = context + session_messages
+             is_tool_call_needed: IsToolCallNeeded = chatterer(
+                 augment_prompt_for_toolcall(
+                     function_signatures=function_signatures,
+                     messages=current_context,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                 ),
+                 IsToolCallNeeded,
+                 config=config,
+                 stop=stop,
+                 **kwargs,
+             )
+
+             if is_tool_call_needed.is_tool_call_needed:
+                 # --- Code Execution Path ---
+                 set_locals(
+                     __context__=context,
+                     __session__=session_messages,
+                 )
+                 code_execution: CodeExecutionResult = chatterer.exec(
+                     messages=current_context,
+                     repl_tool=repl_tool,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_signatures=function_signatures,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 code_block_display = (
+                     f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
+                     f"[bold]Output:[/bold]\n{code_execution.output}"
+                 )
+                 console.print(
+                     Panel(
+                         code_block_display,
+                         title="Code Execution",
+                         style=EXECUTED_CODE_STYLE,
+                         border_style="yellow",
+                     )
+                 )
+                 tool_call_message = AIMessage(
+                     content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
+                 )
+                 session_messages.append(tool_call_message)
+
+                 # --- Review Code Execution ---
+                 current_context_after_exec = context + session_messages
+                 decision = chatterer(
+                     augment_prompt_for_toolcall(
+                         function_signatures=function_signatures,
+                         messages=current_context_after_exec,
+                         prompt_for_code_invoke=prompt_for_code_invoke,
+                         function_reference_prefix=function_reference_prefix,
+                         function_reference_seperator=function_reference_seperator,
+                     ),
+                     ReviewOnToolcall,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 review_text = (
+                     f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
+                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                 )
+                 console.print(
+                     Panel(
+                         review_text,
+                         title="Execution Review",
+                         style=OUTPUT_STYLE,
+                         border_style="cyan",
+                     )
+                 )
+                 review_message = AIMessage(
+                     content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
+                     f"**Next Action:** {decision.next_action.strip()}"
+                 )
+                 session_messages.append(review_message)
+
+                 # --- Check Completion after Review ---
+                 if decision.is_task_completed:
+                     console.print(
+                         Panel(
+                             "[bold green]Task Completed![/bold green]",
+                             title="Status",
+                             border_style="green",
+                         )
+                     )
+                     break # Exit loop
+             else:
+                 # --- Thinking Path (No Code Needed) ---
+                 current_context_before_think = context + session_messages
+                 decision = chatterer(
+                     augment_prompt_for_toolcall(
+                         function_signatures=function_signatures,
+                         messages=current_context_before_think,
+                         prompt_for_code_invoke=prompt_for_code_invoke,
+                         function_reference_prefix=function_reference_prefix,
+                         function_reference_seperator=function_reference_seperator,
+                     ),
+                     Think,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 thinking_text = (
+                     f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
+                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                 )
+                 console.print(
+                     Panel(
+                         thinking_text,
+                         title="AI Thought Process (No Code)",
+                         style=THINKING_STYLE,
+                         border_style="white",
+                     )
+                 )
+                 thinking_message = AIMessage(
+                     content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
+                     f"**Next Action:** {decision.next_action.strip()}"
+                 )
+                 session_messages.append(thinking_message)
+
+                 # --- Check Completion after Thinking ---
+                 # This check now relies on the LLM correctly interpreting the updated
+                 # description for Think.is_task_completed
+                 if decision.is_task_completed:
+                     console.print(
+                         Panel(
+                             "[bold green]Task Completed![/bold green]",
+                             title="Status",
+                             border_style="green",
+                         )
+                     )
+                     break # Exit loop
+
+         # --- End of Loop ---
+         # Generate and display the final response based on the *entire* session history
+         final_response_messages = context + session_messages
+         response: str = respond(final_response_messages)
+         # Add the final AI response to the main context
+         context.append(AIMessage(content=response))
+
+     if additional_callables:
+         if callable(additional_callables):
+             additional_callables = [additional_callables]
+
+         function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
+     else:
+         function_signatures = []
+
+     context: list[BaseMessage] = []
+     if system_instruction:
+         if isinstance(system_instruction, BaseMessage):
+             context.append(system_instruction)
+         elif isinstance(system_instruction, str):
+             context.append(SystemMessage(content=system_instruction))
+         else:
+             context.extend(list(system_instruction))
+
+     console.print(
+         Panel(
+             "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
+             title="Welcome",
+             style=AI_STYLE,
+             border_style="blue",
+         )
+     )
+
+     while True:
+         try:
+             user_input = Prompt.ask("[bold green]You[/bold green]")
+         except EOFError:
+             user_input = "exit"
+
+         if user_input.strip().lower() in [
+             "quit",
+             "exit",
+         ]:
+             console.print(
+                 Panel(
+                     "Goodbye!",
+                     title="Exit",
+                     style=AI_STYLE,
+                     border_style="blue",
+                 )
+             )
+             break
+
+         context.append(HumanMessage(content=user_input.strip()))
+
+         try:
+             # Initial planning step
+             initial_plan_decision = chatterer(
+                 augment_prompt_for_toolcall(
+                     function_signatures=function_signatures,
+                     messages=context,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                 ),
+                 ThinkBeforeSpeak,
+                 config=config,
+                 stop=stop,
+                 **kwargs,
+             )
+             # Execute the task completion loop
+             complete_task(initial_plan_decision)
+
+         except Exception as e:
+             import traceback
+
+             console.print(
+                 Panel(
+                     f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
+                     title="Error",
+                     border_style="red",
+                 )
+             )
+
+
+ if __name__ == "__main__":
+     interactive_shell(chatterer=Chatterer.openai())
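Below is a minimal usage sketch for the interactive_shell entry point shown in the hunk above. It is not part of the package diff: it assumes an OpenAI credential is already configured for Chatterer.openai() (as used in the module's own __main__ block), and word_count is a hypothetical helper included only to illustrate the additional_callables parameter.

# Usage sketch only; not part of the released package contents.
from chatterer.interactive import interactive_shell
from chatterer.language_model import Chatterer


def word_count(text: str) -> int:
    """Hypothetical helper exposed to the code-execution REPL via additional_callables."""
    return len(text.split())


if __name__ == "__main__":
    interactive_shell(
        chatterer=Chatterer.openai(),  # same default backend used by the module's __main__ block
        additional_callables=[word_count],  # optional callables whose signatures are added to code prompts
    )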