chatterer 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. chatterer/__init__.py +93 -97
  2. chatterer/common_types/__init__.py +21 -21
  3. chatterer/common_types/io.py +19 -19
  4. chatterer/interactive.py +692 -353
  5. chatterer/language_model.py +533 -454
  6. chatterer/messages.py +21 -21
  7. chatterer/strategies/__init__.py +13 -13
  8. chatterer/strategies/atom_of_thoughts.py +975 -975
  9. chatterer/strategies/base.py +14 -14
  10. chatterer/tools/__init__.py +46 -46
  11. chatterer/tools/caption_markdown_images.py +384 -384
  12. chatterer/tools/citation_chunking/__init__.py +3 -3
  13. chatterer/tools/citation_chunking/chunks.py +53 -53
  14. chatterer/tools/citation_chunking/citation_chunker.py +118 -118
  15. chatterer/tools/citation_chunking/citations.py +285 -285
  16. chatterer/tools/citation_chunking/prompt.py +157 -157
  17. chatterer/tools/citation_chunking/reference.py +26 -26
  18. chatterer/tools/citation_chunking/utils.py +138 -138
  19. chatterer/tools/convert_pdf_to_markdown.py +302 -302
  20. chatterer/tools/convert_to_text.py +447 -447
  21. chatterer/tools/upstage_document_parser.py +705 -705
  22. chatterer/tools/webpage_to_markdown.py +739 -739
  23. chatterer/tools/youtube.py +146 -146
  24. chatterer/utils/__init__.py +15 -18
  25. chatterer/utils/base64_image.py +285 -285
  26. chatterer/utils/bytesio.py +59 -59
  27. chatterer/utils/code_agent.py +237 -237
  28. chatterer/utils/imghdr.py +148 -148
  29. {chatterer-0.1.14.dist-info → chatterer-0.1.16.dist-info}/METADATA +392 -387
  30. chatterer-0.1.16.dist-info/RECORD +33 -0
  31. {chatterer-0.1.14.dist-info → chatterer-0.1.16.dist-info}/WHEEL +1 -1
  32. chatterer/utils/cli.py +0 -476
  33. chatterer-0.1.14.dist-info/RECORD +0 -34
  34. {chatterer-0.1.14.dist-info → chatterer-0.1.16.dist-info}/top_level.txt +0 -0
chatterer/interactive.py CHANGED
@@ -1,353 +1,692 @@
1
- import sys
2
- from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional
3
-
4
- from langchain_core.messages import (
5
- AIMessage,
6
- BaseMessage,
7
- HumanMessage,
8
- SystemMessage,
9
- )
10
- from langchain_core.runnables import RunnableConfig
11
- from pydantic import BaseModel, Field
12
-
13
- from .language_model import Chatterer
14
- from .utils.code_agent import (
15
- DEFAULT_CODE_GENERATION_PROMPT,
16
- DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
17
- DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
18
- CodeExecutionResult,
19
- FunctionSignature,
20
- augment_prompt_for_toolcall,
21
- get_default_repl_tool,
22
- )
23
-
24
- if TYPE_CHECKING:
25
- # Import only for type hinting to avoid circular dependencies if necessary
26
- from langchain_experimental.tools.python.tool import PythonAstREPLTool
27
-
28
-
29
- # --- Pydantic Models ---
30
-
31
-
32
- class ThinkBeforeSpeak(BaseModel):
33
- """
34
- Analyze the user's request and formulate an initial plan.
35
- This involves understanding the core task and breaking it down into logical steps.
36
- """
37
-
38
- task: str = Field(description="A concise summary of the user's overall goal or question.")
39
- plans: list[str] = Field(
40
- description="A sequence of actionable steps required to address the user's task. "
41
- "Each step should be clear and logical. Indicate if a step likely requires code execution."
42
- )
43
-
44
-
45
- class IsToolCallNeeded(BaseModel):
46
- """
47
- Determine if executing Python code is the necessary *next* action.
48
- Carefully review the most recent messages, especially the last code execution output and review (if any).
49
- """
50
-
51
- is_tool_call_needed: bool = Field(
52
- description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
53
- )
54
-
55
-
56
- class ReviewOnToolcall(BaseModel):
57
- """
58
- Evaluate the outcome of the Python code execution and decide the subsequent action.
59
- Critically assess if the execution achieved the intended goal and if the output is usable.
60
- """
61
-
62
- review_on_code_execution: str = Field(
63
- description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
64
- )
65
- next_action: str = Field(
66
- description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
67
- )
68
- is_task_completed: bool = Field(
69
- description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
70
- )
71
-
72
-
73
- class Think(BaseModel):
74
- """
75
- Engage in reasoning when code execution is not the immediate next step.
76
- This could involve synthesizing information, preparing the final answer, or identifying missing information.
77
- """
78
-
79
- my_thinking: str = Field(
80
- description="Explain your reasoning process. Why is code execution not needed now? "
81
- "What information are you using from the context? How are you planning to formulate the response or proceed?"
82
- )
83
- next_action: str = Field(
84
- description="Describe the *immediate next action* resulting from this thinking process. "
85
- "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
86
- "'Summarize the findings so far'."
87
- )
88
- # --- MODIFIED DESCRIPTION ---
89
- is_task_completed: bool = Field(
90
- description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
91
- )
92
- # --- END OF MODIFICATION ---
93
-
94
-
95
- # --- Interactive Shell Function ---
96
-
97
-
98
- def interactive_shell(
99
- chatterer: Chatterer = Chatterer.openai(), # Assuming Chatterer.openai() is correct
100
- system_instruction: BaseMessage | Iterable[BaseMessage] = ([
101
- SystemMessage(
102
- "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
103
- ),
104
- ]),
105
- repl_tool: Optional["PythonAstREPLTool"] = None,
106
- prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
107
- additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
108
- function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
109
- function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
110
- config: Optional[RunnableConfig] = None,
111
- stop: Optional[list[str]] = None,
112
- **kwargs: Any,
113
- ) -> None:
114
- try:
115
- from rich.console import Console
116
- from rich.panel import Panel
117
- from rich.prompt import Prompt
118
-
119
- console = Console()
120
- # Style settings
121
- AI_STYLE = "bold bright_blue"
122
- EXECUTED_CODE_STYLE = "bold bright_yellow"
123
- OUTPUT_STYLE = "bold bright_cyan"
124
- THINKING_STYLE = "dim white"
125
- except ImportError:
126
- raise ImportError("Rich library not found. Please install it: pip install rich")
127
-
128
- def respond(messages: list[BaseMessage]) -> str:
129
- response = ""
130
- if "rich" not in sys.modules:
131
- for chunk in chatterer.generate_stream(messages=messages):
132
- print(chunk, end="", flush=True)
133
- response += chunk
134
- print()
135
- else:
136
- with console.status("[bold yellow]AI is thinking..."):
137
- response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
138
- current_content = ""
139
- for chunk in chatterer.generate_stream(messages=messages):
140
- current_content += chunk
141
- # Update renderable (might not display smoothly without Live)
142
- response_panel.renderable = current_content
143
- response = current_content
144
- console.print(Panel(response, title="AI Response", style=AI_STYLE))
145
- return response.strip()
146
-
147
- def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
148
- task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
149
- think_before_speak.plans
150
- )
151
- console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
152
- session_messages: list[BaseMessage] = [
153
- AIMessage(
154
- content=f"Okay, I understand the task. Here's my plan:\n"
155
- f"- Task Summary: {think_before_speak.task}\n"
156
- f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
157
- )
158
- ]
159
-
160
- while True:
161
- current_context = context + session_messages
162
- is_tool_call_needed: IsToolCallNeeded = chatterer.generate_pydantic(
163
- response_model=IsToolCallNeeded,
164
- messages=augment_prompt_for_toolcall(
165
- function_signatures=function_signatures,
166
- messages=current_context,
167
- prompt_for_code_invoke=prompt_for_code_invoke,
168
- function_reference_prefix=function_reference_prefix,
169
- function_reference_seperator=function_reference_seperator,
170
- ),
171
- config=config,
172
- stop=stop,
173
- **kwargs,
174
- )
175
-
176
- if is_tool_call_needed.is_tool_call_needed:
177
- # --- Code Execution Path ---
178
- code_execution: CodeExecutionResult = chatterer.invoke_code_execution(
179
- messages=current_context,
180
- repl_tool=repl_tool,
181
- prompt_for_code_invoke=prompt_for_code_invoke,
182
- function_signatures=function_signatures,
183
- function_reference_prefix=function_reference_prefix,
184
- function_reference_seperator=function_reference_seperator,
185
- config=config,
186
- stop=stop,
187
- **kwargs,
188
- )
189
- code_block_display = (
190
- f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
191
- f"[bold]Output:[/bold]\n{code_execution.output}"
192
- )
193
- console.print(
194
- Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
195
- )
196
- tool_call_message = AIMessage(
197
- content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
198
- )
199
- session_messages.append(tool_call_message)
200
-
201
- # --- Review Code Execution ---
202
- current_context_after_exec = context + session_messages
203
- decision = chatterer.generate_pydantic(
204
- response_model=ReviewOnToolcall,
205
- messages=augment_prompt_for_toolcall(
206
- function_signatures=function_signatures,
207
- messages=current_context_after_exec,
208
- prompt_for_code_invoke=prompt_for_code_invoke,
209
- function_reference_prefix=function_reference_prefix,
210
- function_reference_seperator=function_reference_seperator,
211
- ),
212
- config=config,
213
- stop=stop,
214
- **kwargs,
215
- )
216
- review_text = (
217
- f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
218
- f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
219
- )
220
- console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
221
- review_message = AIMessage(
222
- content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
223
- f"**Next Action:** {decision.next_action.strip()}"
224
- )
225
- session_messages.append(review_message)
226
-
227
- # --- Check Completion after Review ---
228
- if decision.is_task_completed:
229
- console.print(
230
- Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
231
- )
232
- break # Exit loop
233
- else:
234
- # --- Thinking Path (No Code Needed) ---
235
- current_context_before_think = context + session_messages
236
- decision = chatterer.generate_pydantic(
237
- response_model=Think, # Uses updated description
238
- messages=augment_prompt_for_toolcall(
239
- function_signatures=function_signatures,
240
- messages=current_context_before_think,
241
- prompt_for_code_invoke=prompt_for_code_invoke,
242
- function_reference_prefix=function_reference_prefix,
243
- function_reference_seperator=function_reference_seperator,
244
- ),
245
- config=config,
246
- stop=stop,
247
- **kwargs,
248
- )
249
- thinking_text = (
250
- f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
251
- f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
252
- )
253
- console.print(
254
- Panel(
255
- thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
256
- )
257
- )
258
- thinking_message = AIMessage(
259
- content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
260
- f"**Next Action:** {decision.next_action.strip()}"
261
- )
262
- session_messages.append(thinking_message)
263
-
264
- # --- Check Completion after Thinking ---
265
- # This check now relies on the LLM correctly interpreting the updated
266
- # description for Think.is_task_completed
267
- if decision.is_task_completed:
268
- console.print(
269
- Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
270
- )
271
- break # Exit loop
272
-
273
- # --- End of Loop ---
274
- # Generate and display the final response based on the *entire* session history
275
- final_response_messages = context + session_messages
276
- response: str = respond(final_response_messages)
277
- # Add the final AI response to the main context
278
- context.append(AIMessage(content=response))
279
-
280
- # --- Shell Initialization and Main Loop ---
281
- if repl_tool is None:
282
- repl_tool = get_default_repl_tool()
283
-
284
- if additional_callables:
285
- if callable(additional_callables):
286
- additional_callables = [additional_callables]
287
-
288
- function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
289
- else:
290
- function_signatures: list[FunctionSignature] = []
291
-
292
- context: list[BaseMessage] = []
293
- if system_instruction:
294
- if isinstance(system_instruction, BaseMessage):
295
- context.append(system_instruction)
296
- elif isinstance(system_instruction, str):
297
- context.append(SystemMessage(content=system_instruction))
298
- else:
299
- context.extend(list(system_instruction))
300
-
301
- console.print(
302
- Panel(
303
- "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
304
- title="Welcome",
305
- style=AI_STYLE,
306
- border_style="blue",
307
- )
308
- )
309
-
310
- while True:
311
- try:
312
- user_input = Prompt.ask("[bold green]You[/bold green]")
313
- except EOFError:
314
- user_input = "exit"
315
-
316
- if user_input.strip().lower() in ["quit", "exit"]:
317
- console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
318
- break
319
-
320
- context.append(HumanMessage(content=user_input.strip()))
321
-
322
- try:
323
- # Initial planning step
324
- initial_plan_decision = chatterer.generate_pydantic(
325
- response_model=ThinkBeforeSpeak,
326
- messages=augment_prompt_for_toolcall(
327
- function_signatures=function_signatures,
328
- messages=context,
329
- prompt_for_code_invoke=prompt_for_code_invoke,
330
- function_reference_prefix=function_reference_prefix,
331
- function_reference_seperator=function_reference_seperator,
332
- ),
333
- config=config,
334
- stop=stop,
335
- **kwargs,
336
- )
337
- # Execute the task completion loop
338
- complete_task(initial_plan_decision)
339
-
340
- except Exception as e:
341
- import traceback
342
-
343
- console.print(
344
- Panel(
345
- f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
346
- title="Error",
347
- border_style="red",
348
- )
349
- )
350
-
351
-
352
- if __name__ == "__main__":
353
- interactive_shell()
1
+ from functools import cached_property
2
+ from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, Type, TypeVar, cast, overload
3
+
4
+ from langchain_core.messages import (
5
+ AIMessage,
6
+ BaseMessage,
7
+ HumanMessage,
8
+ SystemMessage,
9
+ )
10
+ from langchain_core.runnables import RunnableConfig
11
+ from pydantic import BaseModel, Field
12
+ from rich.console import Console
13
+ from rich.panel import Panel
14
+ from rich.prompt import Prompt
15
+
16
+ from .language_model import Chatterer, LanguageModelInput
17
+ from .utils.code_agent import (
18
+ DEFAULT_CODE_GENERATION_PROMPT,
19
+ DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
20
+ DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
21
+ CodeExecutionResult,
22
+ FunctionSignature,
23
+ augment_prompt_for_toolcall,
24
+ get_default_repl_tool,
25
+ )
26
+
27
+ if TYPE_CHECKING:
28
+ # Import only for type hinting to avoid circular dependencies if necessary
29
+ from langchain_experimental.tools.python.tool import PythonAstREPLTool
30
+
31
+ T = TypeVar("T", bound=BaseModel)
32
+
33
+ # --- Pydantic Models ---
34
+
35
+
36
+ class ThinkBeforeSpeak(BaseModel):
37
+ """
38
+ Analyze the user's request and formulate an initial plan.
39
+ This involves understanding the core task and breaking it down into logical steps.
40
+ """
41
+
42
+ task: str = Field(description="A concise summary of the user's overall goal or question.")
43
+ plans: list[str] = Field(
44
+ description="A sequence of actionable steps required to address the user's task. "
45
+ "Each step should be clear and logical. Indicate if a step likely requires code execution."
46
+ )
47
+
48
+
49
+ class IsToolCallNeeded(BaseModel):
50
+ """
51
+ Determine if executing Python code is the necessary *next* action.
52
+ Carefully review the most recent messages, especially the last code execution output and review (if any).
53
+ """
54
+
55
+ is_tool_call_needed: bool = Field(
56
+ description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
57
+ )
58
+
59
+
60
+ class ReviewOnToolcall(BaseModel):
61
+ """
62
+ Evaluate the outcome of the Python code execution and decide the subsequent action.
63
+ Critically assess if the execution achieved the intended goal and if the output is usable.
64
+ """
65
+
66
+ review_on_code_execution: str = Field(
67
+ description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
68
+ )
69
+ next_action: str = Field(
70
+ description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
71
+ )
72
+ is_task_completed: bool = Field(
73
+ description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
74
+ )
75
+
76
+
77
+ class Think(BaseModel):
78
+ """
79
+ Engage in reasoning when code execution is not the immediate next step.
80
+ This could involve synthesizing information, preparing the final answer, or identifying missing information.
81
+ """
82
+
83
+ my_thinking: str = Field(
84
+ description="Explain your reasoning process. Why is code execution not needed now? "
85
+ "What information are you using from the context? How are you planning to formulate the response or proceed?"
86
+ )
87
+ next_action: str = Field(
88
+ description="Describe the *immediate next action* resulting from this thinking process. "
89
+ "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
90
+ "'Summarize the findings so far'."
91
+ )
92
+ # --- MODIFIED DESCRIPTION ---
93
+ is_task_completed: bool = Field(
94
+ description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
95
+ )
96
+ # --- END OF MODIFICATION ---
97
+
98
+
99
+ # --- Interactive Shell Function ---
100
+
101
+
102
+ class InteractiveShell(BaseModel):
103
+ """
104
+ A class to create an interactive shell for the Chatterer language model.
105
+ This shell allows users to interact with the model, execute Python code, and receive responses in real-time.
106
+ """
107
+
108
+ chatterer: Chatterer
109
+ context: list[BaseMessage] = Field(default_factory=list)
110
+ additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None
111
+ system_instruction: BaseMessage | Iterable[BaseMessage] = Field(
112
+ default_factory=lambda: [
113
+ SystemMessage(
114
+ "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
115
+ ),
116
+ ]
117
+ )
118
+
119
+ repl_tool: Optional["PythonAstREPLTool"] = None
120
+ prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT
121
+ function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT
122
+ function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR
123
+ config: Optional[RunnableConfig] = None
124
+ stop: Optional[list[str]] = None
125
+ call_kwargs: Optional[dict[str, Any]] = None
126
+
127
+ AI_STYLE: str = "bold bright_blue"
128
+ EXECUTED_CODE_STYLE: str = "bold bright_yellow"
129
+ OUTPUT_STYLE: str = "bold bright_cyan"
130
+ THINKING_STYLE: str = "dim white"
131
+
132
+ @overload
133
+ def invoke(self, messages: LanguageModelInput, response_model: Type[T]) -> T: ...
134
+ @overload
135
+ def invoke(self, messages: LanguageModelInput, response_model: None) -> str: ...
136
+ def invoke(self, messages: LanguageModelInput, response_model: Optional[Type[T]]) -> T | str:
137
+ return self.chatterer(
138
+ self.get_tool_call_prompt(messages),
139
+ response_model,
140
+ config=self.config,
141
+ stop=self.stop,
142
+ **(self.call_kwargs or {}),
143
+ )
144
+
145
+ def exec(self, messages: LanguageModelInput) -> CodeExecutionResult:
146
+ return self.chatterer.exec(
147
+ messages=messages,
148
+ repl_tool=self.python,
149
+ prompt_for_code_invoke=self.prompt_for_code_invoke,
150
+ function_signatures=self.function_signatures,
151
+ function_reference_prefix=self.function_reference_prefix,
152
+ function_reference_seperator=self.function_reference_seperator,
153
+ config=self.config,
154
+ stop=self.stop,
155
+ **(self.call_kwargs or {}),
156
+ )
157
+
158
+ def respond(self, messages: list[BaseMessage]) -> str:
159
+ response: str = ""
160
+ with self.console.status("[bold yellow]AI is thinking..."):
161
+ response_panel = Panel("", title="AI Response", style=self.AI_STYLE, border_style="blue")
162
+ current_content = ""
163
+ for chunk in self.chatterer.generate_stream(messages=messages):
164
+ current_content += chunk
165
+ # Update renderable (might not display smoothly without Live)
166
+ response_panel.renderable = current_content
167
+ response = current_content
168
+ self.console.print(Panel(response, title="AI Response", style=self.AI_STYLE))
169
+ return response.strip()
170
+
171
+ def do_if_tool_call_needed(self, session_messages: list[BaseMessage]) -> None:
172
+ # --- Code Execution Path ---
173
+ self.python_locals.update({"__context__": self.context, "__session__": session_messages})
174
+ code_execution: CodeExecutionResult = self.exec(self.context + session_messages)
175
+ self.console.print(
176
+ Panel(
177
+ (
178
+ f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
179
+ f"[bold]Output:[/bold]\n{code_execution.output}"
180
+ ),
181
+ title="Code Execution",
182
+ style=self.EXECUTED_CODE_STYLE,
183
+ border_style="yellow",
184
+ )
185
+ )
186
+ session_messages.append(
187
+ AIMessage(
188
+ content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
189
+ )
190
+ )
191
+
192
+ # --- Review Code Execution ---
193
+ decision = self.invoke(self.context + session_messages, ReviewOnToolcall)
194
+ self.console.print(
195
+ Panel(
196
+ (
197
+ f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
198
+ f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
199
+ ),
200
+ title="Execution Review",
201
+ style=self.OUTPUT_STYLE,
202
+ border_style="cyan",
203
+ )
204
+ )
205
+ session_messages.append(
206
+ AIMessage(
207
+ content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
208
+ f"**Next Action:** {decision.next_action.strip()}"
209
+ )
210
+ )
211
+ # --- Check Completion after Review ---
212
+ if decision.is_task_completed:
213
+ self.console.print(Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green"))
214
+
215
+ def complete_task(self, think_before_speak: ThinkBeforeSpeak) -> None:
216
+ task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
217
+ think_before_speak.plans
218
+ )
219
+ console: Console = self.console
220
+ context: list[BaseMessage] = self.context
221
+ chatterer: Chatterer = self.chatterer
222
+ repl_tool = self.python
223
+ function_signatures = self.function_signatures
224
+ prompt_for_code_invoke = self.prompt_for_code_invoke
225
+ function_reference_prefix = self.function_reference_prefix
226
+ function_reference_seperator = self.function_reference_seperator
227
+
228
+ console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
229
+ session_messages: list[BaseMessage] = [
230
+ AIMessage(
231
+ content=f"Okay, I understand the task. Here's my plan:\n"
232
+ f"- Task Summary: {think_before_speak.task}\n"
233
+ f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
234
+ )
235
+ ]
236
+
237
+ while True:
238
+ current_context = context + session_messages
239
+ is_tool_call_needed: IsToolCallNeeded = chatterer(
240
+ augment_prompt_for_toolcall(
241
+ function_signatures=function_signatures,
242
+ messages=current_context,
243
+ prompt_for_code_invoke=prompt_for_code_invoke,
244
+ function_reference_prefix=function_reference_prefix,
245
+ function_reference_seperator=function_reference_seperator,
246
+ ),
247
+ IsToolCallNeeded,
248
+ config=self.config,
249
+ stop=self.stop,
250
+ **(self.call_kwargs or {}),
251
+ )
252
+
253
+ if is_tool_call_needed.is_tool_call_needed:
254
+ # --- Code Execution Path ---
255
+ set_locals(__context__=context, __session__=session_messages)
256
+ code_execution: CodeExecutionResult = chatterer.exec(
257
+ messages=current_context,
258
+ repl_tool=repl_tool,
259
+ prompt_for_code_invoke=prompt_for_code_invoke,
260
+ function_signatures=function_signatures,
261
+ function_reference_prefix=function_reference_prefix,
262
+ function_reference_seperator=function_reference_seperator,
263
+ config=self.config,
264
+ stop=self.stop,
265
+ **(self.call_kwargs or {}),
266
+ )
267
+ code_block_display = (
268
+ f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
269
+ f"[bold]Output:[/bold]\n{code_execution.output}"
270
+ )
271
+ console.print(
272
+ Panel(
273
+ code_block_display,
274
+ title="Code Execution",
275
+ style=self.EXECUTED_CODE_STYLE,
276
+ border_style="yellow",
277
+ )
278
+ )
279
+ tool_call_message = AIMessage(
280
+ content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
281
+ )
282
+ session_messages.append(tool_call_message)
283
+
284
+ # --- Review Code Execution ---
285
+ current_context_after_exec = context + session_messages
286
+ decision = chatterer(
287
+ augment_prompt_for_toolcall(
288
+ function_signatures=function_signatures,
289
+ messages=current_context_after_exec,
290
+ prompt_for_code_invoke=prompt_for_code_invoke,
291
+ function_reference_prefix=function_reference_prefix,
292
+ function_reference_seperator=function_reference_seperator,
293
+ ),
294
+ ReviewOnToolcall,
295
+ config=self.config,
296
+ stop=self.stop,
297
+ **(self.call_kwargs or {}),
298
+ )
299
+ review_text = (
300
+ f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
301
+ f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
302
+ )
303
+ console.print(
304
+ Panel(review_text, title="Execution Review", style=self.OUTPUT_STYLE, border_style="cyan")
305
+ )
306
+ review_message = AIMessage(
307
+ content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
308
+ f"**Next Action:** {decision.next_action.strip()}"
309
+ )
310
+ session_messages.append(review_message)
311
+
312
+ # --- Check Completion after Review ---
313
+ if decision.is_task_completed:
314
+ console.print(
315
+ Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
316
+ )
317
+ break # Exit loop
318
+ else:
319
+ # --- Thinking Path (No Code Needed) ---
320
+ current_context_before_think = context + session_messages
321
+ decision = chatterer(
322
+ augment_prompt_for_toolcall(
323
+ function_signatures=function_signatures,
324
+ messages=current_context_before_think,
325
+ prompt_for_code_invoke=prompt_for_code_invoke,
326
+ function_reference_prefix=function_reference_prefix,
327
+ function_reference_seperator=function_reference_seperator,
328
+ ),
329
+ Think,
330
+ config=self.config,
331
+ stop=self.stop,
332
+ **(self.call_kwargs or {}),
333
+ )
334
+ thinking_text = (
335
+ f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
336
+ f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
337
+ )
338
+ console.print(
339
+ Panel(
340
+ thinking_text,
341
+ title="AI Thought Process (No Code)",
342
+ style=self.THINKING_STYLE,
343
+ border_style="white",
344
+ )
345
+ )
346
+ thinking_message = AIMessage(
347
+ content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
348
+ f"**Next Action:** {decision.next_action.strip()}"
349
+ )
350
+ session_messages.append(thinking_message)
351
+
352
+ # --- Check Completion after Thinking ---
353
+ # This check now relies on the LLM correctly interpreting the updated
354
+ # description for Think.is_task_completed
355
+ if decision.is_task_completed:
356
+ console.print(
357
+ Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
358
+ )
359
+ break # Exit loop
360
+
361
+ # --- End of Loop ---
362
+ # Generate and display the final response based on the *entire* session history
363
+ final_response_messages = context + session_messages
364
+ response: str = self.respond(final_response_messages)
365
+ # Add the final AI response to the main context
366
+ context.append(AIMessage(content=response))
367
+
368
def get_tool_call_prompt(self, messages: LanguageModelInput) -> LanguageModelInput:
    """Return *messages* augmented with this instance's tool-call configuration.

    Delegates to ``augment_prompt_for_toolcall`` using the function signatures,
    code-invocation prompt, and reference prefix/separator configured on self.
    """
    augment_kwargs = {
        "function_signatures": self.function_signatures,
        "messages": messages,
        "prompt_for_code_invoke": self.prompt_for_code_invoke,
        "function_reference_prefix": self.function_reference_prefix,
        "function_reference_seperator": self.function_reference_seperator,
    }
    return augment_prompt_for_toolcall(**augment_kwargs)
376
+
377
@property
def python(self) -> "PythonAstREPLTool":
    """Lazily create (on first access) and return the Python REPL tool."""
    tool = self.repl_tool
    if tool is None:
        tool = get_default_repl_tool()
        self.repl_tool = tool
    return tool
382
+
383
@property
def python_locals(self) -> dict[str, object]:
    """Local variable mapping of the REPL tool, initialised to ``{}`` on first use."""
    repl = self.python
    if repl.locals is None:  # pyright: ignore[reportUnknownMemberType]
        repl.locals = {}
    return cast(dict[str, object], repl.locals)  # pyright: ignore[reportUnknownMemberType]
389
+
390
@property
def python_globals(self) -> dict[str, object]:
    """Global variable mapping of the REPL tool, initialised to ``{}`` on first use."""
    repl = self.python
    if repl.globals is None:  # pyright: ignore[reportUnknownMemberType]
        repl.globals = {}
    return cast(dict[str, object], repl.globals)  # pyright: ignore[reportUnknownMemberType]
396
+
397
@cached_property
def console(self):
    """A ``rich.console.Console`` created on first access and cached thereafter.

    Raises:
        ImportError: If the ``rich`` package is not installed.
    """
    try:
        from rich.console import Console
    except ImportError:
        raise ImportError("Rich library not found. Please install it: pip install rich")
    return Console()
405
+
406
@property
def function_signatures(self) -> list[FunctionSignature]:
    """Signatures derived from ``additional_callables`` (empty list when none are set).

    A single callable is accepted as well as an iterable of callables.
    """
    callables = self.additional_callables
    if not callables:
        return []
    if callable(callables):
        callables = [callables]
    return FunctionSignature.from_callable(list(callables))
416
+
417
+
418
def interactive_shell(
    chatterer: Chatterer,
    system_instruction: BaseMessage | Iterable[BaseMessage] = (
        # Fix: the previous default was a mutable list, a classic Python pitfall
        # (shared across all calls). A tuple is immutable and still satisfies
        # Iterable[BaseMessage]; the value is only iterated, never mutated.
        SystemMessage(
            "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
        ),
    ),
    repl_tool: Optional["PythonAstREPLTool"] = None,
    prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
    additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
    function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
    function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
    config: Optional[RunnableConfig] = None,
    stop: Optional[list[str]] = None,
    **kwargs: Any,
) -> None:
    """Run an interactive terminal chat loop backed by *chatterer*.

    Each user turn is planned (``ThinkBeforeSpeak``), then iteratively either
    executes Python code through *repl_tool* (with a ``ReviewOnToolcall`` review
    step) or reasons without code (``Think``), until the model reports the task
    complete; a final streamed response is then printed and appended to the
    conversation context.

    Args:
        chatterer: The language-model wrapper used for all structured calls.
        system_instruction: Message(s) prepended to the conversation context.
        repl_tool: Optional Python REPL tool; a default one is created if None.
        prompt_for_code_invoke: Prompt fragment instructing code generation.
        additional_callables: Extra function(s) exposed to generated code.
        function_reference_prefix: Prefix for the rendered function references.
        function_reference_seperator: Separator between function references.
        config: Optional runnable config forwarded to every model call.
        stop: Optional stop sequences forwarded to every model call.
        **kwargs: Extra keyword arguments forwarded to every model call.

    Raises:
        ImportError: If the ``rich`` library is unavailable.
    """
    try:
        console = Console()
        # Style settings for the various panel kinds.
        AI_STYLE = "bold bright_blue"
        EXECUTED_CODE_STYLE = "bold bright_yellow"
        OUTPUT_STYLE = "bold bright_cyan"
        THINKING_STYLE = "dim white"
    except ImportError:
        # NOTE(review): this only fires if Console is a lazily-resolved name;
        # a module-level `from rich... import` would raise before reaching here.
        raise ImportError("Rich library not found. Please install it: pip install rich")

    # --- Shell Initialization and Main Loop ---
    if repl_tool is None:
        repl_tool = get_default_repl_tool()

    def set_locals(**kwargs: object) -> None:
        """Set local variables for the REPL tool (creating its dict if needed)."""
        if repl_tool.locals is None:  # pyright: ignore[reportUnknownMemberType]
            repl_tool.locals = {}
        for key, value in kwargs.items():
            repl_tool.locals[key] = value  # pyright: ignore[reportUnknownMemberType]

    def respond(messages: list[BaseMessage]) -> str:
        """Stream the model's answer for *messages*, print it, and return it stripped."""
        response = ""
        with console.status("[bold yellow]AI is thinking..."):
            response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
            current_content = ""
            for chunk in chatterer.generate_stream(messages=messages):
                current_content += chunk
                # Update renderable (might not display smoothly without Live)
                response_panel.renderable = current_content
            response = current_content
        console.print(Panel(response, title="AI Response", style=AI_STYLE))
        return response.strip()

    def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
        """Drive the code-execution / thinking loop for one planned task."""
        task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
            think_before_speak.plans
        )
        console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
        session_messages: list[BaseMessage] = [
            AIMessage(
                content=f"Okay, I understand the task. Here's my plan:\n"
                f"- Task Summary: {think_before_speak.task}\n"
                f"- Steps:\n" + "\n".join(f"  - {p}" for p in think_before_speak.plans)
            )
        ]

        while True:
            current_context = context + session_messages
            # Ask the model whether the next step needs a tool (code) call.
            is_tool_call_needed: IsToolCallNeeded = chatterer(
                augment_prompt_for_toolcall(
                    function_signatures=function_signatures,
                    messages=current_context,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                ),
                IsToolCallNeeded,
                config=config,
                stop=stop,
                **kwargs,
            )

            if is_tool_call_needed.is_tool_call_needed:
                # --- Code Execution Path ---
                # Expose the conversation to generated code via REPL locals.
                set_locals(__context__=context, __session__=session_messages)
                code_execution: CodeExecutionResult = chatterer.exec(
                    messages=current_context,
                    repl_tool=repl_tool,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_signatures=function_signatures,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                code_block_display = (
                    f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
                    f"[bold]Output:[/bold]\n{code_execution.output}"
                )
                console.print(
                    Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
                )
                tool_call_message = AIMessage(
                    content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
                )
                session_messages.append(tool_call_message)

                # --- Review Code Execution ---
                current_context_after_exec = context + session_messages
                decision = chatterer(
                    augment_prompt_for_toolcall(
                        function_signatures=function_signatures,
                        messages=current_context_after_exec,
                        prompt_for_code_invoke=prompt_for_code_invoke,
                        function_reference_prefix=function_reference_prefix,
                        function_reference_seperator=function_reference_seperator,
                    ),
                    ReviewOnToolcall,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                review_text = (
                    f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
                )
                console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
                review_message = AIMessage(
                    content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
                    f"**Next Action:** {decision.next_action.strip()}"
                )
                session_messages.append(review_message)

                # --- Check Completion after Review ---
                if decision.is_task_completed:
                    console.print(
                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
                    )
                    break  # Exit loop
            else:
                # --- Thinking Path (No Code Needed) ---
                current_context_before_think = context + session_messages
                decision = chatterer(
                    augment_prompt_for_toolcall(
                        function_signatures=function_signatures,
                        messages=current_context_before_think,
                        prompt_for_code_invoke=prompt_for_code_invoke,
                        function_reference_prefix=function_reference_prefix,
                        function_reference_seperator=function_reference_seperator,
                    ),
                    Think,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                thinking_text = (
                    f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
                )
                console.print(
                    Panel(
                        thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
                    )
                )
                thinking_message = AIMessage(
                    content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
                    f"**Next Action:** {decision.next_action.strip()}"
                )
                session_messages.append(thinking_message)

                # --- Check Completion after Thinking ---
                # This check now relies on the LLM correctly interpreting the updated
                # description for Think.is_task_completed
                if decision.is_task_completed:
                    console.print(
                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
                    )
                    break  # Exit loop

        # --- End of Loop ---
        # Generate and display the final response based on the *entire* session history
        final_response_messages = context + session_messages
        response: str = respond(final_response_messages)
        # Add the final AI response to the main context
        context.append(AIMessage(content=response))

    # Normalise the extra callables into a list of function signatures.
    if additional_callables:
        if callable(additional_callables):
            additional_callables = [additional_callables]
        function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
    else:
        function_signatures: list[FunctionSignature] = []

    # Seed the conversation context with the system instruction(s).
    context: list[BaseMessage] = []
    if system_instruction:
        if isinstance(system_instruction, BaseMessage):
            context.append(system_instruction)
        elif isinstance(system_instruction, str):
            # Defensive: accept a bare string even though the annotation excludes it.
            context.append(SystemMessage(content=system_instruction))
        else:
            context.extend(list(system_instruction))

    console.print(
        Panel(
            "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
            title="Welcome",
            style=AI_STYLE,
            border_style="blue",
        )
    )

    while True:
        try:
            user_input = Prompt.ask("[bold green]You[/bold green]")
        except EOFError:
            # Treat end-of-input (e.g. Ctrl-D / piped stdin exhausted) as exit.
            user_input = "exit"

        if user_input.strip().lower() in ["quit", "exit"]:
            console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
            break

        context.append(HumanMessage(content=user_input.strip()))

        try:
            # Initial planning step
            initial_plan_decision = chatterer(
                augment_prompt_for_toolcall(
                    function_signatures=function_signatures,
                    messages=context,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                ),
                ThinkBeforeSpeak,
                config=config,
                stop=stop,
                **kwargs,
            )
            # Execute the task completion loop
            complete_task(initial_plan_decision)

        except Exception as e:
            # Surface the error to the user but keep the shell alive.
            import traceback

            console.print(
                Panel(
                    f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
                    title="Error",
                    border_style="red",
                )
            )
668
+
669
+
670
+ # if __name__ == "__main__":
671
+ # from .utils.base64_image import Base64Image
672
+
673
+ # repl_tool = get_default_repl_tool()
674
+
675
+ # def view_image(image_path: str) -> None:
676
+ # locals = repl_tool.locals # pyright: ignore[reportUnknownVariableType, reportUnknownMemberType]
677
+ # assert isinstance(locals, dict), "REPL tool locals are not set."
678
+ # session: list[BaseMessage] = cast(list[BaseMessage], locals["__session__"])
679
+
680
+ # image_or_none = Base64Image.from_url_or_path(image_path)
681
+ # if image_or_none is None:
682
+ # session.append(
683
+ # HumanMessage(content=f"Image not found at path: {image_path}. Please check the path and try again.")
684
+ # )
685
+ # return
686
+ # session.append(HumanMessage([image_or_none.data_uri_content]))
687
+
688
+ # interactive_shell(
689
+ # chatterer=Chatterer.from_provider("openai:gpt-4.1"),
690
+ # additional_callables=[view_image],
691
+ # repl_tool=repl_tool,
692
+ # )