chatterer 0.1.23__py3-none-any.whl → 0.1.25__py3-none-any.whl

This diff compares the contents of publicly released package versions published to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their respective public registries.
Files changed (47)
  1. chatterer/__init__.py +97 -93
  2. chatterer/common_types/__init__.py +21 -21
  3. chatterer/common_types/io.py +19 -19
  4. chatterer/examples/__main__.py +75 -0
  5. chatterer/examples/{anything_to_markdown.py → any2md.py} +85 -85
  6. chatterer/examples/{pdf_to_markdown.py → pdf2md.py} +338 -338
  7. chatterer/examples/{pdf_to_text.py → pdf2txt.py} +54 -54
  8. chatterer/examples/{make_ppt.py → ppt.py} +486 -488
  9. chatterer/examples/pw.py +143 -0
  10. chatterer/examples/{get_code_snippets.py → snippet.py} +56 -55
  11. chatterer/examples/transcribe.py +192 -0
  12. chatterer/examples/{upstage_parser.py → upstage.py} +89 -89
  13. chatterer/examples/{webpage_to_markdown.py → web2md.py} +80 -70
  14. chatterer/interactive.py +354 -354
  15. chatterer/language_model.py +536 -536
  16. chatterer/messages.py +21 -21
  17. chatterer/strategies/__init__.py +13 -13
  18. chatterer/strategies/atom_of_thoughts.py +975 -975
  19. chatterer/strategies/base.py +14 -14
  20. chatterer/tools/__init__.py +46 -46
  21. chatterer/tools/caption_markdown_images.py +384 -384
  22. chatterer/tools/citation_chunking/__init__.py +3 -3
  23. chatterer/tools/citation_chunking/chunks.py +53 -53
  24. chatterer/tools/citation_chunking/citation_chunker.py +118 -118
  25. chatterer/tools/citation_chunking/citations.py +285 -285
  26. chatterer/tools/citation_chunking/prompt.py +157 -157
  27. chatterer/tools/citation_chunking/reference.py +26 -26
  28. chatterer/tools/citation_chunking/utils.py +138 -138
  29. chatterer/tools/convert_pdf_to_markdown.py +645 -625
  30. chatterer/tools/convert_to_text.py +446 -446
  31. chatterer/tools/upstage_document_parser.py +705 -705
  32. chatterer/tools/webpage_to_markdown.py +739 -739
  33. chatterer/tools/youtube.py +146 -146
  34. chatterer/utils/__init__.py +15 -15
  35. chatterer/utils/base64_image.py +293 -285
  36. chatterer/utils/bytesio.py +59 -59
  37. chatterer/utils/code_agent.py +237 -237
  38. chatterer/utils/imghdr.py +148 -148
  39. {chatterer-0.1.23.dist-info → chatterer-0.1.25.dist-info}/METADATA +390 -392
  40. chatterer-0.1.25.dist-info/RECORD +45 -0
  41. chatterer-0.1.25.dist-info/entry_points.txt +2 -0
  42. chatterer/examples/login_with_playwright.py +0 -156
  43. chatterer/examples/transcription_api.py +0 -112
  44. chatterer-0.1.23.dist-info/RECORD +0 -44
  45. chatterer-0.1.23.dist-info/entry_points.txt +0 -10
  46. {chatterer-0.1.23.dist-info → chatterer-0.1.25.dist-info}/WHEEL +0 -0
  47. {chatterer-0.1.23.dist-info → chatterer-0.1.25.dist-info}/top_level.txt +0 -0
chatterer/interactive.py CHANGED
@@ -1,354 +1,354 @@
- from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, TypeVar
-
- from langchain_core.messages import (
-     AIMessage,
-     BaseMessage,
-     HumanMessage,
-     SystemMessage,
- )
- from langchain_core.runnables import RunnableConfig
- from pydantic import BaseModel, Field
- from rich.console import Console
- from rich.panel import Panel
- from rich.prompt import Prompt
-
- from .language_model import Chatterer
- from .utils.code_agent import (
-     DEFAULT_CODE_GENERATION_PROMPT,
-     DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
-     DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
-     CodeExecutionResult,
-     FunctionSignature,
-     augment_prompt_for_toolcall,
-     get_default_repl_tool,
- )
-
- if TYPE_CHECKING:
-     # Import only for type hinting to avoid circular dependencies if necessary
-     from langchain_experimental.tools.python.tool import PythonAstREPLTool
-
- T = TypeVar("T", bound=BaseModel)
-
- # --- Pydantic Models ---
-
-
- class ThinkBeforeSpeak(BaseModel):
-     """
-     Analyze the user's request and formulate an initial plan.
-     This involves understanding the core task and breaking it down into logical steps.
-     """
-
-     task: str = Field(description="A concise summary of the user's overall goal or question.")
-     plans: list[str] = Field(
-         description="A sequence of actionable steps required to address the user's task. "
-         "Each step should be clear and logical. Indicate if a step likely requires code execution."
-     )
-
-
- class IsToolCallNeeded(BaseModel):
-     """
-     Determine if executing Python code is the necessary *next* action.
-     Carefully review the most recent messages, especially the last code execution output and review (if any).
-     """
-
-     is_tool_call_needed: bool = Field(
-         description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
-     )
-
-
- class ReviewOnToolcall(BaseModel):
-     """
-     Evaluate the outcome of the Python code execution and decide the subsequent action.
-     Critically assess if the execution achieved the intended goal and if the output is usable.
-     """
-
-     review_on_code_execution: str = Field(
-         description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
-     )
-     next_action: str = Field(
-         description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
-     )
-     is_task_completed: bool = Field(
-         description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
-     )
-
-
- class Think(BaseModel):
-     """
-     Engage in reasoning when code execution is not the immediate next step.
-     This could involve synthesizing information, preparing the final answer, or identifying missing information.
-     """
-
-     my_thinking: str = Field(
-         description="Explain your reasoning process. Why is code execution not needed now? "
-         "What information are you using from the context? How are you planning to formulate the response or proceed?"
-     )
-     next_action: str = Field(
-         description="Describe the *immediate next action* resulting from this thinking process. "
-         "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
-         "'Summarize the findings so far'."
-     )
-     # --- MODIFIED DESCRIPTION ---
-     is_task_completed: bool = Field(
-         description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
-     )
-     # --- END OF MODIFICATION ---
-
-
- # --- Interactive Shell Function ---
-
-
- def interactive_shell(
-     chatterer: Chatterer,
-     system_instruction: BaseMessage | Iterable[BaseMessage] = ([
-         SystemMessage(
-             "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
-         ),
-     ]),
-     repl_tool: Optional["PythonAstREPLTool"] = None,
-     prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
-     additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
-     function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
-     function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
-     config: Optional[RunnableConfig] = None,
-     stop: Optional[list[str]] = None,
-     **kwargs: Any,
- ) -> None:
-     try:
-         console = Console()
-         # Style settings
-         AI_STYLE = "bold bright_blue"
-         EXECUTED_CODE_STYLE = "bold bright_yellow"
-         OUTPUT_STYLE = "bold bright_cyan"
-         THINKING_STYLE = "dim white"
-     except ImportError:
-         raise ImportError("Rich library not found. Please install it: pip install rich")
-
-     # --- Shell Initialization and Main Loop ---
-     if repl_tool is None:
-         repl_tool = get_default_repl_tool()
-
-     def set_locals(**kwargs: object) -> None:
-         """Set local variables for the REPL tool."""
-         if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
-             repl_tool.locals = {}
-         for key, value in kwargs.items():
-             repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
-
-     def respond(messages: list[BaseMessage]) -> str:
-         response = ""
-         with console.status("[bold yellow]AI is thinking..."):
-             response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
-             current_content = ""
-             for chunk in chatterer.generate_stream(messages=messages):
-                 current_content += chunk
-                 # Update renderable (might not display smoothly without Live)
-                 response_panel.renderable = current_content
-             response = current_content
-         console.print(Panel(response, title="AI Response", style=AI_STYLE))
-         return response.strip()
-
-     def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
-         task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
-             think_before_speak.plans
-         )
-         console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
-         session_messages: list[BaseMessage] = [
-             AIMessage(
-                 content=f"Okay, I understand the task. Here's my plan:\n"
-                 f"- Task Summary: {think_before_speak.task}\n"
-                 f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
-             )
-         ]
-
-         while True:
-             current_context = context + session_messages
-             is_tool_call_needed: IsToolCallNeeded = chatterer(
-                 augment_prompt_for_toolcall(
-                     function_signatures=function_signatures,
-                     messages=current_context,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                 ),
-                 IsToolCallNeeded,
-                 config=config,
-                 stop=stop,
-                 **kwargs,
-             )
-
-             if is_tool_call_needed.is_tool_call_needed:
-                 # --- Code Execution Path ---
-                 set_locals(__context__=context, __session__=session_messages)
-                 code_execution: CodeExecutionResult = chatterer.exec(
-                     messages=current_context,
-                     repl_tool=repl_tool,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_signatures=function_signatures,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 code_block_display = (
-                     f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
-                     f"[bold]Output:[/bold]\n{code_execution.output}"
-                 )
-                 console.print(
-                     Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
-                 )
-                 tool_call_message = AIMessage(
-                     content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
-                 )
-                 session_messages.append(tool_call_message)
-
-                 # --- Review Code Execution ---
-                 current_context_after_exec = context + session_messages
-                 decision = chatterer(
-                     augment_prompt_for_toolcall(
-                         function_signatures=function_signatures,
-                         messages=current_context_after_exec,
-                         prompt_for_code_invoke=prompt_for_code_invoke,
-                         function_reference_prefix=function_reference_prefix,
-                         function_reference_seperator=function_reference_seperator,
-                     ),
-                     ReviewOnToolcall,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 review_text = (
-                     f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
-                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
-                 )
-                 console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
-                 review_message = AIMessage(
-                     content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
-                     f"**Next Action:** {decision.next_action.strip()}"
-                 )
-                 session_messages.append(review_message)
-
-                 # --- Check Completion after Review ---
-                 if decision.is_task_completed:
-                     console.print(
-                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
-                     )
-                     break # Exit loop
-             else:
-                 # --- Thinking Path (No Code Needed) ---
-                 current_context_before_think = context + session_messages
-                 decision = chatterer(
-                     augment_prompt_for_toolcall(
-                         function_signatures=function_signatures,
-                         messages=current_context_before_think,
-                         prompt_for_code_invoke=prompt_for_code_invoke,
-                         function_reference_prefix=function_reference_prefix,
-                         function_reference_seperator=function_reference_seperator,
-                     ),
-                     Think,
-                     config=config,
-                     stop=stop,
-                     **kwargs,
-                 )
-                 thinking_text = (
-                     f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
-                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
-                 )
-                 console.print(
-                     Panel(
-                         thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
-                     )
-                 )
-                 thinking_message = AIMessage(
-                     content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
-                     f"**Next Action:** {decision.next_action.strip()}"
-                 )
-                 session_messages.append(thinking_message)
-
-                 # --- Check Completion after Thinking ---
-                 # This check now relies on the LLM correctly interpreting the updated
-                 # description for Think.is_task_completed
-                 if decision.is_task_completed:
-                     console.print(
-                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
-                     )
-                     break # Exit loop
-
-         # --- End of Loop ---
-         # Generate and display the final response based on the *entire* session history
-         final_response_messages = context + session_messages
-         response: str = respond(final_response_messages)
-         # Add the final AI response to the main context
-         context.append(AIMessage(content=response))
-
-     if additional_callables:
-         if callable(additional_callables):
-             additional_callables = [additional_callables]
-
-         function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
-     else:
-         function_signatures: list[FunctionSignature] = []
-
-     context: list[BaseMessage] = []
-     if system_instruction:
-         if isinstance(system_instruction, BaseMessage):
-             context.append(system_instruction)
-         elif isinstance(system_instruction, str):
-             context.append(SystemMessage(content=system_instruction))
-         else:
-             context.extend(list(system_instruction))
-
-     console.print(
-         Panel(
-             "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
-             title="Welcome",
-             style=AI_STYLE,
-             border_style="blue",
-         )
-     )
-
-     while True:
-         try:
-             user_input = Prompt.ask("[bold green]You[/bold green]")
-         except EOFError:
-             user_input = "exit"
-
-         if user_input.strip().lower() in ["quit", "exit"]:
-             console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
-             break
-
-         context.append(HumanMessage(content=user_input.strip()))
-
-         try:
-             # Initial planning step
-             initial_plan_decision = chatterer(
-                 augment_prompt_for_toolcall(
-                     function_signatures=function_signatures,
-                     messages=context,
-                     prompt_for_code_invoke=prompt_for_code_invoke,
-                     function_reference_prefix=function_reference_prefix,
-                     function_reference_seperator=function_reference_seperator,
-                 ),
-                 ThinkBeforeSpeak,
-                 config=config,
-                 stop=stop,
-                 **kwargs,
-             )
-             # Execute the task completion loop
-             complete_task(initial_plan_decision)
-
-         except Exception as e:
-             import traceback
-
-             console.print(
-                 Panel(
-                     f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
-                     title="Error",
-                     border_style="red",
-                 )
-             )
-
-
- if __name__ == "__main__":
-     interactive_shell(chatterer=Chatterer.openai())
+ from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, TypeVar
+
+ from langchain_core.messages import (
+     AIMessage,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+ )
+ from langchain_core.runnables import RunnableConfig
+ from pydantic import BaseModel, Field
+ from rich.console import Console
+ from rich.panel import Panel
+ from rich.prompt import Prompt
+
+ from .language_model import Chatterer
+ from .utils.code_agent import (
+     DEFAULT_CODE_GENERATION_PROMPT,
+     DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+     DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+     CodeExecutionResult,
+     FunctionSignature,
+     augment_prompt_for_toolcall,
+     get_default_repl_tool,
+ )
+
+ if TYPE_CHECKING:
+     # Import only for type hinting to avoid circular dependencies if necessary
+     from langchain_experimental.tools.python.tool import PythonAstREPLTool
+
+ T = TypeVar("T", bound=BaseModel)
+
+ # --- Pydantic Models ---
+
+
+ class ThinkBeforeSpeak(BaseModel):
+     """
+     Analyze the user's request and formulate an initial plan.
+     This involves understanding the core task and breaking it down into logical steps.
+     """
+
+     task: str = Field(description="A concise summary of the user's overall goal or question.")
+     plans: list[str] = Field(
+         description="A sequence of actionable steps required to address the user's task. "
+         "Each step should be clear and logical. Indicate if a step likely requires code execution."
+     )
+
+
+ class IsToolCallNeeded(BaseModel):
+     """
+     Determine if executing Python code is the necessary *next* action.
+     Carefully review the most recent messages, especially the last code execution output and review (if any).
+     """
+
+     is_tool_call_needed: bool = Field(
+         description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
+     )
+
+
+ class ReviewOnToolcall(BaseModel):
+     """
+     Evaluate the outcome of the Python code execution and decide the subsequent action.
+     Critically assess if the execution achieved the intended goal and if the output is usable.
+     """
+
+     review_on_code_execution: str = Field(
+         description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
+     )
+     next_action: str = Field(
+         description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
+     )
+     is_task_completed: bool = Field(
+         description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
+     )
+
+
+ class Think(BaseModel):
+     """
+     Engage in reasoning when code execution is not the immediate next step.
+     This could involve synthesizing information, preparing the final answer, or identifying missing information.
+     """
+
+     my_thinking: str = Field(
+         description="Explain your reasoning process. Why is code execution not needed now? "
+         "What information are you using from the context? How are you planning to formulate the response or proceed?"
+     )
+     next_action: str = Field(
+         description="Describe the *immediate next action* resulting from this thinking process. "
+         "Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
+         "'Summarize the findings so far'."
+     )
+     # --- MODIFIED DESCRIPTION ---
+     is_task_completed: bool = Field(
+         description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
+     )
+     # --- END OF MODIFICATION ---
+
+
+ # --- Interactive Shell Function ---
+
+
+ def interactive_shell(
+     chatterer: Chatterer,
+     system_instruction: BaseMessage | Iterable[BaseMessage] = ([
+         SystemMessage(
+             "You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
+         ),
+     ]),
+     repl_tool: Optional["PythonAstREPLTool"] = None,
+     prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
+     additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
+     function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
+     function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
+     config: Optional[RunnableConfig] = None,
+     stop: Optional[list[str]] = None,
+     **kwargs: Any,
+ ) -> None:
+     try:
+         console = Console()
+         # Style settings
+         AI_STYLE = "bold bright_blue"
+         EXECUTED_CODE_STYLE = "bold bright_yellow"
+         OUTPUT_STYLE = "bold bright_cyan"
+         THINKING_STYLE = "dim white"
+     except ImportError:
+         raise ImportError("Rich library not found. Please install it: pip install rich")
+
+     # --- Shell Initialization and Main Loop ---
+     if repl_tool is None:
+         repl_tool = get_default_repl_tool()
+
+     def set_locals(**kwargs: object) -> None:
+         """Set local variables for the REPL tool."""
+         if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
+             repl_tool.locals = {}
+         for key, value in kwargs.items():
+             repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
+
+     def respond(messages: list[BaseMessage]) -> str:
+         response = ""
+         with console.status("[bold yellow]AI is thinking..."):
+             response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
+             current_content = ""
+             for chunk in chatterer.generate_stream(messages=messages):
+                 current_content += chunk
+                 # Update renderable (might not display smoothly without Live)
+                 response_panel.renderable = current_content
+             response = current_content
+         console.print(Panel(response, title="AI Response", style=AI_STYLE))
+         return response.strip()
+
+     def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
+         task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
+             think_before_speak.plans
+         )
+         console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
+         session_messages: list[BaseMessage] = [
+             AIMessage(
+                 content=f"Okay, I understand the task. Here's my plan:\n"
+                 f"- Task Summary: {think_before_speak.task}\n"
+                 f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
+             )
+         ]
+
+         while True:
+             current_context = context + session_messages
+             is_tool_call_needed: IsToolCallNeeded = chatterer(
+                 augment_prompt_for_toolcall(
+                     function_signatures=function_signatures,
+                     messages=current_context,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                 ),
+                 IsToolCallNeeded,
+                 config=config,
+                 stop=stop,
+                 **kwargs,
+             )
+
+             if is_tool_call_needed.is_tool_call_needed:
+                 # --- Code Execution Path ---
+                 set_locals(__context__=context, __session__=session_messages)
+                 code_execution: CodeExecutionResult = chatterer.exec(
+                     messages=current_context,
+                     repl_tool=repl_tool,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_signatures=function_signatures,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 code_block_display = (
+                     f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
+                     f"[bold]Output:[/bold]\n{code_execution.output}"
+                 )
+                 console.print(
+                     Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
+                 )
+                 tool_call_message = AIMessage(
+                     content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
+                 )
+                 session_messages.append(tool_call_message)
+
+                 # --- Review Code Execution ---
+                 current_context_after_exec = context + session_messages
+                 decision = chatterer(
+                     augment_prompt_for_toolcall(
+                         function_signatures=function_signatures,
+                         messages=current_context_after_exec,
+                         prompt_for_code_invoke=prompt_for_code_invoke,
+                         function_reference_prefix=function_reference_prefix,
+                         function_reference_seperator=function_reference_seperator,
+                     ),
+                     ReviewOnToolcall,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 review_text = (
+                     f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
+                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                 )
+                 console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
+                 review_message = AIMessage(
+                     content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
+                     f"**Next Action:** {decision.next_action.strip()}"
+                 )
+                 session_messages.append(review_message)
+
+                 # --- Check Completion after Review ---
+                 if decision.is_task_completed:
+                     console.print(
+                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
+                     )
+                     break # Exit loop
+             else:
+                 # --- Thinking Path (No Code Needed) ---
+                 current_context_before_think = context + session_messages
+                 decision = chatterer(
+                     augment_prompt_for_toolcall(
+                         function_signatures=function_signatures,
+                         messages=current_context_before_think,
+                         prompt_for_code_invoke=prompt_for_code_invoke,
+                         function_reference_prefix=function_reference_prefix,
+                         function_reference_seperator=function_reference_seperator,
+                     ),
+                     Think,
+                     config=config,
+                     stop=stop,
+                     **kwargs,
+                 )
+                 thinking_text = (
+                     f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
+                     f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
+                 )
+                 console.print(
+                     Panel(
+                         thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
+                     )
+                 )
+                 thinking_message = AIMessage(
+                     content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
+                     f"**Next Action:** {decision.next_action.strip()}"
+                 )
+                 session_messages.append(thinking_message)
+
+                 # --- Check Completion after Thinking ---
+                 # This check now relies on the LLM correctly interpreting the updated
+                 # description for Think.is_task_completed
+                 if decision.is_task_completed:
+                     console.print(
+                         Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
+                     )
+                     break # Exit loop
+
+         # --- End of Loop ---
+         # Generate and display the final response based on the *entire* session history
+         final_response_messages = context + session_messages
+         response: str = respond(final_response_messages)
+         # Add the final AI response to the main context
+         context.append(AIMessage(content=response))
+
+     if additional_callables:
+         if callable(additional_callables):
+             additional_callables = [additional_callables]
+
+         function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
+     else:
+         function_signatures: list[FunctionSignature] = []
+
+     context: list[BaseMessage] = []
+     if system_instruction:
+         if isinstance(system_instruction, BaseMessage):
+             context.append(system_instruction)
+         elif isinstance(system_instruction, str):
+             context.append(SystemMessage(content=system_instruction))
+         else:
+             context.extend(list(system_instruction))
+
+     console.print(
+         Panel(
+             "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
+             title="Welcome",
+             style=AI_STYLE,
+             border_style="blue",
+         )
+     )
+
+     while True:
+         try:
+             user_input = Prompt.ask("[bold green]You[/bold green]")
+         except EOFError:
+             user_input = "exit"
+
+         if user_input.strip().lower() in ["quit", "exit"]:
+             console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
+             break
+
+         context.append(HumanMessage(content=user_input.strip()))
+
+         try:
+             # Initial planning step
+             initial_plan_decision = chatterer(
+                 augment_prompt_for_toolcall(
+                     function_signatures=function_signatures,
+                     messages=context,
+                     prompt_for_code_invoke=prompt_for_code_invoke,
+                     function_reference_prefix=function_reference_prefix,
+                     function_reference_seperator=function_reference_seperator,
+                 ),
+                 ThinkBeforeSpeak,
+                 config=config,
+                 stop=stop,
+                 **kwargs,
+             )
+             # Execute the task completion loop
+             complete_task(initial_plan_decision)
+
+         except Exception as e:
+             import traceback
+
+             console.print(
+                 Panel(
+                     f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
+                     title="Error",
+                     border_style="red",
+                 )
+             )
+
+
+ if __name__ == "__main__":
+     interactive_shell(chatterer=Chatterer.openai())
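
A minimal usage sketch of the interactive_shell API shown above, for orientation only and not taken from either wheel: it assumes an OpenAI API key is configured for Chatterer.openai(), and current_utc_time is a hypothetical helper used to illustrate the additional_callables parameter.

from datetime import datetime, timezone

from chatterer.interactive import interactive_shell
from chatterer.language_model import Chatterer


def current_utc_time() -> str:
    # Hypothetical helper exposed to the code-execution REPL via additional_callables.
    return datetime.now(timezone.utc).isoformat()


# Start the Rich-based chat loop; type 'quit' or 'exit' to leave.
interactive_shell(
    chatterer=Chatterer.openai(),  # same constructor used in this module's __main__ block
    additional_callables=[current_utc_time],
)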