chatterer 0.1.16__py3-none-any.whl → 0.1.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chatterer/__init__.py +93 -93
- chatterer/common_types/__init__.py +21 -21
- chatterer/common_types/io.py +19 -19
- chatterer/examples/anything_to_markdown.py +91 -0
- chatterer/examples/get_code_snippets.py +62 -0
- chatterer/examples/login_with_playwright.py +167 -0
- chatterer/examples/make_ppt.py +497 -0
- chatterer/examples/pdf_to_markdown.py +107 -0
- chatterer/examples/pdf_to_text.py +56 -0
- chatterer/examples/transcription_api.py +123 -0
- chatterer/examples/upstage_parser.py +100 -0
- chatterer/examples/webpage_to_markdown.py +79 -0
- chatterer/interactive.py +354 -692
- chatterer/language_model.py +533 -533
- chatterer/messages.py +21 -21
- chatterer/strategies/__init__.py +13 -13
- chatterer/strategies/atom_of_thoughts.py +975 -975
- chatterer/strategies/base.py +14 -14
- chatterer/tools/__init__.py +46 -46
- chatterer/tools/caption_markdown_images.py +384 -384
- chatterer/tools/citation_chunking/__init__.py +3 -3
- chatterer/tools/citation_chunking/chunks.py +53 -53
- chatterer/tools/citation_chunking/citation_chunker.py +118 -118
- chatterer/tools/citation_chunking/citations.py +285 -285
- chatterer/tools/citation_chunking/prompt.py +157 -157
- chatterer/tools/citation_chunking/reference.py +26 -26
- chatterer/tools/citation_chunking/utils.py +138 -138
- chatterer/tools/convert_pdf_to_markdown.py +302 -302
- chatterer/tools/convert_to_text.py +447 -447
- chatterer/tools/upstage_document_parser.py +705 -705
- chatterer/tools/webpage_to_markdown.py +739 -739
- chatterer/tools/youtube.py +146 -146
- chatterer/utils/__init__.py +15 -15
- chatterer/utils/base64_image.py +285 -285
- chatterer/utils/bytesio.py +59 -59
- chatterer/utils/code_agent.py +237 -237
- chatterer/utils/imghdr.py +148 -148
- {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/METADATA +392 -392
- chatterer-0.1.18.dist-info/RECORD +42 -0
- {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/WHEEL +1 -1
- chatterer-0.1.16.dist-info/RECORD +0 -33
- {chatterer-0.1.16.dist-info → chatterer-0.1.18.dist-info}/top_level.txt +0 -0
chatterer/interactive.py
CHANGED
@@ -1,692 +1,354 @@
|
|
1
|
-
from
|
2
|
-
|
3
|
-
|
4
|
-
|
5
|
-
|
6
|
-
|
7
|
-
|
8
|
-
|
9
|
-
|
10
|
-
from
|
11
|
-
from
|
12
|
-
from rich.
|
13
|
-
from rich.
|
14
|
-
|
15
|
-
|
16
|
-
from .
|
17
|
-
|
18
|
-
|
19
|
-
|
20
|
-
|
21
|
-
|
22
|
-
|
23
|
-
|
24
|
-
|
25
|
-
|
26
|
-
|
27
|
-
if
|
28
|
-
|
29
|
-
|
30
|
-
|
31
|
-
|
32
|
-
|
33
|
-
|
34
|
-
|
35
|
-
|
36
|
-
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
41
|
-
|
42
|
-
|
43
|
-
|
44
|
-
|
45
|
-
|
46
|
-
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
59
|
-
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
66
|
-
|
67
|
-
|
68
|
-
|
69
|
-
|
70
|
-
|
71
|
-
|
72
|
-
|
73
|
-
|
74
|
-
|
75
|
-
|
76
|
-
|
77
|
-
|
78
|
-
|
79
|
-
|
80
|
-
|
81
|
-
|
82
|
-
|
83
|
-
|
84
|
-
|
85
|
-
|
86
|
-
|
87
|
-
|
88
|
-
|
89
|
-
"
|
90
|
-
|
91
|
-
|
92
|
-
|
93
|
-
|
94
|
-
|
95
|
-
|
96
|
-
|
97
|
-
|
98
|
-
|
99
|
-
|
100
|
-
|
101
|
-
|
102
|
-
|
103
|
-
|
104
|
-
|
105
|
-
|
106
|
-
|
107
|
-
|
108
|
-
|
109
|
-
|
110
|
-
additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None
|
111
|
-
|
112
|
-
|
113
|
-
|
114
|
-
|
115
|
-
|
116
|
-
|
117
|
-
|
118
|
-
|
119
|
-
|
120
|
-
|
121
|
-
|
122
|
-
|
123
|
-
|
124
|
-
|
125
|
-
|
126
|
-
|
127
|
-
|
128
|
-
|
129
|
-
|
130
|
-
|
131
|
-
|
132
|
-
|
133
|
-
|
134
|
-
|
135
|
-
|
136
|
-
|
137
|
-
|
138
|
-
|
139
|
-
|
140
|
-
|
141
|
-
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
152
|
-
|
153
|
-
|
154
|
-
|
155
|
-
|
156
|
-
|
157
|
-
|
158
|
-
|
159
|
-
|
160
|
-
|
161
|
-
|
162
|
-
|
163
|
-
|
164
|
-
|
165
|
-
|
166
|
-
|
167
|
-
|
168
|
-
|
169
|
-
|
170
|
-
|
171
|
-
|
172
|
-
|
173
|
-
|
174
|
-
|
175
|
-
|
176
|
-
|
177
|
-
|
178
|
-
|
179
|
-
|
180
|
-
|
181
|
-
|
182
|
-
|
183
|
-
|
184
|
-
|
185
|
-
|
186
|
-
|
187
|
-
|
188
|
-
|
189
|
-
|
190
|
-
|
191
|
-
|
192
|
-
|
193
|
-
|
194
|
-
|
195
|
-
|
196
|
-
|
197
|
-
|
198
|
-
|
199
|
-
|
200
|
-
|
201
|
-
|
202
|
-
|
203
|
-
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
|
214
|
-
|
215
|
-
|
216
|
-
|
217
|
-
|
218
|
-
|
219
|
-
|
220
|
-
|
221
|
-
|
222
|
-
|
223
|
-
|
224
|
-
|
225
|
-
|
226
|
-
|
227
|
-
|
228
|
-
|
229
|
-
|
230
|
-
|
231
|
-
|
232
|
-
|
233
|
-
|
234
|
-
|
235
|
-
|
236
|
-
|
237
|
-
|
238
|
-
|
239
|
-
|
240
|
-
|
241
|
-
|
242
|
-
|
243
|
-
|
244
|
-
|
245
|
-
|
246
|
-
|
247
|
-
|
248
|
-
|
249
|
-
|
250
|
-
|
251
|
-
|
252
|
-
|
253
|
-
|
254
|
-
|
255
|
-
|
256
|
-
|
257
|
-
|
258
|
-
|
259
|
-
|
260
|
-
|
261
|
-
|
262
|
-
|
263
|
-
|
264
|
-
|
265
|
-
**
|
266
|
-
)
|
267
|
-
|
268
|
-
|
269
|
-
|
270
|
-
|
271
|
-
|
272
|
-
|
273
|
-
|
274
|
-
title="
|
275
|
-
|
276
|
-
|
277
|
-
|
278
|
-
|
279
|
-
|
280
|
-
|
281
|
-
|
282
|
-
|
283
|
-
|
284
|
-
|
285
|
-
|
286
|
-
|
287
|
-
|
288
|
-
|
289
|
-
|
290
|
-
|
291
|
-
|
292
|
-
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
309
|
-
|
310
|
-
|
311
|
-
|
312
|
-
|
313
|
-
|
314
|
-
|
315
|
-
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
)
|
350
|
-
|
351
|
-
|
352
|
-
|
353
|
-
|
354
|
-
|
355
|
-
if decision.is_task_completed:
|
356
|
-
console.print(
|
357
|
-
Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
|
358
|
-
)
|
359
|
-
break # Exit loop
|
360
|
-
|
361
|
-
# --- End of Loop ---
|
362
|
-
# Generate and display the final response based on the *entire* session history
|
363
|
-
final_response_messages = context + session_messages
|
364
|
-
response: str = self.respond(final_response_messages)
|
365
|
-
# Add the final AI response to the main context
|
366
|
-
context.append(AIMessage(content=response))
|
367
|
-
|
368
|
-
def get_tool_call_prompt(self, messages: LanguageModelInput) -> LanguageModelInput:
|
369
|
-
return augment_prompt_for_toolcall(
|
370
|
-
function_signatures=self.function_signatures,
|
371
|
-
messages=messages,
|
372
|
-
prompt_for_code_invoke=self.prompt_for_code_invoke,
|
373
|
-
function_reference_prefix=self.function_reference_prefix,
|
374
|
-
function_reference_seperator=self.function_reference_seperator,
|
375
|
-
)
|
376
|
-
|
377
|
-
@property
|
378
|
-
def python(self) -> "PythonAstREPLTool":
|
379
|
-
if self.repl_tool is None:
|
380
|
-
self.repl_tool = get_default_repl_tool()
|
381
|
-
return self.repl_tool
|
382
|
-
|
383
|
-
@property
|
384
|
-
def python_locals(self) -> dict[str, object]:
|
385
|
-
"""Get the local variables for the REPL tool."""
|
386
|
-
if self.python.locals is None: # pyright: ignore[reportUnknownMemberType]
|
387
|
-
self.python.locals = {}
|
388
|
-
return cast(dict[str, object], self.python.locals) # pyright: ignore[reportUnknownMemberType]
|
389
|
-
|
390
|
-
@property
|
391
|
-
def python_globals(self) -> dict[str, object]:
|
392
|
-
"""Get the global variables for the REPL tool."""
|
393
|
-
if self.python.globals is None: # pyright: ignore[reportUnknownMemberType]
|
394
|
-
self.python.globals = {}
|
395
|
-
return cast(dict[str, object], self.python.globals) # pyright: ignore[reportUnknownMemberType]
|
396
|
-
|
397
|
-
@cached_property
|
398
|
-
def console(self):
|
399
|
-
try:
|
400
|
-
from rich.console import Console
|
401
|
-
|
402
|
-
return Console()
|
403
|
-
except ImportError:
|
404
|
-
raise ImportError("Rich library not found. Please install it: pip install rich")
|
405
|
-
|
406
|
-
@property
|
407
|
-
def function_signatures(self) -> list[FunctionSignature]:
|
408
|
-
additional_callables = self.additional_callables
|
409
|
-
if additional_callables:
|
410
|
-
if callable(additional_callables):
|
411
|
-
additional_callables = [additional_callables]
|
412
|
-
function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
|
413
|
-
else:
|
414
|
-
function_signatures: list[FunctionSignature] = []
|
415
|
-
return function_signatures
|
416
|
-
|
417
|
-
|
418
|
-
def interactive_shell(
|
419
|
-
chatterer: Chatterer,
|
420
|
-
system_instruction: BaseMessage | Iterable[BaseMessage] = ([
|
421
|
-
SystemMessage(
|
422
|
-
"You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
|
423
|
-
),
|
424
|
-
]),
|
425
|
-
repl_tool: Optional["PythonAstREPLTool"] = None,
|
426
|
-
prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
|
427
|
-
additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
|
428
|
-
function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
|
429
|
-
function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
|
430
|
-
config: Optional[RunnableConfig] = None,
|
431
|
-
stop: Optional[list[str]] = None,
|
432
|
-
**kwargs: Any,
|
433
|
-
) -> None:
|
434
|
-
try:
|
435
|
-
console = Console()
|
436
|
-
# Style settings
|
437
|
-
AI_STYLE = "bold bright_blue"
|
438
|
-
EXECUTED_CODE_STYLE = "bold bright_yellow"
|
439
|
-
OUTPUT_STYLE = "bold bright_cyan"
|
440
|
-
THINKING_STYLE = "dim white"
|
441
|
-
except ImportError:
|
442
|
-
raise ImportError("Rich library not found. Please install it: pip install rich")
|
443
|
-
|
444
|
-
# --- Shell Initialization and Main Loop ---
|
445
|
-
if repl_tool is None:
|
446
|
-
repl_tool = get_default_repl_tool()
|
447
|
-
|
448
|
-
def set_locals(**kwargs: object) -> None:
|
449
|
-
"""Set local variables for the REPL tool."""
|
450
|
-
if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
|
451
|
-
repl_tool.locals = {}
|
452
|
-
for key, value in kwargs.items():
|
453
|
-
repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
|
454
|
-
|
455
|
-
def respond(messages: list[BaseMessage]) -> str:
|
456
|
-
response = ""
|
457
|
-
with console.status("[bold yellow]AI is thinking..."):
|
458
|
-
response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
|
459
|
-
current_content = ""
|
460
|
-
for chunk in chatterer.generate_stream(messages=messages):
|
461
|
-
current_content += chunk
|
462
|
-
# Update renderable (might not display smoothly without Live)
|
463
|
-
response_panel.renderable = current_content
|
464
|
-
response = current_content
|
465
|
-
console.print(Panel(response, title="AI Response", style=AI_STYLE))
|
466
|
-
return response.strip()
|
467
|
-
|
468
|
-
def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
|
469
|
-
task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
|
470
|
-
think_before_speak.plans
|
471
|
-
)
|
472
|
-
console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
|
473
|
-
session_messages: list[BaseMessage] = [
|
474
|
-
AIMessage(
|
475
|
-
content=f"Okay, I understand the task. Here's my plan:\n"
|
476
|
-
f"- Task Summary: {think_before_speak.task}\n"
|
477
|
-
f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
|
478
|
-
)
|
479
|
-
]
|
480
|
-
|
481
|
-
while True:
|
482
|
-
current_context = context + session_messages
|
483
|
-
is_tool_call_needed: IsToolCallNeeded = chatterer(
|
484
|
-
augment_prompt_for_toolcall(
|
485
|
-
function_signatures=function_signatures,
|
486
|
-
messages=current_context,
|
487
|
-
prompt_for_code_invoke=prompt_for_code_invoke,
|
488
|
-
function_reference_prefix=function_reference_prefix,
|
489
|
-
function_reference_seperator=function_reference_seperator,
|
490
|
-
),
|
491
|
-
IsToolCallNeeded,
|
492
|
-
config=config,
|
493
|
-
stop=stop,
|
494
|
-
**kwargs,
|
495
|
-
)
|
496
|
-
|
497
|
-
if is_tool_call_needed.is_tool_call_needed:
|
498
|
-
# --- Code Execution Path ---
|
499
|
-
set_locals(__context__=context, __session__=session_messages)
|
500
|
-
code_execution: CodeExecutionResult = chatterer.exec(
|
501
|
-
messages=current_context,
|
502
|
-
repl_tool=repl_tool,
|
503
|
-
prompt_for_code_invoke=prompt_for_code_invoke,
|
504
|
-
function_signatures=function_signatures,
|
505
|
-
function_reference_prefix=function_reference_prefix,
|
506
|
-
function_reference_seperator=function_reference_seperator,
|
507
|
-
config=config,
|
508
|
-
stop=stop,
|
509
|
-
**kwargs,
|
510
|
-
)
|
511
|
-
code_block_display = (
|
512
|
-
f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
|
513
|
-
f"[bold]Output:[/bold]\n{code_execution.output}"
|
514
|
-
)
|
515
|
-
console.print(
|
516
|
-
Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
|
517
|
-
)
|
518
|
-
tool_call_message = AIMessage(
|
519
|
-
content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
|
520
|
-
)
|
521
|
-
session_messages.append(tool_call_message)
|
522
|
-
|
523
|
-
# --- Review Code Execution ---
|
524
|
-
current_context_after_exec = context + session_messages
|
525
|
-
decision = chatterer(
|
526
|
-
augment_prompt_for_toolcall(
|
527
|
-
function_signatures=function_signatures,
|
528
|
-
messages=current_context_after_exec,
|
529
|
-
prompt_for_code_invoke=prompt_for_code_invoke,
|
530
|
-
function_reference_prefix=function_reference_prefix,
|
531
|
-
function_reference_seperator=function_reference_seperator,
|
532
|
-
),
|
533
|
-
ReviewOnToolcall,
|
534
|
-
config=config,
|
535
|
-
stop=stop,
|
536
|
-
**kwargs,
|
537
|
-
)
|
538
|
-
review_text = (
|
539
|
-
f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
|
540
|
-
f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
|
541
|
-
)
|
542
|
-
console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
|
543
|
-
review_message = AIMessage(
|
544
|
-
content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
|
545
|
-
f"**Next Action:** {decision.next_action.strip()}"
|
546
|
-
)
|
547
|
-
session_messages.append(review_message)
|
548
|
-
|
549
|
-
# --- Check Completion after Review ---
|
550
|
-
if decision.is_task_completed:
|
551
|
-
console.print(
|
552
|
-
Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
|
553
|
-
)
|
554
|
-
break # Exit loop
|
555
|
-
else:
|
556
|
-
# --- Thinking Path (No Code Needed) ---
|
557
|
-
current_context_before_think = context + session_messages
|
558
|
-
decision = chatterer(
|
559
|
-
augment_prompt_for_toolcall(
|
560
|
-
function_signatures=function_signatures,
|
561
|
-
messages=current_context_before_think,
|
562
|
-
prompt_for_code_invoke=prompt_for_code_invoke,
|
563
|
-
function_reference_prefix=function_reference_prefix,
|
564
|
-
function_reference_seperator=function_reference_seperator,
|
565
|
-
),
|
566
|
-
Think,
|
567
|
-
config=config,
|
568
|
-
stop=stop,
|
569
|
-
**kwargs,
|
570
|
-
)
|
571
|
-
thinking_text = (
|
572
|
-
f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
|
573
|
-
f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
|
574
|
-
)
|
575
|
-
console.print(
|
576
|
-
Panel(
|
577
|
-
thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
|
578
|
-
)
|
579
|
-
)
|
580
|
-
thinking_message = AIMessage(
|
581
|
-
content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
|
582
|
-
f"**Next Action:** {decision.next_action.strip()}"
|
583
|
-
)
|
584
|
-
session_messages.append(thinking_message)
|
585
|
-
|
586
|
-
# --- Check Completion after Thinking ---
|
587
|
-
# This check now relies on the LLM correctly interpreting the updated
|
588
|
-
# description for Think.is_task_completed
|
589
|
-
if decision.is_task_completed:
|
590
|
-
console.print(
|
591
|
-
Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
|
592
|
-
)
|
593
|
-
break # Exit loop
|
594
|
-
|
595
|
-
# --- End of Loop ---
|
596
|
-
# Generate and display the final response based on the *entire* session history
|
597
|
-
final_response_messages = context + session_messages
|
598
|
-
response: str = respond(final_response_messages)
|
599
|
-
# Add the final AI response to the main context
|
600
|
-
context.append(AIMessage(content=response))
|
601
|
-
|
602
|
-
if additional_callables:
|
603
|
-
if callable(additional_callables):
|
604
|
-
additional_callables = [additional_callables]
|
605
|
-
|
606
|
-
function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
|
607
|
-
else:
|
608
|
-
function_signatures: list[FunctionSignature] = []
|
609
|
-
|
610
|
-
context: list[BaseMessage] = []
|
611
|
-
if system_instruction:
|
612
|
-
if isinstance(system_instruction, BaseMessage):
|
613
|
-
context.append(system_instruction)
|
614
|
-
elif isinstance(system_instruction, str):
|
615
|
-
context.append(SystemMessage(content=system_instruction))
|
616
|
-
else:
|
617
|
-
context.extend(list(system_instruction))
|
618
|
-
|
619
|
-
console.print(
|
620
|
-
Panel(
|
621
|
-
"Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
|
622
|
-
title="Welcome",
|
623
|
-
style=AI_STYLE,
|
624
|
-
border_style="blue",
|
625
|
-
)
|
626
|
-
)
|
627
|
-
|
628
|
-
while True:
|
629
|
-
try:
|
630
|
-
user_input = Prompt.ask("[bold green]You[/bold green]")
|
631
|
-
except EOFError:
|
632
|
-
user_input = "exit"
|
633
|
-
|
634
|
-
if user_input.strip().lower() in ["quit", "exit"]:
|
635
|
-
console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
|
636
|
-
break
|
637
|
-
|
638
|
-
context.append(HumanMessage(content=user_input.strip()))
|
639
|
-
|
640
|
-
try:
|
641
|
-
# Initial planning step
|
642
|
-
initial_plan_decision = chatterer(
|
643
|
-
augment_prompt_for_toolcall(
|
644
|
-
function_signatures=function_signatures,
|
645
|
-
messages=context,
|
646
|
-
prompt_for_code_invoke=prompt_for_code_invoke,
|
647
|
-
function_reference_prefix=function_reference_prefix,
|
648
|
-
function_reference_seperator=function_reference_seperator,
|
649
|
-
),
|
650
|
-
ThinkBeforeSpeak,
|
651
|
-
config=config,
|
652
|
-
stop=stop,
|
653
|
-
**kwargs,
|
654
|
-
)
|
655
|
-
# Execute the task completion loop
|
656
|
-
complete_task(initial_plan_decision)
|
657
|
-
|
658
|
-
except Exception as e:
|
659
|
-
import traceback
|
660
|
-
|
661
|
-
console.print(
|
662
|
-
Panel(
|
663
|
-
f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
|
664
|
-
title="Error",
|
665
|
-
border_style="red",
|
666
|
-
)
|
667
|
-
)
|
668
|
-
|
669
|
-
|
670
|
-
# if __name__ == "__main__":
|
671
|
-
# from .utils.base64_image import Base64Image
|
672
|
-
|
673
|
-
# repl_tool = get_default_repl_tool()
|
674
|
-
|
675
|
-
# def view_image(image_path: str) -> None:
|
676
|
-
# locals = repl_tool.locals # pyright: ignore[reportUnknownVariableType, reportUnknownMemberType]
|
677
|
-
# assert isinstance(locals, dict), "REPL tool locals are not set."
|
678
|
-
# session: list[BaseMessage] = cast(list[BaseMessage], locals["__session__"])
|
679
|
-
|
680
|
-
# image_or_none = Base64Image.from_url_or_path(image_path)
|
681
|
-
# if image_or_none is None:
|
682
|
-
# session.append(
|
683
|
-
# HumanMessage(content=f"Image not found at path: {image_path}. Please check the path and try again.")
|
684
|
-
# )
|
685
|
-
# return
|
686
|
-
# session.append(HumanMessage([image_or_none.data_uri_content]))
|
687
|
-
|
688
|
-
# interactive_shell(
|
689
|
-
# chatterer=Chatterer.from_provider("openai:gpt-4.1"),
|
690
|
-
# additional_callables=[view_image],
|
691
|
-
# repl_tool=repl_tool,
|
692
|
-
# )
|
1
|
+
from typing import TYPE_CHECKING, Any, Callable, Iterable, Optional, TypeVar
|
2
|
+
|
3
|
+
from langchain_core.messages import (
|
4
|
+
AIMessage,
|
5
|
+
BaseMessage,
|
6
|
+
HumanMessage,
|
7
|
+
SystemMessage,
|
8
|
+
)
|
9
|
+
from langchain_core.runnables import RunnableConfig
|
10
|
+
from pydantic import BaseModel, Field
|
11
|
+
from rich.console import Console
|
12
|
+
from rich.panel import Panel
|
13
|
+
from rich.prompt import Prompt
|
14
|
+
|
15
|
+
from .language_model import Chatterer
|
16
|
+
from .utils.code_agent import (
|
17
|
+
DEFAULT_CODE_GENERATION_PROMPT,
|
18
|
+
DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
|
19
|
+
DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
|
20
|
+
CodeExecutionResult,
|
21
|
+
FunctionSignature,
|
22
|
+
augment_prompt_for_toolcall,
|
23
|
+
get_default_repl_tool,
|
24
|
+
)
|
25
|
+
|
26
|
+
if TYPE_CHECKING:
|
27
|
+
# Import only for type hinting to avoid circular dependencies if necessary
|
28
|
+
from langchain_experimental.tools.python.tool import PythonAstREPLTool
|
29
|
+
|
30
|
+
T = TypeVar("T", bound=BaseModel)
|
31
|
+
|
32
|
+
# --- Pydantic Models ---
|
33
|
+
|
34
|
+
|
35
|
+
class ThinkBeforeSpeak(BaseModel):
|
36
|
+
"""
|
37
|
+
Analyze the user's request and formulate an initial plan.
|
38
|
+
This involves understanding the core task and breaking it down into logical steps.
|
39
|
+
"""
|
40
|
+
|
41
|
+
task: str = Field(description="A concise summary of the user's overall goal or question.")
|
42
|
+
plans: list[str] = Field(
|
43
|
+
description="A sequence of actionable steps required to address the user's task. "
|
44
|
+
"Each step should be clear and logical. Indicate if a step likely requires code execution."
|
45
|
+
)
|
46
|
+
|
47
|
+
|
48
|
+
class IsToolCallNeeded(BaseModel):
|
49
|
+
"""
|
50
|
+
Determine if executing Python code is the necessary *next* action.
|
51
|
+
Carefully review the most recent messages, especially the last code execution output and review (if any).
|
52
|
+
"""
|
53
|
+
|
54
|
+
is_tool_call_needed: bool = Field(
|
55
|
+
description="Set to True ONLY if the *next logical step* requires executing Python code AND the previous step (if it involved code) did not already attempt this exact action and fail or produce unusable results. If the last code execution failed to achieve its goal (e.g., wrong data, error), set to False unless you plan to execute *different* code to overcome the previous issue. Set to False if the next step is reasoning, asking questions, or formulating a response based on existing information (including failed tool attempts)."
|
56
|
+
)
|
57
|
+
|
58
|
+
|
59
|
+
class ReviewOnToolcall(BaseModel):
|
60
|
+
"""
|
61
|
+
Evaluate the outcome of the Python code execution and decide the subsequent action.
|
62
|
+
Critically assess if the execution achieved the intended goal and if the output is usable.
|
63
|
+
"""
|
64
|
+
|
65
|
+
review_on_code_execution: str = Field(
|
66
|
+
description="A critical analysis of the code execution result. Did it succeed technically? Did it produce the *expected and usable* output according to the plan? Explicitly mention any errors, unexpected values (like incorrect dates), or unusable results."
|
67
|
+
)
|
68
|
+
next_action: str = Field(
|
69
|
+
description="Describe the *immediate next logical action* based on the review. **If the execution failed or yielded unusable/unexpected results, DO NOT suggest repeating the exact same code execution.** Instead, propose a different action, such as: 'Try a different code approach to get the time', 'Inform the user about the environmental issue with the date', 'Ask the user to verify the result', or 'Abandon this approach and try something else'. If the execution was successful and useful, describe the next step in the plan (e.g., 'Use the retrieved time to formulate the answer')."
|
70
|
+
)
|
71
|
+
is_task_completed: bool = Field(
|
72
|
+
description="Set to True ONLY IF the *overall user task* is now fully addressed OR if the *only remaining action* based on the review is to generate the final response/answer directly to the user (this includes informing the user about an unresolvable issue found during execution). Set to False if further *productive* intermediate steps (like trying different code, processing data further, asking for input) are needed before the final response."
|
73
|
+
)
|
74
|
+
|
75
|
+
|
76
|
+
class Think(BaseModel):
|
77
|
+
"""
|
78
|
+
Engage in reasoning when code execution is not the immediate next step.
|
79
|
+
This could involve synthesizing information, preparing the final answer, or identifying missing information.
|
80
|
+
"""
|
81
|
+
|
82
|
+
my_thinking: str = Field(
|
83
|
+
description="Explain your reasoning process. Why is code execution not needed now? "
|
84
|
+
"What information are you using from the context? How are you planning to formulate the response or proceed?"
|
85
|
+
)
|
86
|
+
next_action: str = Field(
|
87
|
+
description="Describe the *immediate next action* resulting from this thinking process. "
|
88
|
+
"Examples: 'Formulate the final answer to the user', 'Ask the user a clarifying question', "
|
89
|
+
"'Summarize the findings so far'."
|
90
|
+
)
|
91
|
+
# --- MODIFIED DESCRIPTION ---
|
92
|
+
is_task_completed: bool = Field(
|
93
|
+
description="Set this to True IF AND ONLY IF the 'next_action' you just described involves generating the final response, explanation, or answer directly for the user, based on the reasoning in 'my_thinking'. If the 'next_action' involves asking the user a question, planning *further* internal steps (beyond formulating the immediate response), or indicates the task cannot be completed yet, set this to False. **If the plan is simply to tell the user the answer now, set this to True.**"
|
94
|
+
)
|
95
|
+
# --- END OF MODIFICATION ---
|
96
|
+
|
97
|
+
|
98
|
+
# --- Interactive Shell Function ---
|
99
|
+
|
100
|
+
|
101
|
+
def interactive_shell(
|
102
|
+
chatterer: Chatterer,
|
103
|
+
system_instruction: BaseMessage | Iterable[BaseMessage] = ([
|
104
|
+
SystemMessage(
|
105
|
+
"You are an AI assistant capable of answering questions and executing Python code to help users solve tasks."
|
106
|
+
),
|
107
|
+
]),
|
108
|
+
repl_tool: Optional["PythonAstREPLTool"] = None,
|
109
|
+
prompt_for_code_invoke: Optional[str] = DEFAULT_CODE_GENERATION_PROMPT,
|
110
|
+
additional_callables: Optional[Callable[..., object] | Iterable[Callable[..., object]]] = None,
|
111
|
+
function_reference_prefix: Optional[str] = DEFAULT_FUNCTION_REFERENCE_PREFIX_PROMPT,
|
112
|
+
function_reference_seperator: str = DEFAULT_FUNCTION_REFERENCE_SEPARATOR,
|
113
|
+
config: Optional[RunnableConfig] = None,
|
114
|
+
stop: Optional[list[str]] = None,
|
115
|
+
**kwargs: Any,
|
116
|
+
) -> None:
|
117
|
+
try:
|
118
|
+
console = Console()
|
119
|
+
# Style settings
|
120
|
+
AI_STYLE = "bold bright_blue"
|
121
|
+
EXECUTED_CODE_STYLE = "bold bright_yellow"
|
122
|
+
OUTPUT_STYLE = "bold bright_cyan"
|
123
|
+
THINKING_STYLE = "dim white"
|
124
|
+
except ImportError:
|
125
|
+
raise ImportError("Rich library not found. Please install it: pip install rich")
|
126
|
+
|
127
|
+
# --- Shell Initialization and Main Loop ---
|
128
|
+
if repl_tool is None:
|
129
|
+
repl_tool = get_default_repl_tool()
|
130
|
+
|
131
|
+
def set_locals(**kwargs: object) -> None:
|
132
|
+
"""Set local variables for the REPL tool."""
|
133
|
+
if repl_tool.locals is None: # pyright: ignore[reportUnknownMemberType]
|
134
|
+
repl_tool.locals = {}
|
135
|
+
for key, value in kwargs.items():
|
136
|
+
repl_tool.locals[key] = value # pyright: ignore[reportUnknownMemberType]
|
137
|
+
|
138
|
+
def respond(messages: list[BaseMessage]) -> str:
|
139
|
+
response = ""
|
140
|
+
with console.status("[bold yellow]AI is thinking..."):
|
141
|
+
response_panel = Panel("", title="AI Response", style=AI_STYLE, border_style="blue")
|
142
|
+
current_content = ""
|
143
|
+
for chunk in chatterer.generate_stream(messages=messages):
|
144
|
+
current_content += chunk
|
145
|
+
# Update renderable (might not display smoothly without Live)
|
146
|
+
response_panel.renderable = current_content
|
147
|
+
response = current_content
|
148
|
+
console.print(Panel(response, title="AI Response", style=AI_STYLE))
|
149
|
+
return response.strip()
|
150
|
+
|
151
|
+
    def complete_task(think_before_speak: ThinkBeforeSpeak) -> None:
        """Run the plan/act/review loop for one user request.

        Starting from the initial plan in *think_before_speak*, repeatedly asks
        the model whether a tool (code) call is needed; either executes code via
        the REPL tool and reviews the result, or records a pure-reasoning step.
        Loops until the model reports the task completed, then streams a final
        response and appends it to the shared ``context`` (mutated in place).
        """
        # Show the model's task summary and plan to the user.
        task_info = f"[bold]Task:[/bold] {think_before_speak.task}\n[bold]Plans:[/bold]\n- " + "\n- ".join(
            think_before_speak.plans
        )
        console.print(Panel(task_info, title="Task Analysis & Plan", style="magenta"))
        # Per-task message history, kept separate from the long-lived `context`.
        session_messages: list[BaseMessage] = [
            AIMessage(
                content=f"Okay, I understand the task. Here's my plan:\n"
                f"- Task Summary: {think_before_speak.task}\n"
                f"- Steps:\n" + "\n".join(f" - {p}" for p in think_before_speak.plans)
            )
        ]

        while True:
            current_context = context + session_messages
            # Ask the model (structured output) whether code execution is needed.
            is_tool_call_needed: IsToolCallNeeded = chatterer(
                augment_prompt_for_toolcall(
                    function_signatures=function_signatures,
                    messages=current_context,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                ),
                IsToolCallNeeded,
                config=config,
                stop=stop,
                **kwargs,
            )

            if is_tool_call_needed.is_tool_call_needed:
                # --- Code Execution Path ---
                # Expose the conversation state to the executed code as
                # __context__ / __session__ REPL locals.
                set_locals(__context__=context, __session__=session_messages)
                code_execution: CodeExecutionResult = chatterer.exec(
                    messages=current_context,
                    repl_tool=repl_tool,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_signatures=function_signatures,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                code_block_display = (
                    f"[bold]Executed Code:[/bold]\n```python\n{code_execution.code}\n```\n\n"
                    f"[bold]Output:[/bold]\n{code_execution.output}"
                )
                console.print(
                    Panel(code_block_display, title="Code Execution", style=EXECUTED_CODE_STYLE, border_style="yellow")
                )
                # Record the execution in the session so the reviewer sees it.
                tool_call_message = AIMessage(
                    content=f"I executed the following code:\n```python\n{code_execution.code}\n```\n**Output:**\n{code_execution.output}"
                )
                session_messages.append(tool_call_message)

                # --- Review Code Execution ---
                current_context_after_exec = context + session_messages
                decision = chatterer(
                    augment_prompt_for_toolcall(
                        function_signatures=function_signatures,
                        messages=current_context_after_exec,
                        prompt_for_code_invoke=prompt_for_code_invoke,
                        function_reference_prefix=function_reference_prefix,
                        function_reference_seperator=function_reference_seperator,
                    ),
                    ReviewOnToolcall,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                review_text = (
                    f"[bold]Review:[/bold] {decision.review_on_code_execution.strip()}\n"
                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
                )
                console.print(Panel(review_text, title="Execution Review", style=OUTPUT_STYLE, border_style="cyan"))
                review_message = AIMessage(
                    content=f"**Review of Execution:** {decision.review_on_code_execution.strip()}\n"
                    f"**Next Action:** {decision.next_action.strip()}"
                )
                session_messages.append(review_message)

                # --- Check Completion after Review ---
                if decision.is_task_completed:
                    console.print(
                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
                    )
                    break  # Exit loop
            else:
                # --- Thinking Path (No Code Needed) ---
                current_context_before_think = context + session_messages
                decision = chatterer(
                    augment_prompt_for_toolcall(
                        function_signatures=function_signatures,
                        messages=current_context_before_think,
                        prompt_for_code_invoke=prompt_for_code_invoke,
                        function_reference_prefix=function_reference_prefix,
                        function_reference_seperator=function_reference_seperator,
                    ),
                    Think,
                    config=config,
                    stop=stop,
                    **kwargs,
                )
                thinking_text = (
                    f"[dim]Reasoning:[/dim] {decision.my_thinking.strip()}\n"
                    f"[bold]Next Action:[/bold] {decision.next_action.strip()}"
                )
                console.print(
                    Panel(
                        thinking_text, title="AI Thought Process (No Code)", style=THINKING_STYLE, border_style="white"
                    )
                )
                thinking_message = AIMessage(
                    content=f"**My Reasoning (without code execution):** {decision.my_thinking.strip()}\n"
                    f"**Next Action:** {decision.next_action.strip()}"
                )
                session_messages.append(thinking_message)

                # --- Check Completion after Thinking ---
                # This check now relies on the LLM correctly interpreting the updated
                # description for Think.is_task_completed
                if decision.is_task_completed:
                    console.print(
                        Panel("[bold green]Task Completed![/bold green]", title="Status", border_style="green")
                    )
                    break  # Exit loop

        # --- End of Loop ---
        # Generate and display the final response based on the *entire* session history
        final_response_messages = context + session_messages
        response: str = respond(final_response_messages)
        # Add the final AI response to the main context
        context.append(AIMessage(content=response))
|
284
|
+
|
285
|
+
    # --- Tool Signatures & Conversation Context ---
    # Normalize `additional_callables` (single callable or iterable) into a list
    # of FunctionSignature objects advertised to the model in prompts.
    if additional_callables:
        if callable(additional_callables):
            # A single callable was passed; wrap it in a list.
            additional_callables = [additional_callables]

        function_signatures: list[FunctionSignature] = FunctionSignature.from_callable(list(additional_callables))
    else:
        function_signatures: list[FunctionSignature] = []

    # Long-lived conversation history shared by the nested helpers above.
    context: list[BaseMessage] = []
    if system_instruction:
        # Accept a ready-made message, a plain string, or an iterable of messages.
        if isinstance(system_instruction, BaseMessage):
            context.append(system_instruction)
        elif isinstance(system_instruction, str):
            context.append(SystemMessage(content=system_instruction))
        else:
            context.extend(list(system_instruction))
|
301
|
+
|
302
|
+
    # --- Welcome Banner & Main REPL Loop ---
    console.print(
        Panel(
            "Welcome to the Interactive Chatterer Shell!\nType 'quit' or 'exit' to end the conversation.",
            title="Welcome",
            style=AI_STYLE,
            border_style="blue",
        )
    )

    while True:
        try:
            user_input = Prompt.ask("[bold green]You[/bold green]")
        except EOFError:
            # Stdin closed (e.g. piped input exhausted) — treat as an exit request.
            user_input = "exit"

        if user_input.strip().lower() in ["quit", "exit"]:
            console.print(Panel("Goodbye!", title="Exit", style=AI_STYLE, border_style="blue"))
            break

        context.append(HumanMessage(content=user_input.strip()))

        try:
            # Initial planning step
            initial_plan_decision = chatterer(
                augment_prompt_for_toolcall(
                    function_signatures=function_signatures,
                    messages=context,
                    prompt_for_code_invoke=prompt_for_code_invoke,
                    function_reference_prefix=function_reference_prefix,
                    function_reference_seperator=function_reference_seperator,
                ),
                ThinkBeforeSpeak,
                config=config,
                stop=stop,
                **kwargs,
            )
            # Execute the task completion loop
            complete_task(initial_plan_decision)

        except Exception as e:
            # Broad boundary on purpose: keep the shell alive and show the
            # traceback in a panel rather than crashing the session.
            import traceback

            console.print(
                Panel(
                    f"[bold red]An error occurred:[/bold red]\n{e}\n\n[yellow]Traceback:[/yellow]\n{traceback.format_exc()}",
                    title="Error",
                    border_style="red",
                )
            )
|
351
|
+
|
352
|
+
|
353
|
+
# Script entry point: launch the shell against the default OpenAI backend.
if __name__ == "__main__":
    interactive_shell(chatterer=Chatterer.openai())
|