langroid 0.10.2__py3-none-any.whl → 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/__init__.py +1 -2
- langroid/agent/base.py +138 -54
- langroid/agent/chat_agent.py +25 -4
- langroid/agent/chat_document.py +5 -1
- langroid/agent/task.py +129 -25
- langroid/agent/tool_message.py +15 -43
- langroid/agent/tools/__init__.py +4 -0
- langroid/agent/tools/orchestration.py +87 -8
- langroid/language_models/mock_lm.py +5 -4
- langroid/parsing/web_search.py +7 -4
- langroid/utils/.chainlit/config.toml +121 -0
- langroid/utils/.chainlit/translations/en-US.json +231 -0
- langroid/utils/types.py +93 -0
- {langroid-0.10.2.dist-info → langroid-0.11.0.dist-info}/METADATA +1 -1
- {langroid-0.10.2.dist-info → langroid-0.11.0.dist-info}/RECORD +18 -15
- pyproject.toml +2 -2
- {langroid-0.10.2.dist-info → langroid-0.11.0.dist-info}/LICENSE +0 -0
- {langroid-0.10.2.dist-info → langroid-0.11.0.dist-info}/WHEEL +0 -0
langroid/agent/task.py
CHANGED
@@ -18,7 +18,9 @@ from typing import (
     Optional,
     Tuple,
     Type,
+    TypeVar,
     cast,
+    overload,
 )
 
 import numpy as np
@@ -33,8 +35,8 @@ from langroid.agent.chat_document import (
     ChatDocument,
     StatusCode,
 )
-from langroid.agent.tool_message import
-from langroid.agent.tools.orchestration import AgentDoneTool, DoneTool
+from langroid.agent.tool_message import ToolMessage
+from langroid.agent.tools.orchestration import AgentDoneTool, DoneTool, FinalResultTool
 from langroid.cachedb.redis_cachedb import RedisCache, RedisCacheConfig
 from langroid.exceptions import InfiniteLoopException
 from langroid.mytypes import Entity
@@ -53,11 +55,14 @@ from langroid.utils.constants import (
 from langroid.utils.logging import RichFileLogger, setup_file_logger
 from langroid.utils.object_registry import scheduled_cleanup
 from langroid.utils.system import hash
+from langroid.utils.types import to_string
 
 logger = logging.getLogger(__name__)
 
 Responder = Entity | Type["Task"]
 
+T = TypeVar("T")
+
 
 def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
     pass
@@ -153,6 +158,7 @@ class Task:
         erase_substeps: bool = False,
         allow_null_result: bool = False,
         max_stalled_steps: int = 5,
+        default_return_type: Optional[type] = None,
         done_if_no_response: List[Responder] = [],
         done_if_response: List[Responder] = [],
         config: TaskConfig = TaskConfig(),
@@ -190,6 +196,8 @@ class Task:
             default_human_response (str|None): default response from user; useful for
                 testing, to avoid interactive input from user.
                 [Instead of this, setting `interactive` usually suffices]
+            default_return_type: if not None, extracts a value of this type from the
+                result of self.run()
             interactive (bool): if true, wait for human input after each non-human
                 response (prevents infinite loop of non-human responses).
                 Default is true. If false, then `default_human_response` is set to ""
@@ -298,6 +306,7 @@ class Task:
         self.agent.interactive = interactive
         self.only_user_quits_root = only_user_quits_root
         self.message_history_idx = -1
+        self.default_return_type = default_return_type
 
         # set to True if we want to collapse multi-turn conversation with sub-tasks into
         # just the first outgoing message and last incoming message.
@@ -582,16 +591,50 @@ class Task:
         for t in self.sub_tasks:
             t.reset_all_sub_tasks()
 
+    def __getitem__(self, return_type: type) -> Task:
+        """Returns a (shallow) copy of `self` with a default return type."""
+        clone = copy.copy(self)
+        clone.default_return_type = return_type
+        return clone
+
+    @overload
+    def run(  # noqa
+        self,
+        msg: Any = None,
+        *,
+        turns: int = -1,
+        caller: None | Task = None,
+        max_cost: float = 0,
+        max_tokens: int = 0,
+        session_id: str = "",
+        allow_restart: bool = True,
+    ) -> Optional[ChatDocument]: ...  # noqa
+
+    @overload
+    def run(  # noqa
+        self,
+        msg: Any = None,
+        *,
+        turns: int = -1,
+        caller: None | Task = None,
+        max_cost: float = 0,
+        max_tokens: int = 0,
+        session_id: str = "",
+        allow_restart: bool = True,
+        return_type: Type[T],
+    ) -> Optional[T]: ...  # noqa
+
     def run(
         self,
-        msg:
+        msg: Any = None,
         turns: int = -1,
         caller: None | Task = None,
         max_cost: float = 0,
         max_tokens: int = 0,
         session_id: str = "",
         allow_restart: bool = True,
-
+        return_type: Optional[Type[T]] = None,
+    ) -> Optional[ChatDocument | T]:
         """Synchronous version of `run_async()`.
         See `run_async()` for details."""
         if allow_restart and (
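A minimal usage sketch of the typed-result API introduced above (the `return_type` keyword, `default_return_type`, and `Task.__getitem__`). The agent setup and the `CityInfo` model are placeholders, not part of this diff; the actual extraction of the typed value goes through `agent.from_ChatDocument(...)` as shown in the later hunks.

```python
# Sketch only: illustrates the new typed-result API; the agent config is a placeholder.
import langroid as lr
from langroid.pydantic_v1 import BaseModel


class CityInfo(BaseModel):  # hypothetical structured result type
    city: str
    population: int


agent = lr.ChatAgent(lr.ChatAgentConfig(name="Geo"))
task = lr.Task(agent, interactive=False)

result_doc = task.run("Tell me about Tokyo")                           # Optional[ChatDocument], as before
result_typed = task.run("Tell me about Tokyo", return_type=CityInfo)   # Optional[CityInfo]
result_typed2 = task[CityInfo].run("Tell me about Tokyo")              # shallow copy with default_return_type
```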
@@ -614,19 +657,18 @@ class Task:
         self._init_message_counter()
         self.history.clear()
 
-
-            msg is None or isinstance(msg, str) or isinstance(msg, ChatDocument)
-        ), f"msg arg in Task.run() must be None, str, or ChatDocument, not {type(msg)}"
+        msg_input = self.agent.to_ChatDocument(msg, author_entity=Entity.USER)
 
         if (
-            isinstance(
-            and
-            and
+            isinstance(msg_input, ChatDocument)
+            and msg_input.metadata.recipient != ""
+            and msg_input.metadata.recipient != self.name
         ):
             # this task is not the intended recipient so return None
             return None
+
         self._pre_run_loop(
-            msg=
+            msg=msg_input,
             caller=caller,
             is_async=False,
         )
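Related to the signature change above: `msg` is now typed `Any` and normalized through `agent.to_ChatDocument(msg, author_entity=Entity.USER)` instead of being asserted to be `None | str | ChatDocument`. A hedged note, continuing the sketch above (exactly which input types `to_ChatDocument` accepts beyond strings and ChatDocuments is an assumption based on this conversion step, not something the diff shows directly):

```python
# Continuing the sketch above: plain strings and None behave as before ...
task.run("What is the capital of France?")
task.run(None)  # LLM responds to the task's initial task_messages
# ... and richer inputs (e.g. a ToolMessage or other structured object) are presumably
# converted by agent.to_ChatDocument() rather than rejected -- an assumption based on
# the new `msg: Any` parameter.
```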
@@ -677,24 +719,60 @@ class Task:
 
         final_result = self.result(status)
         self._post_run_loop()
+        if final_result is None:
+            return None
+
+        if return_type is None:
+            return_type = self.default_return_type
+
+        if return_type is not None and return_type != ChatDocument:
+            return self.agent.from_ChatDocument(final_result, return_type)
         return final_result
 
+    @overload
+    async def run_async(  # noqa
+        self,
+        msg: Any = None,
+        *,
+        turns: int = -1,
+        caller: None | Task = None,
+        max_cost: float = 0,
+        max_tokens: int = 0,
+        session_id: str = "",
+        allow_restart: bool = True,
+    ) -> Optional[ChatDocument]: ...  # noqa
+
+    @overload
+    async def run_async(  # noqa
+        self,
+        msg: Any = None,
+        *,
+        turns: int = -1,
+        caller: None | Task = None,
+        max_cost: float = 0,
+        max_tokens: int = 0,
+        session_id: str = "",
+        allow_restart: bool = True,
+        return_type: Type[T],
+    ) -> Optional[T]: ...  # noqa
+
     async def run_async(
         self,
-        msg:
+        msg: Any = None,
         turns: int = -1,
         caller: None | Task = None,
         max_cost: float = 0,
         max_tokens: int = 0,
         session_id: str = "",
         allow_restart: bool = True,
-
+        return_type: Optional[Type[T]] = None,
+    ) -> Optional[ChatDocument | T]:
         """
         Loop over `step()` until task is considered done or `turns` is reached.
         Runs asynchronously.
 
         Args:
-            msg (
+            msg (Any): initial *user-role* message to process; if None,
                 the LLM will respond to its initial `self.task_messages`
                 which set up and kick off the overall task.
                 The agent tries to achieve this goal by looping
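The async path mirrors the sync one, including the resolution order for the result type (explicit `return_type` argument, then `self.default_return_type`, else the raw `ChatDocument`). A small sketch, reusing the placeholder `task` and `CityInfo` from the earlier sketch:

```python
import asyncio


async def main() -> None:
    # An explicit return_type wins over task.default_return_type; if neither is set
    # (or it is ChatDocument), the plain ChatDocument result is returned.
    info = await task.run_async("Tell me about Osaka", return_type=CityInfo)
    print(info)


asyncio.run(main())
```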
@@ -710,6 +788,7 @@ class Task:
             max_tokens (int): max tokens allowed for the task (default 0 -> no limit)
             session_id (str): session id for the task
             allow_restart (bool): whether to allow restarting the task
+            return_type (Optional[Type[T]]): desired final result type
 
         Returns:
             Optional[ChatDocument]: valid result of the task.
@@ -740,17 +819,20 @@ class Task:
         self._init_message_counter()
         self.history.clear()
 
+        msg_input = self.agent.to_ChatDocument(msg, author_entity=Entity.USER)
+
         if (
-            isinstance(
-            and
-            and
+            isinstance(msg_input, ChatDocument)
+            and msg_input.metadata.recipient != ""
+            and msg_input.metadata.recipient != self.name
         ):
             # this task is not the intended recipient so return None
             return None
+
         self._pre_run_loop(
-            msg=
+            msg=msg_input,
             caller=caller,
-            is_async=
+            is_async=False,
         )
         # self.turns overrides if it is > 0 and turns not set (i.e. = -1)
         turns = self.turns if turns < 0 else turns
@@ -800,6 +882,14 @@ class Task:
 
         final_result = self.result(status)
         self._post_run_loop()
+        if final_result is None:
+            return None
+
+        if return_type is None:
+            return_type = self.default_return_type
+
+        if return_type is not None and return_type != ChatDocument:
+            return self.agent.from_ChatDocument(final_result, return_type)
         return final_result
 
     def _pre_run_loop(
@@ -1246,7 +1336,13 @@ class Task:
         else:
             response_fn = self._entity_responder_map[cast(Entity, e)]
             result = response_fn(self.pending_message)
-
+
+        result_chat_doc = self.agent.to_ChatDocument(
+            result,
+            chat_doc=self.pending_message,
+            author_entity=e if isinstance(e, Entity) else Entity.USER,
+        )
+        return self._process_result_routing(result_chat_doc, e)
 
     def _process_result_routing(
         self, result: ChatDocument | None, e: Responder
@@ -1364,7 +1460,13 @@ class Task:
         else:
             response_fn = self._entity_responder_async_map[cast(Entity, e)]
             result = await response_fn(self.pending_message)
-
+
+        result_chat_doc = self.agent.to_ChatDocument(
+            result,
+            chat_doc=self.pending_message,
+            author_entity=e if isinstance(e, Entity) else Entity.USER,
+        )
+        return self._process_result_routing(result_chat_doc, e)
 
     def result(self, status: StatusCode | None = None) -> ChatDocument | None:
         """
@@ -1386,6 +1488,7 @@ class Task:
         result_msg = self.pending_message
 
         content = result_msg.content if result_msg else ""
+        content_any = result_msg.content_any if result_msg else None
         if DONE in content:
             # assuming it is of the form "DONE: <content>"
             content = content.replace(DONE, "").strip()
@@ -1398,11 +1501,13 @@ class Task:
         for t in tool_messages:
             if isinstance(t, FinalResultTool):
                 content = ""
+                content_any = None
                 tool_messages = [t]  # pass it on to parent so it also quits
                 break
             elif isinstance(t, (AgentDoneTool, DoneTool)):
                 # there shouldn't be multiple tools like this; just take the first
-                content = t.content
+                content = to_string(t.content)
+                content_any = t.content
                 if isinstance(t, AgentDoneTool):
                     tool_messages = t.tools
                 break
@@ -1420,6 +1525,7 @@ class Task:
         # since to the "parent" task, this result is equivalent to a response from USER
         result_doc = ChatDocument(
             content=content,
+            content_any=content_any,
             oai_tool_calls=oai_tool_calls,
             oai_tool_id2result=oai_tool_id2result,
             function_call=fun_call,
@@ -1778,9 +1884,7 @@ class Task:
 
         if self.pending_message is None:
             return True
-        if isinstance(e, Task) and e.agent.
-            self.pending_message
-        ):
+        if isinstance(e, Task) and not e.agent.can_respond(self.pending_message):
             return False
 
         if self._recipient_mismatch(e):
langroid/agent/tool_message.py
CHANGED
@@ -15,11 +15,12 @@ from typing import Any, Dict, List, Tuple, Type
 from docstring_parser import parse
 
 from langroid.language_models.base import LLMFunctionSpec
-from langroid.pydantic_v1 import BaseModel,
+from langroid.pydantic_v1 import BaseModel, Extra
 from langroid.utils.pydantic_utils import (
     _recursive_purge_dict_key,
     generate_simple_schema,
 )
+from langroid.utils.types import is_instance_of
 
 
 class ToolMessage(ABC, BaseModel):
@@ -41,19 +42,18 @@ class ToolMessage(ABC, BaseModel):
     purpose: str
     id: str = ""  # placeholder for OpenAI-API tool_call_id
 
-
+    _allow_llm_use: bool = True  # allow an LLM to use (i.e. generate) this tool?
 
-
+    # model_config = ConfigDict(extra=Extra.allow)
 
     class Config:
-
-        handle_only: bool = False
+        extra = Extra.allow
         arbitrary_types_allowed = False
         validate_all = True
         validate_assignment = True
         # do not include these fields in the generated schema
         # since we don't require the LLM to specify them
-        schema_extra = {"exclude": {"purpose", "id"
+        schema_extra = {"exclude": {"purpose", "id"}}
 
     @classmethod
     def instructions(cls) -> str:
@@ -123,6 +123,15 @@ class ToolMessage(ABC, BaseModel):
     def dict_example(self) -> Dict[str, Any]:
         return self.dict(exclude=self.Config.schema_extra["exclude"])
 
+    def get_value_of_type(self, target_type: Type[Any]) -> Any:
+        """Try to find a value of a desired type in the fields of the ToolMessage."""
+        ignore_fields = self.Config.schema_extra["exclude"].union(["request"])
+        for field_name in set(self.dict().keys()) - ignore_fields:
+            value = getattr(self, field_name)
+            if is_instance_of(value, target_type):
+                return value
+        return None
+
     @classmethod
     def default_value(cls, f: str) -> Any:
         """
|
@@ -273,40 +282,3 @@ class ToolMessage(ABC, BaseModel):
|
|
273
282
|
exclude=list(cls.Config.schema_extra["exclude"]),
|
274
283
|
)
|
275
284
|
return schema
|
276
|
-
|
277
|
-
|
278
|
-
class FinalResultTool(ToolMessage):
|
279
|
-
"""Class to use as a wrapper for sending arbitrary results from an Agent's
|
280
|
-
agent_response or tool handlers, to:
|
281
|
-
(a) trigger completion of the current task as well as all parent tasks, and
|
282
|
-
(b) be returned as the final result of the root task, i.e. this tool would appear
|
283
|
-
in the final ChatDocument's `tool_messages` list.
|
284
|
-
See test_tool_handlers_and_results in test_tool_messages.py, and
|
285
|
-
examples/basic/tool-extract-short-example.py.
|
286
|
-
|
287
|
-
Note:
|
288
|
-
- when defining a tool handler or agent_response, you can directly return
|
289
|
-
FinalResultTool(field1 = val1, ...),
|
290
|
-
where the values can be aribitrary data structures, including nested
|
291
|
-
Pydantic objs, or you can define a subclass of FinalResultTool with the
|
292
|
-
fields you want to return.
|
293
|
-
- This is a special ToolMessage that is NOT meant to be used or handled
|
294
|
-
by an agent.
|
295
|
-
"""
|
296
|
-
|
297
|
-
request: str = ""
|
298
|
-
purpose: str = "Ignored; Wrapper for a structured message"
|
299
|
-
id: str = "" # placeholder for OpenAI-API tool_call_id
|
300
|
-
|
301
|
-
_handle_only: bool = False # only allow handling, but not use (LLM-generation)?
|
302
|
-
|
303
|
-
class Config:
|
304
|
-
extra = Extra.allow
|
305
|
-
# only HANDLING allowed, NOT "use" (i.e LLM generation)
|
306
|
-
handle_only: bool = False
|
307
|
-
arbitrary_types_allowed = False
|
308
|
-
validate_all = True
|
309
|
-
validate_assignment = True
|
310
|
-
# do not include these fields in the generated schema
|
311
|
-
# since we don't require the LLM to specify them
|
312
|
-
schema_extra = {"exclude": {"purpose", "id"}}
|
langroid/agent/tools/__init__.py
CHANGED
@@ -13,6 +13,8 @@ from .orchestration import (
     SendTool,
     AgentSendTool,
     DonePassTool,
+    ResultTool,
+    FinalResultTool,
 )
 
 __all__ = [
@@ -31,4 +33,6 @@ __all__ = [
     "PassTool",
     "SendTool",
     "AgentSendTool",
+    "ResultTool",
+    "FinalResultTool",
 ]
langroid/agent/tools/orchestration.py
CHANGED
@@ -3,12 +3,14 @@ Various tools to for agents to be able to control flow of Task, e.g.
 termination, routing to another agent, etc.
 """
 
-from typing import List, Tuple
+from typing import Any, List, Tuple
 
 from langroid.agent.chat_agent import ChatAgent
 from langroid.agent.chat_document import ChatDocument
 from langroid.agent.tool_message import ToolMessage
 from langroid.mytypes import Entity
+from langroid.pydantic_v1 import Extra
+from langroid.utils.types import to_string
 
 
 class AgentDoneTool(ToolMessage):
@@ -17,16 +19,20 @@ class AgentDoneTool(ToolMessage):
 
     purpose: str = """
    To signal the current task is done, along with an optional message <content>
-    (default
+    of arbitrary type (default None) and an
+    optional list of <tools> (default empty list).
     """
     request: str = "agent_done_tool"
-    content:
+    content: Any = None
     tools: List[ToolMessage] = []
-
+    # only meant for agent_response or tool-handlers, not for LLM generation:
+    _allow_llm_use: bool = False
 
     def response(self, agent: ChatAgent) -> ChatDocument:
+        content_str = "" if self.content is None else to_string(self.content)
         return agent.create_agent_response(
-
+            content=content_str,
+            content_any=self.content,
             tool_messages=[self] + self.tools,
         )
 
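With `content` widened to `Any`, a tool handler can now end a task with a non-string payload; the agent response carries `to_string(content)` as its text and the original object in `content_any`. A hedged sketch (the tool below is hypothetical, following the `handle()` convention used by `ResultTool` later in this file):

```python
from langroid.agent.tool_message import ToolMessage
from langroid.agent.tools.orchestration import AgentDoneTool


class SquareTool(ToolMessage):  # hypothetical tool, defined only for this sketch
    request: str = "square_tool"
    purpose: str = "To compute the square of <number>."
    number: int

    def handle(self) -> AgentDoneTool:
        # The int result becomes the response text via to_string(...) and is kept
        # unchanged in content_any, so a caller using run(..., return_type=int)
        # could recover the original value.
        return AgentDoneTool(content=self.number**2)
```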
@@ -37,14 +43,15 @@ class DoneTool(ToolMessage):
 
     purpose = """
     To signal the current task is done, along with an optional message <content>
-    (default
+    of arbitrary type (default None).
     """
     request = "done_tool"
     content: str = ""
 
     def response(self, agent: ChatAgent) -> ChatDocument:
         return agent.create_agent_response(
-            self.content,
+            content=self.content,
+            content_any=self.content,
             tool_messages=[self],
         )
 
@@ -58,6 +65,78 @@ class DoneTool(ToolMessage):
     """
 
 
+class ResultTool(ToolMessage):
+    """Class to use as a wrapper for sending arbitrary results from an Agent's
+    agent_response or tool handlers, to:
+    (a) trigger completion of the current task (similar to (Agent)DoneTool), and
+    (b) be returned as the result of the current task, i.e. this tool would appear
+        in the resulting ChatDocument's `tool_messages` list.
+    See test_tool_handlers_and_results in test_tool_messages.py, and
+    examples/basic/tool-extract-short-example.py.
+
+    Note:
+        - when defining a tool handler or agent_response, you can directly return
+            ResultTool(field1 = val1, ...),
+            where the values can be aribitrary data structures, including nested
+            Pydantic objs, or you can define a subclass of ResultTool with the
+            fields you want to return.
+        - This is a special ToolMessage that is NOT meant to be used or handled
+            by an agent.
+        - AgentDoneTool is more restrictive in that you can only send a `content`
+            or `tools` in the result.
+    """
+
+    request: str = "result_tool"
+    purpose: str = "Ignored; Wrapper for a structured message"
+    id: str = ""  # placeholder for OpenAI-API tool_call_id
+
+    class Config:
+        extra = Extra.allow
+        arbitrary_types_allowed = False
+        validate_all = True
+        validate_assignment = True
+        # do not include these fields in the generated schema
+        # since we don't require the LLM to specify them
+        schema_extra = {"exclude": {"purpose", "id"}}
+
+    def handle(self) -> AgentDoneTool:
+        return AgentDoneTool(tools=[self])
+
+
+class FinalResultTool(ToolMessage):
+    """Class to use as a wrapper for sending arbitrary results from an Agent's
+    agent_response or tool handlers, to:
+    (a) trigger completion of the current task as well as all parent tasks, and
+    (b) be returned as the final result of the root task, i.e. this tool would appear
+        in the final ChatDocument's `tool_messages` list.
+    See test_tool_handlers_and_results in test_tool_messages.py, and
+    examples/basic/tool-extract-short-example.py.
+
+    Note:
+        - when defining a tool handler or agent_response, you can directly return
+            FinalResultTool(field1 = val1, ...),
+            where the values can be aribitrary data structures, including nested
+            Pydantic objs, or you can define a subclass of FinalResultTool with the
+            fields you want to return.
+        - This is a special ToolMessage that is NOT meant to be used or handled
+            by an agent.
+    """
+
+    request: str = ""
+    purpose: str = "Ignored; Wrapper for a structured message"
+    id: str = ""  # placeholder for OpenAI-API tool_call_id
+    _allow_llm_use: bool = False
+
+    class Config:
+        extra = Extra.allow
+        arbitrary_types_allowed = False
+        validate_all = True
+        validate_assignment = True
+        # do not include these fields in the generated schema
+        # since we don't require the LLM to specify them
+        schema_extra = {"exclude": {"purpose", "id"}}
+
+
 class PassTool(ToolMessage):
     """Tool for "passing" on the received msg (ChatDocument),
     so that an as-yet-unspecified agent can handle it.
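A sketch contrasting the two wrappers defined above. Both accept arbitrary extra fields (`Extra.allow`); `ResultTool` ends only the current task, while `FinalResultTool` propagates all the way up to the root task. The tool and field names here are placeholders:

```python
from langroid.agent.tool_message import ToolMessage
from langroid.agent.tools.orchestration import FinalResultTool, ResultTool


class LookupTool(ToolMessage):  # hypothetical tool, defined only for this sketch
    request: str = "lookup_tool"
    purpose: str = "To look up <key> in a database."
    key: str

    def handle(self) -> ResultTool:
        # Ends the *current* task; the ResultTool itself is delivered in the
        # resulting ChatDocument's tool_messages (via AgentDoneTool(tools=[self])).
        return ResultTool(key=self.key, value={"answer": 42})

    # Returning FinalResultTool(key=..., value=...) instead would end the current
    # task AND all parent tasks, surfacing the tool in the root task's final result.
```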
@@ -206,7 +285,7 @@ class AgentSendTool(ToolMessage):
     to: str
     content: str = ""
     tools: List[ToolMessage] = []
-
+    _allow_llm_use: bool = False
 
     def response(self, agent: ChatAgent) -> ChatDocument:
         return agent.create_agent_response(
langroid/language_models/mock_lm.py
CHANGED
@@ -10,6 +10,7 @@ from langroid.language_models.base import (
     OpenAIToolSpec,
     ToolChoiceTypes,
 )
+from langroid.utils.types import to_string
 
 
 def none_fn(x: str) -> None | str:
@@ -43,11 +44,11 @@ class MockLM(LanguageModel):
         # - response_dict
         # - response_fn
         # - default_response
+        mapped_response = self.config.response_dict.get(
+            msg, self.config.response_fn(msg) or self.config.default_response
+        )
         return lm.LLMResponse(
-            message=
-                msg,
-                self.config.response_fn(msg) or self.config.default_response,
-            ),
+            message=to_string(mapped_response),
             cached=False,
         )
 
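A sketch of how the refactored lookup behaves. It assumes the MockLM config class is `MockLMConfig` with the `response_dict`, `response_fn`, and `default_response` fields referenced in the code above; those names are taken from the attribute accesses, not verified against the rest of the package:

```python
from langroid.language_models.mock_lm import MockLMConfig

cfg = MockLMConfig(
    response_dict={"ping": "pong"},   # exact-match lookup, consulted first
    response_fn=lambda s: None,       # fallback: a function of the incoming message
    default_response="I don't know",  # final fallback
)
# With the change above, the reply for a message m is effectively
#   response_dict.get(m, response_fn(m) or default_response)
# and the chosen value is passed through to_string(...) before being
# wrapped in LLMResponse(message=..., cached=False).
```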
langroid/parsing/web_search.py
CHANGED
@@ -48,10 +48,13 @@ class WebSearchResult:
         return self.full_content[: self.max_summary_length]
 
     def get_full_content(self) -> str:
-
-
-
-
+        try:
+            response: Response = requests.get(self.link)
+            soup: BeautifulSoup = BeautifulSoup(response.text, "lxml")
+            text = " ".join(soup.stripped_strings)
+            return text[: self.max_content_length]
+        except Exception as e:
+            return f"Error fetching content from {self.link}: {e}"
 
     def __str__(self) -> str:
         return f"Title: {self.title}\nLink: {self.link}\nSummary: {self.summary}"