langroid 0.1.181__py3-none-any.whl → 0.1.183__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/__init__.py +32 -0
- langroid/agent/base.py +28 -5
- langroid/agent/callbacks/__init__.py +0 -0
- langroid/agent/callbacks/chainlit.py +450 -0
- langroid/agent/chat_agent.py +70 -12
- langroid/agent/task.py +25 -8
- langroid/language_models/base.py +17 -3
- langroid/language_models/openai_gpt.py +4 -0
- langroid/language_models/prompt_formatter/hf_formatter.py +6 -2
- langroid/prompts/chat-gpt4-system-prompt.md +68 -0
- {langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/METADATA +4 -2
- {langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/RECORD +14 -11
- {langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/LICENSE +0 -0
- {langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/WHEEL +0 -0
langroid/__init__.py
CHANGED
@@ -20,6 +20,15 @@ from .agent.base import (
     AgentConfig,
 )
 
+from .agent.chat_document import (
+    ChatDocument,
+    ChatDocMetaData,
+)
+
+from .agent.tool_message import (
+    ToolMessage,
+)
+
 from .agent.chat_agent import (
     ChatAgent,
     ChatAgentConfig,
@@ -27,6 +36,19 @@ from .agent.chat_agent import (
 
 from .agent.task import Task
 
+try:
+    from .agent.callbacks.chainlit import (
+        ChainlitAgentCallbacks,
+        ChainlitTaskCallbacks,
+    )
+
+    chainlit_available = True
+    ChainlitAgentCallbacks
+    ChainlitTaskCallbacks
+except ImportError:
+    chainlit_available = False
+
+
 from .mytypes import (
     DocMetaData,
     Document,
@@ -47,8 +69,18 @@ __all__ = [
     "AgentConfig",
     "ChatAgent",
     "ChatAgentConfig",
+    "ChatDocument",
+    "ChatDocMetaData",
     "Task",
     "DocMetaData",
     "Document",
     "Entity",
+    "ToolMessage",
 ]
+if chainlit_available:
+    __all__.extend(
+        [
+            "ChainlitAgentCallbacks",
+            "ChainlitTaskCallbacks",
+        ]
+    )
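The guarded import above is a standard optional-dependency pattern: the chainlit callbacks are imported inside try/except, and the public names are exported only when the import succeeds. A minimal sketch of the same pattern, with illustrative names that are not part of langroid:

    # Export an optional feature only if its dependency is installed.
    try:
        from some_optional_pkg import ExtraFeature  # may not be installed

        extra_available = True
    except ImportError:
        extra_available = False

    __all__ = ["CoreFeature"]
    if extra_available:
        __all__.extend(["ExtraFeature"])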
langroid/agent/base.py
CHANGED
@@ -4,6 +4,7 @@ import json
 import logging
 from abc import ABC
 from contextlib import ExitStack
+from types import SimpleNamespace
 from typing import (
     Any,
     Callable,
@@ -63,6 +64,10 @@ class AgentConfig(BaseSettings):
     show_stats: bool = True  # show token usage/cost stats?
 
 
+def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
+    pass
+
+
 class Agent(ABC):
     """
     An Agent is an abstraction that encapsulates mainly two components:
@@ -91,6 +96,16 @@ class Agent(ABC):
         self.parser: Optional[Parser] = (
             Parser(config.parsing) if config.parsing else None
         )
+        self.callbacks = SimpleNamespace(
+            start_llm_stream=lambda: noop_fn,
+            cancel_llm_stream=noop_fn,
+            finish_llm_stream=noop_fn,
+            show_llm_response=noop_fn,
+            show_agent_response=noop_fn,
+            get_user_response=None,
+            get_last_step=noop_fn,
+            set_parent_agent=noop_fn,
+        )
 
     def entity_responders(
         self,
@@ -295,6 +310,7 @@ class Agent(ABC):
         if not settings.quiet:
             console.print(f"[red]{self.indent}", end="")
             print(f"[red]Agent: {results}")
+        self.callbacks.show_agent_response(content=results)
         sender_name = self.config.name
         if isinstance(msg, ChatDocument) and msg.function_call is not None:
             # if result was from handling an LLM `function_call`,
@@ -353,11 +369,18 @@ class Agent(ABC):
         elif not settings.interactive:
             user_msg = ""
         else:
-            user_msg = Prompt.ask(
-                f"[blue]{self.indent}Human "
-                "(respond or q, x to exit current level, "
-                f"or hit enter to continue)\n{self.indent}",
-            ).strip()
+            if self.callbacks.get_user_response is not None:
+                # ask user with empty prompt: no need for prompt
+                # since user has seen the conversation so far.
+                # But non-empty prompt can be useful when Agent
+                # uses a tool that requires user input, or in other scenarios.
+                user_msg = self.callbacks.get_user_response(prompt="")
+            else:
+                user_msg = Prompt.ask(
+                    f"[blue]{self.indent}Human "
+                    "(respond or q, x to exit current level, "
+                    f"or hit enter to continue)\n{self.indent}",
+                ).strip()
 
         tool_ids = []
         if msg is not None and isinstance(msg, ChatDocument):
langroid/agent/callbacks/__init__.py
ADDED
File without changes (new, empty file)

langroid/agent/callbacks/chainlit.py
ADDED
@@ -0,0 +1,450 @@
+"""
+Callbacks for Chainlit integration.
+"""
+
+import json
+import logging
+import textwrap
+from typing import Any, Callable, Dict, List, Literal, Optional, no_type_check
+
+try:
+    import chainlit as cl
+except ImportError:
+    raise ImportError(
+        """
+        You are attempting to use `chainlit`, which is not installed
+        by default with `langroid`.
+        Please install langroid with the `chainlit` extra using:
+        `pip install langroid[chainlit]` or
+        `poetry install -E chainlit`
+        depending on your scenario
+        """
+    )
+
+from chainlit import run_sync
+from chainlit.config import config
+from chainlit.logger import logger
+
+import langroid as lr
+import langroid.language_models as lm
+from langroid.utils.configuration import settings
+from langroid.utils.constants import NO_ANSWER
+
+# Attempt to reconfigure the root logger to your desired settings
+log_level = logging.INFO if settings.debug else logging.WARNING
+logger.setLevel(log_level)
+
+USER_TIMEOUT = 60_000
+
+
+@no_type_check
+async def ask_helper(func, **kwargs):
+    res = await func(**kwargs).send()
+    while not res:
+        res = await func(**kwargs).send()
+    return res
+
+
+@no_type_check
+async def setup_llm() -> None:
+    llm_settings = cl.user_session.get("llm_settings", {})
+    model = llm_settings.get("chat_model")
+    context_length = llm_settings.get("context_length", 16_000)
+    temperature = llm_settings.get("temperature", 0.2)
+    timeout = llm_settings.get("timeout", 90)
+    print(f"Using model: {model}")
+    llm_config = lm.OpenAIGPTConfig(
+        chat_model=model or lm.OpenAIChatModel.GPT4_TURBO,
+        # or, other possibilities for example:
+        # "litellm/ollama_chat/mistral"
+        # "litellm/ollama_chat/mistral:7b-instruct-v0.2-q8_0"
+        # "litellm/ollama/llama2"
+        # "local/localhost:8000/v1"
+        # "local/localhost:8000"
+        chat_context_length=context_length,  # adjust based on model
+        temperature=temperature,
+        timeout=timeout,
+    )
+    llm = lm.OpenAIGPT(llm_config)
+    cl.user_session.set("llm_config", llm_config)
+    cl.user_session.set("llm", llm)
+
+
+@no_type_check
+async def update_agent(settings: Dict[str, Any], agent="agent") -> None:
+    cl.user_session.set("llm_settings", settings)
+    await inform_llm_settings()
+    await setup_llm()
+    agent = cl.user_session.get(agent)
+    if agent is None:
+        raise ValueError(f"Agent {agent} not found in user session")
+    agent.llm = cl.user_session.get("llm")
+    agent.config.llm = cl.user_session.get("llm_config")
+
+
+async def make_llm_settings_widgets() -> None:
+    await cl.ChatSettings(
+        [
+            cl.input_widget.TextInput(
+                id="chat_model",
+                label="Model Name (Default GPT4-Turbo)",
+                initial="",
+                placeholder="E.g. litellm/ollama_chat/mistral or "
+                "local/localhost:8000/v1",
+            ),
+            cl.input_widget.NumberInput(
+                id="context_length",
+                label="Chat Context Length",
+                initial=16_000,
+                placeholder="E.g. 16000",
+            ),
+            cl.input_widget.Slider(
+                id="temperature",
+                label="LLM temperature",
+                min=0.0,
+                max=1.0,
+                step=0.1,
+                initial=0.2,
+                tooltip="Adjust based on model",
+            ),
+            cl.input_widget.Slider(
+                id="timeout",
+                label="Timeout (seconds)",
+                min=10,
+                max=200,
+                step=10,
+                initial=90,
+                tooltip="Timeout for LLM response, in seconds.",
+            ),
+        ]
+    ).send()  # type: ignore
+
+
+@no_type_check
+async def inform_llm_settings() -> None:
+    llm_settings: Dict[str, Any] = cl.user_session.get("llm_settings", {})
+    settings_dict = dict(
+        model=llm_settings.get("chat_model"),
+        context_length=llm_settings.get("context_length"),
+        temperature=llm_settings.get("temperature"),
+        timeout=llm_settings.get("timeout"),
+    )
+    await cl.Message(
+        author="System",
+        content="LLM settings updated",
+        elements=[
+            cl.Text(
+                name="settings",
+                display="side",
+                content=json.dumps(settings_dict, indent=4),
+                language="json",
+            )
+        ],
+    ).send()
+
+
+async def add_instructions(
+    title: str = "Instructions",
+    content: str = "Enter your question/response in the dialog box below.",
+    display: Literal["side", "inline", "page"] = "inline",
+) -> None:
+    await cl.Message(
+        author="",
+        content=title if display == "side" else "",
+        elements=[
+            cl.Text(
+                name=title,
+                content=content,
+                display=display,
+            )
+        ],
+    ).send()
+
+
+async def ask_user_step(
+    name: str,
+    prompt: str,
+    parent_id: str | None = None,
+    timeout: int = USER_TIMEOUT,
+    suppress_values: List[str] = ["c"],
+) -> str:
+    """
+    Ask user for input, as a step nested under parent_id.
+    Rather than rely entirely on AskUserMessage (which doesn't let us
+    nest the question + answer under a step), we instead create fake
+    steps for the question and answer, and only rely on AskUserMessage
+    with an empty prompt to await user response.
+
+    Args:
+        name (str): Name of the agent
+        prompt (str): Prompt to display to user
+        parent_id (str): Id of the parent step under which this step should be nested
+            (If None, the step will be shown at root level)
+        timeout (int): Timeout in seconds
+        suppress_values (List[str]): List of values to suppress from display
+            (e.g. "c" for continue)
+
+    Returns:
+        str: User response
+    """
+
+    # save hide_cot status to restore later
+    # (We should probably use a ctx mgr for this)
+    hide_cot = config.ui.hide_cot
+
+    # force hide_cot to False so that the user question + response is visible
+    config.ui.hide_cot = False
+
+    if prompt != "":
+        # Create a question step to ask user
+        question_step = cl.Step(
+            name=f"{name} (AskUser ❓)",
+            type="run",
+            parent_id=parent_id,
+        )
+        question_step.output = prompt
+        await question_step.send()  # type: ignore
+
+    # Use AskUserMessage to await user response,
+    # but with an empty prompt so the question is not visible,
+    # but still pauses for user input in the input box.
+    res = await cl.AskUserMessage(
+        content="",
+        timeout=timeout,
+    ).send()
+
+    if res is None:
+        run_sync(
+            cl.Message(
+                content=f"Timed out after {USER_TIMEOUT} seconds. Exiting."
+            ).send()
+        )
+        return "x"
+
+    # The above will try to display user response in res
+    # but we create fake step with same id as res and
+    # erase it using empty output so it's not displayed
+    step = cl.Step(
+        id=res["id"], name="TempUserResponse", type="run", parent_id=parent_id
+    )
+    step.output = ""
+    await step.update()  # type: ignore
+
+    # Finally, reproduce the user response at right nesting level
+    if res["output"] in suppress_values:
+        config.ui.hide_cot = hide_cot  # restore original value
+        return ""
+
+    step = cl.Step(
+        name=f"{name}(You 😃)",
+        type="run",
+        parent_id=parent_id,
+    )
+    step.output = res["output"]
+    await step.send()  # type: ignore
+    config.ui.hide_cot = hide_cot  # restore original value
+    return res["output"]
+
+
+def wrap_text_preserving_structure(text: str, width: int = 90) -> str:
+    """Wrap text preserving paragraph breaks. Typically used to
+    format an agent_response output, which may have long lines
+    with no newlines or paragraph breaks."""
+
+    paragraphs = text.split("\n\n")  # Split the text into paragraphs
+    wrapped_text = []
+
+    for para in paragraphs:
+        if para.strip():  # If the paragraph is not just whitespace
+            # Wrap this paragraph and add it to the result
+            wrapped_paragraph = textwrap.fill(para, width=width)
+            wrapped_text.append(wrapped_paragraph)
+        else:
+            # Preserve paragraph breaks
+            wrapped_text.append("")
+
+    return "\n\n".join(wrapped_text)
+
+
+class ChainlitAgentCallbacks:
+    """Inject Chainlit callbacks into a Langroid Agent"""
+
+    last_step: Optional[cl.Step] = None  # used to display sub-steps under this
+    stream: Optional[cl.Step] = None  # pushed into openai_gpt.py to stream tokens
+    parent_agent: Optional[lr.Agent] = None  # used to get parent id, for step nesting
+
+    def __init__(self, agent: lr.Agent):
+        agent.callbacks.start_llm_stream = self.start_llm_stream
+        agent.callbacks.cancel_llm_stream = self.cancel_llm_stream
+        agent.callbacks.finish_llm_stream = self.finish_llm_stream
+        agent.callbacks.show_llm_response = self.show_llm_response
+        agent.callbacks.show_agent_response = self.show_agent_response
+        agent.callbacks.get_user_response = self.get_user_response
+        agent.callbacks.get_last_step = self.get_last_step
+        agent.callbacks.set_parent_agent = self.set_parent_agent
+        self.agent: lr.Agent = agent
+        self.name = agent.config.name
+
+    def _get_parent_id(self) -> str | None:
+        """Get step id under which we need to nest the current step:
+        This should be the parent Agent's last_step.
+        """
+        if self.parent_agent is None:
+            logger.info(f"No parent agent found for {self.name}")
+            return None
+        logger.info(
+            f"Parent agent found for {self.name} = {self.parent_agent.config.name}"
+        )
+        last_step = self.parent_agent.callbacks.get_last_step()
+        if last_step is None:
+            logger.info(f"No last step found for {self.parent_agent.config.name}")
+            return None
+        logger.info(
+            f"Last step found for {self.parent_agent.config.name} = {last_step.id}"
+        )
+        return last_step.id  # type: ignore
+
+    def set_parent_agent(self, parent: lr.Agent) -> None:
+        self.parent_agent = parent
+
+    def get_last_step(self) -> Optional[cl.Step]:
+        return self.last_step
+
+    def start_llm_stream(self) -> Callable[[str], None]:
+        """Returns a streaming fn that can be passed to the LLM class"""
+        logger.info(
+            f"""
+            Starting LLM stream for {self.agent.config.name}
+            under parent {self._get_parent_id()}
+            """
+        )
+        self.stream = cl.Step(
+            name=self.agent.config.name + "(LLM 🧠)",
+            type="llm",
+            parent_id=self._get_parent_id(),
+        )
+        self.last_step = self.stream
+        run_sync(self.stream.send())  # type: ignore
+
+        def stream_token(t: str) -> None:
+            if self.stream is None:
+                raise ValueError("Stream not initialized")
+            run_sync(self.stream.stream_token(t))
+
+        return stream_token
+
+    def cancel_llm_stream(self) -> None:
+        """Called when cached response found."""
+        self.last_step = None
+        if self.stream is not None:
+            run_sync(self.stream.remove())  # type: ignore
+
+    def finish_llm_stream(self, content: str, is_tool: bool = False) -> None:
+        """Update the stream, and display entire response in the right language."""
+        tool_indicator = " => 🛠️" if is_tool else ""
+        if self.agent.llm is None or self.stream is None:
+            raise ValueError("LLM or stream not initialized")
+        model = self.agent.llm.config.chat_model
+        if content == "":
+            run_sync(self.stream.remove())  # type: ignore
+        else:
+            run_sync(self.stream.update())  # type: ignore
+        stream_id = self.stream.id if content else None
+        step = cl.Step(
+            id=stream_id,
+            name=self.agent.config.name + f"(LLM {model} 🧠{tool_indicator})",
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        step.output = content or NO_ANSWER
+        run_sync(step.update())  # type: ignore
+
+    def show_llm_response(self, content: str, is_tool: bool = False) -> None:
+        """Show non-streaming LLM response."""
+        model = self.agent.llm is not None and self.agent.llm.config.chat_model
+        tool_indicator = " => 🛠️" if is_tool else ""
+        step = cl.Step(
+            name=self.agent.config.name + f"(LLM {model} 🧠{tool_indicator})",
+            type="llm",
+            parent_id=self._get_parent_id(),
+            language="json" if is_tool else None,
+        )
+        self.last_step = step
+        step.output = content or NO_ANSWER
+        run_sync(step.send())  # type: ignore
+
+    def show_agent_response(self, content: str) -> None:
+        """Show message from agent (typically tool handler).
+        Agent response can be considered as a "step"
+        between LLM response and user response
+        """
+        step = cl.Step(
+            name=self.agent.config.name + "(Agent <>)",
+            type="tool",
+            parent_id=self._get_parent_id(),
+            language="text",
+        )
+        self.last_step = step
+        step.output = wrap_text_preserving_structure(content, width=90)
+        run_sync(step.send())  # type: ignore
+
+    def _get_user_response_buttons(self, prompt: str) -> str:
+        """Not used. Save for future reference"""
+        res = run_sync(
+            ask_helper(
+                cl.AskActionMessage,
+                content="Continue, exit or say something?",
+                actions=[
+                    cl.Action(
+                        name="continue",
+                        value="continue",
+                        label="✅ Continue",
+                    ),
+                    cl.Action(
+                        name="feedback",
+                        value="feedback",
+                        label="💬 Say something",
+                    ),
+                    cl.Action(name="exit", value="exit", label="🔚 Exit Conversation"),
+                ],
+            )
+        )
+        if res.get("value") == "continue":
+            return ""
+        if res.get("value") == "exit":
+            return "x"
+        if res.get("value") == "feedback":
+            return self.get_user_response(prompt)
+        return ""  # process the "feedback" case here
+
+    def get_user_response(self, prompt: str) -> str:
+        """Ask for user response, wait for it, and return it,
+        as a cl.Step rather than as a cl.Message so we can nest it
+        under the parent step.
+        """
+        return run_sync(
+            ask_user_step(
+                name=self.agent.config.name,
+                prompt=prompt,
+                parent_id=self._get_parent_id(),
+                suppress_values=["c"],
+            )
+        )
+
+
+class ChainlitTaskCallbacks:
+    """
+    Inject ChainlitCallbacks into a Langroid Task's agent and
+    agents of sub-tasks.
+    """
+
+    def __init__(self, task: lr.Task):
+        ChainlitTaskCallbacks._inject_callbacks(task)
+
+    @staticmethod
+    def _inject_callbacks(task: lr.Task) -> None:
+        # recursively apply ChainlitCallbacks to agents of sub-tasks
+        ChainlitAgentCallbacks(task.agent)
+        for t in task.sub_tasks:
+            ChainlitTaskCallbacks._inject_callbacks(t)
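A minimal sketch of wiring these callbacks into a Chainlit app; the handler body and the agent/task names here are illustrative, not prescribed by langroid:

    import chainlit as cl
    import langroid as lr

    @cl.on_chat_start
    async def on_chat_start():
        agent = lr.ChatAgent(lr.ChatAgentConfig(name="Assistant"))
        task = lr.Task(agent, interactive=True)
        # Recursively injects ChainlitAgentCallbacks into the task's agent
        # and the agents of all sub-tasks:
        lr.ChainlitTaskCallbacks(task)
        cl.user_session.set("task", task)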
langroid/agent/chat_agent.py
CHANGED
@@ -1,3 +1,4 @@
+import copy
 import inspect
 import logging
 import textwrap
@@ -8,7 +9,7 @@ from rich import print
 from rich.console import Console
 from rich.markup import escape
 
-from langroid.agent.base import Agent, AgentConfig
+from langroid.agent.base import Agent, AgentConfig, noop_fn
 from langroid.agent.chat_document import ChatDocument
 from langroid.agent.tool_message import ToolMessage
 from langroid.language_models.base import (
@@ -134,6 +135,27 @@ class ChatAgent(Agent):
         self.llm_functions_usable: Set[str] = set()
         self.llm_function_force: Optional[Dict[str, str]] = None
 
+    def clone(self, i: int = 0) -> "ChatAgent":
+        """Create i'th clone of this agent, ensuring tool use/handling is cloned.
+        Important: We assume all member variables are in the __init__ method here
+        and in the Agent class.
+        TODO: We are attempting to clone an agent after its state has been
+        changed in possibly many ways. Below is an imperfect solution. Caution advised.
+        Revisit later.
+        """
+        agent_cls = type(self)
+        config_copy = copy.deepcopy(self.config)
+        config_copy.name = f"{config_copy.name}-{i}"
+        new_agent = agent_cls(config_copy)
+        new_agent.system_tool_instructions = self.system_tool_instructions
+        new_agent.system_json_tool_instructions = self.system_json_tool_instructions
+        new_agent.llm_tools_map = self.llm_tools_map
+        new_agent.llm_functions_map = self.llm_functions_map
+        new_agent.llm_functions_handled = self.llm_functions_handled
+        new_agent.llm_functions_usable = self.llm_functions_usable
+        new_agent.llm_function_force = self.llm_function_force
+        return new_agent
+
     def _fn_call_available(self) -> bool:
         """Does this agent's LLM support function calling?"""
         return (
@@ -644,6 +666,10 @@ class ChatAgent(Agent):
         """
         assert self.config.llm is not None and self.llm is not None
         output_len = output_len or self.config.llm.max_output_tokens
+        streamer = noop_fn
+        if self.llm.get_stream():
+            streamer = self.callbacks.start_llm_stream()
+        self.llm.config.streamer = streamer
         with ExitStack() as stack:  # for conditionally using rich spinner
             if not self.llm.get_stream() and not settings.quiet:
                 # show rich spinner only if not streaming!
@@ -659,17 +685,31 @@ class ChatAgent(Agent):
                 functions=functions,
                 function_call=fun_call,
             )
+        if self.llm.get_stream():
+            self.callbacks.finish_llm_stream(
+                content=str(response),
+                is_tool=self.has_tool_message_attempt(
+                    ChatDocument.from_LLMResponse(response, displayed=True)
+                ),
+            )
+        self.llm.config.streamer = noop_fn
+        if response.cached:
+            self.callbacks.cancel_llm_stream()
+
         if not self.llm.get_stream() or response.cached:
             # We would have already displayed the msg "live" ONLY if
             # streaming was enabled, AND we did not find a cached response.
             # If we are here, it means the response has not yet been displayed.
             cached = f"[red]{self.indent}(cached)[/red]" if response.cached else ""
-            if response.function_call is not None:
-                response_str = str(response.function_call)
-            else:
-                response_str = response.message
             if not settings.quiet:
-                print(cached + "[green]" + escape(response_str))
+                print(cached + "[green]" + escape(str(response)))
+            cached = "[cached] " if response.cached else ""
+            self.callbacks.show_llm_response(
+                content=cached + " " + str(response),
+                is_tool=self.has_tool_message_attempt(
+                    ChatDocument.from_LLMResponse(response, displayed=True)
+                ),
+            )
         self.update_token_usage(
             response,
             messages,
@@ -695,24 +735,42 @@ class ChatAgent(Agent):
             "auto" if self.llm_function_force is None else self.llm_function_force
         )
         assert self.llm is not None
+
+        streamer = noop_fn
+        if self.llm.get_stream():
+            streamer = self.callbacks.start_llm_stream()
+        self.llm.config.streamer = streamer
+
         response = await self.llm.achat(
             messages,
             output_len,
             functions=functions,
             function_call=fun_call,
         )
-
+        if self.llm.get_stream():
+            self.callbacks.finish_llm_stream(
+                content=str(response),
+                is_tool=self.has_tool_message_attempt(
+                    ChatDocument.from_LLMResponse(response, displayed=True)
+                ),
+            )
+        self.llm.config.streamer = noop_fn
+        if response.cached:
+            self.callbacks.cancel_llm_stream()
         if not self.llm.get_stream() or response.cached:
             # We would have already displayed the msg "live" ONLY if
             # streaming was enabled, AND we did not find a cached response.
             # If we are here, it means the response has not yet been displayed.
             cached = f"[red]{self.indent}(cached)[/red]" if response.cached else ""
-            if response.function_call is not None:
-                response_str = str(response.function_call)
-            else:
-                response_str = response.message
             if not settings.quiet:
-                print(cached + "[green]" + escape(response_str))
+                print(cached + "[green]" + escape(str(response)))
+            cached = "[cached] " if response.cached else ""
+            self.callbacks.show_llm_response(
+                content=cached + " " + str(response),
+                is_tool=self.has_tool_message_attempt(
+                    ChatDocument.from_LLMResponse(response, displayed=True)
+                ),
+            )
 
         self.update_token_usage(
             response,
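Note that clone() deep-copies only the config; the tool/function maps are shared by reference between the clone and the original. An illustrative toy version of that trade-off (not langroid code):

    import copy

    class Toy:
        def __init__(self, config: dict):
            self.config = config
            self.tools: dict = {}

        def clone(self, i: int = 0) -> "Toy":
            cfg = copy.deepcopy(self.config)   # independent copy of the config
            cfg["name"] = f"{cfg['name']}-{i}"
            other = Toy(cfg)
            other.tools = self.tools           # shared by reference
            return other

    a = Toy({"name": "assistant"})
    b = a.clone(1)
    b.tools["search"] = object()
    assert "search" in a.tools                 # tool registration is visible to the original
    assert a.config["name"] == "assistant"     # config stays independent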
langroid/agent/task.py
CHANGED
@@ -77,7 +77,7 @@ class Task:
         restart: bool = True,
         default_human_response: Optional[str] = None,
         interactive: bool = True,
-        only_user_quits_root: bool =
+        only_user_quits_root: bool = False,
         erase_substeps: bool = False,
         allow_null_result: bool = True,
         max_stalled_steps: int = 5,
@@ -114,7 +114,8 @@ class Task:
                 response (prevents infinite loop of non-human responses).
                 Default is true. If false, then `default_human_response` is set to ""
             only_user_quits_root (bool): if true, only user can quit the root task.
-                [
+                [This param is ignored & deprecated; Keeping for backward compatibility.
+                Instead of this, setting `interactive` suffices]
             erase_substeps (bool): if true, when task completes, erase intermediate
                 conversation with subtasks from this agent's `message_history`, and also
                 erase all subtask agents' `message_history`.
@@ -134,6 +135,20 @@ class Task:
         if agent is None:
             agent = ChatAgent()
 
+        # copy the agent's config, so that we don't modify the original agent's config,
+        # which may be shared by other agents.
+        try:
+            config_copy = copy.deepcopy(agent.config)
+            agent.config = config_copy
+        except Exception:
+            logger.warning(
+                """
+                Failed to deep-copy Agent config during task creation,
+                proceeding with original config. Be aware that changes to
+                the config may affect other agents using the same config.
+                """
+            )
+
         if isinstance(agent, ChatAgent) and len(agent.message_history) == 0 or restart:
             agent = cast(ChatAgent, agent)
             agent.clear_history(0)
@@ -157,6 +172,9 @@ class Task:
         self.is_done = False  # is task done (based on response)?
         self.is_pass_thru = False  # is current response a pass-thru?
         self.task_progress = False  # progress in current task (since run or run_async)?
+        if name:
+            # task name overrides name in agent config
+            agent.config.name = name
         self.name = name or agent.config.name
         self.value: str = self.name
         self.default_human_response = default_human_response
@@ -229,11 +247,7 @@ class Task:
         Returns a copy of this task, with a new agent.
         """
         assert isinstance(self.agent, ChatAgent), "Task clone only works for ChatAgent"
-
-        agent_cls = type(self.agent)
-        config_copy = copy.deepcopy(self.agent.config)
-        config_copy.name = f"{config_copy.name}-{i}"
-        agent: ChatAgent = agent_cls(config_copy)
+        agent: ChatAgent = self.agent.clone(i)
         return Task(
             agent,
             name=self.name + f"-{i}",
@@ -244,7 +258,6 @@ class Task:
             restart=False,
             default_human_response=self.default_human_response,
             interactive=self.interactive,
-            only_user_quits_root=self.only_user_quits_root,
             erase_substeps=self.erase_substeps,
             allow_null_result=self.allow_null_result,
             max_stalled_steps=self.max_stalled_steps,
@@ -784,6 +797,8 @@ class Task:
         """
         if isinstance(e, Task):
             actual_turns = e.turns if e.turns > 0 else turns
+            if e.agent.callbacks.set_parent_agent is not None:
+                e.agent.callbacks.set_parent_agent(self.agent)
             result = e.run(
                 self.pending_message,
                 turns=actual_turns,
@@ -846,6 +861,8 @@ class Task:
         """
         if isinstance(e, Task):
             actual_turns = e.turns if e.turns > 0 else turns
+            if e.agent.callbacks.set_parent_agent is not None:
+                e.agent.callbacks.set_parent_agent(self.agent)
             result = await e.run_async(
                 self.pending_message,
                 turns=actual_turns,
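The set_parent_agent calls above are what link a sub-task's agent to its parent just before each run, so UI callbacks can nest the child's steps under the parent's. Roughly, under assumed names (the task setup below is illustrative):

    # Before stepping into a sub-task, the parent effectively does:
    #     child.agent.callbacks.set_parent_agent(parent.agent)
    # ChainlitAgentCallbacks._get_parent_id() then follows this link to the
    # parent's last displayed step and nests the child's steps under it.
    parent = lr.Task(lr.ChatAgent(lr.ChatAgentConfig(name="Parent")))
    child = lr.Task(lr.ChatAgent(lr.ChatAgentConfig(name="Child")))
    parent.add_sub_task(child)  # parent calls set_parent_agent at run time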
langroid/language_models/base.py
CHANGED
@@ -5,7 +5,7 @@ import logging
 from abc import ABC, abstractmethod
 from datetime import datetime
 from enum import Enum
-from typing import Any, Dict, List, Optional, Tuple, Type, Union
+from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
 
 import aiohttp
 from pydantic import BaseModel, BaseSettings, Field
@@ -26,8 +26,13 @@ from langroid.utils.output.printing import show_if_debug
 logger = logging.getLogger(__name__)
 
 
+def noop_fn(*args: List[Any], **kwargs: Dict[str, Any]) -> None:
+    pass
+
+
 class LLMConfig(BaseSettings):
     type: str = "openai"
+    streamer: Optional[Callable[[Any], None]] = noop_fn
     api_base: str | None = None
     formatter: None | str = None
     timeout: int = 20  # timeout for API requests
@@ -72,8 +77,11 @@ class LLMFunctionCall(BaseModel):
         fun_args_str = message["arguments"]
         # sometimes may be malformed with invalid indents,
         # so we try to be safe by removing newlines.
-        fun_args_str = fun_args_str.replace("\n", "").strip()
-        fun_args = ast.literal_eval(fun_args_str)
+        if fun_args_str is not None:
+            fun_args_str = fun_args_str.replace("\n", "").strip()
+            fun_args = ast.literal_eval(fun_args_str)
+        else:
+            fun_args = None
         fun_call.arguments = fun_args
 
         return fun_call
@@ -181,6 +189,12 @@ class LLMResponse(BaseModel):
     usage: Optional[LLMTokenUsage]
     cached: bool = False
 
+    def __str__(self) -> str:
+        if self.function_call is not None:
+            return str(self.function_call)
+        else:
+            return self.message
+
     def to_LLMMessage(self) -> LLMMessage:
         content = self.message
         role = Role.ASSISTANT if self.function_call is None else Role.FUNCTION
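The new streamer field gives the LLM config a pluggable per-token sink (defaulting to a no-op), which openai_gpt.py calls on each streamed chunk, as shown in the next hunk. A small sketch of plugging in a custom sink; the list-collector here is just an example:

    import langroid.language_models as lm

    tokens: list[str] = []
    cfg = lm.OpenAIGPTConfig()
    cfg.streamer = tokens.append  # every streamed chunk is appended to `tokens`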
langroid/language_models/openai_gpt.py
CHANGED
@@ -580,17 +580,21 @@ class OpenAIGPT(LanguageModel):
             if not is_async:
                 sys.stdout.write(Colors().GREEN + event_text)
                 sys.stdout.flush()
+            self.config.streamer(event_text)
         if event_fn_name:
             function_name = event_fn_name
             has_function = True
             if not is_async:
                 sys.stdout.write(Colors().GREEN + "FUNC: " + event_fn_name + ": ")
                 sys.stdout.flush()
+            self.config.streamer(event_fn_name)
+
         if event_args:
             function_args += event_args
             if not is_async:
                 sys.stdout.write(Colors().GREEN + event_args)
                 sys.stdout.flush()
+            self.config.streamer(event_args)
         if choices[0].get("finish_reason", "") in ["stop", "function_call"]:
             # for function_call, finish_reason does not necessarily
             # contain "function_call" as mentioned in the docs.
langroid/language_models/prompt_formatter/hf_formatter.py
CHANGED
@@ -6,7 +6,7 @@ models will have the same tokenizer, so we just use the first one.
 """
 import logging
 import re
-from typing import List
+from typing import List, Set
 
 from huggingface_hub import HfApi, ModelFilter
 from jinja2.exceptions import TemplateError
@@ -44,6 +44,8 @@ def find_hf_formatter(model_name: str) -> str:
 
 
 class HFFormatter(PromptFormatter):
+    models: Set[str] = set()  # which models have been used for formatting
+
     def __init__(self, config: HFPromptFormatterConfig):
         super().__init__(config)
         self.config: HFPromptFormatterConfig = config
@@ -63,7 +65,8 @@ class HFFormatter(PromptFormatter):
             raise ValueError(
                 f"Model {config.model_name} does not support chat template"
            )
-        else:
+        elif mdl.id not in HFFormatter.models:
+            # only warn if this is the first time we've used this mdl.id
             logger.warning(
                 f"""
                 Using HuggingFace {mdl.id} for prompt formatting:
@@ -73,6 +76,7 @@ class HFFormatter(PromptFormatter):
                 {self.tokenizer.chat_template}
                 """
             )
+            HFFormatter.models.add(mdl.id)
 
     def format(self, messages: List[LLMMessage]) -> str:
         sys_msg, chat_msgs, user_msg = LanguageModel.get_chat_history_components(
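This is the usual warn-once idiom: a class-level set records which model ids have already produced the formatting warning, so repeated instantiations stay quiet. A self-contained illustration (toy names, not langroid code):

    import logging

    logger = logging.getLogger(__name__)

    class Formatter:
        seen: set = set()  # shared across all instances of Formatter

        def __init__(self, model_id: str):
            if model_id not in Formatter.seen:
                logger.warning("Using %s for prompt formatting", model_id)
                Formatter.seen.add(model_id)

    Formatter("mistral-7b-instruct")  # warns once
    Formatter("mistral-7b-instruct")  # silent on repeat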
langroid/prompts/chat-gpt4-system-prompt.md
ADDED
@@ -0,0 +1,68 @@
+Image input capabilities: Enabled
+
+Tools
+python
+When you send a message containing Python code to python, it will be executed in a
+stateful Jupyter notebook environment. python will respond with the output of the execution or time out after 60.0
+seconds. The drive at '/mnt/data' can be used to save and persist user files. Internet access for this session is disabled. Do not make external web requests or API calls as they will fail.
+
+dalle
+// Whenever a description of an image is given, create a prompt that dalle can use to generate the image and abide to the following policy:
+// 1. The prompt must be in English. Translate to English if needed.
+// 2. DO NOT ask for permission to generate the image, just do it!
+// 3. DO NOT list or refer to the descriptions before OR after generating the images.
+// 4. Do not create more than 1 image, even if the user requests more.
+// 5. Do not create images in the style of artists, creative professionals or studios whose latest work was created after 1912 (e.g. Picasso, Kahlo).
+// - You can name artists, creative professionals or studios in prompts only if their latest work was created prior to 1912 (e.g. Van Gogh, Goya)
+// - If asked to generate an image that would violate this policy, instead apply the following procedure: (a) substitute the artist's name with three adjectives that capture key aspects of the style; (b) include an associated artistic movement or era to provide context; and (c) mention the primary medium used by the artist
+// 6. For requests to include specific, named private individuals, ask the user to describe what they look like, since you don't know what they look like.
+// 7. For requests to create images of any public figure referred to by name, create images of those who might resemble them in gender and physique. But they shouldn't look like them. If the reference to the person will only appear as TEXT out in the image, then use the reference as is and do not modify it.
+// 8. Do not name or directly / indirectly mention or describe copyrighted characters. Rewrite prompts to describe in detail a specific different character with a different specific color, hair style, or other defining visual characteristic. Do not discuss copyright policies in responses.
+// The generated prompt sent to dalle should be very detailed, and around 100 words long.
+// Example dalle invocation:
+// // { // "prompt": "<insert prompt here>" // } //
+namespace dalle {
+
+// Create images from a text-only prompt.
+type text2im = (_: {
+// The size of the requested image. Use 1024x1024 (square) as the default, 1792x1024 if the user requests a wide image, and 1024x1792 for full-body portraits. Always include this parameter in the request.
+size?: "1792x1024" | "1024x1024" | "1024x1792",
+// The number of images to generate. If the user does not specify a number, generate 1 image.
+n?: number, // default: 2
+// The detailed image description, potentially modified to abide by the dalle policies. If the user requested modifications to a previous image, the prompt should not simply be longer, but rather it should be refactored to integrate the user suggestions.
+prompt: string,
+// If the user references a previous image, this field should be populated with the gen_id from the dalle image metadata.
+referenced_image_ids?: string[],
+}) => any;
+
+} // namespace dalle
+
+voice_mode
+// Voice mode functions are not available in text conversations.
+namespace voice_mode {
+
+} // namespace voice_mode
+
+browser
+You have the tool browser. Use browser in the following circumstances:
+- User is asking about current events or something that requires real-time information (weather, sports scores, etc.)
+- User is asking about some term you are totally unfamiliar with (it might be new)
+- User explicitly asks you to browse or provide links to references
+
+Given a query that requires retrieval, your turn will consist of three steps:
+
+Call the search function to get a list of results.
+Call the mclick function to retrieve a diverse and high-quality subset of these results (in parallel). Remember to SELECT AT LEAST 3 sources when using mclick.
+Write a response to the user based on these results. In your response, cite sources using the citation format below.
+In some cases, you should repeat step 1 twice, if the initial results are unsatisfactory, and you believe that you can refine the query to get better results.
+
+You can also open a url directly if one is provided by the user. Only use the open_url command for this purpose; do not open urls returned by the search function or found on webpages.
+
+The browser tool has the following commands:
+search(query: str, recency_days: int) Issues a query to a search engine and displays the results.
+mclick(ids: list[str]). Retrieves the contents of the webpages with provided IDs (indices). You should ALWAYS SELECT AT LEAST 3 and at most 10 pages. Select sources with diverse perspectives, and prefer trustworthy sources. Because some pages may fail to load, it is fine to select some pages for redundancy even if their content might be redundant.
+open_url(url: str) Opens the given URL and displays it.
+
+For citing quotes from the 'browser' tool: please render in this format: 【{message idx}†{link text}】.
+For long citations: please render in this format: [link text](message idx).
+Otherwise do not render links.
{langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.181
+Version: 0.1.183
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -10,6 +10,7 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
+Provides-Extra: chainlit
 Provides-Extra: hf-embeddings
 Provides-Extra: litellm
 Provides-Extra: metaphor
@@ -23,6 +24,7 @@ Requires-Dist: async-generator (>=1.10,<2.0)
 Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
 Requires-Dist: black[jupyter] (>=23.3.0,<24.0.0)
 Requires-Dist: bs4 (>=0.0.1,<0.0.2)
+Requires-Dist: chainlit (>=1.0.200,<2.0.0) ; extra == "chainlit"
 Requires-Dist: chromadb (==0.3.21)
 Requires-Dist: colorlog (>=6.7.0,<7.0.0)
 Requires-Dist: docstring-parser (>=0.15,<0.16)
@@ -35,7 +37,7 @@ Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
 Requires-Dist: lancedb (>=0.4.1,<0.5.0)
-Requires-Dist: litellm (>=1.
+Requires-Dist: litellm (>=1.22.3,<2.0.0) ; extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
 Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
{langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/RECORD
CHANGED
@@ -1,8 +1,10 @@
-langroid/__init__.py,sha256=
+langroid/__init__.py,sha256=nj2Cj9VOdy4WUqsoCUix5dhNAJbocBwgXyW41SjKBcs,1370
 langroid/agent/__init__.py,sha256=w2pap-rHrp41gMzdtzur2YY_m62LqQhF2Du-AmoIQi4,752
-langroid/agent/base.py,sha256=
+langroid/agent/base.py,sha256=Fjja2BFd_F8QxqnW7KYr3vs8YCNDFPfi_qUyoq_hwgA,34783
 langroid/agent/batch.py,sha256=8zHdM-863pRD3UoCXUPKEQ4Z4iqjkNVD2xXu1WspBak,6464
-langroid/agent/
+langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+langroid/agent/callbacks/chainlit.py,sha256=LG3b_YkCi0f_Wi86SpOU_hMur6pfukHpg3K5gaAtDDY,15466
+langroid/agent/chat_agent.py,sha256=V7dtTFSVL6MWhz3iUE0RP-VJvK_zCCUE5If_XAiXEyk,37994
 langroid/agent/chat_document.py,sha256=MRp2YCy5f3Q_yPoFXVyr1vGu48wz33UGxAUtMn7MJpo,7958
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
@@ -30,7 +32,7 @@ langroid/agent/special/sql/utils/populate_metadata.py,sha256=zRjw31a1ZXvpx9bcmbt
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
 langroid/agent/special/table_chat_agent.py,sha256=GEUTP-VdtMXq4CcPV80gDQrCEn-ZFb9IhuRMtLN5I1o,9030
-langroid/agent/task.py,sha256=
+langroid/agent/task.py,sha256=GmHultMZHmWXQBbnX6QDMyDsGwb9uzSVQkr5Tf_n7Ig,48393
 langroid/agent/tool_message.py,sha256=ngmWdiqMYbjF4Am0hsLyA9zK0Q9QF2ziec6FW0lPD90,7399
 langroid/agent/tools/__init__.py,sha256=q-maq3k2BXhPAU99G0H6-j_ozoRvx15I1RFpPVicQIU,304
 langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsWMnpaZpw,3789
@@ -52,13 +54,13 @@ langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWju
 langroid/embedding_models/models.py,sha256=0bQ8u2ee2ODcopGPusz9WYWI_PjR5Gbdy47qcSU8gCo,4603
 langroid/language_models/__init__.py,sha256=5L9ndEEC8iLJHjDJmYFTnv6-2-3xsxWUMHcugR8IeDs,821
 langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
-langroid/language_models/base.py,sha256=
+langroid/language_models/base.py,sha256=RdXH-BnkFGS8xZTiukdxHTFxqELVSmf546itp9Fa8fs,21008
 langroid/language_models/config.py,sha256=5UF3DzO1a-Dfsc3vghE0XGq7g9t_xDsRCsuRiU4dgBg,366
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=9gm6URAIh2-katOQGl8BTpDp0JtE20JJptPRR_-HfWY,48252
 langroid/language_models/prompt_formatter/__init__.py,sha256=9JXFF22QNMmbQV1q4nrIeQVTtA3Tx8tEZABLtLBdFyc,352
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
-langroid/language_models/prompt_formatter/hf_formatter.py,sha256=
+langroid/language_models/prompt_formatter/hf_formatter.py,sha256=3MQhu8--p168qPWXqlp_nK4phi-SuAUMqahSVyLHIkA,4177
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
 langroid/language_models/utils.py,sha256=3stQOt3sAdbGT70thurlxfnQ4xPxH75YjFm-jaRuXlg,4474
 langroid/mytypes.py,sha256=-0q-SyicLbdjFSIEcwt5u-EwhRcMWllSoWYE3OWk78M,2501
@@ -81,6 +83,7 @@ langroid/parsing/urls.py,sha256=Nv4yCWQLLBEjaiRdaZZVQNBEl_cfK_V6cVuPm91wGtU,7686
 langroid/parsing/utils.py,sha256=Ft0YytDQh2-S1xjk3FDA4IZI9Qp1odrIYm8cuK8H81s,11642
 langroid/parsing/web_search.py,sha256=sS6UPeVB_KSsIUNlv4VHHvc0uFywMExEbQ15k40TLcc,3951
 langroid/prompts/__init__.py,sha256=B0vpJzIJlMR3mFRtoQwyALsFzBHvLp9f92acD8xJA_0,185
+langroid/prompts/chat-gpt4-system-prompt.md,sha256=Q3uLCJTPQvmUkZN2XDnkBC7M2K3X0F3C3GIQBaFvYvw,5329
 langroid/prompts/dialog.py,sha256=SpfiSyofSgy2pwD1YboHR_yHO3LEEMbv6j2sm874jKo,331
 langroid/prompts/prompts_config.py,sha256=XRQHzod7KBnoKn3B_V878jZiqBA7rcn-CtGPkuAe_yM,131
 langroid/prompts/templates.py,sha256=4X-07tnmUQ8Z_zaWRQAUUyKiErGztp3tERujqnG8sGA,6369
@@ -111,7 +114,7 @@ langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR2
 langroid/vector_store/momento.py,sha256=j6Eo6oIDN2fe7lsBOlCXJn3uvvERHHTFL5QJfeREeOM,10044
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=_egbsP9SWBwmI827EDYSSOqfIQSmwNsmJfFTxrLpWYE,13457
-langroid-0.1.181.dist-info/LICENSE,sha256=
-langroid-0.1.181.dist-info/METADATA,sha256=
-langroid-0.1.181.dist-info/WHEEL,sha256=
-langroid-0.1.181.dist-info/RECORD,,
+langroid-0.1.183.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.183.dist-info/METADATA,sha256=s5Vo5vx5oF3UEmswpHY-cJVmu6HTmk_aFva6hQCzoQY,45309
+langroid-0.1.183.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.183.dist-info/RECORD,,
{langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/LICENSE
File without changes

{langroid-0.1.181.dist-info → langroid-0.1.183.dist-info}/WHEEL
File without changes