langroid 0.1.21__py3-none-any.whl → 0.1.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/base.py +11 -1
- langroid/agent/chat_agent.py +10 -25
- langroid/agent/special/doc_chat_agent.py +7 -2
- langroid/agent/task.py +29 -13
- langroid/agent/tool_message.py +2 -1
- langroid/embedding_models/base.py +4 -1
- langroid/language_models/base.py +4 -0
- langroid/vector_store/base.py +4 -0
- {langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/METADATA +126 -95
- {langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/RECORD +12 -12
- {langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/LICENSE +0 -0
- {langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/WHEEL +0 -0
langroid/agent/base.py
CHANGED
@@ -31,7 +31,7 @@ logger = logging.getLogger(__name__)
 class AgentConfig(BaseSettings):
     """
     General config settings for an LLM agent. This is nested, combining configs of
-    various components
+    various components.
     """
 
     name: str = "LLM-Agent"
@@ -43,6 +43,16 @@ class AgentConfig(BaseSettings):
 
 
 class Agent(ABC):
+    """
+    An Agent is an abstraction that encapsulates mainly two components:
+
+    - a language model (LLM)
+    - a vector store (vecdb)
+
+    plus associated components such as a parser, and variables that hold
+    information about any tool/function-calling messages that have been defined.
+    """
+
     def __init__(self, config: AgentConfig):
         self.config = config
         self.dialog: List[Tuple[str, str]] = []  # seq of LLM (prompt, response) tuples
langroid/agent/chat_agent.py
CHANGED
@@ -44,13 +44,16 @@ class ChatAgent(Agent):
     """
     Chat Agent interacting with external env
     (could be human, or external tools).
-    The agent (the LLM actually) is provided with
-
-
-
-
-
-
+    The agent (the LLM actually) is provided with an optional "Task Spec",
+    which is a sequence of `LLMMessage`s. These are used to initialize
+    the `task_messages` of the agent.
+    In most applications we will use a `ChatAgent` rather than a bare `Agent`.
+    The `Agent` class mainly exists to hold various common methods and attributes.
+    One difference between `ChatAgent` and `Agent` is that `ChatAgent`'s
+    `llm_response` method uses "chat mode" API (i.e. one that takes a
+    message sequence rather than a single message),
+    whereas the same method in the `Agent` class uses "completion mode" API (i.e. one
+    that takes a single message).
     """
 
     def __init__(
@@ -61,24 +64,6 @@ class ChatAgent(Agent):
         Args:
             config: settings for the agent
 
-        !!! note
-            `self.message_history` is different from `self.dialog` (in Agent class):
-
-            - `self.message_history` is the sequence of messages sent to the LLM in
-              **chat mode** (e.g. when using OpenAI `ChatCompletion.create()`)
-              Typically we send a sequence of such messages to "prime"
-              the LLM context for some task, and we extend and re-send this sequence to
-              continue interaction. Note that consecutive messages in the sequence could
-              have different or same roles (e.g. "user", "assistant"). Each message has a
-              "dict" structure, which we call :class:`LLMMessage`.
-
-            - `self.dialog` is the sequence of `(prompt, response)` tuples produced
-              when interacting with an LLM in **completion mode**,
-              where `prompt (str)` is sent TO the LLM, and `response (str)` is received
-              FROM the LLM. Typically as an LLM conversation goes on, we collate
-              `self.dialog` into a single string, and insert it into the context part
-              of the next prompt to the LLM.
-
         """
         super().__init__(config)
         self.config: ChatAgentConfig = config
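To make the chat-mode distinction above concrete, here is a minimal sketch of typical `ChatAgent` usage, assembled from the README examples further down in this diff (the config fields shown are taken from those examples and may differ in other versions):

```python
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig

# Configure an agent whose LLM is accessed via the chat-mode API,
# i.e. a sequence of messages rather than a single prompt string.
config = ChatAgentConfig(
    llm=OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4),
    vecdb=None,  # no vector store needed for a plain chat agent
)
agent = ChatAgent(config)

# llm_response() sends the accumulated message sequence plus this
# message to the LLM and returns its reply.
answer = agent.llm_response("What is the capital of Ontario?")
```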
langroid/agent/special/doc_chat_agent.py
CHANGED
@@ -324,7 +324,7 @@ class DocChatAgent(ChatAgent):
 
         passages = self.original_docs
 
-        # if original docs
+        # if original docs too long, no need to look for relevant parts.
         if (
             passages is None
             or self.original_docs_length > self.config.max_context_tokens
@@ -380,8 +380,13 @@ class DocChatAgent(ChatAgent):
         if self.parser is None:
             raise ValueError("No parser defined")
         tot_tokens = self.parser.num_tokens(full_text)
+        model = (
+            self.config.llm.chat_model
+            if self.config.llm.use_chat_for_completion
+            else self.config.llm.completion_model
+        )
         MAX_INPUT_TOKENS = (
-            self.config.llm.context_length[
+            self.config.llm.context_length[model]
             - self.config.llm.max_output_tokens
             - 100
        )
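The hunk above budgets input tokens against the selected model's context window. Schematically (a restatement of the arithmetic in the diff, not library code; the helper name is made up):

```python
# Schematic restatement of the budget computed in the hunk above
# (hypothetical helper, not part of langroid).
def max_input_tokens(
    context_length: dict[str, int],  # per-model context window sizes
    model: str,                      # chat or completion model, per use_chat_for_completion
    max_output_tokens: int,          # room reserved for the LLM's reply
    margin: int = 100,               # small safety margin, as in the diff
) -> int:
    return context_length[model] - max_output_tokens - margin

# e.g. a 4096-token window with 256 output tokens leaves 3740 tokens for input
assert max_input_tokens({"gpt-3.5-turbo": 4096}, "gpt-3.5-turbo", 256) == 3740
```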
langroid/agent/task.py
CHANGED
@@ -1,8 +1,3 @@
-"""
-Class that runs the Task loop of an agent;
-maintains state while various responders (agent's own methods, or external sub-tasks)
-take turns attempting to respond to the `self.pending_message`.
-"""
 from __future__ import annotations
 
 import logging
@@ -28,12 +23,32 @@ Responder = Entity | Type["Task"]
 
 class Task:
     """
-
-
-
-    `pending_message`, which is
-
-
+    A `Task` wraps an `Agent` object, and sets up the `Agent`'s goals and instructions.
+    A `Task` maintains two key variables:
+
+    - `self.pending_message`, which is the message awaiting a response, and
+    - `self.pending_sender`, which is the entity that sent the pending message.
+
+    The possible responders to `self.pending_message` are the `Agent`'s own "native"
+    responders (`agent_response`, `llm_response`, and `user_response`), and
+    the `run()` methods of any sub-tasks. All responders have the same type-signature
+    (somewhat simplified):
+    ```
+    str | ChatDocument -> ChatDocument
+    ```
+    Responders may or may not specify an intended recipient of their generated response.
+
+    The main top-level method in the `Task` class is `run()`, which repeatedly calls
+    `step()` until `done()` returns true. The `step()` represents a "turn" in the
+    conversation: this method sequentially (in round-robin fashion) calls the responders
+    until it finds one that generates a *valid* response to the `pending_message`
+    (as determined by the `valid()` method). Once a valid response is found,
+    `step()` updates the `pending_message` and `pending_sender` variables,
+    and on the next iteration, `step()` re-starts its search for a valid response
+    *from the beginning* of the list of responders (the exception being that the
+    human user always gets a chance to respond after each non-human valid response).
+    This process repeats until `done()` returns true, at which point `run()` returns
+    the value of `result()`, which is the final result of the task.
     """
 
     def __init__(
@@ -51,8 +66,10 @@ class Task:
     ):
         """
         A task to be performed by an agent.
+
         Args:
-            agent (Agent): agent
+            agent (Agent): agent associated with the task
+            name (str): name of the task
            llm_delegate (bool): whether to delegate control to LLM; conceptually,
                the "controlling entity" is the one "seeking" responses to its queries,
                and has a goal it is aiming to achieve. The "controlling entity" is
@@ -73,7 +90,6 @@ class Task:
                erase all subtask agents' `message_history`.
                Note: erasing can reduce prompt sizes, but results in repetitive
                sub-task delegation.
-
         """
         if isinstance(agent, ChatAgent) and len(agent.message_history) == 0 or restart:
             agent = cast(ChatAgent, agent)
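The control flow described in the new docstring can be summarized as pseudocode (a sketch of the documented behavior only; the real `Task` implementation has more machinery, e.g. recipient handling and sub-task setup):

```python
# Pseudocode sketch of the Task loop described in the docstring above.
def run(task, msg):
    task.init_pending(msg)       # set pending_message / pending_sender
    while not task.done():
        step(task)               # one conversational "turn"
    return task.result()         # final result of the task

def step(task):
    # Round-robin over the agent's native responders and sub-task run()
    # methods; the first *valid* response becomes the new pending message,
    # and the search restarts from the beginning on the next step().
    for responder in task.responders:
        response = responder(task.pending_message)
        if task.valid(response):
            task.pending_message = response
            task.pending_sender = responder
            return
```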
langroid/agent/tool_message.py
CHANGED
@@ -68,7 +68,8 @@ class ToolMessage(ABC, BaseModel):
     LLM. Depending on context, "tools" are also referred to as "plugins",
     or "function calls" (in the context of OpenAI LLMs).
     Essentially, they are a way for the LLM to express its intent to run a special
-    function or method. Currently
+    function or method. Currently these "tools" are handled by methods of the
+    agent.
 
     Attributes:
         request (str): name of agent method to map to.
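For illustration, a minimal tool subclass in the spirit of the README's `LeaseMessage` example later in this diff; the class and field names here are made up, only the `request` attribute is taken from the docstring above, and the real base class may expect additional fields:

```python
from langroid.agent.tool_message import ToolMessage

class SmallestNumberTool(ToolMessage):
    # `request` names the agent method this tool maps to,
    # per the Attributes section in the docstring above.
    request: str = "smallest_number"
    # Illustrative payload field the LLM fills in when it uses the tool.
    number: int
```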
langroid/embedding_models/base.py
CHANGED
@@ -12,7 +12,10 @@ class EmbeddingModelsConfig(BaseSettings):
 
 
 class EmbeddingModel(ABC):
-
+    """
+    Abstract base class for an embedding model.
+    """
+
     @classmethod
     def create(cls, config: EmbeddingModelsConfig) -> "EmbeddingModel":
         from langroid.embedding_models.models import (
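Schematic use of the factory shown above (the call signature is taken from the hunk; constructing the base config directly is an assumption, in practice a provider-specific config subclass would be passed):

```python
from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig

# The create() classmethod in the hunk above acts as a factory:
# it returns a concrete EmbeddingModel for the given config.
config = EmbeddingModelsConfig()  # placeholder; normally a concrete subclass
embedding_model = EmbeddingModel.create(config)
```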
langroid/language_models/base.py
CHANGED
langroid/vector_store/base.py
CHANGED
{langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.21
+Version: 0.1.23
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -61,61 +61,51 @@ Description-Content-Type: text/markdown
 <h1>Langroid: Harness LLMs with Multi-Agent Programming</h1>
 </div>
 
+<div align="center">
+
 [](https://github.com/langroid/langroid/actions/workflows/pytest.yml)
 [](https://github.com/langroid/langroid/actions/workflows/validate.yml)
 [](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)
+[](https://langroid.github.io/langroid)
+[](https://discord.gg/g3nAXCbZ)
+
+</div>
 
 Langroid is an intuitive, lightweight, transparent, flexible, extensible and principled
-Python framework to
+Python framework to build LLM-powered applications using Multi-Agent Programming.
 We welcome [contributions](CONTRIBUTING.md)!
 
+Langroid is the first Python LLM-application framework that was explicitly
+designed with Agents as first-class citizens, and Multi-Agent Programming
+as the core design principle. The framework is inspired by ideas from the
+[Actor Framework](https://en.wikipedia.org/wiki/Actor_model).
+For more details see [here](https://langroid.github.io/langroid/).
+
 Documentation: https://langroid.github.io/langroid/
 
 Usage examples: https://github.com/langroid/langroid-examples
 
-
-
-### The LLM Opportunity
-
-Given the remarkable abilities of recent Large Language Models (LLMs), there
-is an unprecedented opportunity to build intelligent applications powered by
-this transformative technology. The top question for any enterprise is: how
-best to harness the power of LLMs for complex applications? For technical and
-practical reasons, building LLM-powered applications is not as simple as
-throwing a task at an LLM-system and expecting it to do it.
+Community: Join us on [Discord!](https://discord.gg/g3nAXCbZ)
 
-
+## :rocket: Demo
 
-
-
-
-
+A `LeaseExtractor` agent is tasked with extracting structured information
+from a commercial lease document. It generates questions that are
+answerred by a `DocAgent` using Retrieval from a vector-database
+(into which the lease has been sharded + embedded).
+When it has all the information it needs, the `LeaseExtractor` agent
+presents the information in a structured format using a Tool/Function-calling.
 
-
-state, along with access to long-term memory (vector-stores) and tools (a.k.a functions
-or plugins). Thus a **Multi-Agent Programming** framework is a natural fit
-for complex LLM-based applications.
+
 
-> Langroid is the first Python LLM-application framework that was explicitly
-designed with Agents as first-class citizens, and Multi-Agent Programming
-as the core design principle. The framework is inspired by ideas from the
-[Actor Framework](https://en.wikipedia.org/wiki/Actor_model).
 
-
-among agents. There is a principled mechanism to orchestrate multi-agent
-collaboration. Agents act as message-transformers, and take turns responding to (and
-transforming) the current message. The architecture is lightweight, transparent,
-flexible, and allows other types of orchestration to be implemented.
-Besides Agents, Langroid also provides simple ways to directly interact with
-LLMs and vector-stores.
+## :zap: Highlights
 
-### Highlights
 Highlights of Langroid's features as of July 2023:
 
 - **Agents as first-class citizens:** The `Agent` class encapsulates LLM conversation state,
   and optionally a vector-store and tools. Agents are a core abstraction in Langroid;
-  Agents act as _message transformers_, and by default provide 3 _responder_ methods,
-  one corresponding to each entity: LLM, Agent, User.
+  Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.
 - **Tasks:** A Task class wraps an Agent, and gives the agent instructions (or roles, or goals),
   manages iteration over an Agent's responder methods,
   and orchestrates multi-agent interactions via hierarchical, recursive
@@ -130,7 +120,7 @@ Highlights of Langroid's features as of July 2023:
   GPT-4-0613
 - **Caching of LLM prompts, responses:** Langroid uses [Redis](https://redis.com/try-free/) for caching.
 - **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.
-  Vector stores allow for Retrieval-Augmented-
+  Vector stores allow for Retrieval-Augmented-Generation (RAG).
 - **Grounding and source-citation:** Access to external documents via vector-stores
   allows for grounding and source-citation.
 - **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and
@@ -148,11 +138,9 @@ Highlights of Langroid's features as of July 2023:
   hallucinates malformed JSON, the Pydantic error message is sent back to
   the LLM so it can fix it!
 
-
-
-
-[`Getting Started Guide`](https://langroid.github.io/langroid/getting_started/)
-for a detailed tutorial.
+---
+
+# :gear: Installation and Setup
 
 ## Install `langroid`
 Use `pip` to install `langroid` (from PyPi) to your virtual environment:
@@ -173,7 +161,7 @@ Copy the `.env-template` file to a new file `.env` and
 insert these secrets:
 - **OpenAI API** key (required): If you don't have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).
 - **Qdrant** Vector Store API Key (required for apps that need retrieval from
-  documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io)
+  documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io).
   Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
   We use the local-storage version of Chroma, so there is no need for an API key.
 - **GitHub** Personal Access Token (required for apps that need to analyze git
@@ -198,7 +186,32 @@ QDRANT_API_KEY=<your key>
 Currently only OpenAI models are supported. Others will be added later
 (Pull Requests welcome!).
 
-
+---
+
+# :tada: Usage Examples
+
+These are quick teasers to give a glimpse of what you can do with Langroid
+and how your code would look.
+
+:warning: The code snippets below are intended to give a flavor of the code
+and they are **not** complete runnable examples! For that we encourage you to
+consult the [`langroid-examples`](https://github.com/langroid/langroid-examples)
+repository.
+
+Also see the
+[`Getting Started Guide`](https://langroid.github.io/langroid/quick-start/)
+for a detailed tutorial.
+
+- [Direct chat with LLM](#direct-llm)
+- [Simple Agent and Task](#agent-task)
+- [Three Communicating Agents](#three-agents)
+- [Agent with Tool/Function-calling](#agent-tool)
+- [Extract Structured Info with Tool/Function-calling](#agent-tool-structured)
+- [Retrieval-Augmented-Generation: Chat with Docs](#agent-rag)
+
+---
+
+## Direct interaction with OpenAI LLM <a name="direct-llm"></a>
 
 ```python
 from langroid.language_models.openai_gpt import (
@@ -213,11 +226,14 @@ mdl = OpenAIGPT(cfg)
 messages = [
     LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM),
     LLMMessage(content="What is the capital of Ontario?", role=Role.USER),
-]
+]
 response = mdl.chat(messages, max_tokens=200)
+print(response.message)
 ```
 
-
+---
+
+## Define an agent, set up a task, and run it <a name="agent-task"></a>
 
 ```python
 from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
@@ -232,13 +248,15 @@ config = ChatAgentConfig(
 )
 agent = ChatAgent(config)
 # get response from agent's LLM, and put this in an interactive loop...
-answer = agent.llm_response("What is the capital of Ontario?")
-# ...
+# answer = agent.llm_response("What is the capital of Ontario?")
+# ... OR instead, set up a task (which has a built-in loop) and run it
 task = Task(agent, name="Bot")
-task.run() # ... a loop seeking response from
+task.run() # ... a loop seeking response from LLM or User at each turn
 ```
 
-
+---
+
+## Three communicating agents <a name="three-agents"></a>
 
 A toy numbers game, where when given a number `n`:
 - `repeater_agent`'s LLM simply returns `n`,
@@ -248,57 +266,66 @@ A toy numbers game, where when given a number `n`:
 First define the 3 agents, and set up their tasks with instructions:
 
 ```python
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+from langroid.utils.constants import NO_ANSWER
+from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
+from langroid.agent.task import Task
+from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
+config = ChatAgentConfig(
+    llm = OpenAIGPTConfig(
+        chat_model=OpenAIChatModel.GPT4,
+    ),
+    vecdb = None,
+)
+repeater_agent = ChatAgent(config)
+repeater_task = Task(
+    repeater_agent,
+    name = "Repeater",
+    system_message="""
+    Your job is to repeat whatever number you receive.
+    """,
+    llm_delegate=True, # LLM takes charge of task
+    single_round=False,
+)
+even_agent = ChatAgent(config)
+even_task = Task(
+    even_agent,
+    name = "EvenHandler",
+    system_message=f"""
+    You will be given a number.
+    If it is even, divide by 2 and say the result, nothing else.
+    If it is odd, say {NO_ANSWER}
+    """,
+    single_round=True,  # task done after 1 step() with valid response
+)
 
-
-
-
-
-
-
-
-
-
-
-
+odd_agent = ChatAgent(config)
+odd_task = Task(
+    odd_agent,
+    name = "OddHandler",
+    system_message=f"""
+    You will be given a number n.
+    If it is odd, return (n*3+1), say nothing else.
+    If it is even, say {NO_ANSWER}
+    """,
+    single_round=True,  # task done after 1 step() with valid response
+)
 ```
 Then add the `even_task` and `odd_task` as sub-tasks of `repeater_task`,
 and run the `repeater_task`, kicking it off with a number as input:
 ```python
-
-
+repeater_task.add_sub_task([even_task, odd_task])
+repeater_task.run("3")
 ```
+---
+
+## Simple Tool/Function-calling example <a name="agent-tool"></a>
 
-### Simple Tool/Function-calling example
 Langroid leverages Pydantic to support OpenAI's
 [Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)
-as well as its own native tools.
+as well as its own native tools. The benefits are that you don't have to write
+any JSON to specify the schema, and also if the LLM hallucinates a malformed
+tool syntax, Langroid sends the Pydantic validation error (suitiably sanitized)
+to the LLM so it can fix it!
 
 Simple example: Say the agent has a secret list of numbers,
 and we want the LLM to find the smallest number in the list.
@@ -357,7 +384,9 @@ For a full working example see the
 [chat-agent-tool.py](https://github.com/langroid/langroid-examples/blob/main/examples/quick-start/chat-agent-tool.py)
 script in the `langroid-examples` repo.
 
-
+---
+
+## Tool/Function-calling to extract structured information from text <a name="agent-tool-structured"></a>
 
 Suppose you want an agent to extract
 the key terms of a lease, from a lease document, as a nested JSON structure.
@@ -381,7 +410,7 @@ class Lease(BaseModel):
 ```
 
 Then define the `LeaseMessage` tool as a subclass of Langroid's `ToolMessage`.
-Note the tool
+Note the tool has a required argument `terms` of type `Lease`:
 
 ```python
 class LeaseMessage(ToolMessage):
@@ -419,8 +448,9 @@ lease_extractor_agent.enable_message(LeaseMessage)
 See the [`chat_multi_extract.py`](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py)
 script in the `langroid-examples` repo for a full working example.
 
+---
 
-
+## Chat with documents (file paths, URLs, etc) <a name="agent-docs"></a>
 
 Langroid provides a specialized agent class `DocChatAgent` for this purpose.
 It incorporates document sharding, embedding, storage in a vector-DB,
@@ -467,8 +497,9 @@ See full working scripts in the
 [`docqa`](https://github.com/langroid/langroid-examples/tree/main/examples/docqa)
 folder of the `langroid-examples` repo.
 
+---
 
-
+# Contributors
 
 - Prasad Chalasani (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)
 - Somesh Jha (IIT BTech/CS, CMU PhD/CS; Professor of CS, U Wisc at Madison)
{langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/RECORD
CHANGED
@@ -1,26 +1,26 @@
 langroid/__init__.py,sha256=sEKJ_5WJBAMZApevfeE3gxLK-eotVzJMJlT83G0rAko,30
 langroid/agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/agent/base.py,sha256=
-langroid/agent/chat_agent.py,sha256=
+langroid/agent/base.py,sha256=J-SyNvmG1nhWSnz7qFyP_tvOl8GF5rUv8a7fNsAhZFM,20921
+langroid/agent/chat_agent.py,sha256=mLCHlYxU1lB7PGOLjjkaEQUqMNKcq0-HOjlG4ZcDjQE,20522
 langroid/agent/chat_document.py,sha256=Rj7Hfp_FrNjuKsTMA3KyZhno5zKpmvnPPk7WgAuAF2Y,5745
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
 langroid/agent/special/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/agent/special/doc_chat_agent.py,sha256=
+langroid/agent/special/doc_chat_agent.py,sha256=NhoS02rlLr3nAfRvLB1YNcmfsWZJH24K8O-m-uYnb-I,15741
 langroid/agent/special/recipient_validator_agent.py,sha256=x2UprcGlh-fyxQCZbb_fkKrruU5Om0mgOnNzk_PYBNM,4527
 langroid/agent/special/retriever_agent.py,sha256=AiGKIJdndV4u5kr3woINw-vLKhwWnsINlvXtXxHcJgQ,6995
-langroid/agent/task.py,sha256=
-langroid/agent/tool_message.py,sha256=
+langroid/agent/task.py,sha256=5P3S7thuYBvdaowOk1KMyDLukDtc-LLJactJcSgEJqA,25093
+langroid/agent/tool_message.py,sha256=7OdVcV7UyOZD2ihYgV1C_1fIwiWM-2pR8FFxoA1IgOo,5379
 langroid/agent_config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/cachedb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/cachedb/base.py,sha256=F-QSDlRXrC0tBRbxL397MX8hulfBMAnZNs1e9zH71OQ,790
 langroid/cachedb/redis_cachedb.py,sha256=TUBQJ5etrU4jN2bJ3wNHC3h3Le8ez65psNiMrVAfh4w,2335
 langroid/embedding_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/embedding_models/base.py,sha256=
+langroid/embedding_models/base.py,sha256=176jDrjEAAhNzdFCG8pfossd8SAhvHR8Q5Y8pOOm0LI,983
 langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWjucZR_Egtp-A,6684
 langroid/embedding_models/models.py,sha256=1xcv9hqmCTsbUbS8v7XeZRsf25Tu79JUoSipIYpvNoo,2765
 langroid/language_models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/language_models/base.py,sha256=
+langroid/language_models/base.py,sha256=Cr42pTm8XwGgq0bKGiSvedbnXpCTW37BYPm4LRcfBVY,12678
 langroid/language_models/openai_gpt.py,sha256=tWRSnzswQlGurQbe1eGUs3HhgaJKe4xj_fshGEqWr78,17538
 langroid/language_models/utils.py,sha256=rmnSn-sJ3aKl_wBdeLPkck0Li4Ed6zkCxZYYl7n1V34,4668
 langroid/mytypes.py,sha256=YA42IJcooJnTxAwk-B4FmZ1hqzIIF1ZZKcpUKzBTGGo,1537
@@ -56,11 +56,11 @@ langroid/utils/web/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuF
 langroid/utils/web/login.py,sha256=1iz9eUAHa87vpKIkzwkmFa00avwFWivDSAr7QUhK7U0,2528
 langroid/utils/web/selenium_login.py,sha256=mYI6EvVmne34N9RajlsxxRqJQJvV-WG4LGp6sEECHPw,1156
 langroid/vector_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/vector_store/base.py,sha256
+langroid/vector_store/base.py,sha256=QZx3NUNwf2I0r3A7iuoUHIRGbqt_pFGD0hq1R-Yg8iM,3740
 langroid/vector_store/chromadb.py,sha256=GPaXNPgPGIVUwYx2eO_-kreQoJ_33IFo13oigH5BP1c,5200
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=y7-fMxOqMJoYbdSfIr_HKcTIWTRpnb9j3Yg5EcOyFd4,8522
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
+langroid-0.1.23.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.23.dist-info/WHEEL,sha256=vVCvjcmxuUltf8cYhJ0sJMRDLr1XsPuxEId8YDzbyCY,88
+langroid-0.1.23.dist-info/METADATA,sha256=xuEtaXD4fPlxFI9r1ieWlDbj8VUZ-moCd2WFtOdeX5E,19215
+langroid-0.1.23.dist-info/RECORD,,
{langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/LICENSE
File without changes
{langroid-0.1.21.dist-info → langroid-0.1.23.dist-info}/WHEEL
File without changes