langroid 0.1.22__tar.gz → 0.1.23__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. {langroid-0.1.22 → langroid-0.1.23}/PKG-INFO +126 -95
  2. {langroid-0.1.22 → langroid-0.1.23}/README.md +125 -94
  3. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/special/doc_chat_agent.py +7 -2
  4. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/task.py +0 -5
  5. {langroid-0.1.22 → langroid-0.1.23}/pyproject.toml +1 -1
  6. langroid-0.1.23/setup.py +85 -0
  7. langroid-0.1.22/setup.py +0 -85
  8. {langroid-0.1.22 → langroid-0.1.23}/LICENSE +0 -0
  9. {langroid-0.1.22 → langroid-0.1.23}/langroid/__init__.py +0 -0
  10. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/__init__.py +0 -0
  11. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/base.py +0 -0
  12. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/chat_agent.py +0 -0
  13. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/chat_document.py +0 -0
  14. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/helpers.py +0 -0
  15. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/junk +0 -0
  16. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/special/__init__.py +0 -0
  17. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/special/recipient_validator_agent.py +0 -0
  18. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/special/retriever_agent.py +0 -0
  19. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent/tool_message.py +0 -0
  20. {langroid-0.1.22 → langroid-0.1.23}/langroid/agent_config.py +0 -0
  21. {langroid-0.1.22 → langroid-0.1.23}/langroid/cachedb/__init__.py +0 -0
  22. {langroid-0.1.22 → langroid-0.1.23}/langroid/cachedb/base.py +0 -0
  23. {langroid-0.1.22 → langroid-0.1.23}/langroid/cachedb/redis_cachedb.py +0 -0
  24. {langroid-0.1.22 → langroid-0.1.23}/langroid/embedding_models/__init__.py +0 -0
  25. {langroid-0.1.22 → langroid-0.1.23}/langroid/embedding_models/base.py +0 -0
  26. {langroid-0.1.22 → langroid-0.1.23}/langroid/embedding_models/clustering.py +0 -0
  27. {langroid-0.1.22 → langroid-0.1.23}/langroid/embedding_models/models.py +0 -0
  28. {langroid-0.1.22 → langroid-0.1.23}/langroid/language_models/__init__.py +0 -0
  29. {langroid-0.1.22 → langroid-0.1.23}/langroid/language_models/base.py +0 -0
  30. {langroid-0.1.22 → langroid-0.1.23}/langroid/language_models/openai_gpt.py +0 -0
  31. {langroid-0.1.22 → langroid-0.1.23}/langroid/language_models/utils.py +0 -0
  32. {langroid-0.1.22 → langroid-0.1.23}/langroid/mytypes.py +0 -0
  33. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/__init__.py +0 -0
  34. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/agent_chats.py +0 -0
  35. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/code-parsing.md +0 -0
  36. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/code_parser.py +0 -0
  37. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/json.py +0 -0
  38. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/para_sentence_split.py +0 -0
  39. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/parser.py +0 -0
  40. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/repo_loader.py +0 -0
  41. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/url_loader.py +0 -0
  42. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/url_loader_cookies.py +0 -0
  43. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/urls.py +0 -0
  44. {langroid-0.1.22 → langroid-0.1.23}/langroid/parsing/utils.py +0 -0
  45. {langroid-0.1.22 → langroid-0.1.23}/langroid/prompts/__init__.py +0 -0
  46. {langroid-0.1.22 → langroid-0.1.23}/langroid/prompts/dialog.py +0 -0
  47. {langroid-0.1.22 → langroid-0.1.23}/langroid/prompts/prompts_config.py +0 -0
  48. {langroid-0.1.22 → langroid-0.1.23}/langroid/prompts/templates.py +0 -0
  49. {langroid-0.1.22 → langroid-0.1.23}/langroid/prompts/transforms.py +0 -0
  50. {langroid-0.1.22 → langroid-0.1.23}/langroid/scripts/__init__.py +0 -0
  51. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/__init__.py +0 -0
  52. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/configuration.py +0 -0
  53. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/constants.py +0 -0
  54. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/docker.py +0 -0
  55. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/llms/__init__.py +0 -0
  56. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/llms/strings.py +0 -0
  57. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/logging.py +0 -0
  58. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/output/__init__.py +0 -0
  59. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/output/printing.py +0 -0
  60. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/system.py +0 -0
  61. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/web/__init__.py +0 -0
  62. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/web/login.py +0 -0
  63. {langroid-0.1.22 → langroid-0.1.23}/langroid/utils/web/selenium_login.py +0 -0
  64. {langroid-0.1.22 → langroid-0.1.23}/langroid/vector_store/__init__.py +0 -0
  65. {langroid-0.1.22 → langroid-0.1.23}/langroid/vector_store/base.py +0 -0
  66. {langroid-0.1.22 → langroid-0.1.23}/langroid/vector_store/chromadb.py +0 -0
  67. {langroid-0.1.22 → langroid-0.1.23}/langroid/vector_store/qdrant_cloud.py +0 -0
  68. {langroid-0.1.22 → langroid-0.1.23}/langroid/vector_store/qdrantdb.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.1.22
+ Version: 0.1.23
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -61,61 +61,51 @@ Description-Content-Type: text/markdown
  <h1>Langroid: Harness LLMs with Multi-Agent Programming</h1>
  </div>

+ <div align="center">
+
  [![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)
  [![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)
  [![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)
+ [![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)
+ [![Static Badge](https://img.shields.io/badge/Discord-Orange?link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ&link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ)](https://discord.gg/g3nAXCbZ)
+
+ </div>

  Langroid is an intuitive, lightweight, transparent, flexible, extensible and principled
- Python framework to harness LLMs using Multi-Agent Programming (MAP).
+ Python framework to build LLM-powered applications using Multi-Agent Programming.
  We welcome [contributions](CONTRIBUTING.md)!

+ Langroid is the first Python LLM-application framework that was explicitly
+ designed with Agents as first-class citizens, and Multi-Agent Programming
+ as the core design principle. The framework is inspired by ideas from the
+ [Actor Framework](https://en.wikipedia.org/wiki/Actor_model).
+ For more details see [here](https://langroid.github.io/langroid/).
+
  Documentation: https://langroid.github.io/langroid/

  Usage examples: https://github.com/langroid/langroid-examples

- ## Overview
-
- ### The LLM Opportunity
-
- Given the remarkable abilities of recent Large Language Models (LLMs), there
- is an unprecedented opportunity to build intelligent applications powered by
- this transformative technology. The top question for any enterprise is: how
- best to harness the power of LLMs for complex applications? For technical and
- practical reasons, building LLM-powered applications is not as simple as
- throwing a task at an LLM-system and expecting it to do it.
+ Community: Join us on [Discord!](https://discord.gg/g3nAXCbZ)

- ### Langroid's Multi-Agent Programming Framework
+ ## :rocket: Demo

- Effectively leveraging LLMs at scale requires a *principled programming
- framework*. In particular, there is often a need to maintain multiple LLM
- conversations, each instructed in different ways, and "responsible" for
- different aspects of a task.
+ A `LeaseExtractor` agent is tasked with extracting structured information
+ from a commercial lease document. It generates questions that are
+ answerred by a `DocAgent` using Retrieval from a vector-database
+ (into which the lease has been sharded + embedded).
+ When it has all the information it needs, the `LeaseExtractor` agent
+ presents the information in a structured format using a Tool/Function-calling.

- An *agent* is a convenient abstraction that encapsulates LLM conversation
- state, along with access to long-term memory (vector-stores) and tools (a.k.a functions
- or plugins). Thus a **Multi-Agent Programming** framework is a natural fit
- for complex LLM-based applications.
+ ![Demo](lease-extractor-demo.gif)

- > Langroid is the first Python LLM-application framework that was explicitly
- designed with Agents as first-class citizens, and Multi-Agent Programming
- as the core design principle. The framework is inspired by ideas from the
- [Actor Framework](https://en.wikipedia.org/wiki/Actor_model).

- Langroid allows an intuitive definition of agents, tasks and task-delegation
- among agents. There is a principled mechanism to orchestrate multi-agent
- collaboration. Agents act as message-transformers, and take turns responding to (and
- transforming) the current message. The architecture is lightweight, transparent,
- flexible, and allows other types of orchestration to be implemented.
- Besides Agents, Langroid also provides simple ways to directly interact with
- LLMs and vector-stores.
+ ## :zap: Highlights

- ### Highlights
  Highlights of Langroid's features as of July 2023:

  - **Agents as first-class citizens:** The `Agent` class encapsulates LLM conversation state,
  and optionally a vector-store and tools. Agents are a core abstraction in Langroid;
- Agents act as _message transformers_, and by default provide 3 _responder_ methods,
- one corresponding to each entity: LLM, Agent, User.
+ Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.
  - **Tasks:** A Task class wraps an Agent, and gives the agent instructions (or roles, or goals),
  manages iteration over an Agent's responder methods,
  and orchestrates multi-agent interactions via hierarchical, recursive
@@ -130,7 +120,7 @@ Highlights of Langroid's features as of July 2023:
  GPT-4-0613
  - **Caching of LLM prompts, responses:** Langroid uses [Redis](https://redis.com/try-free/) for caching.
  - **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.
- Vector stores allow for Retrieval-Augmented-Generaation (RAG).
+ Vector stores allow for Retrieval-Augmented-Generation (RAG).
  - **Grounding and source-citation:** Access to external documents via vector-stores
  allows for grounding and source-citation.
  - **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and
@@ -148,11 +138,9 @@ Highlights of Langroid's features as of July 2023:
  hallucinates malformed JSON, the Pydantic error message is sent back to
  the LLM so it can fix it!

- # Usage/quick-start
- These are quick teasers to give a glimpse of what you can do with Langroid
- and how your code would look. See the
- [`Getting Started Guide`](https://langroid.github.io/langroid/getting_started/)
- for a detailed tutorial.
+ ---
+
+ # :gear: Installation and Setup

  ## Install `langroid`
  Use `pip` to install `langroid` (from PyPi) to your virtual environment:
@@ -173,7 +161,7 @@ Copy the `.env-template` file to a new file `.env` and
  insert these secrets:
  - **OpenAI API** key (required): If you don't have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).
  - **Qdrant** Vector Store API Key (required for apps that need retrieval from
- documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io)
+ documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io).
  Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
  We use the local-storage version of Chroma, so there is no need for an API key.
  - **GitHub** Personal Access Token (required for apps that need to analyze git
@@ -198,7 +186,32 @@ QDRANT_API_KEY=<your key>
  Currently only OpenAI models are supported. Others will be added later
  (Pull Requests welcome!).

- ## Direct interaction with OpenAI LLM
+ ---
+
+ # :tada: Usage Examples
+
+ These are quick teasers to give a glimpse of what you can do with Langroid
+ and how your code would look.
+
+ :warning: The code snippets below are intended to give a flavor of the code
+ and they are **not** complete runnable examples! For that we encourage you to
+ consult the [`langroid-examples`](https://github.com/langroid/langroid-examples)
+ repository.
+
+ Also see the
+ [`Getting Started Guide`](https://langroid.github.io/langroid/quick-start/)
+ for a detailed tutorial.
+
+ - [Direct chat with LLM](#direct-llm)
+ - [Simple Agent and Task](#agent-task)
+ - [Three Communicating Agents](#three-agents)
+ - [Agent with Tool/Function-calling](#agent-tool)
+ - [Extract Structured Info with Tool/Function-calling](#agent-tool-structured)
+ - [Retrieval-Augmented-Generation: Chat with Docs](#agent-rag)
+
+ ---
+
+ ## Direct interaction with OpenAI LLM <a name="direct-llm"></a>

  ```python
  from langroid.language_models.openai_gpt import (
@@ -213,11 +226,14 @@ mdl = OpenAIGPT(cfg)
  messages = [
  LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM),
  LLMMessage(content="What is the capital of Ontario?", role=Role.USER),
- ],
+ ]
  response = mdl.chat(messages, max_tokens=200)
+ print(response.message)
  ```

- ## Define an agent, set up a task, and run it
+ ---
+
+ ## Define an agent, set up a task, and run it <a name="agent-task"></a>

  ```python
  from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
@@ -232,13 +248,15 @@ config = ChatAgentConfig(
  )
  agent = ChatAgent(config)
  # get response from agent's LLM, and put this in an interactive loop...
- answer = agent.llm_response("What is the capital of Ontario?")
- # ... or set up a task (which has a built-in loop) and run it
+ # answer = agent.llm_response("What is the capital of Ontario?")
+ # ... OR instead, set up a task (which has a built-in loop) and run it
  task = Task(agent, name="Bot")
- task.run() # ... a loop seeking response from Agent, LLM or User at each turn
+ task.run() # ... a loop seeking response from LLM or User at each turn
  ```

- ## Three communicating agents
+ ---
+
+ ## Three communicating agents <a name="three-agents"></a>

  A toy numbers game, where when given a number `n`:
  - `repeater_agent`'s LLM simply returns `n`,
@@ -248,57 +266,66 @@ A toy numbers game, where when given a number `n`:
  First define the 3 agents, and set up their tasks with instructions:

  ```python
- config = ChatAgentConfig(
- llm = OpenAIGPTConfig(
- chat_model=OpenAIChatModel.GPT4,
- ),
- vecdb = None,
- )
- repeater_agent = ChatAgent(config)
- repeater_task = Task(
- repeater_agent,
- name = "Repeater",
- system_message="""
- Your job is to repeat whatever number you receive.
- """,
- llm_delegate=True, # LLM takes charge of task
- single_round=False,
- )
- even_agent = ChatAgent(config)
- even_task = Task(
- even_agent,
- name = "EvenHandler",
- system_message=f"""
- You will be given a number.
- If it is even, divide by 2 and say the result, nothing else.
- If it is odd, say {NO_ANSWER}
- """,
- single_round=True, # task done after 1 step() with valid response
- )
+ from langroid.utils.constants import NO_ANSWER
+ from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
+ from langroid.agent.task import Task
+ from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
+ config = ChatAgentConfig(
+ llm = OpenAIGPTConfig(
+ chat_model=OpenAIChatModel.GPT4,
+ ),
+ vecdb = None,
+ )
+ repeater_agent = ChatAgent(config)
+ repeater_task = Task(
+ repeater_agent,
+ name = "Repeater",
+ system_message="""
+ Your job is to repeat whatever number you receive.
+ """,
+ llm_delegate=True, # LLM takes charge of task
+ single_round=False,
+ )
+ even_agent = ChatAgent(config)
+ even_task = Task(
+ even_agent,
+ name = "EvenHandler",
+ system_message=f"""
+ You will be given a number.
+ If it is even, divide by 2 and say the result, nothing else.
+ If it is odd, say {NO_ANSWER}
+ """,
+ single_round=True, # task done after 1 step() with valid response
+ )

- odd_agent = ChatAgent(config)
- odd_task = Task(
- odd_agent,
- name = "OddHandler",
- system_message=f"""
- You will be given a number n.
- If it is odd, return (n*3+1), say nothing else.
- If it is even, say {NO_ANSWER}
- """,
- single_round=True, # task done after 1 step() with valid response
- )
+ odd_agent = ChatAgent(config)
+ odd_task = Task(
+ odd_agent,
+ name = "OddHandler",
+ system_message=f"""
+ You will be given a number n.
+ If it is odd, return (n*3+1), say nothing else.
+ If it is even, say {NO_ANSWER}
+ """,
+ single_round=True, # task done after 1 step() with valid response
+ )
  ```
  Then add the `even_task` and `odd_task` as sub-tasks of `repeater_task`,
  and run the `repeater_task`, kicking it off with a number as input:
  ```python
- repeater_task.add_sub_task([even_task, odd_task])
- repeater_task.run("3")
+ repeater_task.add_sub_task([even_task, odd_task])
+ repeater_task.run("3")
  ```
+ ---
+
+ ## Simple Tool/Function-calling example <a name="agent-tool"></a>

- ### Simple Tool/Function-calling example
  Langroid leverages Pydantic to support OpenAI's
  [Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)
- as well as its own native tools.
+ as well as its own native tools. The benefits are that you don't have to write
+ any JSON to specify the schema, and also if the LLM hallucinates a malformed
+ tool syntax, Langroid sends the Pydantic validation error (suitiably sanitized)
+ to the LLM so it can fix it!

  Simple example: Say the agent has a secret list of numbers,
  and we want the LLM to find the smallest number in the list.
@@ -357,7 +384,9 @@ For a full working example see the
  [chat-agent-tool.py](https://github.com/langroid/langroid-examples/blob/main/examples/quick-start/chat-agent-tool.py)
  script in the `langroid-examples` repo.

- ### Tool/Function-calling to extract structured information from text
+ ---
+
+ ## Tool/Function-calling to extract structured information from text <a name="agent-tool-structured"></a>

  Suppose you want an agent to extract
  the key terms of a lease, from a lease document, as a nested JSON structure.
@@ -381,7 +410,7 @@ class Lease(BaseModel):
  ```

  Then define the `LeaseMessage` tool as a subclass of Langroid's `ToolMessage`.
- Note the tool as a required argument `terms` of type `Lease`:
+ Note the tool has a required argument `terms` of type `Lease`:

  ```python
  class LeaseMessage(ToolMessage):
@@ -419,8 +448,9 @@ lease_extractor_agent.enable_message(LeaseMessage)
  See the [`chat_multi_extract.py`](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py)
  script in the `langroid-examples` repo for a full working example.

+ ---

- ### Chat with documents (file paths, URLs, etc)
+ ## Chat with documents (file paths, URLs, etc) <a name="agent-docs"></a>

  Langroid provides a specialized agent class `DocChatAgent` for this purpose.
  It incorporates document sharding, embedding, storage in a vector-DB,
@@ -467,8 +497,9 @@ See full working scripts in the
  [`docqa`](https://github.com/langroid/langroid-examples/tree/main/examples/docqa)
  folder of the `langroid-examples` repo.

+ ---

- ## Contributors
+ # Contributors

  - Prasad Chalasani (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)
  - Somesh Jha (IIT BTech/CS, CMU PhD/CS; Professor of CS, U Wisc at Madison)
@@ -4,61 +4,51 @@
  <h1>Langroid: Harness LLMs with Multi-Agent Programming</h1>
  </div>

+ <div align="center">
+
  [![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)
  [![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)
  [![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)
+ [![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)
+ [![Static Badge](https://img.shields.io/badge/Discord-Orange?link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ&link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ)](https://discord.gg/g3nAXCbZ)
+
+ </div>

  Langroid is an intuitive, lightweight, transparent, flexible, extensible and principled
- Python framework to harness LLMs using Multi-Agent Programming (MAP).
+ Python framework to build LLM-powered applications using Multi-Agent Programming.
  We welcome [contributions](CONTRIBUTING.md)!

+ Langroid is the first Python LLM-application framework that was explicitly
+ designed with Agents as first-class citizens, and Multi-Agent Programming
+ as the core design principle. The framework is inspired by ideas from the
+ [Actor Framework](https://en.wikipedia.org/wiki/Actor_model).
+ For more details see [here](https://langroid.github.io/langroid/).
+
  Documentation: https://langroid.github.io/langroid/

  Usage examples: https://github.com/langroid/langroid-examples

- ## Overview
-
- ### The LLM Opportunity
-
- Given the remarkable abilities of recent Large Language Models (LLMs), there
- is an unprecedented opportunity to build intelligent applications powered by
- this transformative technology. The top question for any enterprise is: how
- best to harness the power of LLMs for complex applications? For technical and
- practical reasons, building LLM-powered applications is not as simple as
- throwing a task at an LLM-system and expecting it to do it.
+ Community: Join us on [Discord!](https://discord.gg/g3nAXCbZ)

- ### Langroid's Multi-Agent Programming Framework
+ ## :rocket: Demo

- Effectively leveraging LLMs at scale requires a *principled programming
- framework*. In particular, there is often a need to maintain multiple LLM
- conversations, each instructed in different ways, and "responsible" for
- different aspects of a task.
+ A `LeaseExtractor` agent is tasked with extracting structured information
+ from a commercial lease document. It generates questions that are
+ answerred by a `DocAgent` using Retrieval from a vector-database
+ (into which the lease has been sharded + embedded).
+ When it has all the information it needs, the `LeaseExtractor` agent
+ presents the information in a structured format using a Tool/Function-calling.

- An *agent* is a convenient abstraction that encapsulates LLM conversation
- state, along with access to long-term memory (vector-stores) and tools (a.k.a functions
- or plugins). Thus a **Multi-Agent Programming** framework is a natural fit
- for complex LLM-based applications.
+ ![Demo](lease-extractor-demo.gif)

- > Langroid is the first Python LLM-application framework that was explicitly
- designed with Agents as first-class citizens, and Multi-Agent Programming
- as the core design principle. The framework is inspired by ideas from the
- [Actor Framework](https://en.wikipedia.org/wiki/Actor_model).

- Langroid allows an intuitive definition of agents, tasks and task-delegation
- among agents. There is a principled mechanism to orchestrate multi-agent
- collaboration. Agents act as message-transformers, and take turns responding to (and
- transforming) the current message. The architecture is lightweight, transparent,
- flexible, and allows other types of orchestration to be implemented.
- Besides Agents, Langroid also provides simple ways to directly interact with
- LLMs and vector-stores.
+ ## :zap: Highlights

- ### Highlights
  Highlights of Langroid's features as of July 2023:

  - **Agents as first-class citizens:** The `Agent` class encapsulates LLM conversation state,
  and optionally a vector-store and tools. Agents are a core abstraction in Langroid;
- Agents act as _message transformers_, and by default provide 3 _responder_ methods,
- one corresponding to each entity: LLM, Agent, User.
+ Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.
  - **Tasks:** A Task class wraps an Agent, and gives the agent instructions (or roles, or goals),
  manages iteration over an Agent's responder methods,
  and orchestrates multi-agent interactions via hierarchical, recursive
@@ -73,7 +63,7 @@ Highlights of Langroid's features as of July 2023:
  GPT-4-0613
  - **Caching of LLM prompts, responses:** Langroid uses [Redis](https://redis.com/try-free/) for caching.
  - **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.
- Vector stores allow for Retrieval-Augmented-Generaation (RAG).
+ Vector stores allow for Retrieval-Augmented-Generation (RAG).
  - **Grounding and source-citation:** Access to external documents via vector-stores
  allows for grounding and source-citation.
  - **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and
@@ -91,11 +81,9 @@ Highlights of Langroid's features as of July 2023:
  hallucinates malformed JSON, the Pydantic error message is sent back to
  the LLM so it can fix it!

- # Usage/quick-start
- These are quick teasers to give a glimpse of what you can do with Langroid
- and how your code would look. See the
- [`Getting Started Guide`](https://langroid.github.io/langroid/getting_started/)
- for a detailed tutorial.
+ ---
+
+ # :gear: Installation and Setup

  ## Install `langroid`
  Use `pip` to install `langroid` (from PyPi) to your virtual environment:
@@ -116,7 +104,7 @@ Copy the `.env-template` file to a new file `.env` and
  insert these secrets:
  - **OpenAI API** key (required): If you don't have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).
  - **Qdrant** Vector Store API Key (required for apps that need retrieval from
- documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io)
+ documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io).
  Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
  We use the local-storage version of Chroma, so there is no need for an API key.
  - **GitHub** Personal Access Token (required for apps that need to analyze git
@@ -141,7 +129,32 @@ QDRANT_API_KEY=<your key>
  Currently only OpenAI models are supported. Others will be added later
  (Pull Requests welcome!).

- ## Direct interaction with OpenAI LLM
+ ---
+
+ # :tada: Usage Examples
+
+ These are quick teasers to give a glimpse of what you can do with Langroid
+ and how your code would look.
+
+ :warning: The code snippets below are intended to give a flavor of the code
+ and they are **not** complete runnable examples! For that we encourage you to
+ consult the [`langroid-examples`](https://github.com/langroid/langroid-examples)
+ repository.
+
+ Also see the
+ [`Getting Started Guide`](https://langroid.github.io/langroid/quick-start/)
+ for a detailed tutorial.
+
+ - [Direct chat with LLM](#direct-llm)
+ - [Simple Agent and Task](#agent-task)
+ - [Three Communicating Agents](#three-agents)
+ - [Agent with Tool/Function-calling](#agent-tool)
+ - [Extract Structured Info with Tool/Function-calling](#agent-tool-structured)
+ - [Retrieval-Augmented-Generation: Chat with Docs](#agent-rag)
+
+ ---
+
+ ## Direct interaction with OpenAI LLM <a name="direct-llm"></a>

  ```python
  from langroid.language_models.openai_gpt import (
@@ -156,11 +169,14 @@ mdl = OpenAIGPT(cfg)
  messages = [
  LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM),
  LLMMessage(content="What is the capital of Ontario?", role=Role.USER),
- ],
+ ]
  response = mdl.chat(messages, max_tokens=200)
+ print(response.message)
  ```

- ## Define an agent, set up a task, and run it
+ ---
+
+ ## Define an agent, set up a task, and run it <a name="agent-task"></a>

  ```python
  from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
@@ -175,13 +191,15 @@ config = ChatAgentConfig(
  )
  agent = ChatAgent(config)
  # get response from agent's LLM, and put this in an interactive loop...
- answer = agent.llm_response("What is the capital of Ontario?")
- # ... or set up a task (which has a built-in loop) and run it
+ # answer = agent.llm_response("What is the capital of Ontario?")
+ # ... OR instead, set up a task (which has a built-in loop) and run it
  task = Task(agent, name="Bot")
- task.run() # ... a loop seeking response from Agent, LLM or User at each turn
+ task.run() # ... a loop seeking response from LLM or User at each turn
  ```

- ## Three communicating agents
+ ---
+
+ ## Three communicating agents <a name="three-agents"></a>

  A toy numbers game, where when given a number `n`:
  - `repeater_agent`'s LLM simply returns `n`,
@@ -191,57 +209,66 @@ A toy numbers game, where when given a number `n`:
  First define the 3 agents, and set up their tasks with instructions:

  ```python
- config = ChatAgentConfig(
- llm = OpenAIGPTConfig(
- chat_model=OpenAIChatModel.GPT4,
- ),
- vecdb = None,
- )
- repeater_agent = ChatAgent(config)
- repeater_task = Task(
- repeater_agent,
- name = "Repeater",
- system_message="""
- Your job is to repeat whatever number you receive.
- """,
- llm_delegate=True, # LLM takes charge of task
- single_round=False,
- )
- even_agent = ChatAgent(config)
- even_task = Task(
- even_agent,
- name = "EvenHandler",
- system_message=f"""
- You will be given a number.
- If it is even, divide by 2 and say the result, nothing else.
- If it is odd, say {NO_ANSWER}
- """,
- single_round=True, # task done after 1 step() with valid response
- )
+ from langroid.utils.constants import NO_ANSWER
+ from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
+ from langroid.agent.task import Task
+ from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
+ config = ChatAgentConfig(
+ llm = OpenAIGPTConfig(
+ chat_model=OpenAIChatModel.GPT4,
+ ),
+ vecdb = None,
+ )
+ repeater_agent = ChatAgent(config)
+ repeater_task = Task(
+ repeater_agent,
+ name = "Repeater",
+ system_message="""
+ Your job is to repeat whatever number you receive.
+ """,
+ llm_delegate=True, # LLM takes charge of task
+ single_round=False,
+ )
+ even_agent = ChatAgent(config)
+ even_task = Task(
+ even_agent,
+ name = "EvenHandler",
+ system_message=f"""
+ You will be given a number.
+ If it is even, divide by 2 and say the result, nothing else.
+ If it is odd, say {NO_ANSWER}
+ """,
+ single_round=True, # task done after 1 step() with valid response
+ )

- odd_agent = ChatAgent(config)
- odd_task = Task(
- odd_agent,
- name = "OddHandler",
- system_message=f"""
- You will be given a number n.
- If it is odd, return (n*3+1), say nothing else.
- If it is even, say {NO_ANSWER}
- """,
- single_round=True, # task done after 1 step() with valid response
- )
+ odd_agent = ChatAgent(config)
+ odd_task = Task(
+ odd_agent,
+ name = "OddHandler",
+ system_message=f"""
+ You will be given a number n.
+ If it is odd, return (n*3+1), say nothing else.
+ If it is even, say {NO_ANSWER}
+ """,
+ single_round=True, # task done after 1 step() with valid response
+ )
  ```
  Then add the `even_task` and `odd_task` as sub-tasks of `repeater_task`,
  and run the `repeater_task`, kicking it off with a number as input:
  ```python
- repeater_task.add_sub_task([even_task, odd_task])
- repeater_task.run("3")
+ repeater_task.add_sub_task([even_task, odd_task])
+ repeater_task.run("3")
  ```
+ ---
+
+ ## Simple Tool/Function-calling example <a name="agent-tool"></a>

- ### Simple Tool/Function-calling example
  Langroid leverages Pydantic to support OpenAI's
  [Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)
- as well as its own native tools.
+ as well as its own native tools. The benefits are that you don't have to write
+ any JSON to specify the schema, and also if the LLM hallucinates a malformed
+ tool syntax, Langroid sends the Pydantic validation error (suitiably sanitized)
+ to the LLM so it can fix it!

  Simple example: Say the agent has a secret list of numbers,
  and we want the LLM to find the smallest number in the list.
@@ -300,7 +327,9 @@ For a full working example see the
  [chat-agent-tool.py](https://github.com/langroid/langroid-examples/blob/main/examples/quick-start/chat-agent-tool.py)
  script in the `langroid-examples` repo.

- ### Tool/Function-calling to extract structured information from text
+ ---
+
+ ## Tool/Function-calling to extract structured information from text <a name="agent-tool-structured"></a>

  Suppose you want an agent to extract
  the key terms of a lease, from a lease document, as a nested JSON structure.
@@ -324,7 +353,7 @@ class Lease(BaseModel):
  ```

  Then define the `LeaseMessage` tool as a subclass of Langroid's `ToolMessage`.
- Note the tool as a required argument `terms` of type `Lease`:
+ Note the tool has a required argument `terms` of type `Lease`:

  ```python
  class LeaseMessage(ToolMessage):
@@ -362,8 +391,9 @@ lease_extractor_agent.enable_message(LeaseMessage)
  See the [`chat_multi_extract.py`](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py)
  script in the `langroid-examples` repo for a full working example.

+ ---

- ### Chat with documents (file paths, URLs, etc)
+ ## Chat with documents (file paths, URLs, etc) <a name="agent-docs"></a>

  Langroid provides a specialized agent class `DocChatAgent` for this purpose.
  It incorporates document sharding, embedding, storage in a vector-DB,
@@ -410,8 +440,9 @@ See full working scripts in the
  [`docqa`](https://github.com/langroid/langroid-examples/tree/main/examples/docqa)
  folder of the `langroid-examples` repo.

+ ---

- ## Contributors
+ # Contributors

  - Prasad Chalasani (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)
  - Somesh Jha (IIT BTech/CS, CMU PhD/CS; Professor of CS, U Wisc at Madison)
@@ -324,7 +324,7 @@ class DocChatAgent(ChatAgent):

  passages = self.original_docs

- # if original docs not too long, no need to look for relevant parts.
+ # if original docs too long, no need to look for relevant parts.
  if (
  passages is None
  or self.original_docs_length > self.config.max_context_tokens
@@ -380,8 +380,13 @@ class DocChatAgent(ChatAgent):
  if self.parser is None:
  raise ValueError("No parser defined")
  tot_tokens = self.parser.num_tokens(full_text)
+ model = (
+ self.config.llm.chat_model
+ if self.config.llm.use_chat_for_completion
+ else self.config.llm.completion_model
+ )
  MAX_INPUT_TOKENS = (
- self.config.llm.context_length[self.config.llm.completion_model]
+ self.config.llm.context_length[model]
  - self.config.llm.max_output_tokens
  - 100
  )
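The `doc_chat_agent.py` hunk above changes which model's context window is consulted when budgeting input tokens: the chat model when `use_chat_for_completion` is set, otherwise the completion model, with the allowed output tokens and a small margin subtracted. A minimal standalone sketch of that budget arithmetic, using hypothetical model names and context sizes rather than Langroid's actual config API:

```python
# Hedged sketch of the input-token budget shown in the hunk above.
# The model names and context lengths here are illustrative assumptions.
CONTEXT_LENGTH = {"gpt-4": 8192, "text-davinci-003": 4096}

def max_input_tokens(
    chat_model: str,
    completion_model: str,
    use_chat_for_completion: bool,
    max_output_tokens: int = 1024,
    safety_margin: int = 100,
) -> int:
    # Budget against the model that will actually serve the request.
    model = chat_model if use_chat_for_completion else completion_model
    return CONTEXT_LENGTH[model] - max_output_tokens - safety_margin

# e.g. 8192 - 1024 - 100 = 7068 tokens left for the input text
print(max_input_tokens("gpt-4", "text-davinci-003", use_chat_for_completion=True))
```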
@@ -1,8 +1,3 @@
- """
- Class that runs the Task loop of an agent;
- maintains state while various responders (agent's own methods, or external sub-tasks)
- take turns attempting to respond to the `self.pending_message`.
- """
  from __future__ import annotations

  import logging
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "langroid"
- version = "0.1.22"
+ version = "0.1.23"
  description = "Harness LLMs with Multi-Agent Programming"
  authors = ["Prasad Chalasani <pchalasani@gmail.com>"]
  readme = "README.md"
@@ -0,0 +1,85 @@
+ # -*- coding: utf-8 -*-
+ from setuptools import setup
+
+ packages = \
+ ['langroid',
+ 'langroid.agent',
+ 'langroid.agent.special',
+ 'langroid.cachedb',
+ 'langroid.embedding_models',
+ 'langroid.language_models',
+ 'langroid.parsing',
+ 'langroid.prompts',
+ 'langroid.scripts',
+ 'langroid.utils',
+ 'langroid.utils.llms',
+ 'langroid.utils.output',
+ 'langroid.utils.web',
+ 'langroid.vector_store']
+
+ package_data = \
+ {'': ['*']}
+
+ install_requires = \
+ ['autopep8>=2.0.2,<3.0.0',
+ 'black[jupyter]>=23.3.0,<24.0.0',
+ 'bs4>=0.0.1,<0.0.2',
+ 'chromadb>=0.3.21,<0.4.0',
+ 'colorlog>=6.7.0,<7.0.0',
+ 'faker>=18.9.0,<19.0.0',
+ 'fakeredis>=2.12.1,<3.0.0',
+ 'fire>=0.5.0,<0.6.0',
+ 'flake8>=6.0.0,<7.0.0',
+ 'halo>=0.0.31,<0.0.32',
+ 'mkdocs-awesome-pages-plugin>=2.8.0,<3.0.0',
+ 'mkdocs-gen-files>=0.4.0,<0.5.0',
+ 'mkdocs-jupyter>=0.24.1,<0.25.0',
+ 'mkdocs-literate-nav>=0.6.0,<0.7.0',
+ 'mkdocs-material>=9.1.5,<10.0.0',
+ 'mkdocs-section-index>=0.3.5,<0.4.0',
+ 'mkdocs>=1.4.2,<2.0.0',
+ 'mkdocstrings[python]>=0.21.2,<0.22.0',
+ 'mypy>=1.2.0,<2.0.0',
+ 'nltk>=3.8.1,<4.0.0',
+ 'openai>=0.27.5,<0.28.0',
+ 'pre-commit>=3.3.2,<4.0.0',
+ 'pydantic==1.10.11',
+ 'pygithub>=1.58.1,<2.0.0',
+ 'pygments>=2.15.1,<3.0.0',
+ 'pyparsing>=3.0.9,<4.0.0',
+ 'python-dotenv>=1.0.0,<2.0.0',
+ 'qdrant-client>=1.3.1,<2.0.0',
+ 'redis>=4.5.5,<5.0.0',
+ 'requests-oauthlib>=1.3.1,<2.0.0',
+ 'requests>=2.31.0,<3.0.0',
+ 'rich>=13.3.4,<14.0.0',
+ 'ruff>=0.0.270,<0.0.271',
+ 'tiktoken>=0.3.3,<0.4.0',
+ 'trafilatura>=1.5.0,<2.0.0',
+ 'typer>=0.7.0,<0.8.0',
+ 'types-redis>=4.5.5.2,<5.0.0.0',
+ 'types-requests>=2.31.0.1,<3.0.0.0',
+ 'wget>=3.2,<4.0']
+
+ extras_require = \
+ {'hf-embeddings': ['sentence-transformers==2.2.2', 'torch==2.0.0']}
+
+ setup_kwargs = {
+ 'name': 'langroid',
+ 'version': '0.1.23',
+ 'description': 'Harness LLMs with Multi-Agent Programming',
+ 'long_description': '<div style="display: flex; align-items: center;">\n <img src="docs/assets/orange-logo.png" alt="Logo" \n width="80" height="80"align="left">\n <h1>Langroid: Harness LLMs with Multi-Agent Programming</h1>\n</div>\n\n<div align="center">\n\n[![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)\n[![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)\n[![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)\n[![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)\n[![Static Badge](https://img.shields.io/badge/Discord-Orange?link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ&link=https%3A%2F%2Fdiscord.gg%2Fg3nAXCbZ)](https://discord.gg/g3nAXCbZ)\n\n</div>\n\nLangroid is an intuitive, lightweight, transparent, flexible, extensible and principled\nPython framework to build LLM-powered applications using Multi-Agent Programming. \nWe welcome [contributions](CONTRIBUTING.md)!\n\nLangroid is the first Python LLM-application framework that was explicitly\ndesigned with Agents as first-class citizens, and Multi-Agent Programming\nas the core design principle. The framework is inspired by ideas from the\n[Actor Framework](https://en.wikipedia.org/wiki/Actor_model).\nFor more details see [here](https://langroid.github.io/langroid/).\n\nDocumentation: https://langroid.github.io/langroid/\n\nUsage examples: https://github.com/langroid/langroid-examples\n\nCommunity: Join us on [Discord!](https://discord.gg/g3nAXCbZ)\n\n## :rocket: Demo\n\nA `LeaseExtractor` agent is tasked with extracting structured information\nfrom a commercial lease document. It generates questions that are \nanswerred by a `DocAgent` using Retrieval from a vector-database\n(into which the lease has been sharded + embedded).\nWhen it has all the information it needs, the `LeaseExtractor` agent\npresents the information in a structured format using a Tool/Function-calling.\n\n![Demo](lease-extractor-demo.gif)\n\n\n## :zap: Highlights\n\nHighlights of Langroid\'s features as of July 2023:\n\n- **Agents as first-class citizens:** The `Agent` class encapsulates LLM conversation state,\n and optionally a vector-store and tools. Agents are a core abstraction in Langroid;\n Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.\n- **Tasks:** A Task class wraps an Agent, and gives the agent instructions (or roles, or goals), \n manages iteration over an Agent\'s responder methods, \n and orchestrates multi-agent interactions via hierarchical, recursive\n task-delegation. 
The `Task.run()` method has the same \n type-signature as an Agent\'s responder\'s methods, and this is key to how \n a task of an agent can delegate to other sub-tasks: from the point of view of a Task,\n sub-tasks are simply additional responders, to be used in a round-robin fashion \n after the agent\'s own responders.\n- **Modularity, Reusabilily, Loose coupling:** The `Agent` and `Task` abstractions allow users to design\n Agents with specific skills, wrap them in Tasks, and combine tasks in a flexible way.\n- **LLM Support**: Langroid supports OpenAI LLMs including GPT-3.5-Turbo,\n GPT-4-0613\n- **Caching of LLM prompts, responses:** Langroid uses [Redis](https://redis.com/try-free/) for caching.\n- **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.\n Vector stores allow for Retrieval-Augmented-Generation (RAG).\n- **Grounding and source-citation:** Access to external documents via vector-stores \n allows for grounding and source-citation.\n- **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and\n maintains provenance/lineage of messages, so that you can trace back\n the origin of a message.\n- **Tools/Plugins/Function-calling**: Langroid supports OpenAI\'s recently\n released [function calling](https://platform.openai.com/docs/guides/gpt/function-calling)\n feature. In addition, Langroid has its own native equivalent, which we\n call **tools** (also known as "plugins" in other contexts). Function\n calling and tools have the same developer-facing interface, implemented\n using [Pydantic](https://docs.pydantic.dev/latest/),\n which makes it very easy to define tools/functions and enable agents\n to use them. Benefits of using Pydantic are that you never have to write\n complex JSON specs for function calling, and when the LLM\n hallucinates malformed JSON, the Pydantic error message is sent back to\n the LLM so it can fix it!\n\n--- \n\n# :gear: Installation and Setup\n\n## Install `langroid` \nUse `pip` to install `langroid` (from PyPi) to your virtual environment:\n```bash\npip install langroid\n```\nThe core Langroid package lets you use OpenAI Embeddings models via their API. \nIf you instead want to use the `all-MiniLM-L6-v2` embeddings model\nfrom from HuggingFace, install Langroid like this:\n```bash\npip install langroid[hf-embeddings]\n```\nNote that this will install `torch` and `sentence-transfoemers` libraries.\n\n## Set up environment variables (API keys, etc)\n\nCopy the `.env-template` file to a new file `.env` and \ninsert these secrets:\n- **OpenAI API** key (required): If you don\'t have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).\n- **Qdrant** Vector Store API Key (required for apps that need retrieval from\n documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io).\n Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported. \n We use the local-storage version of Chroma, so there is no need for an API key.\n- **GitHub** Personal Access Token (required for apps that need to analyze git\n repos; token-based API calls are less rate-limited). 
See this\n [GitHub page](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens).\n- **Redis** Password (optional, only needed to cache LLM API responses):\n Redis [offers](https://redis.com/try-free/) a free 30MB Redis account\n which is more than sufficient to try out Langroid and even beyond.\n \n```bash\ncp .env-template .env\n# now edit the .env file, insert your secrets as above\n``` \nYour `.env` file should look like this:\n```bash\nOPENAI_API_KEY=<your key>\nGITHUB_ACCESS_TOKEN=<your token>\nREDIS_PASSWORD=<your password>\nQDRANT_API_KEY=<your key>\n```\n\nCurrently only OpenAI models are supported. Others will be added later\n(Pull Requests welcome!).\n\n---\n\n# :tada: Usage Examples\n\nThese are quick teasers to give a glimpse of what you can do with Langroid\nand how your code would look. \n\n:warning: The code snippets below are intended to give a flavor of the code\nand they are **not** complete runnable examples! For that we encourage you to \nconsult the [`langroid-examples`](https://github.com/langroid/langroid-examples) \nrepository.\n\nAlso see the\n[`Getting Started Guide`](https://langroid.github.io/langroid/quick-start/)\nfor a detailed tutorial. \n\n- [Direct chat with LLM](#direct-llm)\n- [Simple Agent and Task](#agent-task)\n- [Three Communicating Agents](#three-agents)\n- [Agent with Tool/Function-calling](#agent-tool)\n- [Extract Structured Info with Tool/Function-calling](#agent-tool-structured)\n- [Retrieval-Augmented-Generation: Chat with Docs](#agent-rag)\n\n---\n\n## Direct interaction with OpenAI LLM <a name="direct-llm"></a>\n\n```python\nfrom langroid.language_models.openai_gpt import ( \n OpenAIGPTConfig, OpenAIChatModel, OpenAIGPT,\n)\nfrom langroid.language_models.base import LLMMessage, Role\n\ncfg = OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4)\n\nmdl = OpenAIGPT(cfg)\n\nmessages = [\n LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM), \n LLMMessage(content="What is the capital of Ontario?", role=Role.USER),\n]\nresponse = mdl.chat(messages, max_tokens=200)\nprint(response.message)\n```\n\n---\n\n## Define an agent, set up a task, and run it <a name="agent-task"></a>\n\n```python\nfrom langroid.agent.chat_agent import ChatAgent, ChatAgentConfig\nfrom langroid.agent.task import Task\nfrom langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig\n\nconfig = ChatAgentConfig(\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=None, # no vector store\n)\nagent = ChatAgent(config)\n# get response from agent\'s LLM, and put this in an interactive loop...\n# answer = agent.llm_response("What is the capital of Ontario?")\n # ... OR instead, set up a task (which has a built-in loop) and run it\ntask = Task(agent, name="Bot") \ntask.run() # ... 
a loop seeking response from LLM or User at each turn\n```\n\n---\n\n## Three communicating agents <a name="three-agents"></a>\n\nA toy numbers game, where when given a number `n`:\n- `repeater_agent`\'s LLM simply returns `n`,\n- `even_agent`\'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"\n- `odd_agent`\'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"\n\nFirst define the 3 agents, and set up their tasks with instructions:\n\n```python\nfrom langroid.utils.constants import NO_ANSWER\nfrom langroid.agent.chat_agent import ChatAgent, ChatAgentConfig\nfrom langroid.agent.task import Task\nfrom langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig\nconfig = ChatAgentConfig(\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb = None,\n)\nrepeater_agent = ChatAgent(config)\nrepeater_task = Task(\n repeater_agent,\n name = "Repeater",\n system_message="""\n Your job is to repeat whatever number you receive.\n """,\n llm_delegate=True, # LLM takes charge of task\n single_round=False, \n)\neven_agent = ChatAgent(config)\neven_task = Task(\n even_agent,\n name = "EvenHandler",\n system_message=f"""\n You will be given a number. \n If it is even, divide by 2 and say the result, nothing else.\n If it is odd, say {NO_ANSWER}\n """,\n single_round=True, # task done after 1 step() with valid response\n)\n\nodd_agent = ChatAgent(config)\nodd_task = Task(\n odd_agent,\n name = "OddHandler",\n system_message=f"""\n You will be given a number n. \n If it is odd, return (n*3+1), say nothing else. \n If it is even, say {NO_ANSWER}\n """,\n single_round=True, # task done after 1 step() with valid response\n)\n```\nThen add the `even_task` and `odd_task` as sub-tasks of `repeater_task`, \nand run the `repeater_task`, kicking it off with a number as input:\n```python\nrepeater_task.add_sub_task([even_task, odd_task])\nrepeater_task.run("3")\n```\n---\n\n## Simple Tool/Function-calling example <a name="agent-tool"></a>\n\nLangroid leverages Pydantic to support OpenAI\'s\n[Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)\nas well as its own native tools. The benefits are that you don\'t have to write\nany JSON to specify the schema, and also if the LLM hallucinates a malformed\ntool syntax, Langroid sends the Pydantic validation error (suitiably sanitized) \nto the LLM so it can fix it!\n\nSimple example: Say the agent has a secret list of numbers, \nand we want the LLM to find the smallest number in the list. \nWe want to give the LLM a `probe` tool/function which takes a\nsingle number `n` as argument. 
The tool handler method in the agent\nreturns how many numbers in its list are at most `n`.\n\nFirst define the tool using Langroid\'s `ToolMessage` class:\n\n\n```python\nfrom langroid.agent.tool_message import ToolMessage\nclass ProbeTool(ToolMessage):\n request: str = "probe" # specifies which agent method handles this tool\n purpose: str = """\n To find how many numbers in my list are less than or equal to \n the <number> you specify.\n """ # description used to instruct the LLM on when/how to use the tool\n number: int # required argument to the tool\n```\n\nThen define a `SpyGameAgent` as a subclass of `ChatAgent`, \nwith a method `probe` that handles this tool:\n\n```python\nfrom langroid.agent.chat_agent import ChatAgent, ChatAgentConfig\nclass SpyGameAgent(ChatAgent):\n def __init__(self, config: ChatAgentConfig):\n super().__init__(config)\n self.numbers = [3, 4, 8, 11, 15, 25, 40, 80, 90]\n\n def probe(self, msg: ProbeTool) -> str:\n # return how many numbers in self.numbers are less or equal to msg.number\n return str(len([n for n in self.numbers if n <= msg.number]))\n```\n\nWe then instantiate the agent and enable it to use and respond to the tool:\n\n```python\nfrom langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig\nspy_game_agent = SpyGameAgent(\n ChatAgentConfig(\n name="Spy",\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=None,\n use_tools=False, # don\'t use Langroid native tool\n use_functions_api=True, # use OpenAI function-call API\n )\n)\nspy_game_agent.enable_message(ProbeTool)\n```\n\nFor a full working example see the\n[chat-agent-tool.py](https://github.com/langroid/langroid-examples/blob/main/examples/quick-start/chat-agent-tool.py)\nscript in the `langroid-examples` repo.\n\n---\n\n## Tool/Function-calling to extract structured information from text <a name="agent-tool-structured"></a>\n\nSuppose you want an agent to extract \nthe key terms of a lease, from a lease document, as a nested JSON structure.\nFirst define the desired structure via Pydantic models:\n\n```python\nfrom pydantic import BaseModel\nclass LeasePeriod(BaseModel):\n start_date: str\n end_date: str\n\n\nclass LeaseFinancials(BaseModel):\n monthly_rent: str\n deposit: str\n\nclass Lease(BaseModel):\n period: LeasePeriod\n financials: LeaseFinancials\n address: str\n```\n\nThen define the `LeaseMessage` tool as a subclass of Langroid\'s `ToolMessage`.\nNote the tool has a required argument `terms` of type `Lease`:\n\n```python\nclass LeaseMessage(ToolMessage):\n request: str = "lease_info"\n purpose: str = """\n Collect information about a Commercial Lease.\n """\n terms: Lease\n```\n\nThen define a `LeaseExtractorAgent` with a method `lease_info` that handles this tool,\ninstantiate the agent, and enable it to use and respond to this tool:\n\n```python\nclass LeaseExtractorAgent(ChatAgent):\n def lease_info(self, message: LeaseMessage) -> str:\n print(\n f"""\n DONE! 
Successfully extracted Lease Info:\n {message.terms}\n """\n )\n return json.dumps(message.terms.dict())\n \nlease_extractor_agent = LeaseExtractorAgent(\n ChatAgentConfig(\n llm=OpenAIGPTConfig(),\n use_functions_api=False,\n use_tools=True,\n )\n)\nlease_extractor_agent.enable_message(LeaseMessage)\n```\n\nSee the [`chat_multi_extract.py`](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py)\nscript in the `langroid-examples` repo for a full working example.\n\n---\n\n## Chat with documents (file paths, URLs, etc) <a name="agent-docs"></a>\n\nLangroid provides a specialized agent class `DocChatAgent` for this purpose.\nIt incorporates document sharding, embedding, storage in a vector-DB, \nand retrieval-augmented query-answer generation.\nUsing this class to chat with a collection of documents is easy.\nFirst create a `DocChatAgentConfig` instance, with a \n`doc_paths` field that specifies the documents to chat with.\n\n```python\nfrom langroid.agent.doc_chat_agent import DocChatAgentConfig\nconfig = DocChatAgentConfig(\n doc_paths = [\n "https://en.wikipedia.org/wiki/Language_model",\n "https://en.wikipedia.org/wiki/N-gram_language_model",\n "/path/to/my/notes-on-language-models.txt",\n ]\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=VectorStoreConfig(\n type="qdrant",\n ),\n)\n```\n\nThen instantiate the `DocChatAgent`, ingest the docs into the vector-store:\n\n```python\nagent = DocChatAgent(config)\nagent.ingest()\n```\nThen we can either ask the agent one-off questions,\n```python\nagent.chat("What is a language model?")\n```\nor wrap it in a `Task` and run an interactive loop with the user:\n```python\nfrom langroid.task import Task\ntask = Task(agent)\ntask.run()\n```\n\nSee full working scripts in the \n[`docqa`](https://github.com/langroid/langroid-examples/tree/main/examples/docqa)\nfolder of the `langroid-examples` repo.\n\n---\n\n# Contributors\n\n- Prasad Chalasani (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)\n- Somesh Jha (IIT BTech/CS, CMU PhD/CS; Professor of CS, U Wisc at Madison)\n- Mohannad Alhanahnah (Research Associate, U Wisc at Madison)\n- Ashish Hooda (IIT BTech/CS; PhD Candidate, U Wisc at Madison)\n\n',
+ 'author': 'Prasad Chalasani',
+ 'author_email': 'pchalasani@gmail.com',
+ 'maintainer': 'None',
+ 'maintainer_email': 'None',
+ 'url': 'None',
+ 'packages': packages,
+ 'package_data': package_data,
+ 'install_requires': install_requires,
+ 'extras_require': extras_require,
+ 'python_requires': '>=3.8.1,<3.12',
+ }
+
+
+ setup(**setup_kwargs)
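Apart from the regenerated `long_description`, the 0.1.23 metadata shown above carries over unchanged from 0.1.22: the same dependency pins, the same `hf-embeddings` extra, and the same `python_requires` range; the version string and README text are the substantive differences. A minimal post-upgrade sanity check is sketched below; it is illustrative only (not part of the package) and assumes the new release is already installed in the current environment, e.g. via `pip install -U langroid`.

```python
# Illustrative post-upgrade check (not part of langroid itself).
from importlib.metadata import version, metadata

print(version("langroid"))            # expected: "0.1.23"

meta = metadata("langroid")
print(meta["Requires-Python"])        # ">=3.8.1,<3.12", matching python_requires above
print(meta["Summary"])                # "Harness LLMs with Multi-Agent Programming"
```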
langroid-0.1.22/setup.py DELETED
@@ -1,85 +0,0 @@
- # -*- coding: utf-8 -*-
- from setuptools import setup
-
- packages = \
- ['langroid',
- 'langroid.agent',
- 'langroid.agent.special',
- 'langroid.cachedb',
- 'langroid.embedding_models',
- 'langroid.language_models',
- 'langroid.parsing',
- 'langroid.prompts',
- 'langroid.scripts',
- 'langroid.utils',
- 'langroid.utils.llms',
- 'langroid.utils.output',
- 'langroid.utils.web',
- 'langroid.vector_store']
-
- package_data = \
- {'': ['*']}
-
- install_requires = \
- ['autopep8>=2.0.2,<3.0.0',
- 'black[jupyter]>=23.3.0,<24.0.0',
- 'bs4>=0.0.1,<0.0.2',
- 'chromadb>=0.3.21,<0.4.0',
- 'colorlog>=6.7.0,<7.0.0',
- 'faker>=18.9.0,<19.0.0',
- 'fakeredis>=2.12.1,<3.0.0',
- 'fire>=0.5.0,<0.6.0',
- 'flake8>=6.0.0,<7.0.0',
- 'halo>=0.0.31,<0.0.32',
- 'mkdocs-awesome-pages-plugin>=2.8.0,<3.0.0',
- 'mkdocs-gen-files>=0.4.0,<0.5.0',
- 'mkdocs-jupyter>=0.24.1,<0.25.0',
- 'mkdocs-literate-nav>=0.6.0,<0.7.0',
- 'mkdocs-material>=9.1.5,<10.0.0',
- 'mkdocs-section-index>=0.3.5,<0.4.0',
- 'mkdocs>=1.4.2,<2.0.0',
- 'mkdocstrings[python]>=0.21.2,<0.22.0',
- 'mypy>=1.2.0,<2.0.0',
- 'nltk>=3.8.1,<4.0.0',
- 'openai>=0.27.5,<0.28.0',
- 'pre-commit>=3.3.2,<4.0.0',
- 'pydantic==1.10.11',
- 'pygithub>=1.58.1,<2.0.0',
- 'pygments>=2.15.1,<3.0.0',
- 'pyparsing>=3.0.9,<4.0.0',
- 'python-dotenv>=1.0.0,<2.0.0',
- 'qdrant-client>=1.3.1,<2.0.0',
- 'redis>=4.5.5,<5.0.0',
- 'requests-oauthlib>=1.3.1,<2.0.0',
- 'requests>=2.31.0,<3.0.0',
- 'rich>=13.3.4,<14.0.0',
- 'ruff>=0.0.270,<0.0.271',
- 'tiktoken>=0.3.3,<0.4.0',
- 'trafilatura>=1.5.0,<2.0.0',
- 'typer>=0.7.0,<0.8.0',
- 'types-redis>=4.5.5.2,<5.0.0.0',
- 'types-requests>=2.31.0.1,<3.0.0.0',
- 'wget>=3.2,<4.0']
-
- extras_require = \
- {'hf-embeddings': ['sentence-transformers==2.2.2', 'torch==2.0.0']}
-
- setup_kwargs = {
- 'name': 'langroid',
- 'version': '0.1.22',
- 'description': 'Harness LLMs with Multi-Agent Programming',
- 'long_description': '<div style="display: flex; align-items: center;">\n <img src="docs/assets/orange-logo.png" alt="Logo" \n width="80" height="80"align="left">\n <h1>Langroid: Harness LLMs with Multi-Agent Programming</h1>\n</div>\n\n[![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)\n[![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)\n[![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)\n\nLangroid is an intuitive, lightweight, transparent, flexible, extensible and principled\nPython framework to harness LLMs using Multi-Agent Programming (MAP).\nWe welcome [contributions](CONTRIBUTING.md)!\n\nDocumentation: https://langroid.github.io/langroid/\n\nUsage examples: https://github.com/langroid/langroid-examples\n\n## Overview\n\n### The LLM Opportunity\n\nGiven the remarkable abilities of recent Large Language Models (LLMs), there\nis an unprecedented opportunity to build intelligent applications powered by\nthis transformative technology. The top question for any enterprise is: how\nbest to harness the power of LLMs for complex applications? For technical and\npractical reasons, building LLM-powered applications is not as simple as\nthrowing a task at an LLM-system and expecting it to do it.\n\n### Langroid\'s Multi-Agent Programming Framework\n\nEffectively leveraging LLMs at scale requires a *principled programming\nframework*. In particular, there is often a need to maintain multiple LLM\nconversations, each instructed in different ways, and "responsible" for\ndifferent aspects of a task.\n\nAn *agent* is a convenient abstraction that encapsulates LLM conversation\nstate, along with access to long-term memory (vector-stores) and tools (a.k.a functions\nor plugins). Thus a **Multi-Agent Programming** framework is a natural fit\nfor complex LLM-based applications.\n\n> Langroid is the first Python LLM-application framework that was explicitly\ndesigned with Agents as first-class citizens, and Multi-Agent Programming\nas the core design principle. The framework is inspired by ideas from the\n[Actor Framework](https://en.wikipedia.org/wiki/Actor_model).\n\nLangroid allows an intuitive definition of agents, tasks and task-delegation\namong agents. There is a principled mechanism to orchestrate multi-agent\ncollaboration. Agents act as message-transformers, and take turns responding to (and\ntransforming) the current message. The architecture is lightweight, transparent,\nflexible, and allows other types of orchestration to be implemented.\nBesides Agents, Langroid also provides simple ways to directly interact with \nLLMs and vector-stores.\n\n### Highlights\nHighlights of Langroid\'s features as of July 2023:\n\n- **Agents as first-class citizens:** The `Agent` class encapsulates LLM conversation state,\n and optionally a vector-store and tools. 
Agents are a core abstraction in Langroid;\n Agents act as _message transformers_, and by default provide 3 _responder_ methods, \n one corresponding to each entity: LLM, Agent, User.\n- **Tasks:** A Task class wraps an Agent, and gives the agent instructions (or roles, or goals), \n manages iteration over an Agent\'s responder methods, \n and orchestrates multi-agent interactions via hierarchical, recursive\n task-delegation. The `Task.run()` method has the same \n type-signature as an Agent\'s responder\'s methods, and this is key to how \n a task of an agent can delegate to other sub-tasks: from the point of view of a Task,\n sub-tasks are simply additional responders, to be used in a round-robin fashion \n after the agent\'s own responders.\n- **Modularity, Reusabilily, Loose coupling:** The `Agent` and `Task` abstractions allow users to design\n Agents with specific skills, wrap them in Tasks, and combine tasks in a flexible way.\n- **LLM Support**: Langroid supports OpenAI LLMs including GPT-3.5-Turbo,\n GPT-4-0613\n- **Caching of LLM prompts, responses:** Langroid uses [Redis](https://redis.com/try-free/) for caching.\n- **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.\n Vector stores allow for Retrieval-Augmented-Generaation (RAG).\n- **Grounding and source-citation:** Access to external documents via vector-stores \n allows for grounding and source-citation.\n- **Observability, Logging, Lineage:** Langroid generates detailed logs of multi-agent interactions and\n maintains provenance/lineage of messages, so that you can trace back\n the origin of a message.\n- **Tools/Plugins/Function-calling**: Langroid supports OpenAI\'s recently\n released [function calling](https://platform.openai.com/docs/guides/gpt/function-calling)\n feature. In addition, Langroid has its own native equivalent, which we\n call **tools** (also known as "plugins" in other contexts). Function\n calling and tools have the same developer-facing interface, implemented\n using [Pydantic](https://docs.pydantic.dev/latest/),\n which makes it very easy to define tools/functions and enable agents\n to use them. Benefits of using Pydantic are that you never have to write\n complex JSON specs for function calling, and when the LLM\n hallucinates malformed JSON, the Pydantic error message is sent back to\n the LLM so it can fix it!\n\n# Usage/quick-start\nThese are quick teasers to give a glimpse of what you can do with Langroid\nand how your code would look. See the \n[`Getting Started Guide`](https://langroid.github.io/langroid/getting_started/)\nfor a detailed tutorial.\n\n## Install `langroid` \nUse `pip` to install `langroid` (from PyPi) to your virtual environment:\n```bash\npip install langroid\n```\nThe core Langroid package lets you use OpenAI Embeddings models via their API. 
\nIf you instead want to use the `all-MiniLM-L6-v2` embeddings model\nfrom from HuggingFace, install Langroid like this:\n```bash\npip install langroid[hf-embeddings]\n```\nNote that this will install `torch` and `sentence-transfoemers` libraries.\n\n## Set up environment variables (API keys, etc)\n\nCopy the `.env-template` file to a new file `.env` and \ninsert these secrets:\n- **OpenAI API** key (required): If you don\'t have one, see [this OpenAI Page](https://help.openai.com/en/collections/3675940-getting-started-with-openai-api).\n- **Qdrant** Vector Store API Key (required for apps that need retrieval from\n documents): Sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io)\n Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported. \n We use the local-storage version of Chroma, so there is no need for an API key.\n- **GitHub** Personal Access Token (required for apps that need to analyze git\n repos; token-based API calls are less rate-limited). See this\n [GitHub page](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens).\n- **Redis** Password (optional, only needed to cache LLM API responses):\n Redis [offers](https://redis.com/try-free/) a free 30MB Redis account\n which is more than sufficient to try out Langroid and even beyond.\n \n```bash\ncp .env-template .env\n# now edit the .env file, insert your secrets as above\n``` \nYour `.env` file should look like this:\n```bash\nOPENAI_API_KEY=<your key>\nGITHUB_ACCESS_TOKEN=<your token>\nREDIS_PASSWORD=<your password>\nQDRANT_API_KEY=<your key>\n```\n\nCurrently only OpenAI models are supported. Others will be added later\n(Pull Requests welcome!).\n\n## Direct interaction with OpenAI LLM\n\n```python\nfrom langroid.language_models.openai_gpt import ( \n OpenAIGPTConfig, OpenAIChatModel, OpenAIGPT,\n)\nfrom langroid.language_models.base import LLMMessage, Role\n\ncfg = OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4)\n\nmdl = OpenAIGPT(cfg)\n\nmessages = [\n LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM), \n LLMMessage(content="What is the capital of Ontario?", role=Role.USER),\n],\nresponse = mdl.chat(messages, max_tokens=200)\n```\n\n## Define an agent, set up a task, and run it\n\n```python\nfrom langroid.agent.chat_agent import ChatAgent, ChatAgentConfig\nfrom langroid.agent.task import Task\nfrom langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig\n\nconfig = ChatAgentConfig(\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=None, # no vector store\n)\nagent = ChatAgent(config)\n# get response from agent\'s LLM, and put this in an interactive loop...\nanswer = agent.llm_response("What is the capital of Ontario?")\n# ... or set up a task (which has a built-in loop) and run it\ntask = Task(agent, name="Bot") \ntask.run() # ... 
a loop seeking response from Agent, LLM or User at each turn\n```\n\n## Three communicating agents\n\nA toy numbers game, where when given a number `n`:\n- `repeater_agent`\'s LLM simply returns `n`,\n- `even_agent`\'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"\n- `odd_agent`\'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"\n\nFirst define the 3 agents, and set up their tasks with instructions:\n\n```python\n config = ChatAgentConfig(\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb = None,\n )\n repeater_agent = ChatAgent(config)\n repeater_task = Task(\n repeater_agent,\n name = "Repeater",\n system_message="""\n Your job is to repeat whatever number you receive.\n """,\n llm_delegate=True, # LLM takes charge of task\n single_round=False, \n )\n even_agent = ChatAgent(config)\n even_task = Task(\n even_agent,\n name = "EvenHandler",\n system_message=f"""\n You will be given a number. \n If it is even, divide by 2 and say the result, nothing else.\n If it is odd, say {NO_ANSWER}\n """,\n single_round=True, # task done after 1 step() with valid response\n )\n\n odd_agent = ChatAgent(config)\n odd_task = Task(\n odd_agent,\n name = "OddHandler",\n system_message=f"""\n You will be given a number n. \n If it is odd, return (n*3+1), say nothing else. \n If it is even, say {NO_ANSWER}\n """,\n single_round=True, # task done after 1 step() with valid response\n )\n```\nThen add the `even_task` and `odd_task` as sub-tasks of `repeater_task`, \nand run the `repeater_task`, kicking it off with a number as input:\n```python\n repeater_task.add_sub_task([even_task, odd_task])\n repeater_task.run("3")\n```\n\n### Simple Tool/Function-calling example\nLangroid leverages Pydantic to support OpenAI\'s\n[Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)\nas well as its own native tools.\n\nSimple example: Say the agent has a secret list of numbers, \nand we want the LLM to find the smallest number in the list. \nWe want to give the LLM a `probe` tool/function which takes a\nsingle number `n` as argument. 
The tool handler method in the agent\nreturns how many numbers in its list are at most `n`.\n\nFirst define the tool using Langroid\'s `ToolMessage` class:\n\n\n```python\nfrom langroid.agent.tool_message import ToolMessage\nclass ProbeTool(ToolMessage):\n request: str = "probe" # specifies which agent method handles this tool\n purpose: str = """\n To find how many numbers in my list are less than or equal to \n the <number> you specify.\n """ # description used to instruct the LLM on when/how to use the tool\n number: int # required argument to the tool\n```\n\nThen define a `SpyGameAgent` as a subclass of `ChatAgent`, \nwith a method `probe` that handles this tool:\n\n```python\nfrom langroid.agent.chat_agent import ChatAgent, ChatAgentConfig\nclass SpyGameAgent(ChatAgent):\n def __init__(self, config: ChatAgentConfig):\n super().__init__(config)\n self.numbers = [3, 4, 8, 11, 15, 25, 40, 80, 90]\n\n def probe(self, msg: ProbeTool) -> str:\n # return how many numbers in self.numbers are less or equal to msg.number\n return str(len([n for n in self.numbers if n <= msg.number]))\n```\n\nWe then instantiate the agent and enable it to use and respond to the tool:\n\n```python\nfrom langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig\nspy_game_agent = SpyGameAgent(\n ChatAgentConfig(\n name="Spy",\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=None,\n use_tools=False, # don\'t use Langroid native tool\n use_functions_api=True, # use OpenAI function-call API\n )\n)\nspy_game_agent.enable_message(ProbeTool)\n```\n\nFor a full working example see the\n[chat-agent-tool.py](https://github.com/langroid/langroid-examples/blob/main/examples/quick-start/chat-agent-tool.py)\nscript in the `langroid-examples` repo.\n\n### Tool/Function-calling to extract structured information from text\n\nSuppose you want an agent to extract \nthe key terms of a lease, from a lease document, as a nested JSON structure.\nFirst define the desired structure via Pydantic models:\n\n```python\nfrom pydantic import BaseModel\nclass LeasePeriod(BaseModel):\n start_date: str\n end_date: str\n\n\nclass LeaseFinancials(BaseModel):\n monthly_rent: str\n deposit: str\n\nclass Lease(BaseModel):\n period: LeasePeriod\n financials: LeaseFinancials\n address: str\n```\n\nThen define the `LeaseMessage` tool as a subclass of Langroid\'s `ToolMessage`.\nNote the tool as a required argument `terms` of type `Lease`:\n\n```python\nclass LeaseMessage(ToolMessage):\n request: str = "lease_info"\n purpose: str = """\n Collect information about a Commercial Lease.\n """\n terms: Lease\n```\n\nThen define a `LeaseExtractorAgent` with a method `lease_info` that handles this tool,\ninstantiate the agent, and enable it to use and respond to this tool:\n\n```python\nclass LeaseExtractorAgent(ChatAgent):\n def lease_info(self, message: LeaseMessage) -> str:\n print(\n f"""\n DONE! 
Successfully extracted Lease Info:\n {message.terms}\n """\n )\n return json.dumps(message.terms.dict())\n \nlease_extractor_agent = LeaseExtractorAgent(\n ChatAgentConfig(\n llm=OpenAIGPTConfig(),\n use_functions_api=False,\n use_tools=True,\n )\n)\nlease_extractor_agent.enable_message(LeaseMessage)\n```\n\nSee the [`chat_multi_extract.py`](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py)\nscript in the `langroid-examples` repo for a full working example.\n\n\n### Chat with documents (file paths, URLs, etc)\n\nLangroid provides a specialized agent class `DocChatAgent` for this purpose.\nIt incorporates document sharding, embedding, storage in a vector-DB, \nand retrieval-augmented query-answer generation.\nUsing this class to chat with a collection of documents is easy.\nFirst create a `DocChatAgentConfig` instance, with a \n`doc_paths` field that specifies the documents to chat with.\n\n```python\nfrom langroid.agent.doc_chat_agent import DocChatAgentConfig\nconfig = DocChatAgentConfig(\n doc_paths = [\n "https://en.wikipedia.org/wiki/Language_model",\n "https://en.wikipedia.org/wiki/N-gram_language_model",\n "/path/to/my/notes-on-language-models.txt",\n ]\n llm = OpenAIGPTConfig(\n chat_model=OpenAIChatModel.GPT4,\n ),\n vecdb=VectorStoreConfig(\n type="qdrant",\n ),\n)\n```\n\nThen instantiate the `DocChatAgent`, ingest the docs into the vector-store:\n\n```python\nagent = DocChatAgent(config)\nagent.ingest()\n```\nThen we can either ask the agent one-off questions,\n```python\nagent.chat("What is a language model?")\n```\nor wrap it in a `Task` and run an interactive loop with the user:\n```python\nfrom langroid.task import Task\ntask = Task(agent)\ntask.run()\n```\n\nSee full working scripts in the \n[`docqa`](https://github.com/langroid/langroid-examples/tree/main/examples/docqa)\nfolder of the `langroid-examples` repo.\n\n\n## Contributors\n\n- Prasad Chalasani (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)\n- Somesh Jha (IIT BTech/CS, CMU PhD/CS; Professor of CS, U Wisc at Madison)\n- Mohannad Alhanahnah (Research Associate, U Wisc at Madison)\n- Ashish Hooda (IIT BTech/CS; PhD Candidate, U Wisc at Madison)\n\n',
- 'author': 'Prasad Chalasani',
- 'author_email': 'pchalasani@gmail.com',
- 'maintainer': 'None',
- 'maintainer_email': 'None',
- 'url': 'None',
- 'packages': packages,
- 'package_data': package_data,
- 'install_requires': install_requires,
- 'extras_require': extras_require,
- 'python_requires': '>=3.8.1,<3.12',
- }
-
-
- setup(**setup_kwargs)
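The deleted 0.1.22 `setup.py` above appears to be generated by the build backend from the project's `pyproject.toml`, and the 0.1.23 sdist regenerates it with the new version and README. One detail worth noting is the `hf-embeddings` extra (`sentence-transformers` plus `torch`), which is declared identically in both versions. A hedged sketch of how downstream code might guard on that optional extra follows; the helper and its fallback behaviour are illustrative assumptions for this example, not langroid's actual logic.

```python
# Illustrative guard for the optional `hf-embeddings` extra; the fallback shown
# here is an assumption for the example, not langroid's built-in behaviour.
def have_hf_embeddings() -> bool:
    try:
        import sentence_transformers  # noqa: F401  (from `pip install "langroid[hf-embeddings]"`)
        import torch  # noqa: F401
        return True
    except ImportError:
        return False


if __name__ == "__main__":
    if have_hf_embeddings():
        print("Local HuggingFace embeddings (e.g. all-MiniLM-L6-v2) are available.")
    else:
        print("Falling back to the OpenAI embeddings API.")
```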