langroid 0.1.139__py3-none-any.whl → 0.1.219__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. langroid/__init__.py +70 -0
  2. langroid/agent/__init__.py +22 -0
  3. langroid/agent/base.py +120 -33
  4. langroid/agent/batch.py +134 -35
  5. langroid/agent/callbacks/__init__.py +0 -0
  6. langroid/agent/callbacks/chainlit.py +608 -0
  7. langroid/agent/chat_agent.py +164 -100
  8. langroid/agent/chat_document.py +19 -2
  9. langroid/agent/openai_assistant.py +20 -10
  10. langroid/agent/special/__init__.py +33 -10
  11. langroid/agent/special/doc_chat_agent.py +521 -108
  12. langroid/agent/special/lance_doc_chat_agent.py +258 -0
  13. langroid/agent/special/lance_rag/__init__.py +9 -0
  14. langroid/agent/special/lance_rag/critic_agent.py +136 -0
  15. langroid/agent/special/lance_rag/lance_rag_task.py +80 -0
  16. langroid/agent/special/lance_rag/query_planner_agent.py +180 -0
  17. langroid/agent/special/lance_tools.py +44 -0
  18. langroid/agent/special/neo4j/__init__.py +0 -0
  19. langroid/agent/special/neo4j/csv_kg_chat.py +174 -0
  20. langroid/agent/special/neo4j/neo4j_chat_agent.py +370 -0
  21. langroid/agent/special/neo4j/utils/__init__.py +0 -0
  22. langroid/agent/special/neo4j/utils/system_message.py +46 -0
  23. langroid/agent/special/relevance_extractor_agent.py +23 -7
  24. langroid/agent/special/retriever_agent.py +29 -174
  25. langroid/agent/special/sql/__init__.py +7 -0
  26. langroid/agent/special/sql/sql_chat_agent.py +47 -23
  27. langroid/agent/special/sql/utils/__init__.py +11 -0
  28. langroid/agent/special/sql/utils/description_extractors.py +95 -46
  29. langroid/agent/special/sql/utils/populate_metadata.py +28 -21
  30. langroid/agent/special/table_chat_agent.py +43 -9
  31. langroid/agent/task.py +423 -114
  32. langroid/agent/tool_message.py +67 -10
  33. langroid/agent/tools/__init__.py +8 -0
  34. langroid/agent/tools/duckduckgo_search_tool.py +66 -0
  35. langroid/agent/tools/google_search_tool.py +11 -0
  36. langroid/agent/tools/metaphor_search_tool.py +67 -0
  37. langroid/agent/tools/recipient_tool.py +6 -24
  38. langroid/agent/tools/sciphi_search_rag_tool.py +79 -0
  39. langroid/cachedb/__init__.py +6 -0
  40. langroid/embedding_models/__init__.py +24 -0
  41. langroid/embedding_models/base.py +9 -1
  42. langroid/embedding_models/models.py +117 -17
  43. langroid/embedding_models/protoc/embeddings.proto +19 -0
  44. langroid/embedding_models/protoc/embeddings_pb2.py +33 -0
  45. langroid/embedding_models/protoc/embeddings_pb2.pyi +50 -0
  46. langroid/embedding_models/protoc/embeddings_pb2_grpc.py +79 -0
  47. langroid/embedding_models/remote_embeds.py +153 -0
  48. langroid/language_models/__init__.py +22 -0
  49. langroid/language_models/azure_openai.py +47 -4
  50. langroid/language_models/base.py +26 -10
  51. langroid/language_models/config.py +5 -0
  52. langroid/language_models/openai_gpt.py +407 -121
  53. langroid/language_models/prompt_formatter/__init__.py +9 -0
  54. langroid/language_models/prompt_formatter/base.py +4 -6
  55. langroid/language_models/prompt_formatter/hf_formatter.py +135 -0
  56. langroid/language_models/utils.py +10 -9
  57. langroid/mytypes.py +10 -4
  58. langroid/parsing/__init__.py +33 -1
  59. langroid/parsing/document_parser.py +259 -63
  60. langroid/parsing/image_text.py +32 -0
  61. langroid/parsing/parse_json.py +143 -0
  62. langroid/parsing/parser.py +20 -7
  63. langroid/parsing/repo_loader.py +108 -46
  64. langroid/parsing/search.py +8 -0
  65. langroid/parsing/table_loader.py +44 -0
  66. langroid/parsing/url_loader.py +59 -13
  67. langroid/parsing/urls.py +18 -9
  68. langroid/parsing/utils.py +130 -9
  69. langroid/parsing/web_search.py +73 -0
  70. langroid/prompts/__init__.py +7 -0
  71. langroid/prompts/chat-gpt4-system-prompt.md +68 -0
  72. langroid/prompts/prompts_config.py +1 -1
  73. langroid/utils/__init__.py +10 -0
  74. langroid/utils/algorithms/__init__.py +3 -0
  75. langroid/utils/configuration.py +0 -1
  76. langroid/utils/constants.py +4 -0
  77. langroid/utils/logging.py +2 -5
  78. langroid/utils/output/__init__.py +15 -2
  79. langroid/utils/output/status.py +33 -0
  80. langroid/utils/pandas_utils.py +30 -0
  81. langroid/utils/pydantic_utils.py +446 -4
  82. langroid/utils/system.py +36 -1
  83. langroid/vector_store/__init__.py +34 -2
  84. langroid/vector_store/base.py +33 -2
  85. langroid/vector_store/chromadb.py +42 -13
  86. langroid/vector_store/lancedb.py +226 -60
  87. langroid/vector_store/meilisearch.py +7 -6
  88. langroid/vector_store/momento.py +3 -2
  89. langroid/vector_store/qdrantdb.py +82 -11
  90. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/METADATA +190 -129
  91. langroid-0.1.219.dist-info/RECORD +127 -0
  92. langroid/agent/special/recipient_validator_agent.py +0 -157
  93. langroid/parsing/json.py +0 -64
  94. langroid/utils/web/selenium_login.py +0 -36
  95. langroid-0.1.139.dist-info/RECORD +0 -103
  96. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/LICENSE +0 -0
  97. {langroid-0.1.139.dist-info → langroid-0.1.219.dist-info}/WHEEL +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.1.139
+ Version: 0.1.219
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -10,80 +10,101 @@ Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Provides-Extra: chainlit
+ Provides-Extra: chromadb
  Provides-Extra: hf-embeddings
  Provides-Extra: litellm
+ Provides-Extra: metaphor
+ Provides-Extra: mkdocs
  Provides-Extra: mysql
+ Provides-Extra: neo4j
  Provides-Extra: postgres
+ Provides-Extra: sciphi
+ Provides-Extra: transformers
+ Provides-Extra: unstructured
+ Requires-Dist: agent-search (>=0.0.7,<0.0.8) ; extra == "sciphi"
+ Requires-Dist: aiohttp (>=3.9.1,<4.0.0)
  Requires-Dist: async-generator (>=1.10,<2.0)
  Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
- Requires-Dist: black[jupyter] (>=23.3.0,<24.0.0)
+ Requires-Dist: black[jupyter] (>=24.3.0,<25.0.0)
  Requires-Dist: bs4 (>=0.0.1,<0.0.2)
- Requires-Dist: chromadb (==0.3.21)
+ Requires-Dist: chainlit (>=1.0.400,<2.0.0) ; extra == "chainlit"
+ Requires-Dist: chromadb (>=0.4.21,<=0.4.23) ; extra == "chromadb"
  Requires-Dist: colorlog (>=6.7.0,<7.0.0)
  Requires-Dist: docstring-parser (>=0.15,<0.16)
+ Requires-Dist: duckduckgo-search (>=4.4,<5.0)
  Requires-Dist: faker (>=18.9.0,<19.0.0)
  Requires-Dist: fakeredis (>=2.12.1,<3.0.0)
- Requires-Dist: farm-haystack[file-conversion,ocr,pdf,preprocessing] (>=1.21.1,<2.0.0)
  Requires-Dist: fire (>=0.5.0,<0.6.0)
  Requires-Dist: flake8 (>=6.0.0,<7.0.0)
  Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
+ Requires-Dist: grpcio (>=1.62.1,<2.0.0)
  Requires-Dist: halo (>=0.0.31,<0.0.32)
+ Requires-Dist: huggingface-hub (>=0.21.2,<0.22.0) ; extra == "transformers"
  Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
- Requires-Dist: lancedb (>=0.3.0,<0.4.0)
- Requires-Dist: litellm (>=1.0.0,<2.0.0) ; extra == "litellm"
+ Requires-Dist: lancedb (>=0.6.2,<0.7.0)
+ Requires-Dist: litellm (>=1.30.1,<2.0.0) ; extra == "litellm"
  Requires-Dist: lxml (>=4.9.3,<5.0.0)
  Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
- Requires-Dist: meilisearch-python-sdk (>=2.0.1,<3.0.0)
- Requires-Dist: mkdocs (>=1.4.2,<2.0.0)
- Requires-Dist: mkdocs-awesome-pages-plugin (>=2.8.0,<3.0.0)
- Requires-Dist: mkdocs-gen-files (>=0.4.0,<0.5.0)
- Requires-Dist: mkdocs-jupyter (>=0.24.1,<0.25.0)
- Requires-Dist: mkdocs-literate-nav (>=0.6.0,<0.7.0)
- Requires-Dist: mkdocs-material (>=9.1.5,<10.0.0)
- Requires-Dist: mkdocs-rss-plugin (>=1.8.0,<2.0.0)
- Requires-Dist: mkdocs-section-index (>=0.3.5,<0.4.0)
- Requires-Dist: mkdocstrings[python] (>=0.21.2,<0.22.0)
+ Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
+ Requires-Dist: metaphor-python (>=0.1.23,<0.2.0) ; extra == "metaphor"
+ Requires-Dist: mkdocs (>=1.4.2,<2.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-awesome-pages-plugin (>=2.8.0,<3.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-gen-files (>=0.4.0,<0.5.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-jupyter (>=0.24.1,<0.25.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-literate-nav (>=0.6.0,<0.7.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-material (>=9.1.5,<10.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-rss-plugin (>=1.8.0,<2.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-section-index (>=0.3.5,<0.4.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocstrings[python] (>=0.21.2,<0.22.0) ; extra == "mkdocs"
  Requires-Dist: momento (>=1.10.2,<2.0.0)
  Requires-Dist: mypy (>=1.7.0,<2.0.0)
+ Requires-Dist: neo4j (>=5.14.1,<6.0.0) ; extra == "neo4j"
  Requires-Dist: nltk (>=3.8.1,<4.0.0)
  Requires-Dist: onnxruntime (==1.16.1)
- Requires-Dist: openai (>=1.2.3,<2.0.0)
+ Requires-Dist: openai (>=1.14.0,<2.0.0)
  Requires-Dist: pandas (>=2.0.3,<3.0.0)
+ Requires-Dist: pdf2image (>=1.17.0,<2.0.0)
  Requires-Dist: pdfplumber (>=0.10.2,<0.11.0)
  Requires-Dist: pre-commit (>=3.3.2,<4.0.0)
  Requires-Dist: prettytable (>=3.8.0,<4.0.0)
  Requires-Dist: psycopg2 (>=2.9.7,<3.0.0) ; extra == "postgres"
- Requires-Dist: pydantic (==1.10.11)
+ Requires-Dist: pyarrow (==15.0.0)
+ Requires-Dist: pydantic (==1.10.13)
  Requires-Dist: pygithub (>=1.58.1,<2.0.0)
  Requires-Dist: pygments (>=2.15.1,<3.0.0)
  Requires-Dist: pymupdf (>=1.23.3,<2.0.0)
  Requires-Dist: pymysql (>=1.1.0,<2.0.0) ; extra == "mysql"
  Requires-Dist: pyparsing (>=3.0.9,<4.0.0)
  Requires-Dist: pypdf (>=3.12.2,<4.0.0)
+ Requires-Dist: pytesseract (>=0.3.10,<0.4.0)
  Requires-Dist: pytest-asyncio (>=0.21.1,<0.22.0)
  Requires-Dist: pytest-mysql (>=2.4.2,<3.0.0) ; extra == "mysql"
  Requires-Dist: pytest-postgresql (>=5.0.0,<6.0.0) ; extra == "postgres"
  Requires-Dist: pytest-redis (>=3.0.2,<4.0.0)
+ Requires-Dist: python-docx (>=1.1.0,<2.0.0)
  Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
- Requires-Dist: qdrant-client (>=1.7.0,<2.0.0)
+ Requires-Dist: python-socketio (>=5.11.0,<6.0.0) ; extra == "chainlit"
+ Requires-Dist: qdrant-client (>=1.8.0,<2.0.0)
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0)
  Requires-Dist: redis (>=5.0.1,<6.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
  Requires-Dist: requests-oauthlib (>=1.3.1,<2.0.0)
  Requires-Dist: rich (>=13.3.4,<14.0.0)
- Requires-Dist: ruff (>=0.0.270,<0.0.271)
+ Requires-Dist: ruff (>=0.2.2,<0.3.0)
  Requires-Dist: scrapy (>=2.11.0,<3.0.0)
  Requires-Dist: sentence-transformers (==2.2.2) ; extra == "hf-embeddings"
  Requires-Dist: sqlalchemy (>=2.0.19,<3.0.0)
- Requires-Dist: tantivy (==0.20.1)
+ Requires-Dist: tantivy (>=0.21.0,<0.22.0)
  Requires-Dist: thefuzz (>=0.20.0,<0.21.0)
  Requires-Dist: tiktoken (>=0.5.1,<0.6.0)
  Requires-Dist: torch (==2.0.0) ; extra == "hf-embeddings"
  Requires-Dist: trafilatura (>=1.5.0,<2.0.0)
  Requires-Dist: typer (>=0.9.0,<0.10.0)
+ Requires-Dist: types-pyyaml (>=6.0.12.20240311,<7.0.0.0)
  Requires-Dist: types-redis (>=4.5.5.2,<5.0.0.0)
  Requires-Dist: types-requests (>=2.31.0.1,<3.0.0.0)
- Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18)
+ Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18) ; extra == "unstructured"
  Requires-Dist: wget (>=3.2,<4.0)
  Description-Content-Type: text/markdown

@@ -134,11 +155,16 @@ This Multi-Agent paradigm is inspired by the
  `Langroid` is a fresh take on LLM app-development, where considerable thought has gone
  into simplifying the developer experience; it does not use `Langchain`.

+ :fire: See this [Intro to Langroid](https://lancedb.substack.com/p/langoid-multi-agent-programming-framework)
+ blog post from the LanceDB team
+
+
  We welcome contributions -- See the [contributions](./CONTRIBUTING.md) document
  for ideas on what to contribute.

-
- Building LLM Applications? [Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) is available for consulting
+ Are you building LLM Applications, or want help with Langroid for your company,
+ or want to prioritize Langroid features for your company use-cases?
+ [Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) is available for consulting
  (advisory/development): pchalasani at gmail dot com.

  Sponsorship is also accepted via [GitHub Sponsors](https://github.com/sponsors/langroid)
@@ -149,46 +175,49 @@ Sponsorship is also accepted via [GitHub Sponsors](https://github.com/sponsors/l
  This is just a teaser; there's much more, like function-calling/tools,
  Multi-Agent Collaboration, Structured Information Extraction, DocChatAgent
  (RAG), SQLChatAgent, non-OpenAI local/remote LLMs, etc. Scroll down or see docs for more.
+ See the Langroid Quick-Start [Colab](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)
+ that builds up to a 2-agent information-extraction example using the OpenAI ChatCompletion API.
+ See also this [version](https://colab.research.google.com/drive/190Tk7t4AdY1P9F_NlZ33-YEoGnHweQQ0) that uses the OpenAI Assistants API instead.

- :fire: Just released! Updated Langroid Quick-Start [Colab](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)
- that builds up to a 2-agent chat example using the OpenAI ChatCompletion API.
- See also this [version](https://colab.research.google.com/drive/190Tk7t4AdY1P9F_NlZ33-YEoGnHweQQ0)
- that uses the OpenAI Assistants API instead.
+ :fire: just released! [Example](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat-multi-extract-local.py)
+ script showing how you can use Langroid multi-agents and tools
+ to extract structured information from a document using **only a local LLM**
+ (Mistral-7b-instruct-v0.2).

  ```python
- from langroid.language_models import OpenAIGPTConfig, OpenAIChatModel, OpenAIGPT
- from langroid import ChatAgent, ChatAgentConfig, Task
+ import langroid as lr
+ import langroid.language_models as lm

  # set up LLM
- llm_cfg = OpenAIGPTConfig( # or OpenAIAssistant to use Assistant API
+ llm_cfg = lm.OpenAIGPTConfig( # or OpenAIAssistant to use Assistant API
      # any model served via an OpenAI-compatible API
-     chat_model=OpenAIChatModel.GPT4_TURBO, # or, e.g., "local/ollama/mistral"
+     chat_model=lm.OpenAIChatModel.GPT4_TURBO, # or, e.g., "ollama/mistral"
  )
  # use LLM directly
- mdl = OpenAIGPT(llm_cfg)
+ mdl = lm.OpenAIGPT(llm_cfg)
  response = mdl.chat("What is the capital of Ontario?", max_tokens=10)

  # use LLM in an Agent
- agent_cfg = ChatAgentConfig(llm=llm_cfg)
- agent = ChatAgent(agent_cfg)
+ agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
+ agent = lr.ChatAgent(agent_cfg)
  agent.llm_response("What is the capital of China?")
  response = agent.llm_response("And India?") # maintains conversation state

  # wrap Agent in a Task to run interactive loop with user (or other agents)
- task = Task(agent, name="Bot", system_message="You are a helpful assistant")
+ task = lr.Task(agent, name="Bot", system_message="You are a helpful assistant")
  task.run("Hello") # kick off with user saying "Hello"

  # 2-Agent chat loop: Teacher Agent asks questions to Student Agent
- teacher_agent = ChatAgent(agent_cfg)
- teacher_task = Task(
+ teacher_agent = lr.ChatAgent(agent_cfg)
+ teacher_task = lr.Task(
      teacher_agent, name="Teacher",
      system_message="""
      Ask your student concise numbers questions, and give feedback.
      Start with a question.
      """
  )
- student_agent = ChatAgent(agent_cfg)
- student_task = Task(
+ student_agent = lr.ChatAgent(agent_cfg)
+ student_task = lr.Task(
      student_agent, name="Student",
      system_message="Concisely answer the teacher's questions.",
      single_round=True,
@@ -198,11 +227,68 @@ teacher_task.add_sub_task(student_task)
  teacher_task.run()
  ```

- <details>
- <summary> <b>:fire: Updates/Releases</b></summary>
+ # :fire: Updates/Releases

+ <details>
+ <summary> <b>Click to expand</b></summary>
+
+ - **Mar 2024:**
+   - **0.1.216:** Improvements to allow concurrent runs of `DocChatAgent`, see the
+     [`test_doc_chat_agent.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_doc_chat_agent.py),
+     in particular the `test_doc_chat_batch()`;
+     New task run utility: [`run_batch_task_gen`](https://github.com/langroid/langroid/blob/main/langroid/agent/batch.py)
+     where a task generator can be specified, to generate one task per input.
+   - **0.1.212:** ImagePdfParser: support for extracting text from image-based PDFs.
+     (this means `DocChatAgent` will now work with image-pdfs).
+   - **0.1.194 - 0.1.211:** Misc fixes, improvements, and features:
+     - Big enhancement in RAG performance (mainly, recall) due to a [fix in Relevance
+       Extractor](https://github.com/langroid/langroid/releases/tag/0.1.209)
+     - `DocChatAgent` [context-window fixes](https://github.com/langroid/langroid/releases/tag/0.1.208)
+     - Anthropic/Claude3 support via Litellm
+     - `URLLoader`: detect file type from the header when a URL doesn't end with a
+       recognizable suffix like `.pdf`, `.docx`, etc.
+     - Misc lancedb integration fixes
+     - Auto-select embedding config based on whether the `sentence_transformer` module is available.
+     - Slim down dependencies, make some heavy ones optional, e.g. `unstructured`,
+       `haystack`, `chromadb`, `mkdocs`, `huggingface-hub`, `sentence-transformers`.
+     - Easier top-level imports from `import langroid as lr`
+     - Improve JSON detection, esp. from weak LLMs
+ - **Feb 2024:**
+   - **0.1.193:** Support local LLMs using Ollama's new OpenAI-Compatible server:
+     simply specify `chat_model="ollama/mistral"`. See [release notes](https://github.com/langroid/langroid/releases/tag/0.1.193).
+   - **0.1.183:** Added Chainlit support via [callbacks](https://github.com/langroid/langroid/blob/main/langroid/agent/callbacks/chainlit.py).
+     See [examples](https://github.com/langroid/langroid/tree/main/examples/chainlit).
+ - **Jan 2024:**
+   - **0.1.175**
+     - [Neo4jChatAgent](https://github.com/langroid/langroid/tree/main/langroid/agent/special/neo4j) to chat with a neo4j knowledge-graph
+       (thanks to [Mohannad](https://github.com/Mohannadcse)!). The agent uses tools to query the Neo4j schema and translate user queries to Cypher queries,
+       and the tool handler executes these queries, returning them to the LLM to compose
+       a natural language response (analogous to how `SQLChatAgent` works).
+       See example [script](https://github.com/langroid/langroid/tree/main/examples/kg-chat) using this Agent to answer questions about Python pkg dependencies.
+     - Support for `.doc` file parsing (in addition to `.docx`)
+     - Specify optional [`formatter` param](https://github.com/langroid/langroid/releases/tag/0.1.171)
+       in `OpenAIGPTConfig` to ensure accurate chat formatting for local LLMs.
+   - **[0.1.157](https://github.com/langroid/langroid/releases/tag/0.1.157):** `DocChatAgentConfig`
+     has a new param: `add_fields_to_content`, to specify additional document fields to insert into
+     the main `content` field, to help improve retrieval.
+   - **[0.1.156](https://github.com/langroid/langroid/releases/tag/0.1.156):** New Task control signals
+     PASS_TO, SEND_TO; VectorStore: Compute Pandas expression on documents; LanceRAGTaskCreator creates 3-agent RAG system with Query Planner, Critic and RAG Agent.
+ - **Dec 2023:**
+   - **0.1.154:** (For details see release notes of [0.1.149](https://github.com/langroid/langroid/releases/tag/0.1.149)
+     and [0.1.154](https://github.com/langroid/langroid/releases/tag/0.1.154)).
+     - `DocChatAgent`: Ingest Pandas dataframes, with filtering.
+     - `LanceDocChatAgent` leverages the `LanceDB` vector-db for efficient vector search
+       as well as full-text search and filtering.
+     - Improved task and multi-agent control mechanisms
+     - `LanceRAGTaskCreator` to create a 2-agent system consisting of a `LanceFilterAgent` that
+       decides a filter and rephrased query to send to a RAG agent.
+   - **[0.1.141](https://github.com/langroid/langroid/releases/tag/0.1.141):**
+     API Simplifications to reduce boilerplate:
+     auto-select an available OpenAI model (preferring gpt-4-turbo), simpler defaults.
+     Simpler `Task` initialization with a default `ChatAgent`.
  - **Nov 2023:**
-   - **0.1.126:** OpenAIAssistant agent: Caching Support. See [release notes](https://github.com/langroid/langroid/releases/tag/0.1.126).
+   - **[0.1.126](https://github.com/langroid/langroid/releases/tag/0.1.126):**
+     OpenAIAssistant agent: Caching Support.
    - **0.1.117:** Support for OpenAI Assistant API tools: Function-calling,
      Code-interpreter, and Retriever (RAG), file uploads. These work seamlessly
      with Langroid's task-orchestration.
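
As a quick illustration of the local-LLM support noted above (release 0.1.193), the teaser code can be pointed at a locally served model. This is a minimal sketch, assuming an Ollama server is running on its default port and the `mistral` model has been pulled; the context-length value is illustrative:

```python
import langroid as lr
import langroid.language_models as lm

# assumes `ollama pull mistral` has been done and the Ollama server is running
local_llm_cfg = lm.OpenAIGPTConfig(
    chat_model="ollama/mistral",   # per release 0.1.193
    chat_context_length=8192,      # illustrative; match your model's context size
)
agent = lr.ChatAgent(lr.ChatAgentConfig(llm=local_llm_cfg))
print(agent.llm_response("What is the capital of Ontario?").content)
```

The same config can also be passed to `lm.OpenAIGPT(local_llm_cfg)` for direct LLM calls, as in the teaser code above.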
@@ -283,6 +369,8 @@ See [this test](tests/main/test_recipient_tool.py) for example usage.
  Suppose you want to extract structured information about the key terms
  of a commercial lease document. You can easily do this with Langroid using a two-agent system,
  as we show in the [langroid-examples](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py) repo.
+ (See [this script](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat-multi-extract-local.py)
+ for a version with the same functionality using a local Mistral-7b model.)
  The demo showcases just a few of the many features of Langroid, such as:
  - Multi-agent collaboration: `LeaseExtractor` is in charge of the task, and its LLM (GPT4) generates questions
    to be answered by the `DocAgent`.
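
The wiring of that two-agent system can be sketched roughly as follows; this is illustrative only (the document path, task names and system message are placeholders), and the tool-based version in the linked scripts is the full treatment:

```python
import langroid as lr
from langroid.agent.special import DocChatAgent, DocChatAgentConfig

# DocAgent: answers questions grounded in the lease document (RAG)
doc_agent = DocChatAgent(DocChatAgentConfig(doc_paths=["/path/to/lease.pdf"]))
doc_task = lr.Task(doc_agent, name="DocAgent", single_round=True)

# LeaseExtractor: in charge of the task; its LLM generates questions for DocAgent
lease_task = lr.Task(
    lr.ChatAgent(),
    name="LeaseExtractor",
    system_message="Ask concise questions to extract the key terms of the lease.",
)
lease_task.add_sub_task(doc_task)
lease_task.run()
```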
@@ -299,7 +387,9 @@ Here is what it looks like in action


  # :zap: Highlights
-
+ (For a more up-to-date list see the
+ [release](https://github.com/langroid/langroid?tab=readme-ov-file#fire-updatesreleases)
+ section above)
  - **Agents as first-class citizens:** The [Agent](https://langroid.github.io/langroid/reference/agent/base/#langroid.agent.base.Agent) class encapsulates LLM conversation state,
    and optionally a vector-store and tools. Agents are a core abstraction in Langroid;
    Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.
@@ -353,6 +443,15 @@ install Langroid like this:
  ```bash
  pip install langroid[hf-embeddings]
  ```
+ If using `zsh` (or similar shells), you may need to escape the square brackets, e.g.:
+ ```
+ pip install langroid\[hf-embeddings\]
+ ```
+ or use quotes:
+ ```
+ pip install "langroid[hf-embeddings]"
+ ```
+

  <details>
  <summary><b>Optional Installs for using SQL Chat with a PostgreSQL DB </b></summary>
@@ -460,8 +559,8 @@ provides more information, and you can set each environment variable as follows:
  - `AZURE_OPENAI_API_BASE` from the value of `ENDPOINT`, typically looks like `https://your.domain.azure.com`.
  - For `AZURE_OPENAI_API_VERSION`, you can use the default value in `.env-template`, and the latest version can be found [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new#azure-openai-chat-completion-general-availability-ga)
  - `AZURE_OPENAI_DEPLOYMENT_NAME` is the name of the deployed model, which is defined by the user during the model setup
- - `AZURE_OPENAI_MODEL_NAME` GPT-3.5-Turbo or GPT-4 model names that you chose when you set up your Azure OpenAI account.
-
+ - `AZURE_OPENAI_MODEL_NAME` Azure OpenAI allows specific model names when you select the model for your deployment. You need to put precisely the exact model name that was selected. For example, GPT-3.5 (should be `gpt-35-turbo-16k` or `gpt-35-turbo`) or GPT-4 (should be `gpt-4-32k` or `gpt-4`).
+ - `AZURE_OPENAI_MODEL_VERSION` is required if `AZURE_OPENAI_MODEL_NAME = gpt-4`, which will assist Langroid in determining the cost of the model
  </details>

  ---
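
Putting the above together, the Azure-related variables can be set like this before running Langroid (placeholder values only; the remaining variables from `.env-template`, such as the API key, are set the same way):

```python
import os

# placeholder values -- substitute your own Azure OpenAI resource details
os.environ["AZURE_OPENAI_API_BASE"] = "https://your.domain.azure.com"
os.environ["AZURE_OPENAI_API_VERSION"] = "your-api-version"        # default from .env-template also works
os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = "your-deployment-name"
os.environ["AZURE_OPENAI_MODEL_NAME"] = "gpt-35-turbo-16k"         # the exact model name selected
os.environ["AZURE_OPENAI_MODEL_VERSION"] = "your-model-version"    # needed when MODEL_NAME is gpt-4
```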
@@ -520,19 +619,15 @@ All of these can be run in a Colab notebook:
  <summary> <b> Direct interaction with OpenAI LLM </b> </summary>

  ```python
- from langroid.language_models.openai_gpt import (
-     OpenAIGPTConfig, OpenAIChatModel, OpenAIGPT,
- )
- from langroid.language_models.base import LLMMessage, Role
+ import langroid.language_models as lm

- cfg = OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4)
-
- mdl = OpenAIGPT(cfg)
+ mdl = lm.OpenAIGPT()

  messages = [
-     LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM),
-     LLMMessage(content="What is the capital of Ontario?", role=Role.USER),
+     lm.LLMMessage(content="You are a helpful assistant", role=lm.Role.SYSTEM),
+     lm.LLMMessage(content="What is the capital of Ontario?", role=lm.Role.USER),
  ]
+
  response = mdl.chat(messages, max_tokens=200)
  print(response.message)
  ```
@@ -543,11 +638,11 @@ print(response.message)

  Local model: if model is served at `http://localhost:8000`:

  ```python
- cfg = OpenAIGPTConfig(
+ cfg = lm.OpenAIGPTConfig(
      chat_model="local/localhost:8000",
      chat_context_length=4096
  )
- mdl = OpenAIGPT(cfg)
+ mdl = lm.OpenAIGPT(cfg)
  # now interact with it as above, or create an Agent + Task as shown below.
  ```
@@ -574,21 +669,14 @@ pip install langroid[litellm]
  <summary> <b> Define an agent, set up a task, and run it </b> </summary>

  ```python
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- from langroid.agent.task import Task
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
-
- config = ChatAgentConfig(
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb=None, # no vector store
- )
- agent = ChatAgent(config)
+ import langroid as lr
+
+ agent = lr.ChatAgent()
+

  # get response from agent's LLM, and put this in an interactive loop...
  # answer = agent.llm_response("What is the capital of Ontario?")
  # ... OR instead, set up a task (which has a built-in loop) and run it
- task = Task(agent, name="Bot")
+ task = lr.Task(agent, name="Bot")
  task.run() # ... a loop seeking response from LLM or User at each turn
  ```
  </details>
@@ -597,26 +685,17 @@ task.run() # ... a loop seeking response from LLM or User at each turn
  <summary><b> Three communicating agents </b></summary>

  A toy numbers game, where when given a number `n`:
- - `repeater_agent`'s LLM simply returns `n`,
- - `even_agent`'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"
- - `odd_agent`'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"
+ - `repeater_task`'s LLM simply returns `n`,
+ - `even_task`'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"
+ - `odd_task`'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"

- First define the 3 agents, and set up their tasks with instructions:
+ Each of these `Task`s automatically configures a default `ChatAgent`.

  ```python
+ import langroid as lr
  from langroid.utils.constants import NO_ANSWER
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- from langroid.agent.task import Task
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
- config = ChatAgentConfig(
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb = None,
- )
- repeater_agent = ChatAgent(config)
- repeater_task = Task(
-     repeater_agent,
+
+ repeater_task = lr.Task(
      name = "Repeater",
      system_message="""
      Your job is to repeat whatever number you receive.
@@ -624,9 +703,8 @@ repeater_task = Task(
      llm_delegate=True, # LLM takes charge of task
      single_round=False,
  )
- even_agent = ChatAgent(config)
- even_task = Task(
-     even_agent,
+
+ even_task = lr.Task(
      name = "EvenHandler",
      system_message=f"""
      You will be given a number.
@@ -636,9 +714,7 @@ even_task = Task(
      single_round=True, # task done after 1 step() with valid response
  )

- odd_agent = ChatAgent(config)
- odd_task = Task(
-     odd_agent,
+ odd_task = lr.Task(
      name = "OddHandler",
      system_message=f"""
      You will be given a number n.
@@ -677,8 +753,9 @@ First define the tool using Langroid's `ToolMessage` class:


  ```python
- from langroid.agent.tool_message import ToolMessage
- class ProbeTool(ToolMessage):
+ import langroid as lr
+
+ class ProbeTool(lr.agent.ToolMessage):
      request: str = "probe" # specifies which agent method handles this tool
      purpose: str = """
          To find how many numbers in my list are less than or equal to
@@ -691,9 +768,8 @@ Then define a `SpyGameAgent` as a subclass of `ChatAgent`,
  with a method `probe` that handles this tool:

  ```python
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- class SpyGameAgent(ChatAgent):
-     def __init__(self, config: ChatAgentConfig):
+ class SpyGameAgent(lr.ChatAgent):
+     def __init__(self, config: lr.ChatAgentConfig):
          super().__init__(config)
          self.numbers = [3, 4, 8, 11, 15, 25, 40, 80, 90]

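A sketch of the `probe` handler itself (assuming, for illustration, that `ProbeTool` carries an `int` field named `number`, as suggested by its `purpose` string):

```python
    # hypothetical handler sketch: counts how many of the agent's numbers
    # are <= the probed value; assumes ProbeTool has an int field `number`
    def probe(self, msg: ProbeTool) -> str:
        return str(len([n for n in self.numbers if n <= msg.number]))
```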
@@ -705,13 +781,9 @@ class SpyGameAgent(ChatAgent):
  We then instantiate the agent and enable it to use and respond to the tool:

  ```python
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
  spy_game_agent = SpyGameAgent(
-     ChatAgentConfig(
+     lr.ChatAgentConfig(
          name="Spy",
-         llm = OpenAIGPTConfig(
-             chat_model=OpenAIChatModel.GPT4,
-         ),
          vecdb=None,
          use_tools=False, # don't use Langroid native tool
          use_functions_api=True, # use OpenAI function-call API
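With the agent instantiated, the tool is enabled and the agent is wrapped in a `Task`, along the same lines as the other examples here (a sketch; the full interactive game loop is in the linked docs and examples):

```python
spy_game_agent.enable_message(ProbeTool)   # let the agent's LLM use, and the agent handle, this tool
task = lr.Task(spy_game_agent, name="Spy")
task.run()
```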
@@ -753,7 +825,9 @@ Then define the `LeaseMessage` tool as a subclass of Langroid's `ToolMessage`.
  Note the tool has a required argument `terms` of type `Lease`:

  ```python
- class LeaseMessage(ToolMessage):
+ import langroid as lr
+
+ class LeaseMessage(lr.agent.ToolMessage):
      request: str = "lease_info"
      purpose: str = """
          Collect information about a Commercial Lease.
@@ -765,7 +839,7 @@ Then define a `LeaseExtractorAgent` with a method `lease_info` that handles this
  instantiate the agent, and enable it to use and respond to this tool:

  ```python
- class LeaseExtractorAgent(ChatAgent):
+ class LeaseExtractorAgent(lr.ChatAgent):
      def lease_info(self, message: LeaseMessage) -> str:
          print(
              f"""
@@ -775,13 +849,7 @@ class LeaseExtractorAgent(ChatAgent):
          )
          return json.dumps(message.terms.dict())

- lease_extractor_agent = LeaseExtractorAgent(
-     ChatAgentConfig(
-         llm=OpenAIGPTConfig(),
-         use_functions_api=False,
-         use_tools=True,
-     )
- )
+ lease_extractor_agent = LeaseExtractorAgent()
  lease_extractor_agent.enable_message(LeaseMessage)
  ```

@@ -800,18 +868,16 @@ First create a `DocChatAgentConfig` instance, with a
  `doc_paths` field that specifies the documents to chat with.

  ```python
- from langroid.agent.doc_chat_agent import DocChatAgentConfig
- from langroid.vector_store.lancedb import LanceDBConfig
+ import langroid as lr
+ from langroid.agent.special import DocChatAgentConfig, DocChatAgent
+
  config = DocChatAgentConfig(
      doc_paths = [
          "https://en.wikipedia.org/wiki/Language_model",
          "https://en.wikipedia.org/wiki/N-gram_language_model",
          "/path/to/my/notes-on-language-models.txt",
-     ]
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb=LanceDBConfig(),
+     ],
+     vecdb=lr.vector_store.LanceDBConfig(),
  )
  ```

@@ -822,12 +888,11 @@ agent = DocChatAgent(config)
  ```
  Then we can either ask the agent one-off questions,
  ```python
- agent.chat("What is a language model?")
+ agent.llm_response("What is a language model?")
  ```
  or wrap it in a `Task` and run an interactive loop with the user:
  ```python
- from langroid.task import Task
- task = Task(agent)
+ task = lr.Task(agent)
  task.run()
  ```

@@ -847,9 +912,8 @@ executes the code and returns the answer.
  Here is how you can do this:

  ```python
- from langroid.agent.special.table_chat_agent import TableChatAgent, TableChatAgentConfig
- from langroid.agent.task import Task
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
+ import langroid as lr
+ from langroid.agent.special import TableChatAgent, TableChatAgentConfig
  ```

  Set up a `TableChatAgent` for a data file, URL or dataframe
@@ -860,17 +924,14 @@ dataset = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quali
  # or dataset = pd.read_csv("/path/to/my/data.csv")
  agent = TableChatAgent(
      config=TableChatAgentConfig(
-         data=dataset,
-         llm=OpenAIGPTConfig(
-             chat_model=OpenAIChatModel.GPT4,
-         ),
+         data=dataset,
      )
  )
  ```
  Set up a task, and ask one-off questions like this:

  ```python
- task = Task(
+ task = lr.Task(
      agent,
      name = "DataAssistant",
      default_human_response="", # to avoid waiting for user input
@@ -884,7 +945,7 @@ print(result.content)

  Or alternatively, set up a task and run it in an interactive loop with the user:

  ```python
- task = Task(agent, name="DataAssistant")
+ task = lr.Task(agent, name="DataAssistant")
  task.run()
  ```
