langroid 0.1.85__py3-none-any.whl → 0.1.219__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. langroid/__init__.py +95 -0
  2. langroid/agent/__init__.py +40 -0
  3. langroid/agent/base.py +222 -91
  4. langroid/agent/batch.py +264 -0
  5. langroid/agent/callbacks/chainlit.py +608 -0
  6. langroid/agent/chat_agent.py +247 -101
  7. langroid/agent/chat_document.py +41 -4
  8. langroid/agent/openai_assistant.py +842 -0
  9. langroid/agent/special/__init__.py +50 -0
  10. langroid/agent/special/doc_chat_agent.py +837 -141
  11. langroid/agent/special/lance_doc_chat_agent.py +258 -0
  12. langroid/agent/special/lance_rag/__init__.py +9 -0
  13. langroid/agent/special/lance_rag/critic_agent.py +136 -0
  14. langroid/agent/special/lance_rag/lance_rag_task.py +80 -0
  15. langroid/agent/special/lance_rag/query_planner_agent.py +180 -0
  16. langroid/agent/special/lance_tools.py +44 -0
  17. langroid/agent/special/neo4j/__init__.py +0 -0
  18. langroid/agent/special/neo4j/csv_kg_chat.py +174 -0
  19. langroid/agent/special/neo4j/neo4j_chat_agent.py +370 -0
  20. langroid/agent/special/neo4j/utils/__init__.py +0 -0
  21. langroid/agent/special/neo4j/utils/system_message.py +46 -0
  22. langroid/agent/special/relevance_extractor_agent.py +127 -0
  23. langroid/agent/special/retriever_agent.py +32 -198
  24. langroid/agent/special/sql/__init__.py +11 -0
  25. langroid/agent/special/sql/sql_chat_agent.py +47 -23
  26. langroid/agent/special/sql/utils/__init__.py +22 -0
  27. langroid/agent/special/sql/utils/description_extractors.py +95 -46
  28. langroid/agent/special/sql/utils/populate_metadata.py +28 -21
  29. langroid/agent/special/table_chat_agent.py +43 -9
  30. langroid/agent/task.py +475 -122
  31. langroid/agent/tool_message.py +75 -13
  32. langroid/agent/tools/__init__.py +13 -0
  33. langroid/agent/tools/duckduckgo_search_tool.py +66 -0
  34. langroid/agent/tools/google_search_tool.py +11 -0
  35. langroid/agent/tools/metaphor_search_tool.py +67 -0
  36. langroid/agent/tools/recipient_tool.py +16 -29
  37. langroid/agent/tools/run_python_code.py +60 -0
  38. langroid/agent/tools/sciphi_search_rag_tool.py +79 -0
  39. langroid/agent/tools/segment_extract_tool.py +36 -0
  40. langroid/cachedb/__init__.py +9 -0
  41. langroid/cachedb/base.py +22 -2
  42. langroid/cachedb/momento_cachedb.py +26 -2
  43. langroid/cachedb/redis_cachedb.py +78 -11
  44. langroid/embedding_models/__init__.py +34 -0
  45. langroid/embedding_models/base.py +21 -2
  46. langroid/embedding_models/models.py +120 -18
  47. langroid/embedding_models/protoc/embeddings.proto +19 -0
  48. langroid/embedding_models/protoc/embeddings_pb2.py +33 -0
  49. langroid/embedding_models/protoc/embeddings_pb2.pyi +50 -0
  50. langroid/embedding_models/protoc/embeddings_pb2_grpc.py +79 -0
  51. langroid/embedding_models/remote_embeds.py +153 -0
  52. langroid/language_models/__init__.py +45 -0
  53. langroid/language_models/azure_openai.py +80 -27
  54. langroid/language_models/base.py +117 -12
  55. langroid/language_models/config.py +5 -0
  56. langroid/language_models/openai_assistants.py +3 -0
  57. langroid/language_models/openai_gpt.py +558 -174
  58. langroid/language_models/prompt_formatter/__init__.py +15 -0
  59. langroid/language_models/prompt_formatter/base.py +4 -6
  60. langroid/language_models/prompt_formatter/hf_formatter.py +135 -0
  61. langroid/language_models/utils.py +18 -21
  62. langroid/mytypes.py +25 -8
  63. langroid/parsing/__init__.py +46 -0
  64. langroid/parsing/document_parser.py +260 -63
  65. langroid/parsing/image_text.py +32 -0
  66. langroid/parsing/parse_json.py +143 -0
  67. langroid/parsing/parser.py +122 -59
  68. langroid/parsing/repo_loader.py +114 -52
  69. langroid/parsing/search.py +68 -63
  70. langroid/parsing/spider.py +3 -2
  71. langroid/parsing/table_loader.py +44 -0
  72. langroid/parsing/url_loader.py +59 -11
  73. langroid/parsing/urls.py +85 -37
  74. langroid/parsing/utils.py +298 -4
  75. langroid/parsing/web_search.py +73 -0
  76. langroid/prompts/__init__.py +11 -0
  77. langroid/prompts/chat-gpt4-system-prompt.md +68 -0
  78. langroid/prompts/prompts_config.py +1 -1
  79. langroid/utils/__init__.py +17 -0
  80. langroid/utils/algorithms/__init__.py +3 -0
  81. langroid/utils/algorithms/graph.py +103 -0
  82. langroid/utils/configuration.py +36 -5
  83. langroid/utils/constants.py +4 -0
  84. langroid/utils/globals.py +2 -2
  85. langroid/utils/logging.py +2 -5
  86. langroid/utils/output/__init__.py +21 -0
  87. langroid/utils/output/printing.py +47 -1
  88. langroid/utils/output/status.py +33 -0
  89. langroid/utils/pandas_utils.py +30 -0
  90. langroid/utils/pydantic_utils.py +616 -2
  91. langroid/utils/system.py +98 -0
  92. langroid/vector_store/__init__.py +40 -0
  93. langroid/vector_store/base.py +203 -6
  94. langroid/vector_store/chromadb.py +59 -32
  95. langroid/vector_store/lancedb.py +463 -0
  96. langroid/vector_store/meilisearch.py +10 -7
  97. langroid/vector_store/momento.py +262 -0
  98. langroid/vector_store/qdrantdb.py +104 -22
  99. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/METADATA +329 -149
  100. langroid-0.1.219.dist-info/RECORD +127 -0
  101. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/WHEEL +1 -1
  102. langroid/agent/special/recipient_validator_agent.py +0 -157
  103. langroid/parsing/json.py +0 -64
  104. langroid/utils/web/selenium_login.py +0 -36
  105. langroid-0.1.85.dist-info/RECORD +0 -94
  106. /langroid/{scripts → agent/callbacks}/__init__.py +0 -0
  107. {langroid-0.1.85.dist-info → langroid-0.1.219.dist-info}/LICENSE +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.1.85
+ Version: 0.1.219
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -10,74 +10,101 @@ Classifier: License :: OSI Approved :: MIT License
  Classifier: Programming Language :: Python :: 3
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Provides-Extra: chainlit
+ Provides-Extra: chromadb
  Provides-Extra: hf-embeddings
+ Provides-Extra: litellm
+ Provides-Extra: metaphor
+ Provides-Extra: mkdocs
  Provides-Extra: mysql
+ Provides-Extra: neo4j
  Provides-Extra: postgres
+ Provides-Extra: sciphi
+ Provides-Extra: transformers
+ Provides-Extra: unstructured
+ Requires-Dist: agent-search (>=0.0.7,<0.0.8) ; extra == "sciphi"
+ Requires-Dist: aiohttp (>=3.9.1,<4.0.0)
+ Requires-Dist: async-generator (>=1.10,<2.0)
  Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
- Requires-Dist: black[jupyter] (>=23.3.0,<24.0.0)
+ Requires-Dist: black[jupyter] (>=24.3.0,<25.0.0)
  Requires-Dist: bs4 (>=0.0.1,<0.0.2)
- Requires-Dist: chromadb (>=0.3.21,<0.4.0)
+ Requires-Dist: chainlit (>=1.0.400,<2.0.0) ; extra == "chainlit"
+ Requires-Dist: chromadb (>=0.4.21,<=0.4.23) ; extra == "chromadb"
  Requires-Dist: colorlog (>=6.7.0,<7.0.0)
  Requires-Dist: docstring-parser (>=0.15,<0.16)
+ Requires-Dist: duckduckgo-search (>=4.4,<5.0)
  Requires-Dist: faker (>=18.9.0,<19.0.0)
  Requires-Dist: fakeredis (>=2.12.1,<3.0.0)
- Requires-Dist: farm-haystack[file-conversion,ocr,pdf,preprocessing] (>=1.21.1,<2.0.0)
  Requires-Dist: fire (>=0.5.0,<0.6.0)
  Requires-Dist: flake8 (>=6.0.0,<7.0.0)
  Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
+ Requires-Dist: grpcio (>=1.62.1,<2.0.0)
  Requires-Dist: halo (>=0.0.31,<0.0.32)
+ Requires-Dist: huggingface-hub (>=0.21.2,<0.22.0) ; extra == "transformers"
  Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
- Requires-Dist: litellm (>=0.1.821,<0.2.0)
+ Requires-Dist: lancedb (>=0.6.2,<0.7.0)
+ Requires-Dist: litellm (>=1.30.1,<2.0.0) ; extra == "litellm"
  Requires-Dist: lxml (>=4.9.3,<5.0.0)
  Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
- Requires-Dist: meilisearch-python-sdk (>=2.0.1,<3.0.0)
- Requires-Dist: mkdocs (>=1.4.2,<2.0.0)
- Requires-Dist: mkdocs-awesome-pages-plugin (>=2.8.0,<3.0.0)
- Requires-Dist: mkdocs-gen-files (>=0.4.0,<0.5.0)
- Requires-Dist: mkdocs-jupyter (>=0.24.1,<0.25.0)
- Requires-Dist: mkdocs-literate-nav (>=0.6.0,<0.7.0)
- Requires-Dist: mkdocs-material (>=9.1.5,<10.0.0)
- Requires-Dist: mkdocs-rss-plugin (>=1.8.0,<2.0.0)
- Requires-Dist: mkdocs-section-index (>=0.3.5,<0.4.0)
- Requires-Dist: mkdocstrings[python] (>=0.21.2,<0.22.0)
- Requires-Dist: momento (>=1.7.0,<2.0.0)
- Requires-Dist: mypy (>=1.2.0,<2.0.0)
+ Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
+ Requires-Dist: metaphor-python (>=0.1.23,<0.2.0) ; extra == "metaphor"
+ Requires-Dist: mkdocs (>=1.4.2,<2.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-awesome-pages-plugin (>=2.8.0,<3.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-gen-files (>=0.4.0,<0.5.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-jupyter (>=0.24.1,<0.25.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-literate-nav (>=0.6.0,<0.7.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-material (>=9.1.5,<10.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-rss-plugin (>=1.8.0,<2.0.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocs-section-index (>=0.3.5,<0.4.0) ; extra == "mkdocs"
+ Requires-Dist: mkdocstrings[python] (>=0.21.2,<0.22.0) ; extra == "mkdocs"
+ Requires-Dist: momento (>=1.10.2,<2.0.0)
+ Requires-Dist: mypy (>=1.7.0,<2.0.0)
+ Requires-Dist: neo4j (>=5.14.1,<6.0.0) ; extra == "neo4j"
  Requires-Dist: nltk (>=3.8.1,<4.0.0)
- Requires-Dist: openai (>=0.27.5,<0.28.0)
+ Requires-Dist: onnxruntime (==1.16.1)
+ Requires-Dist: openai (>=1.14.0,<2.0.0)
  Requires-Dist: pandas (>=2.0.3,<3.0.0)
+ Requires-Dist: pdf2image (>=1.17.0,<2.0.0)
  Requires-Dist: pdfplumber (>=0.10.2,<0.11.0)
  Requires-Dist: pre-commit (>=3.3.2,<4.0.0)
  Requires-Dist: prettytable (>=3.8.0,<4.0.0)
  Requires-Dist: psycopg2 (>=2.9.7,<3.0.0) ; extra == "postgres"
- Requires-Dist: pydantic (==1.10.11)
+ Requires-Dist: pyarrow (==15.0.0)
+ Requires-Dist: pydantic (==1.10.13)
  Requires-Dist: pygithub (>=1.58.1,<2.0.0)
  Requires-Dist: pygments (>=2.15.1,<3.0.0)
  Requires-Dist: pymupdf (>=1.23.3,<2.0.0)
  Requires-Dist: pymysql (>=1.1.0,<2.0.0) ; extra == "mysql"
  Requires-Dist: pyparsing (>=3.0.9,<4.0.0)
  Requires-Dist: pypdf (>=3.12.2,<4.0.0)
+ Requires-Dist: pytesseract (>=0.3.10,<0.4.0)
  Requires-Dist: pytest-asyncio (>=0.21.1,<0.22.0)
  Requires-Dist: pytest-mysql (>=2.4.2,<3.0.0) ; extra == "mysql"
  Requires-Dist: pytest-postgresql (>=5.0.0,<6.0.0) ; extra == "postgres"
+ Requires-Dist: pytest-redis (>=3.0.2,<4.0.0)
+ Requires-Dist: python-docx (>=1.1.0,<2.0.0)
  Requires-Dist: python-dotenv (>=1.0.0,<2.0.0)
- Requires-Dist: qdrant-client (>=1.3.1,<2.0.0)
+ Requires-Dist: python-socketio (>=5.11.0,<6.0.0) ; extra == "chainlit"
+ Requires-Dist: qdrant-client (>=1.8.0,<2.0.0)
  Requires-Dist: rank-bm25 (>=0.2.2,<0.3.0)
- Requires-Dist: redis (>=4.5.5,<5.0.0)
+ Requires-Dist: redis (>=5.0.1,<6.0.0)
  Requires-Dist: requests (>=2.31.0,<3.0.0)
  Requires-Dist: requests-oauthlib (>=1.3.1,<2.0.0)
  Requires-Dist: rich (>=13.3.4,<14.0.0)
- Requires-Dist: ruff (>=0.0.270,<0.0.271)
+ Requires-Dist: ruff (>=0.2.2,<0.3.0)
  Requires-Dist: scrapy (>=2.11.0,<3.0.0)
  Requires-Dist: sentence-transformers (==2.2.2) ; extra == "hf-embeddings"
  Requires-Dist: sqlalchemy (>=2.0.19,<3.0.0)
+ Requires-Dist: tantivy (>=0.21.0,<0.22.0)
  Requires-Dist: thefuzz (>=0.20.0,<0.21.0)
  Requires-Dist: tiktoken (>=0.5.1,<0.6.0)
  Requires-Dist: torch (==2.0.0) ; extra == "hf-embeddings"
  Requires-Dist: trafilatura (>=1.5.0,<2.0.0)
  Requires-Dist: typer (>=0.9.0,<0.10.0)
+ Requires-Dist: types-pyyaml (>=6.0.12.20240311,<7.0.0.0)
  Requires-Dist: types-redis (>=4.5.5.2,<5.0.0.0)
  Requires-Dist: types-requests (>=2.31.0.1,<3.0.0.0)
- Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18)
+ Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18) ; extra == "unstructured"
  Requires-Dist: wget (>=3.2,<4.0)
  Description-Content-Type: text/markdown

@@ -91,25 +118,12 @@ Description-Content-Type: text/markdown
  [![PyPI - Version](https://img.shields.io/pypi/v/langroid)](https://pypi.org/project/langroid/)
  [![Pytest](https://github.com/langroid/langroid/actions/workflows/pytest.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/pytest.yml)
  [![codecov](https://codecov.io/gh/langroid/langroid/branch/main/graph/badge.svg?token=H94BX5F0TE)](https://codecov.io/gh/langroid/langroid)
- [![Lint](https://github.com/langroid/langroid/actions/workflows/validate.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/validate.yml)
- [![Docs](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/mkdocs-deploy.yml)
-
- [![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)
- [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/ZU36McDgDs)
- [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/langroid_quick_examples.ipynb)
-
- [![Docker Pulls](https://img.shields.io/docker/pulls/langroid/langroid.svg)](https://hub.docker.com/r/langroid/langroid)
- ![Docker Image Size (tag)](https://img.shields.io/docker/image-size/langroid/langroid/latest)
  [![Multi-Architecture DockerHub](https://github.com/langroid/langroid/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/langroid/langroid/actions/workflows/docker-publish.yml)

- [![Substack](https://img.shields.io/badge/Substack-%23006f5c.svg?style=for-the-badge&logo=substack&logoColor=FF6719)](https://langroid.substack.com/p/langroid-harness-llms-with-multi-agent-programming)
-
- [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&t=Harness%20LLMs%20with%20Multi-Agent%20Programming)
- [![Share on Reddit](https://img.shields.io/badge/Reddit-FF4500?style=for-the-badge&logo=reddit&logoColor=white)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&title=Harness%20LLMs%20with%20Multi-Agent%20Programming)
- [![Share on Twitter](https://img.shields.io/twitter/url?style=social&url=https://github.com/langroid/langroid)](https://twitter.com/intent/tweet?text=Langroid%20is%20a%20powerful,%20elegant%20new%20framework%20to%20easily%20build%20%23LLM%20applications.%20You%20set%20up%20LLM-powered%20Agents%20with%20vector-stores,%20assign%20tasks,%20and%20have%20them%20collaboratively%20solve%20problems%20via%20message-transformations.%20https://github.com/langroid/langroid)
- [![LinkedIn](https://img.shields.io/badge/linkedin-%230077B5.svg?style=for-the-badge&logo=linkedin&logoColor=white)](https://www.linkedin.com/shareArticle?mini=true&url=https://github.com/langroid/langroid&title=Langroid:%20A%20Powerful,%20Elegant%20Framework&summary=Langroid%20is%20a%20powerful,%20elegant%20new%20framework%20to%20easily%20build%20%23LLM%20applications.%20You%20set%20up%20LLM-powered%20Agents%20with%20vector-stores,%20assign%20tasks,%20and%20have%20them%20collaboratively%20solve%20problems%20via%20message-transformations.)
-
-
+ [![Static Badge](https://img.shields.io/badge/Documentation-blue?link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F&link=https%3A%2F%2Flangroid.github.io%2Flangroid%2F)](https://langroid.github.io/langroid)
+ [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)
+ [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?style=flat&logo=discord&logoColor=white)](https://discord.gg/ZU36McDgDs)
+ [![Substack](https://img.shields.io/badge/Substack-%23006f5c.svg?style=flat&logo=substack&logoColor=FF6719)](https://langroid.substack.com/p/langroid-harness-llms-with-multi-agent-programming)
  </div>

  <h3 align="center">
@@ -130,9 +144,9 @@ Description-Content-Type: text/markdown
  </h3>

  `Langroid` is an intuitive, lightweight, extensible and principled
- Python framework to easily build LLM-powered applications.
+ Python framework to easily build LLM-powered applications, from ex-CMU and UW-Madison researchers.
  You set up Agents, equip them with optional components (LLM,
- vector-store and methods), assign them tasks, and have them
+ vector-store and tools/functions), assign them tasks, and have them
  collaboratively solve a problem by exchanging messages.
  This Multi-Agent paradigm is inspired by the
  [Actor Framework](https://en.wikipedia.org/wiki/Actor_model)
@@ -141,15 +155,179 @@ This Multi-Agent paradigm is inspired by the
  `Langroid` is a fresh take on LLM app-development, where considerable thought has gone
  into simplifying the developer experience; it does not use `Langchain`.

+ :fire: See this [Intro to Langroid](https://lancedb.substack.com/p/langoid-multi-agent-programming-framework)
+ blog post from the LanceDB team.
+
+
  We welcome contributions -- See the [contributions](./CONTRIBUTING.md) document
  for ideas on what to contribute.

+ Are you building LLM Applications, or want help with Langroid for your company,
+ or want to prioritize Langroid features for your company use-cases?
+ [Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) is available for consulting
+ (advisory/development): pchalasani at gmail dot com.
+
+ Sponsorship is also accepted via [GitHub Sponsors](https://github.com/sponsors/langroid)
+
  **Questions, Feedback, Ideas? Join us on [Discord](https://discord.gg/ZU36McDgDs)!**

- <details>
- <summary> <b>:fire: Updates/Releases</b></summary>
+ # Quick glimpse of coding with Langroid
+ This is just a teaser; there's much more, like function-calling/tools,
+ Multi-Agent Collaboration, Structured Information Extraction, DocChatAgent
+ (RAG), SQLChatAgent, non-OpenAI local/remote LLMs, etc. Scroll down or see docs for more.
+ See the Langroid Quick-Start [Colab](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)
+ that builds up to a 2-agent information-extraction example using the OpenAI ChatCompletion API.
+ See also this [version](https://colab.research.google.com/drive/190Tk7t4AdY1P9F_NlZ33-YEoGnHweQQ0) that uses the OpenAI Assistants API instead.

+ :fire: just released! [Example](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat-multi-extract-local.py)
+ script showing how you can use Langroid multi-agents and tools
+ to extract structured information from a document using **only a local LLM**
+ (Mistral-7b-instruct-v0.2).
+
+ ```python
+ import langroid as lr
+ import langroid.language_models as lm
+
+ # set up LLM
+ llm_cfg = lm.OpenAIGPTConfig( # or OpenAIAssistant to use Assistant API
+     # any model served via an OpenAI-compatible API
+     chat_model=lm.OpenAIChatModel.GPT4_TURBO, # or, e.g., "ollama/mistral"
+ )
+ # use LLM directly
+ mdl = lm.OpenAIGPT(llm_cfg)
+ response = mdl.chat("What is the capital of Ontario?", max_tokens=10)
+
+ # use LLM in an Agent
+ agent_cfg = lr.ChatAgentConfig(llm=llm_cfg)
+ agent = lr.ChatAgent(agent_cfg)
+ agent.llm_response("What is the capital of China?")
+ response = agent.llm_response("And India?") # maintains conversation state
+
+ # wrap Agent in a Task to run interactive loop with user (or other agents)
+ task = lr.Task(agent, name="Bot", system_message="You are a helpful assistant")
+ task.run("Hello") # kick off with user saying "Hello"
+
+ # 2-Agent chat loop: Teacher Agent asks questions to Student Agent
+ teacher_agent = lr.ChatAgent(agent_cfg)
+ teacher_task = lr.Task(
+     teacher_agent, name="Teacher",
+     system_message="""
+     Ask your student concise numbers questions, and give feedback.
+     Start with a question.
+     """
+ )
+ student_agent = lr.ChatAgent(agent_cfg)
+ student_task = lr.Task(
+     student_agent, name="Student",
+     system_message="Concisely answer the teacher's questions.",
+     single_round=True,
+ )
+
+ teacher_task.add_sub_task(student_task)
+ teacher_task.run()
+ ```
+
+ # :fire: Updates/Releases
+
+ <details>
+ <summary> <b>Click to expand</b></summary>
+
+ - **Mar 2024:**
+   - **0.1.216:** Improvements to allow concurrent runs of `DocChatAgent`, see the
+     [`test_doc_chat_agent.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_doc_chat_agent.py),
+     in particular the `test_doc_chat_batch()`;
+     New task run utility: [`run_batch_task_gen`](https://github.com/langroid/langroid/blob/main/langroid/agent/batch.py)
+     where a task generator can be specified, to generate one task per input.
+   - **0.1.212:** ImagePdfParser: support for extracting text from image-based PDFs.
+     (this means `DocChatAgent` will now work with image-pdfs).
+   - **0.1.194 - 0.1.211:** Misc fixes, improvements, and features:
+     - Big enhancement in RAG performance (mainly, recall) due to a [fix in Relevance
+       Extractor](https://github.com/langroid/langroid/releases/tag/0.1.209)
+     - `DocChatAgent` [context-window fixes](https://github.com/langroid/langroid/releases/tag/0.1.208)
+     - Anthropic/Claude3 support via Litellm
+     - `URLLoader`: detect file type from header when URL doesn't end with a
+       recognizable suffix like `.pdf`, `.docx`, etc.
+     - Misc lancedb integration fixes
+     - Auto-select embedding config based on whether `sentence_transformer` module is available.
+     - Slim down dependencies, make some heavy ones optional, e.g. `unstructured`,
+       `haystack`, `chromadb`, `mkdocs`, `huggingface-hub`, `sentence-transformers`.
+     - Easier top-level imports from `import langroid as lr`
+     - Improve JSON detection, esp from weak LLMs
+ - **Feb 2024:**
+   - **0.1.193:** Support local LLMs using Ollama's new OpenAI-Compatible server:
+     simply specify `chat_model="ollama/mistral"`. See [release notes](https://github.com/langroid/langroid/releases/tag/0.1.193).
+   - **0.1.183:** Added Chainlit support via [callbacks](https://github.com/langroid/langroid/blob/main/langroid/agent/callbacks/chainlit.py).
+     See [examples](https://github.com/langroid/langroid/tree/main/examples/chainlit).
+ - **Jan 2024:**
+   - **0.1.175**
+     - [Neo4jChatAgent](https://github.com/langroid/langroid/tree/main/langroid/agent/special/neo4j) to chat with a neo4j knowledge-graph.
+       (Thanks to [Mohannad](https://github.com/Mohannadcse)!). The agent uses tools to query the Neo4j schema and translate user queries to Cypher queries,
+       and the tool handler executes these queries, returning them to the LLM to compose
+       a natural language response (analogous to how `SQLChatAgent` works).
+       See example [script](https://github.com/langroid/langroid/tree/main/examples/kg-chat) using this Agent to answer questions about Python pkg dependencies.
+     - Support for `.doc` file parsing (in addition to `.docx`)
+     - Specify optional [`formatter` param](https://github.com/langroid/langroid/releases/tag/0.1.171)
+       in `OpenAIGPTConfig` to ensure accurate chat formatting for local LLMs.
+   - **[0.1.157](https://github.com/langroid/langroid/releases/tag/0.1.157):** `DocChatAgentConfig`
+     has a new param: `add_fields_to_content`, to specify additional document fields to insert into
+     the main `content` field, to help improve retrieval.
+   - **[0.1.156](https://github.com/langroid/langroid/releases/tag/0.1.156):** New Task control signals
+     PASS_TO, SEND_TO; VectorStore: Compute Pandas expression on documents; LanceRAGTaskCreator creates 3-agent RAG system with Query Planner, Critic and RAG Agent.
+ - **Dec 2023:**
+   - **0.1.154:** (For details see release notes of [0.1.149](https://github.com/langroid/langroid/releases/tag/0.1.149)
+     and [0.1.154](https://github.com/langroid/langroid/releases/tag/0.1.154)).
+     - `DocChatAgent`: Ingest Pandas dataframes and filtering.
+     - `LanceDocChatAgent` leverages `LanceDB` vector-db for efficient vector search
+       and full-text search and filtering.
+     - Improved task and multi-agent control mechanisms
+     - `LanceRAGTaskCreator` to create a 2-agent system consisting of a `LanceFilterAgent` that
+       decides a filter and rephrase query to send to a RAG agent.
+   - **[0.1.141](https://github.com/langroid/langroid/releases/tag/0.1.141):**
+     API Simplifications to reduce boilerplate:
+     auto-select an available OpenAI model (preferring gpt-4-turbo), simplifies defaults.
+     Simpler `Task` initialization with default `ChatAgent`.
+ - **Nov 2023:**
+   - **[0.1.126](https://github.com/langroid/langroid/releases/tag/0.1.126):**
+     OpenAIAssistant agent: Caching Support.
+   - **0.1.117:** Support for OpenAI Assistant API tools: Function-calling,
+     Code-interpreter, and Retriever (RAG), file uploads. These work seamlessly
+     with Langroid's task-orchestration.
+     Until docs are ready, it's best to see these usage examples:
+
+     - **Tests:**
+       - [test_openai_assistant.py](https://github.com/langroid/langroid/blob/main/tests/main/test_openai_assistant.py)
+       - [test_openai_assistant_async.py](https://github.com/langroid/langroid/blob/main/tests/main/test_openai_assistant_async.py)
+
+     - **Example scripts:**
+       - [The most basic chat app](https://github.com/langroid/langroid/blob/main/examples/basic/oai-asst-chat.py)
+       - [Chat with code interpreter](https://github.com/langroid/langroid/blob/main/examples/basic/oai-code-chat.py)
+       - [Chat with retrieval (RAG)](https://github.com/langroid/langroid/blob/main/examples/docqa/oai-retrieval-assistant.py)
+       - [2-agent RAG chat](https://github.com/langroid/langroid/blob/main/examples/docqa/oai-retrieval-2.py)
+   - **0.1.112:** [`OpenAIAssistant`](https://github.com/langroid/langroid/blob/main/langroid/agent/openai_assistant.py) is a subclass of `ChatAgent` that
+     leverages the new OpenAI Assistant API. It can be used as a drop-in
+     replacement for `ChatAgent`, and relies on the Assistant API to
+     maintain conversation state, and leverages persistent threads and
+     assistants to reconnect to them if needed. Examples:
+     [`test_openai_assistant.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_openai_assistant.py),
+     [`test_openai_assistant_async.py`](https://github.com/langroid/langroid/blob/main/tests/main/test_openai_assistant_async.py)
+   - **0.1.111:** Support latest OpenAI model: `GPT4_TURBO`
+     (see [test_llm.py](tests/main/test_llm.py) for example usage)
+   - **0.1.110:** Upgrade from OpenAI v0.x to v1.1.1 (in preparation for
+     Assistants API and more); (`litellm` temporarily disabled due to OpenAI
+     version conflict).
  - **Oct 2023:**
+   - **0.1.107:** `DocChatAgent` re-rankers: `rank_with_diversity`, `rank_to_periphery` (lost in middle).
+   - **0.1.102:** `DocChatAgentConfig.n_neighbor_chunks > 0` allows returning context chunks around match.
+   - **0.1.101:** `DocChatAgent` uses `RelevanceExtractorAgent` to have
+     the LLM extract relevant portions of a chunk using
+     sentence-numbering, resulting in huge speed up and cost reduction
+     compared to the naive "sentence-parroting" approach (writing out
+     relevant whole sentences in full) which `LangChain` uses in their
+     `LLMChainExtractor`.
+   - **0.1.100:** API update: all of Langroid is accessible with a single import, i.e. `import langroid as lr`. See the [documentation](https://langroid.github.io/langroid/) for usage.
+   - **0.1.99:** Convenience batch functions to run tasks, agent methods on a list of inputs concurrently in async mode. See examples in [test_batch.py](https://github.com/langroid/langroid/blob/main/tests/main/test_batch.py).
+   - **0.1.95:** Added support for [Momento Serverless Vector Index](https://docs.momentohq.com/vector-index)
+   - **0.1.94:** Added support for [LanceDB](https://lancedb.github.io/lancedb/) vector-store -- allows vector, Full-text, SQL search.
    - **0.1.84:** Added [LiteLLM](https://docs.litellm.ai/docs/providers), so now Langroid can be used with over 100 LLM providers (remote or local)!
      See guide [here](https://langroid.github.io/langroid/tutorials/non-openai-llms/).
  - **Sep 2023:**
@@ -172,7 +350,7 @@ See [this test](tests/main/test_recipient_tool.py) for example usage.
  - **Example:** [Answer questions](examples/docqa/chat-search.py) using Google Search + vecdb-retrieval from URL contents.
  - **0.1.39:** [`GoogleSearchTool`](langroid/agent/tools/google_search_tool.py) to enable Agents (their LLM) to do Google searches via function-calling/tools.
    See [this chat example](examples/basic/chat-search.py) for how easy it is to add this tool to an agent.
- - **Colab notebook** to try the quick-start examples: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/langroid_quick_examples.ipynb)
+ - **Colab notebook** to try the quick-start examples: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)
  - **0.1.37:** Added [`SQLChatAgent`](langroid/agent/special/sql_chat_agent.py) -- thanks to our latest contributor [Rithwik Babu](https://github.com/rithwikbabu)!
  - Multi-agent Example: [Autocorrect chat](examples/basic/autocorrect.py)
  - **July 2023:**
@@ -191,6 +369,8 @@ See [this test](tests/main/test_recipient_tool.py) for example usage.
  Suppose you want to extract structured information about the key terms
  of a commercial lease document. You can easily do this with Langroid using a two-agent system,
  as we show in the [langroid-examples](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat_multi_extract.py) repo.
+ (See [this script](https://github.com/langroid/langroid-examples/blob/main/examples/docqa/chat-multi-extract-local.py)
+ for a version with the same functionality using a local Mistral-7b model.)
  The demo showcases just a few of the many features of Langroid, such as:
  - Multi-agent collaboration: `LeaseExtractor` is in charge of the task, and its LLM (GPT4) generates questions
  to be answered by the `DocAgent`.
@@ -207,7 +387,9 @@ Here is what it looks like in action


  # :zap: Highlights
-
+ (For a more up-to-date list see the
+ [release](https://github.com/langroid/langroid?tab=readme-ov-file#fire-updatesreleases)
+ section above)
  - **Agents as first-class citizens:** The [Agent](https://langroid.github.io/langroid/reference/agent/base/#langroid.agent.base.Agent) class encapsulates LLM conversation state,
  and optionally a vector-store and tools. Agents are a core abstraction in Langroid;
  Agents act as _message transformers_, and by default provide 3 _responder_ methods, one corresponding to each entity: LLM, Agent, User.
@@ -221,11 +403,12 @@ Here is what it looks like in action
  after the agent's own responders.
  - **Modularity, Reusability, Loose coupling:** The `Agent` and `Task` abstractions allow users to design
  Agents with specific skills, wrap them in Tasks, and combine tasks in a flexible way.
- - **LLM Support**: Langroid supports OpenAI LLMs including GPT-3.5-Turbo,
- GPT-4.
+ - **LLM Support**: Langroid supports OpenAI LLMs as well as LLMs from hundreds of
+ providers (local/open or remote/commercial) via proxy libraries and local model servers
+ such as [LiteLLM](https://docs.litellm.ai/docs/providers) that in effect mimic the OpenAI API.
  - **Caching of LLM responses:** Langroid supports [Redis](https://redis.com/try-free/) and
  [Momento](https://www.gomomento.com/) to cache LLM responses.
- - **Vector-stores**: [Qdrant](https://qdrant.tech/) and [Chroma](https://www.trychroma.com/) are currently supported.
+ - **Vector-stores**: [LanceDB](https://github.com/lancedb/lancedb), [Qdrant](https://qdrant.tech/), [Chroma](https://www.trychroma.com/) are currently supported.
  Vector stores allow for Retrieval-Augmented-Generation (RAG).
  - **Grounding and source-citation:** Access to external documents via vector-stores
  allows for grounding and source-citation.
@@ -260,6 +443,15 @@ install Langroid like this:
  ```bash
  pip install langroid[hf-embeddings]
  ```
+ If using `zsh` (or similar shells), you may need to escape the square brackets, e.g.:
+ ```
+ pip install langroid\[hf-embeddings\]
+ ```
+ or use quotes:
+ ```
+ pip install "langroid[hf-embeddings]"
+ ```
+

  <details>
  <summary><b>Optional Installs for using SQL Chat with a PostgreSQL DB </b></summary>
@@ -288,9 +480,11 @@ In the root of the repo, copy the `.env-template` file to a new file `.env`:
  cp .env-template .env
  ```
  Then insert your OpenAI API Key.
- Your `.env` file should look like this:
+ Your `.env` file should look like this (the organization is optional
+ but may be required in some scenarios):
  ```bash
  OPENAI_API_KEY=your-key-here-without-quotes
+ OPENAI_ORGANIZATION=optionally-your-organization-id
  ```

  Alternatively, you can set this as an environment variable in your shell
@@ -307,11 +501,10 @@ All of the following environment variable settings are optional, and some are on
  to use specific features (as noted below).

  - **Qdrant** Vector Store API Key, URL. This is only required if you want to use Qdrant cloud.
- You can sign up for a free 1GB account at [Qdrant cloud](https://cloud.qdrant.io).
- If you skip setting up these, Langroid will use Qdrant in local-storage mode.
+ The default vector store in our RAG agent (`DocChatAgent`) is LanceDB which uses file storage,
+ and you do not need to set up any environment variables for that.
  Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
  We use the local-storage version of Chroma, so there is no need for an API key.
- Langroid uses Qdrant by default.
  - **Redis** Password, host, port: This is optional, and only needed to cache LLM API responses
  using Redis Cloud. Redis [offers](https://redis.com/try-free/) a free 30MB Redis account
  which is more than sufficient to try out Langroid and even beyond.
@@ -362,12 +555,12 @@ When using Azure OpenAI, additional environment variables are required in the
  This page [Microsoft Azure OpenAI](https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart?tabs=command-line&pivots=programming-language-python#environment-variables)
  provides more information, and you can set each environment variable as follows:

- - `AZURE_API_KEY`, from the value of `API_KEY`
+ - `AZURE_OPENAI_API_KEY`, from the value of `API_KEY`
  - `AZURE_OPENAI_API_BASE` from the value of `ENDPOINT`, typically looks like `https://your.domain.azure.com`.
  - For `AZURE_OPENAI_API_VERSION`, you can use the default value in `.env-template`, and latest version can be found [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new#azure-openai-chat-completion-general-availability-ga)
  - `AZURE_OPENAI_DEPLOYMENT_NAME` is the name of the deployed model, which is defined by the user during the model setup
- - `AZURE_GPT_MODEL_NAME` GPT-3.5-Turbo or GPT-4 model names that you chose when you setup your Azure OpenAI account.
-
+ - `AZURE_OPENAI_MODEL_NAME` Azure OpenAI allows specific model names when you select the model for your deployment. You need to put precisely the exact model name that was selected. For example, GPT-3.5 (should be `gpt-35-turbo-16k` or `gpt-35-turbo`) or GPT-4 (should be `gpt-4-32k` or `gpt-4`).
+ - `AZURE_OPENAI_MODEL_VERSION` is required if `AZURE_OPENAI_MODEL_NAME = gpt-4`, which will assist Langroid in determining the cost of the model
  </details>
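As a quick sanity check before running any scripts, a minimal sketch (assuming the settings live in a `.env` file and `python-dotenv` is installed, as it is with Langroid's dependencies) that fails fast if any of the variables listed above are missing:

```python
import os
from dotenv import load_dotenv

load_dotenv()  # read the .env file in the current directory

# AZURE_OPENAI_MODEL_VERSION is only needed for gpt-4 deployments, so it is not checked here
required = [
    "AZURE_OPENAI_API_KEY",
    "AZURE_OPENAI_API_BASE",
    "AZURE_OPENAI_API_VERSION",
    "AZURE_OPENAI_DEPLOYMENT_NAME",
    "AZURE_OPENAI_MODEL_NAME",
]
missing = [name for name in required if not os.getenv(name)]
if missing:
    raise RuntimeError(f"Missing Azure OpenAI settings: {', '.join(missing)}")
```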

  ---
@@ -381,14 +574,13 @@ Please follow these steps to setup the container:

  ```bash
  # get the .env file template from `langroid` repo
- wget https://github.com/langroid/langroid/blob/main/.env-template .env
+ wget -O .env https://raw.githubusercontent.com/langroid/langroid/main/.env-template

- # Edit the .env file with your favorite editor (here nano),
- # and add API keys as explained above
+ # Edit the .env file with your favorite editor (here nano), and remove any un-used settings. E.g. there are "dummy" values like "your-redis-port" etc -- if you are not using them, you MUST remove them.
  nano .env

  # launch the container
- docker run -it -v ./.env:/.env langroid/langroid
+ docker run -it --rm -v ./.env:/langroid/.env langroid/langroid

  # Use this command to run any of the scripts in the `examples` directory
  python examples/<Path/To/Example.py>
@@ -421,49 +613,70 @@ for a detailed tutorial.

  Click to expand any of the code examples below.
  All of these can be run in a Colab notebook:
- [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/langroid_quick_examples.ipynb)
+ [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/langroid/langroid/blob/main/examples/Langroid_quick_start.ipynb)

  <details>
  <summary> <b> Direct interaction with OpenAI LLM </b> </summary>

  ```python
- from langroid.language_models.openai_gpt import (
-     OpenAIGPTConfig, OpenAIChatModel, OpenAIGPT,
- )
- from langroid.language_models.base import LLMMessage, Role
-
- cfg = OpenAIGPTConfig(chat_model=OpenAIChatModel.GPT4)
+ import langroid.language_models as lm

- mdl = OpenAIGPT(cfg)
+ mdl = lm.OpenAIGPT()

  messages = [
-     LLMMessage(content="You are a helpful assistant", role=Role.SYSTEM),
-     LLMMessage(content="What is the capital of Ontario?", role=Role.USER),
+     lm.LLMMessage(content="You are a helpful assistant", role=lm.Role.SYSTEM),
+     lm.LLMMessage(content="What is the capital of Ontario?", role=lm.Role.USER),
  ]
+
  response = mdl.chat(messages, max_tokens=200)
  print(response.message)
  ```
  </details>

  <details>
- <summary> <b> Define an agent, set up a task, and run it </b> </summary>
+ <summary> <b> Interaction with non-OpenAI LLM (local or remote) </b> </summary>
+ Local model: if the model is served at `http://localhost:8000`:

  ```python
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- from langroid.agent.task import Task
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
-
- config = ChatAgentConfig(
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb=None, # no vector store
+ cfg = lm.OpenAIGPTConfig(
+     chat_model="local/localhost:8000",
+     chat_context_length=4096
  )
- agent = ChatAgent(config)
+ mdl = lm.OpenAIGPT(cfg)
+ # now interact with it as above, or create an Agent + Task as shown below.
+ ```
+
+ If the model is [supported by `liteLLM`](https://docs.litellm.ai/docs/providers),
+ there is no need to launch the proxy server.
+ Just set the `chat_model` param above to `litellm/[provider]/[model]`, e.g.
+ `litellm/anthropic/claude-instant-1`, and use the config object as above.
+ For remote models, you will typically need to set API keys etc. as environment variables,
+ following the LiteLLM docs.
+ If any required environment variables are missing, Langroid gives a helpful error
+ message indicating which ones are needed.
+ Note that to use `litellm` you need to install langroid with the `litellm` extra:
+ either `pip install langroid[litellm]` in your virtual env, or, if you are developing
+ within the `langroid` repo, `poetry install -E litellm`.
+ ```bash
+ pip install langroid[litellm]
+ ```
+ </details>
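For instance, a minimal sketch of pointing the same config at a LiteLLM-supported provider (assuming the `litellm` extra is installed and the provider's API key, e.g. `ANTHROPIC_API_KEY`, is set in the environment):

```python
import langroid.language_models as lm

# "litellm/[provider]/[model]" routes the request through LiteLLM
cfg = lm.OpenAIGPTConfig(
    chat_model="litellm/anthropic/claude-instant-1",
    chat_context_length=8000,  # adjust to the model's actual context length
)
mdl = lm.OpenAIGPT(cfg)
response = mdl.chat("What is the capital of Ontario?", max_tokens=50)
print(response.message)
```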
+
+ <details>
+ <summary> <b> Define an agent, set up a task, and run it </b> </summary>
+
+ ```python
+ import langroid as lr
+
+ agent = lr.ChatAgent()
+
  # get response from agent's LLM, and put this in an interactive loop...
  # answer = agent.llm_response("What is the capital of Ontario?")
  # ... OR instead, set up a task (which has a built-in loop) and run it
- task = Task(agent, name="Bot")
+ task = lr.Task(agent, name="Bot")
  task.run() # ... a loop seeking response from LLM or User at each turn
  ```
  </details>
@@ -472,26 +685,17 @@ task.run() # ... a loop seeking response from LLM or User at each turn
  <summary><b> Three communicating agents </b></summary>

  A toy numbers game, where when given a number `n`:
- - `repeater_agent`'s LLM simply returns `n`,
- - `even_agent`'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"
- - `odd_agent`'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"
+ - `repeater_task`'s LLM simply returns `n`,
+ - `even_task`'s LLM returns `n/2` if `n` is even, else says "DO-NOT-KNOW"
+ - `odd_task`'s LLM returns `3*n+1` if `n` is odd, else says "DO-NOT-KNOW"

- First define the 3 agents, and set up their tasks with instructions:
+ Each of these `Task`s automatically configures a default `ChatAgent`.

  ```python
+ import langroid as lr
  from langroid.utils.constants import NO_ANSWER
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- from langroid.agent.task import Task
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
- config = ChatAgentConfig(
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb = None,
- )
- repeater_agent = ChatAgent(config)
- repeater_task = Task(
-     repeater_agent,
+
+ repeater_task = lr.Task(
      name = "Repeater",
      system_message="""
      Your job is to repeat whatever number you receive.
@@ -499,9 +703,8 @@ repeater_task = Task(
      llm_delegate=True, # LLM takes charge of task
      single_round=False,
  )
- even_agent = ChatAgent(config)
- even_task = Task(
-     even_agent,
+
+ even_task = lr.Task(
      name = "EvenHandler",
      system_message=f"""
      You will be given a number.
@@ -511,9 +714,7 @@ even_task = Task(
      single_round=True, # task done after 1 step() with valid response
  )

- odd_agent = ChatAgent(config)
- odd_task = Task(
-     odd_agent,
+ odd_task = lr.Task(
      name = "OddHandler",
      system_message=f"""
      You will be given a number n.
@@ -539,7 +740,7 @@ Langroid leverages Pydantic to support OpenAI's
  [Function-calling API](https://platform.openai.com/docs/guides/gpt/function-calling)
  as well as its own native tools. The benefits are that you don't have to write
  any JSON to specify the schema, and also if the LLM hallucinates a malformed
- tool syntax, Langroid sends the Pydantic validation error (suitiably sanitized)
+ tool syntax, Langroid sends the Pydantic validation error (suitably sanitized)
  to the LLM so it can fix it!

  Simple example: Say the agent has a secret list of numbers,
@@ -552,8 +753,9 @@ First define the tool using Langroid's `ToolMessage` class:


  ```python
- from langroid.agent.tool_message import ToolMessage
- class ProbeTool(ToolMessage):
+ import langroid as lr
+
+ class ProbeTool(lr.agent.ToolMessage):
      request: str = "probe" # specifies which agent method handles this tool
      purpose: str = """
      To find how many numbers in my list are less than or equal to
@@ -566,9 +768,8 @@ Then define a `SpyGameAgent` as a subclass of `ChatAgent`,
  with a method `probe` that handles this tool:

  ```python
- from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
- class SpyGameAgent(ChatAgent):
-     def __init__(self, config: ChatAgentConfig):
+ class SpyGameAgent(lr.ChatAgent):
+     def __init__(self, config: lr.ChatAgentConfig):
          super().__init__(config)
          self.numbers = [3, 4, 8, 11, 15, 25, 40, 80, 90]

@@ -580,13 +781,9 @@ class SpyGameAgent(ChatAgent):
  We then instantiate the agent and enable it to use and respond to the tool:

  ```python
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
  spy_game_agent = SpyGameAgent(
-     ChatAgentConfig(
+     lr.ChatAgentConfig(
          name="Spy",
-         llm = OpenAIGPTConfig(
-             chat_model=OpenAIChatModel.GPT4,
-         ),
          vecdb=None,
          use_tools=False, # don't use Langroid native tool
          use_functions_api=True, # use OpenAI function-call API
@@ -628,7 +825,9 @@ Then define the `LeaseMessage` tool as a subclass of Langroid's `ToolMessage`.
  Note the tool has a required argument `terms` of type `Lease`:

  ```python
- class LeaseMessage(ToolMessage):
+ import langroid as lr
+
+ class LeaseMessage(lr.agent.ToolMessage):
      request: str = "lease_info"
      purpose: str = """
      Collect information about a Commercial Lease.
@@ -640,7 +839,7 @@ Then define a `LeaseExtractorAgent` with a method `lease_info` that handles this
  instantiate the agent, and enable it to use and respond to this tool:

  ```python
- class LeaseExtractorAgent(ChatAgent):
+ class LeaseExtractorAgent(lr.ChatAgent):
      def lease_info(self, message: LeaseMessage) -> str:
          print(
              f"""
@@ -650,13 +849,7 @@ class LeaseExtractorAgent(ChatAgent):
          )
          return json.dumps(message.terms.dict())

- lease_extractor_agent = LeaseExtractorAgent(
-     ChatAgentConfig(
-         llm=OpenAIGPTConfig(),
-         use_functions_api=False,
-         use_tools=True,
-     )
- )
+ lease_extractor_agent = LeaseExtractorAgent()
  lease_extractor_agent.enable_message(LeaseMessage)
  ```

@@ -675,18 +868,16 @@ First create a `DocChatAgentConfig` instance, with a
  `doc_paths` field that specifies the documents to chat with.

  ```python
- from langroid.agent.doc_chat_agent import DocChatAgentConfig
- from langroid.vector_store.qdrantdb import QdrantDBConfig
+ import langroid as lr
+ from langroid.agent.special import DocChatAgentConfig, DocChatAgent
+
  config = DocChatAgentConfig(
      doc_paths = [
          "https://en.wikipedia.org/wiki/Language_model",
          "https://en.wikipedia.org/wiki/N-gram_language_model",
          "/path/to/my/notes-on-language-models.txt",
-     ]
-     llm = OpenAIGPTConfig(
-         chat_model=OpenAIChatModel.GPT4,
-     ),
-     vecdb=QdrantDBConfig()
+     ],
+     vecdb=lr.vector_store.LanceDBConfig(),
  )
  ```

 
@@ -697,12 +888,11 @@ agent = DocChatAgent(config)
697
888
  ```
698
889
  Then we can either ask the agent one-off questions,
699
890
  ```python
700
- agent.chat("What is a language model?")
891
+ agent.llm_response("What is a language model?")
701
892
  ```
702
893
  or wrap it in a `Task` and run an interactive loop with the user:
703
894
  ```python
704
- from langroid.task import Task
705
- task = Task(agent)
895
+ task = lr.Task(agent)
706
896
  task.run()
707
897
  ```
708
898
 
@@ -722,9 +912,8 @@ executes the code and returns the answer.
722
912
  Here is how you can do this:
723
913
 
724
914
  ```python
725
- from langroid.agent.special.table_chat_agent import TableChatAgent, TableChatAgentConfig
726
- from langroid.agent.task import Task
727
- from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
915
+ import langroid as lr
916
+ from langroid.agent.special import TableChatAgent, TableChatAgentConfig
728
917
  ```
729
918
 
730
919
  Set up a `TableChatAgent` for a data file, URL or dataframe
@@ -735,17 +924,14 @@ dataset = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quali
735
924
  # or dataset = pd.read_csv("/path/to/my/data.csv")
736
925
  agent = TableChatAgent(
737
926
  config=TableChatAgentConfig(
738
- data=dataset,
739
- llm=OpenAIGPTConfig(
740
- chat_model=OpenAIChatModel.GPT4,
741
- ),
927
+ data=dataset,
742
928
  )
743
929
  )
744
930
  ```
745
931
  Set up a task, and ask one-off questions like this:
746
932
 
747
933
  ```python
748
- task = Task(
934
+ task = lr.Task(
749
935
  agent,
750
936
  name = "DataAssistant",
751
937
  default_human_response="", # to avoid waiting for user input
@@ -759,7 +945,7 @@ print(result.content)
759
945
  Or alternatively, set up a task and run it in an interactive loop with the user:
760
946
 
761
947
  ```python
762
- task = Task(agent, name="DataAssistant")
948
+ task = lr.Task(agent, name="DataAssistant")
763
949
  task.run()
764
950
  ```
765
951
 
@@ -781,14 +967,8 @@ If you like this project, please give it a star ⭐ and 📢 spread the word in
781
967
  [![Share on Hacker News](https://img.shields.io/badge/-Share%20on%20Hacker%20News-orange)](https://news.ycombinator.com/submitlink?u=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&t=Harness%20LLMs%20with%20Multi-Agent%20Programming)
782
968
  [![Share on Reddit](https://img.shields.io/badge/-Share%20on%20Reddit-blue)](https://www.reddit.com/submit?url=https%3A%2F%2Fgithub.com%2Flangroid%2Flangroid&title=Harness%20LLMs%20with%20Multi-Agent%20Programming)
783
969
 
784
-
785
-
786
-
787
970
  Your support will help build Langroid's momentum and community.
788
971
 
789
-
790
-
791
-
792
972
  # Langroid Co-Founders
793
973
 
794
974
  - [Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) (IIT BTech/CS, CMU PhD/ML; Independent ML Consultant)