langroid 0.1.261__tar.gz → 0.1.263__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {langroid-0.1.261 → langroid-0.1.263}/PKG-INFO +19 -17
- {langroid-0.1.261 → langroid-0.1.263}/README.md +12 -12
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/base.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/callbacks/chainlit.py +1 -2
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/chat_document.py +2 -3
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/openai_assistant.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/doc_chat_agent.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_rag/query_planner_agent.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_tools.py +1 -2
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/neo4j_chat_agent.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/utils/tools.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/task.py +26 -6
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tool_message.py +2 -2
- {langroid-0.1.261 → langroid-0.1.263}/langroid/cachedb/base.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/base.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/base.py +2 -111
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/config.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/openai_gpt.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/mytypes.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/code_parser.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/parser.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/repo_loader.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/urls.py +2 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/prompts/__init__.py +0 -2
- {langroid-0.1.261 → langroid-0.1.263}/langroid/prompts/prompts_config.py +1 -1
- langroid-0.1.263/langroid/pydantic_v1/__init__.py +10 -0
- langroid-0.1.263/langroid/pydantic_v1/main.py +4 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/configuration.py +2 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/constants.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/globals.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/pydantic_utils.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/base.py +1 -1
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/lancedb.py +39 -9
- {langroid-0.1.261 → langroid-0.1.263}/pyproject.toml +8 -5
- langroid-0.1.261/langroid/parsing/parser.pyi +0 -56
- langroid-0.1.261/langroid/prompts/transforms.py +0 -84
- {langroid-0.1.261 → langroid-0.1.263}/LICENSE +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/batch.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/callbacks/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/chat_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/helpers.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/junk +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_doc_chat_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_rag/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_rag/critic_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_rag/lance_rag_task.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/csv_kg_chat.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/utils/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/utils/system_message.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/relevance_extractor_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/retriever_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/sql_chat_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/utils/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/utils/description_extractors.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/utils/populate_metadata.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/sql/utils/system_message.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/table_chat_agent.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/duckduckgo_search_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/extract_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/generator_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/google_search_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/metaphor_search_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/recipient_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/retrieval_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/run_python_code.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tools/segment_extract_tool.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/agent_config.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/cachedb/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/cachedb/momento_cachedb.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/cachedb/redis_cachedb.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/clustering.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/models.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/protoc/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/protoc/embeddings.proto +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/protoc/embeddings_pb2.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/protoc/embeddings_pb2.pyi +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/protoc/embeddings_pb2_grpc.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/remote_embeds.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/exceptions.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/azure_openai.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/openai_assistants.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/prompt_formatter/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/prompt_formatter/base.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/prompt_formatter/hf_formatter.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/prompt_formatter/llama2_formatter.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/utils.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/agent_chats.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/code-parsing.md +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/config.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/document_parser.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/image_text.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/para_sentence_split.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/parse_json.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/routing.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/search.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/spider.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/table_loader.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/url_loader.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/url_loader_cookies.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/utils.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/web_search.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/prompts/chat-gpt4-system-prompt.md +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/prompts/dialog.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/prompts/templates.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/algorithms/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/algorithms/graph.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/docker.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/llms/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/llms/strings.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/logging.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/output/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/output/citations.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/output/printing.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/output/status.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/pandas_utils.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/system.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/web/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/utils/web/login.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/__init__.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/chromadb.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/meilisearch.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/momento.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/qdrant_cloud.py +0 -0
- {langroid-0.1.261 → langroid-0.1.263}/langroid/vector_store/qdrantdb.py +0 -0

{langroid-0.1.261 → langroid-0.1.263}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.261
+Version: 0.1.263
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -19,6 +19,7 @@ Provides-Extra: docx
 Provides-Extra: hf-embeddings
 Provides-Extra: hf-transformers
 Provides-Extra: lancedb
+Provides-Extra: langroid-pydantic-v1
 Provides-Extra: litellm
 Provides-Extra: meilisearch
 Provides-Extra: metaphor
@@ -51,7 +52,8 @@ Requires-Dist: grpcio (>=1.62.1,<2.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: huggingface-hub (>=0.21.2,<0.22.0) ; extra == "hf-transformers" or extra == "all" or extra == "transformers"
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
-Requires-Dist: lancedb (>=0.
+Requires-Dist: lancedb (>=0.8.2,<0.9.0) ; extra == "vecdbs" or extra == "lancedb"
+Requires-Dist: langroid_pydantic_v1 (>=0.1.0,<0.2.0) ; extra == "langroid-pydantic-v1" or extra == "lancedb"
 Requires-Dist: litellm (>=1.30.1,<2.0.0) ; extra == "all" or extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch (>=0.28.3,<0.29.0) ; extra == "meilisearch"
@@ -68,8 +70,8 @@ Requires-Dist: pdf2image (>=1.17.0,<2.0.0) ; extra == "doc-chat" or extra == "al
 Requires-Dist: pdfplumber (>=0.10.2,<0.11.0) ; extra == "doc-chat" or extra == "all" or extra == "pdf-parsers"
 Requires-Dist: prettytable (>=3.8.0,<4.0.0)
 Requires-Dist: psycopg2 (>=2.9.7,<3.0.0) ; extra == "db" or extra == "all" or extra == "postgres" or extra == "sql"
-Requires-Dist: pyarrow (==15.0.0) ; extra == "vecdbs" or extra == "
-Requires-Dist: pydantic (
+Requires-Dist: pyarrow (==15.0.0) ; extra == "vecdbs" or extra == "lancedb"
+Requires-Dist: pydantic (>=1,<3)
 Requires-Dist: pygithub (>=1.58.1,<2.0.0)
 Requires-Dist: pygments (>=2.15.1,<3.0.0)
 Requires-Dist: pymupdf (>=1.23.3,<2.0.0) ; extra == "doc-chat" or extra == "all" or extra == "pdf-parsers"
@@ -91,7 +93,7 @@ Requires-Dist: rich (>=13.3.4,<14.0.0)
 Requires-Dist: scrapy (>=2.11.0,<3.0.0) ; extra == "scrapy"
 Requires-Dist: sentence-transformers (==2.2.2) ; extra == "hf-transformers" or extra == "all" or extra == "hf-embeddings"
 Requires-Dist: sqlalchemy (>=2.0.19,<3.0.0) ; extra == "db" or extra == "all" or extra == "sql"
-Requires-Dist: tantivy (>=0.21.0,<0.22.0) ; extra == "vecdbs" or extra == "
+Requires-Dist: tantivy (>=0.21.0,<0.22.0) ; extra == "vecdbs" or extra == "lancedb"
 Requires-Dist: thefuzz (>=0.20.0,<0.21.0)
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
 Requires-Dist: torch (==2.0.0) ; extra == "hf-transformers" or extra == "all" or extra == "hf-embeddings" or extra == "transformers"
@@ -515,9 +517,10 @@ with a postgres db, you will need to:
 ### Set up environment variables (API keys, etc)
 
 To get started, all you need is an OpenAI API Key.
-If you don't have one, see [this OpenAI Page](https://
-
-
+If you don't have one, see [this OpenAI Page](https://platform.openai.com/docs/quickstart).
+(Note that while this is the simplest way to get started, Langroid works with practically any LLM, not just those from OpenAI.
+See the guides to using [Open/Local LLMs](https://langroid.github.io/langroid/tutorials/local-llm-setup/),
+and other [non-OpenAI](https://langroid.github.io/langroid/tutorials/non-openai-llms/) proprietary LLMs.)
 
 In the root of the repo, copy the `.env-template` file to a new file `.env`:
 ```bash
@@ -545,9 +548,7 @@ All of the following environment variable settings are optional, and some are on
 to use specific features (as noted below).
 
 - **Qdrant** Vector Store API Key, URL. This is only required if you want to use Qdrant cloud.
-
-  and you do not need to set up any environment variables for that.
-  Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
+  Alternatively [Chroma](https://docs.trychroma.com/) or [LanceDB](https://lancedb.com/) are also currently supported.
   We use the local-storage version of Chroma, so there is no need for an API key.
 - **Redis** Password, host, port: This is optional, and only needed to cache LLM API responses
   using Redis Cloud. Redis [offers](https://redis.com/try-free/) a free 30MB Redis account
@@ -642,11 +643,12 @@ and they are **not** complete runnable examples! For that we encourage you to
 consult the [`langroid-examples`](https://github.com/langroid/langroid-examples)
 repository.
 
-:information_source:
-
-
-(
-and may suffice for some applications, but in general you may see inferior results
+:information_source:
+The various LLM prompts and instructions in Langroid
+have been tested to work well with GPT-4 (and to some extent GPT-4o).
+Switching to other LLMs (local/open and proprietary) is easy (see guides mentioned above),
+and may suffice for some applications, but in general you may see inferior results
+unless you adjust the prompts and/or the multi-agent setup.
 
 
 :book: Also see the
@@ -921,7 +923,7 @@ config = DocChatAgentConfig(
         "https://en.wikipedia.org/wiki/N-gram_language_model",
         "/path/to/my/notes-on-language-models.txt",
     ],
-    vecdb=lr.vector_store.
+    vecdb=lr.vector_store.QdrantDBConfig(),
 )
 ```
 

{langroid-0.1.261 → langroid-0.1.263}/README.md
RENAMED
@@ -411,9 +411,10 @@ with a postgres db, you will need to:
 ### Set up environment variables (API keys, etc)
 
 To get started, all you need is an OpenAI API Key.
-If you don't have one, see [this OpenAI Page](https://
-
-
+If you don't have one, see [this OpenAI Page](https://platform.openai.com/docs/quickstart).
+(Note that while this is the simplest way to get started, Langroid works with practically any LLM, not just those from OpenAI.
+See the guides to using [Open/Local LLMs](https://langroid.github.io/langroid/tutorials/local-llm-setup/),
+and other [non-OpenAI](https://langroid.github.io/langroid/tutorials/non-openai-llms/) proprietary LLMs.)
 
 In the root of the repo, copy the `.env-template` file to a new file `.env`:
 ```bash
@@ -441,9 +442,7 @@ All of the following environment variable settings are optional, and some are on
 to use specific features (as noted below).
 
 - **Qdrant** Vector Store API Key, URL. This is only required if you want to use Qdrant cloud.
-
-  and you do not need to set up any environment variables for that.
-  Alternatively [Chroma](https://docs.trychroma.com/) is also currently supported.
+  Alternatively [Chroma](https://docs.trychroma.com/) or [LanceDB](https://lancedb.com/) are also currently supported.
   We use the local-storage version of Chroma, so there is no need for an API key.
 - **Redis** Password, host, port: This is optional, and only needed to cache LLM API responses
   using Redis Cloud. Redis [offers](https://redis.com/try-free/) a free 30MB Redis account
@@ -538,11 +537,12 @@ and they are **not** complete runnable examples! For that we encourage you to
 consult the [`langroid-examples`](https://github.com/langroid/langroid-examples)
 repository.
 
-:information_source:
-
-
-(
-and may suffice for some applications, but in general you may see inferior results
+:information_source:
+The various LLM prompts and instructions in Langroid
+have been tested to work well with GPT-4 (and to some extent GPT-4o).
+Switching to other LLMs (local/open and proprietary) is easy (see guides mentioned above),
+and may suffice for some applications, but in general you may see inferior results
+unless you adjust the prompts and/or the multi-agent setup.
 
 
 :book: Also see the
@@ -817,7 +817,7 @@ config = DocChatAgentConfig(
         "https://en.wikipedia.org/wiki/N-gram_language_model",
         "/path/to/my/notes-on-language-models.txt",
     ],
-    vecdb=lr.vector_store.
+    vecdb=lr.vector_store.QdrantDBConfig(),
 )
 ```
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/base.py
RENAMED
@@ -20,7 +20,6 @@ from typing import (
     no_type_check,
 )
 
-from pydantic import BaseSettings, ValidationError, validator
 from rich import print
 from rich.console import Console
 from rich.markup import escape
@@ -41,6 +40,7 @@ from langroid.mytypes import Entity
 from langroid.parsing.parse_json import extract_top_level_json
 from langroid.parsing.parser import Parser, ParsingConfig
 from langroid.prompts.prompts_config import PromptsConfig
+from langroid.pydantic_v1 import BaseSettings, ValidationError, validator
 from langroid.utils.configuration import settings
 from langroid.utils.constants import NO_ANSWER
 from langroid.utils.output import status

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/callbacks/chainlit.py
RENAMED
@@ -7,9 +7,8 @@ import logging
 import textwrap
 from typing import Any, Callable, Dict, List, Literal, Optional, no_type_check
 
-from pydantic import BaseSettings
-
 from langroid.exceptions import LangroidImportError
+from langroid.pydantic_v1 import BaseSettings
 
 try:
     import chainlit as cl

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/chat_document.py
RENAMED
@@ -2,8 +2,6 @@ import json
 from enum import Enum
 from typing import List, Optional, Union
 
-from pydantic import BaseModel, Extra
-
 from langroid.agent.tool_message import ToolMessage
 from langroid.language_models.base import (
     LLMFunctionCall,
@@ -15,6 +13,7 @@ from langroid.language_models.base import (
 from langroid.mytypes import DocMetaData, Document, Entity
 from langroid.parsing.agent_chats import parse_message
 from langroid.parsing.parse_json import extract_top_level_json, top_level_json_field
+from langroid.pydantic_v1 import BaseModel, Extra
 from langroid.utils.output.printing import shorten_text
 
 
@@ -48,7 +47,7 @@ class ChatDocMetaData(DocMetaData):
     block: None | Entity = None
     sender_name: str = ""
     recipient: str = ""
-    usage: Optional[LLMTokenUsage]
+    usage: Optional[LLMTokenUsage] = None
     cached: bool = False
     displayed: bool = False
     has_citation: bool = False

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/openai_assistant.py
RENAMED
@@ -15,7 +15,6 @@ from openai.types.beta.assistant_update_params import (
 )
 from openai.types.beta.threads import Message, Run
 from openai.types.beta.threads.runs import RunStep
-from pydantic import BaseModel
 from rich import print
 
 from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
@@ -27,6 +26,7 @@ from langroid.language_models.openai_gpt import (
     OpenAIGPT,
     OpenAIGPTConfig,
 )
+from langroid.pydantic_v1 import BaseModel
 from langroid.utils.configuration import settings
 from langroid.utils.system import generate_user_id, update_hash
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/doc_chat_agent.py
RENAMED
@@ -1313,7 +1313,7 @@ class DocChatAgent(ChatAgent):
             meta.update(extracts[0].metadata)
             return ChatDocument(
                 content="\n\n".join([e.content for e in extracts]),
-                metadata=ChatDocMetaData(**meta),
+                metadata=ChatDocMetaData(**meta),  # type: ignore
             )
         response = self.get_summary_answer(query, extracts)
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/lance_rag/query_planner_agent.py
RENAMED
@@ -191,7 +191,7 @@ class LanceQueryPlanAgent(ChatAgent):
         # save result, to be used in query_plan_feedback()
         self.result = msg.content
         # assemble QueryPlanAnswerTool...
-        query_plan_answer_tool = QueryPlanAnswerTool(
+        query_plan_answer_tool = QueryPlanAnswerTool(  # type: ignore
             plan=self.curr_query_plan,
             answer=self.result,
         )

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/special/neo4j/neo4j_chat_agent.py
RENAMED
@@ -2,11 +2,11 @@ import json
 import logging
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
-from pydantic import BaseModel, BaseSettings
 from rich import print
 from rich.console import Console
 
 from langroid.agent import ToolMessage
+from langroid.pydantic_v1 import BaseModel, BaseSettings
 
 if TYPE_CHECKING:
     import neo4j

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/task.py
RENAMED
@@ -20,7 +20,6 @@ from typing import (
 )
 
 import numpy as np
-from pydantic import BaseModel
 from rich import print
 from rich.markup import escape
 
@@ -37,6 +36,7 @@ from langroid.exceptions import InfiniteLoopException
 from langroid.mytypes import Entity
 from langroid.parsing.parse_json import extract_top_level_json
 from langroid.parsing.routing import parse_addressed_message
+from langroid.pydantic_v1 import BaseModel
 from langroid.utils.configuration import settings
 from langroid.utils.constants import (
     DONE,
@@ -787,7 +787,7 @@ class Task:
                 # skip trying other responders in this step
                 break
         if not found_response:
-            self._process_invalid_step_result()
+            self._process_invalid_step_result(parent)
         self._show_pending_message_if_debug()
         return self.pending_message
 
@@ -879,7 +879,7 @@ class Task:
                 # skip trying other responders in this step
                 break
         if not found_response:
-            self._process_invalid_step_result()
+            self._process_invalid_step_result(parent)
         self._show_pending_message_if_debug()
         return self.pending_message
 
@@ -916,11 +916,28 @@
         self.message_counter.update([hashed_msg])
         self.history.append(hashed_msg)
 
-    def _process_invalid_step_result(self) -> None:
+    def _process_invalid_step_result(self, parent: ChatDocument | None) -> None:
         """
-
+        Since step had no valid result from any responder, decide whether to update the
+        self.pending_message to a NO_ANSWER message from the opposite entity,
+        or leave it as is.
+        Args:
+            parent (ChatDocument|None): parent message of the current message
         """
         self.n_stalled_steps += 1
+        if (not self.task_progress or self.allow_null_result) and not self.is_pass_thru:
+            # There has been no progress at all in this task, so we
+            # update the pending_message to a dummy NO_ANSWER msg
+            # from the entity 'opposite' to the current pending_sender,
+            # so we show "progress" and avoid getting stuck in an infinite loop.
+            responder = (
+                Entity.LLM if self.pending_sender == Entity.USER else Entity.USER
+            )
+            self.pending_message = ChatDocument(
+                content=NO_ANSWER,
+                metadata=ChatDocMetaData(sender=responder, parent=parent),
+            )
+            self.pending_sender = responder
         self.log_message(self.pending_sender, self.pending_message, mark=True)
 
     def _show_pending_message_if_debug(self) -> None:
@@ -1042,6 +1059,9 @@ class Task:
         """
         Get result of task. This is the default behavior.
        Derived classes can override this.
+
+        Note the result of a task is returned as if it is from the User entity.
+
         Returns:
             ChatDocument: result of task
         """
@@ -1054,7 +1074,7 @@ class Task:
         fun_call = result_msg.function_call if result_msg else None
         tool_messages = result_msg.tool_messages if result_msg else []
         block = result_msg.metadata.block if result_msg else None
-        recipient = result_msg.metadata.recipient if result_msg else
+        recipient = result_msg.metadata.recipient if result_msg else ""
        tool_ids = result_msg.metadata.tool_ids if result_msg else []
         status = result_msg.metadata.status if result_msg else None
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/agent/tool_message.py
RENAMED
@@ -13,9 +13,9 @@ from random import choice
 from typing import Any, Dict, List, Tuple, Type
 
 from docstring_parser import parse
-from pydantic import BaseModel
 
 from langroid.language_models.base import LLMFunctionSpec
+from langroid.pydantic_v1 import BaseModel
 from langroid.utils.pydantic_utils import (
     _recursive_purge_dict_key,
     generate_simple_schema,
@@ -73,7 +73,7 @@ class ToolMessage(ABC, BaseModel):
         - a tuple (description, ToolMessage instance), where the description is
           a natural language "thought" that leads to the tool usage,
          e.g. ("I want to find the square of 5", SquareTool(num=5))
-          In some scenarios,
+          In some scenarios, including such a description can significantly
          enhance reliability of tool use.
         Returns:
         """

{langroid-0.1.261 → langroid-0.1.263}/langroid/embedding_models/base.py
RENAMED
@@ -2,9 +2,9 @@ import logging
 from abc import ABC, abstractmethod
 
 import numpy as np
-from pydantic import BaseSettings
 
 from langroid.mytypes import EmbeddingFunction
+from langroid.pydantic_v1 import BaseSettings
 
 logging.getLogger("openai").setLevel(logging.ERROR)
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/base.py
RENAMED
@@ -1,5 +1,4 @@
 import ast
-import asyncio
 import json
 import logging
 from abc import ABC, abstractmethod
@@ -7,18 +6,11 @@ from datetime import datetime
 from enum import Enum
 from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
 
-import aiohttp
-from pydantic import BaseModel, BaseSettings, Field
-
 from langroid.cachedb.base import CacheDBConfig
-from langroid.mytypes import Document
 from langroid.parsing.agent_chats import parse_message
 from langroid.parsing.parse_json import top_level_json_field
 from langroid.prompts.dialog import collate_chat_history
-from langroid.
-    EXTRACTION_PROMPT_GPT4,
-    SUMMARY_ANSWER_PROMPT_GPT4,
-)
+from langroid.pydantic_v1 import BaseModel, BaseSettings, Field
 from langroid.utils.configuration import settings
 from langroid.utils.output.printing import show_if_debug
 
@@ -184,7 +176,7 @@ class LLMResponse(BaseModel):
     message: str
     tool_id: str = ""  # used by OpenAIAssistant
     function_call: Optional[LLMFunctionCall] = None
-    usage: Optional[LLMTokenUsage]
+    usage: Optional[LLMTokenUsage] = None
     cached: bool = False
 
     def __str__(self) -> str:
@@ -487,107 +479,6 @@ class LanguageModel(ABC):
         show_if_debug(prompt, "FOLLOWUP->STANDALONE-RESPONSE= ")
         return standalone
 
-    async def get_verbatim_extract_async(self, question: str, passage: Document) -> str:
-        """
-        Asynchronously, get verbatim extract from passage
-        that is relevant to a question.
-        Asynch allows parallel calls to the LLM API.
-        """
-        async with aiohttp.ClientSession():
-            templatized_prompt = EXTRACTION_PROMPT_GPT4
-            final_prompt = templatized_prompt.format(
-                question=question, content=passage.content
-            )
-            show_if_debug(final_prompt, "EXTRACT-PROMPT= ")
-            final_extract = await self.agenerate(prompt=final_prompt, max_tokens=1024)
-            show_if_debug(final_extract.message.strip(), "EXTRACT-RESPONSE= ")
-            return final_extract.message.strip()
-
-    async def _get_verbatim_extracts(
-        self,
-        question: str,
-        passages: List[Document],
-    ) -> List[Document]:
-        async with aiohttp.ClientSession():
-            verbatim_extracts = await asyncio.gather(
-                *(self.get_verbatim_extract_async(question, P) for P in passages)
-            )
-            metadatas = [P.metadata for P in passages]
-            # return with metadata so we can use it downstream, e.g. to cite sources
-            return [
-                Document(content=e, metadata=m)
-                for e, m in zip(verbatim_extracts, metadatas)
-            ]
-
-    def get_verbatim_extracts(
-        self, question: str, passages: List[Document]
-    ) -> List[Document]:
-        """
-        From each passage, extract verbatim text that is relevant to a question,
-        using concurrent API calls to the LLM.
-        Args:
-            question: question to be answered
-            passages: list of passages from which to extract relevant verbatim text
-            LLM: LanguageModel to use for generating the prompt and extract
-        Returns:
-            list of verbatim extracts from passages that are relevant to question
-        """
-        docs = asyncio.run(self._get_verbatim_extracts(question, passages))
-        return docs
-
-    def get_summary_answer(self, question: str, passages: List[Document]) -> Document:
-        """
-        Given a question and a list of (possibly) doc snippets,
-        generate an answer if possible
-        Args:
-            question: question to answer
-            passages: list of `Document` objects each containing a possibly relevant
-                snippet, and metadata
-        Returns:
-            a `Document` object containing the answer,
-            and metadata containing source citations
-
-        """
-
-        # Define an auxiliary function to transform the list of
-        # passages into a single string
-        def stringify_passages(passages: List[Document]) -> str:
-            return "\n".join(
-                [
-                    f"""
-                    Extract: {p.content}
-                    Source: {p.metadata.source}
-                    """
-                    for p in passages
-                ]
-            )
-
-        passages_str = stringify_passages(passages)
-        # Substitute Q and P into the templatized prompt
-
-        final_prompt = SUMMARY_ANSWER_PROMPT_GPT4.format(
-            question=f"Question:{question}", extracts=passages_str
-        )
-        show_if_debug(final_prompt, "SUMMARIZE_PROMPT= ")
-        # Generate the final verbatim extract based on the final prompt
-        llm_response = self.generate(prompt=final_prompt, max_tokens=1024)
-        final_answer = llm_response.message.strip()
-        show_if_debug(final_answer, "SUMMARIZE_RESPONSE= ")
-        parts = final_answer.split("SOURCE:", maxsplit=1)
-        if len(parts) > 1:
-            content = parts[0].strip()
-            sources = parts[1].strip()
-        else:
-            content = final_answer
-            sources = ""
-        return Document(
-            content=content,
-            metadata={
-                "source": "SOURCE: " + sources,
-                "cached": llm_response.cached,
-            },
-        )
-
 
 class StreamingIfAllowed:
     """Context to temporarily enable or disable streaming, if allowed globally via

{langroid-0.1.261 → langroid-0.1.263}/langroid/language_models/openai_gpt.py
RENAMED
@@ -24,7 +24,6 @@ import openai
 from groq import AsyncGroq, Groq
 from httpx import Timeout
 from openai import AsyncOpenAI, OpenAI
-from pydantic import BaseModel
 from rich import print
 from rich.markup import escape
 
@@ -50,6 +49,7 @@ from langroid.language_models.utils import (
     async_retry_with_exponential_backoff,
     retry_with_exponential_backoff,
 )
+from langroid.pydantic_v1 import BaseModel
 from langroid.utils.configuration import settings
 from langroid.utils.constants import Colors
 from langroid.utils.system import friendly_error

{langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/code_parser.py
RENAMED
@@ -2,12 +2,12 @@ from functools import reduce
 from typing import Callable, List
 
 import tiktoken
-from pydantic import BaseSettings
 from pygments import lex
 from pygments.lexers import get_lexer_by_name
 from pygments.token import Token
 
 from langroid.mytypes import Document
+from langroid.pydantic_v1 import BaseSettings
 
 
 def chunk_code(

{langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/parser.py
RENAMED
@@ -3,10 +3,10 @@ from enum import Enum
 from typing import Dict, List, Literal
 
 import tiktoken
-from pydantic import BaseSettings
 
 from langroid.mytypes import Document
 from langroid.parsing.para_sentence_split import create_chunks, remove_extra_whitespace
+from langroid.pydantic_v1 import BaseSettings
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.WARNING)

{langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/repo_loader.py
RENAMED
@@ -15,11 +15,11 @@ from github import Github
 from github.ContentFile import ContentFile
 from github.Label import Label
 from github.Repository import Repository
-from pydantic import BaseModel, BaseSettings, Field
 
 from langroid.mytypes import DocMetaData, Document
 from langroid.parsing.document_parser import DocumentParser, DocumentType
 from langroid.parsing.parser import Parser, ParsingConfig
+from langroid.pydantic_v1 import BaseModel, BaseSettings, Field
 
 logger = logging.getLogger(__name__)
 

{langroid-0.1.261 → langroid-0.1.263}/langroid/parsing/urls.py
RENAMED
@@ -9,11 +9,12 @@ from urllib.parse import urldefrag, urljoin, urlparse
 import fire
 import requests
 from bs4 import BeautifulSoup
-from pydantic import BaseModel, HttpUrl, ValidationError, parse_obj_as
 from rich import print
 from rich.prompt import Prompt
 from trafilatura.spider import focused_crawler
 
+from langroid.pydantic_v1 import BaseModel, HttpUrl, ValidationError, parse_obj_as
+
 logger = logging.getLogger(__name__)
 
 
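
The recurring change across the hunks above is the swap of direct `pydantic` imports for the new `langroid.pydantic_v1` module (added in this release as `langroid/pydantic_v1/__init__.py` and `langroid/pydantic_v1/main.py`, per the file list), together with the relaxed `pydantic (>=1,<3)` constraint in the package metadata. The contents of the new module are not shown in this diff; the following is a minimal, hypothetical sketch of what such a compatibility shim typically looks like, assuming it simply re-exports the Pydantic v1 API from whichever Pydantic major version is installed. The exported names are taken from the import statements visible in the hunks above.

```python
# Hypothetical sketch only -- the actual langroid/pydantic_v1/__init__.py
# is not shown in this diff. It assumes the shim re-exports the Pydantic v1
# API regardless of whether Pydantic 1.x or 2.x is installed.
try:
    # Pydantic 2.x ships the legacy v1 API under the `pydantic.v1` subpackage.
    from pydantic.v1 import (  # noqa: F401
        BaseModel,
        BaseSettings,
        Extra,
        Field,
        HttpUrl,
        ValidationError,
        parse_obj_as,
        validator,
    )
except ImportError:
    # Pydantic 1.x: the v1 API is the top-level package itself.
    from pydantic import (  # noqa: F401
        BaseModel,
        BaseSettings,
        Extra,
        Field,
        HttpUrl,
        ValidationError,
        parse_obj_as,
        validator,
    )
```

With a shim like this in place, callers only change their import path, e.g. `from langroid.pydantic_v1 import BaseModel` instead of `from pydantic import BaseModel`, which is exactly the pattern repeated throughout the import hunks above.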