langroid 0.2.12__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/team.py +1758 -0
- langroid/embedding_models/base.py +4 -0
- langroid/embedding_models/models.py +54 -1
- langroid/language_models/azure_openai.py +10 -5
- langroid/language_models/base.py +1 -0
- langroid/language_models/openai_gpt.py +18 -5
- langroid/parsing/parse_json.py +27 -1
- langroid/vector_store/momento.py +1 -0
- langroid/vector_store/qdrantdb.py +16 -2
- {langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/METADATA +7 -3
- {langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/RECORD +14 -13
- pyproject.toml +6 -3
- {langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/LICENSE +0 -0
- {langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/WHEEL +0 -0
langroid/embedding_models/base.py
CHANGED

@@ -24,6 +24,8 @@ class EmbeddingModel(ABC):
     @classmethod
     def create(cls, config: EmbeddingModelsConfig) -> "EmbeddingModel":
         from langroid.embedding_models.models import (
+            FastEmbedEmbeddings,
+            FastEmbedEmbeddingsConfig,
             OpenAIEmbeddings,
             OpenAIEmbeddingsConfig,
             SentenceTransformerEmbeddings,

@@ -40,6 +42,8 @@ class EmbeddingModel(ABC):
             return OpenAIEmbeddings(config)
         elif isinstance(config, SentenceTransformerEmbeddingsConfig):
             return SentenceTransformerEmbeddings(config)
+        elif isinstance(config, FastEmbedEmbeddingsConfig):
+            return FastEmbedEmbeddings(config)
         else:
             raise ValueError(f"Unknown embedding config: {config.__repr_name__}")
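As a rough sketch of how the new branch gets exercised (assuming the optional `fastembed` dependency is installed; the final print is purely illustrative), `EmbeddingModel.create` now maps a `FastEmbedEmbeddingsConfig` to a `FastEmbedEmbeddings` instance:

```python
from langroid.embedding_models.base import EmbeddingModel
from langroid.embedding_models.models import FastEmbedEmbeddingsConfig

# The config *type* drives the dispatch above:
# FastEmbedEmbeddingsConfig -> FastEmbedEmbeddings.
config = FastEmbedEmbeddingsConfig()  # defaults defined in models.py below
model = EmbeddingModel.create(config)
print(type(model).__name__)  # expected: "FastEmbedEmbeddings"
```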
langroid/embedding_models/models.py
CHANGED

@@ -1,12 +1,14 @@
 import atexit
 import os
-from
+from functools import cached_property
+from typing import Any, Callable, Dict, List, Optional

 import tiktoken
 from dotenv import load_dotenv
 from openai import OpenAI

 from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig
+from langroid.exceptions import LangroidImportError
 from langroid.mytypes import Embeddings
 from langroid.parsing.utils import batched
@@ -32,6 +34,20 @@ class SentenceTransformerEmbeddingsConfig(EmbeddingModelsConfig):
     devices: Optional[list[str]] = None


+class FastEmbedEmbeddingsConfig(EmbeddingModelsConfig):
+    """Config for qdrant/fastembed embeddings,
+    see here: https://github.com/qdrant/fastembed
+    """
+
+    model_type: str = "fastembed"
+    model_name: str = "BAAI/bge-small-en-v1.5"
+    batch_size: int = 256
+    cache_dir: Optional[str] = None
+    threads: Optional[int] = None
+    parallel: Optional[int] = None
+    additional_kwargs: Dict[str, Any] = {}
+
+
 class EmbeddingFunctionCallable:
     """
     A callable class designed to generate embeddings for a list of texts using
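These fields mirror the knobs exposed by fastembed's `TextEmbedding`: `model_name`, `cache_dir`, `threads`, and `additional_kwargs` are passed to its constructor, while `batch_size` and `parallel` are used at embedding time (see the class added further below). A construction sketch with illustrative values; only the field names and defaults come from the diff:

```python
from langroid.embedding_models.models import FastEmbedEmbeddingsConfig

cfg = FastEmbedEmbeddingsConfig(
    model_name="BAAI/bge-small-en-v1.5",  # default model per the config above
    batch_size=128,                # used when calling TextEmbedding.embed()
    cache_dir=".fastembed_cache",  # hypothetical local model-cache path
    threads=4,                     # forwarded to the TextEmbedding constructor
)
```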
@@ -189,6 +205,41 @@ class SentenceTransformerEmbeddings(EmbeddingModel):
         return dims  # type: ignore


+class FastEmbedEmbeddings(EmbeddingModel):
+    def __init__(self, config: FastEmbedEmbeddingsConfig = FastEmbedEmbeddingsConfig()):
+        try:
+            from fastembed import TextEmbedding
+        except ImportError:
+            raise LangroidImportError("fastembed", extra="fastembed")
+
+        super().__init__()
+        self.config = config
+        self._batch_size = config.batch_size
+        self._parallel = config.parallel
+
+        self._model = TextEmbedding(
+            model_name=self.config.model_name,
+            cache_dir=self.config.cache_dir,
+            threads=self.config.threads,
+            **self.config.additional_kwargs,
+        )
+
+    def embedding_fn(self) -> Callable[[List[str]], Embeddings]:
+        def fn(texts: List[str]) -> Embeddings:
+            embeddings = self._model.embed(
+                texts, batch_size=self._batch_size, parallel=self._parallel
+            )
+
+            return [embedding.tolist() for embedding in embeddings]
+
+        return fn
+
+    @cached_property
+    def embedding_dims(self) -> int:
+        embed_func = self.embedding_fn()
+        return len(embed_func(["text"])[0])
+
+
 def embedding_model(embedding_fn_type: str = "openai") -> EmbeddingModel:
     """
     Args:
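A minimal usage sketch, assuming the `fastembed` extra is installed (e.g. `pip install "langroid[fastembed]"`, per the extra declared in this release). The dimension noted in the comment is what bge-small-en-v1.5 typically produces, not something stated in the diff:

```python
from langroid.embedding_models.models import (
    FastEmbedEmbeddings,
    FastEmbedEmbeddingsConfig,
)

# Raises LangroidImportError if fastembed is not installed (see __init__ above).
embedder = FastEmbedEmbeddings(FastEmbedEmbeddingsConfig())

embed = embedder.embedding_fn()        # Callable[[List[str]], Embeddings]
vecs = embed(["hello world", "goodbye"])
print(len(vecs))                       # 2
print(embedder.embedding_dims)         # cached_property; typically 384 for this model
```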
@@ -198,5 +249,7 @@ def embedding_model(embedding_fn_type: str = "openai") -> EmbeddingModel:
     """
     if embedding_fn_type == "openai":
         return OpenAIEmbeddings  # type: ignore
+    elif embedding_fn_type == "fastembed":
+        return FastEmbedEmbeddings  # type: ignore
     else:  # default sentence transformer
         return SentenceTransformerEmbeddings  # type: ignore
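Note that, as with the existing branches, the factory returns the class rather than an instance, so callers still construct it with a config. A quick sketch:

```python
from langroid.embedding_models.models import (
    FastEmbedEmbeddingsConfig,
    embedding_model,
)

EmbedCls = embedding_model("fastembed")           # returns the class, not an instance
embedder = EmbedCls(FastEmbedEmbeddingsConfig())  # instantiate with a config
```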
langroid/language_models/azure_openai.py
CHANGED

@@ -133,12 +133,15 @@ class AzureGPT(OpenAIGPT):
         """
         Handles the setting of the GPT-4 model in the configuration.
         This function checks the `model_version` in the configuration.
-        If the version is not set, it raises a ValueError indicating
-        version needs to be specified in the ``.env``
-        It sets `
-        '
+        If the version is not set, it raises a ValueError indicating
+        that the model version needs to be specified in the ``.env``
+        file. It sets `OpenAIChatMode.GPT4o` if the version is
+        '2024-05-13', `OpenAIChatModel.GPT4_TURBO` if the version is
+        '1106-Preview', otherwise, it defaults to setting
+        `OpenAIChatModel.GPT4`.
         """
         VERSION_1106_PREVIEW = "1106-Preview"
+        VERSION_GPT4o = "2024-05-13"

         if self.config.model_version == "":
             raise ValueError(

@@ -146,7 +149,9 @@ class AzureGPT(OpenAIGPT):
                 "Please set it to the chat model version used in your deployment."
             )

-        if self.config.model_version ==
+        if self.config.model_version == VERSION_GPT4o:
+            self.config.chat_model = OpenAIChatModel.GPT4o
+        elif self.config.model_version == VERSION_1106_PREVIEW:
             self.config.chat_model = OpenAIChatModel.GPT4_TURBO
         else:
             self.config.chat_model = OpenAIChatModel.GPT4
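For reference, the selection logic added above boils down to a small version-to-model mapping. The sketch below restates it as a standalone function; this is illustrative only, the real logic lives inside `AzureGPT` and reads `self.config.model_version`, and `OpenAIChatModel` is assumed importable from `langroid.language_models.openai_gpt` as elsewhere in the codebase:

```python
from langroid.language_models.openai_gpt import OpenAIChatModel

def chat_model_for_azure_version(model_version: str) -> OpenAIChatModel:
    # Mirrors the branches added in AzureGPT above.
    if model_version == "":
        raise ValueError("model_version must be set, e.g. in your .env file")
    if model_version == "2024-05-13":      # VERSION_GPT4o
        return OpenAIChatModel.GPT4o
    if model_version == "1106-Preview":    # VERSION_1106_PREVIEW
        return OpenAIChatModel.GPT4_TURBO
    return OpenAIChatModel.GPT4            # default
```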
langroid/language_models/base.py
CHANGED
@@ -234,6 +234,7 @@ class LLMResponse(BaseModel):
             # in this case we ignore message, since all information is in function_call
             msg = ""
             args = self.function_call.arguments
+            recipient = ""
             if isinstance(args, dict):
                 recipient = args.get("recipient", "")
             return recipient, msg
langroid/language_models/openai_gpt.py
CHANGED

@@ -1,4 +1,3 @@
-import ast
 import hashlib
 import json
 import logging

@@ -49,6 +48,7 @@ from langroid.language_models.utils import (
     async_retry_with_exponential_backoff,
     retry_with_exponential_backoff,
 )
+from langroid.parsing.parse_json import parse_imperfect_json
 from langroid.pydantic_v1 import BaseModel
 from langroid.utils.configuration import settings
 from langroid.utils.constants import Colors

@@ -797,11 +797,24 @@ class OpenAIGPT(LanguageModel):
         args = {}
         if has_function and function_args != "":
             try:
-
-
+                stripped_fn_args = function_args.strip()
+                dict_or_list = parse_imperfect_json(stripped_fn_args)
+                if not isinstance(dict_or_list, dict):
+                    raise ValueError(
+                        f"""
+                        Invalid function args: {stripped_fn_args}
+                        parsed as {dict_or_list},
+                        which is not a valid dict.
+                        """
+                    )
+                args = dict_or_list
+            except (SyntaxError, ValueError) as e:
                 logging.warning(
-                    f"
-
+                    f"""
+                    Parsing OpenAI function args failed: {function_args};
+                    treating args as normal message. Error detail:
+                    {e}
+                    """
                 )
                 has_function = False
                 completion = completion + function_args
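The net effect: function-call arguments that arrive as Python-literal-style or slightly malformed JSON are now repaired via `parse_imperfect_json`, and anything that still fails (or parses to a non-dict) falls back to being treated as ordinary message text. A standalone sketch of the same pattern; the function name here is hypothetical, since the real code is inlined in `OpenAIGPT`:

```python
import logging
from typing import Any, Dict, Tuple

from langroid.parsing.parse_json import parse_imperfect_json

def parse_fn_args(function_args: str) -> Tuple[bool, Dict[str, Any]]:
    """Return (has_function, args), mirroring the fallback logic above."""
    try:
        parsed = parse_imperfect_json(function_args.strip())
        if not isinstance(parsed, dict):
            raise ValueError(f"parsed as {parsed}, which is not a valid dict")
        return True, parsed
    except (SyntaxError, ValueError) as e:
        logging.warning("Parsing OpenAI function args failed: %s", e)
        return False, {}

print(parse_fn_args("{'recipient': 'Bob', 'content': 'hi'}"))  # single quotes -> dict
print(parse_fn_args("not json at all"))                        # -> (False, {})
```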
langroid/parsing/parse_json.py
CHANGED
@@ -1,5 +1,6 @@
+import ast
 import json
-from typing import Any, Iterator, List
+from typing import Any, Dict, Iterator, List, Union

 import yaml
 from pyparsing import nestedExpr, originalTextFor

@@ -73,6 +74,31 @@ def add_quotes(s: str) -> str:
     return s


+def parse_imperfect_json(json_string: str) -> Union[Dict[str, Any], List[Any]]:
+    if not json_string.strip():
+        raise ValueError("Empty string is not valid JSON")
+
+    # First, try parsing with ast.literal_eval
+    try:
+        result = ast.literal_eval(json_string)
+        if isinstance(result, (dict, list)):
+            return result
+    except (ValueError, SyntaxError):
+        pass
+
+    # If ast.literal_eval fails or returns non-dict/list, try json.loads
+    try:
+        str = add_quotes(json_string)
+        result = json.loads(str)
+        if isinstance(result, (dict, list)):
+            return result
+    except json.JSONDecodeError:
+        pass
+
+    # If all methods fail, raise ValueError
+    raise ValueError(f"Unable to parse as JSON: {json_string}")
+
+
 def repair_newlines(s: str) -> str:
     """
     Attempt to load as json, and if it fails, try with newlines replaced by space.
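A few illustrative calls, with expected results inferred from the two-stage logic above (`ast.literal_eval` first, then `add_quotes` plus `json.loads`):

```python
from langroid.parsing.parse_json import parse_imperfect_json

# Python-literal style (single quotes, True/None) is handled by ast.literal_eval:
parse_imperfect_json("{'a': 1, 'b': [True, None]}")  # -> {'a': 1, 'b': [True, None]}

# Strict JSON still parses:
parse_imperfect_json('{"a": 1, "b": [2, 3]}')        # -> {'a': 1, 'b': [2, 3]}

# Inputs that parse to neither a dict nor a list raise ValueError:
parse_imperfect_json("42")   # ValueError
parse_imperfect_json("")     # ValueError: empty string
```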
langroid/vector_store/momento.py
CHANGED

langroid/vector_store/qdrantdb.py
CHANGED
@@ -63,6 +63,7 @@ def is_valid_uuid(uuid_to_test: str) -> bool:

 class QdrantDBConfig(VectorStoreConfig):
     cloud: bool = True
+    docker: bool = False
     collection_name: str | None = "temp"
     storage_path: str = ".qdrant/data"
     embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()

@@ -102,7 +103,19 @@ class QdrantDB(VectorStore):
         load_dotenv()
         key = os.getenv("QDRANT_API_KEY")
         url = os.getenv("QDRANT_API_URL")
-        if config.
+        if config.docker:
+            if url is None:
+                logger.warning(
+                    f"""The QDRANT_API_URL env variable must be set to use
+                    QdrantDB in local docker mode. Please set this
+                    value in your .env file.
+                    Switching to local storage at {config.storage_path}
+                    """
+                )
+                config.cloud = False
+            else:
+                config.cloud = True
+        elif config.cloud and None in [key, url]:
             logger.warning(
                 f"""QDRANT_API_KEY, QDRANT_API_URL env variable must be set to use
                 QdrantDB in cloud mode. Please set these values

@@ -111,6 +124,7 @@ class QdrantDB(VectorStore):
                 """
             )
             config.cloud = False
+
         if config.cloud:
             self.client = QdrantClient(
                 url=url,

@@ -385,7 +399,7 @@ class QdrantDB(VectorStore):
         # Note the records may NOT be in the order of the ids,
         # so we re-order them here.
         id2payload = {record.id: record.payload for record in records}
-        ordered_payloads = [id2payload[id] for id in _ids]
+        ordered_payloads = [id2payload[id] for id in _ids if id in id2payload]
         docs = [Document(**payload) for payload in ordered_payloads]  # type: ignore
         return docs
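A hedged sketch of using the new flag: with `docker=True`, the client is pointed at a locally running Qdrant container via `QDRANT_API_URL`, and per the warning branch above it silently falls back to local storage at `storage_path` if that variable is missing. The URL and collection name below are illustrative, not from the diff:

```python
import os

from langroid.vector_store.qdrantdb import QdrantDB, QdrantDBConfig

# Illustrative: assumes a Qdrant container is reachable at this URL.
os.environ["QDRANT_API_URL"] = "http://localhost:6333"

config = QdrantDBConfig(
    docker=True,                # new flag: talk to a local docker Qdrant instance
    collection_name="my-docs",  # hypothetical collection name
)
vecdb = QdrantDB(config)  # with the URL present, config.cloud is flipped to True internally
```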
{langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/METADATA
CHANGED

@@ -1,11 +1,11 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.2.12
+Version: 0.3.1
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
 Author-email: pchalasani@gmail.com
-Requires-Python: >=3.10,<
+Requires-Python: >=3.10,<3.13
 Classifier: License :: OSI Approved :: MIT License
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10

@@ -17,6 +17,7 @@ Provides-Extra: chromadb
 Provides-Extra: db
 Provides-Extra: doc-chat
 Provides-Extra: docx
+Provides-Extra: fastembed
 Provides-Extra: hf-embeddings
 Provides-Extra: hf-transformers
 Provides-Extra: lancedb

@@ -44,6 +45,7 @@ Requires-Dist: docstring-parser (>=0.15,<0.16)
 Requires-Dist: duckduckgo-search (>=6.0.0,<7.0.0)
 Requires-Dist: faker (>=18.9.0,<19.0.0)
 Requires-Dist: fakeredis (>=2.12.1,<3.0.0)
+Requires-Dist: fastembed (>=0.3.1,<0.4.0) ; extra == "all" or extra == "fastembed"
 Requires-Dist: fire (>=0.5.0,<0.6.0)
 Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: google-generativeai (>=0.5.2,<0.6.0)

@@ -57,7 +59,7 @@ Requires-Dist: litellm (>=1.30.1,<2.0.0) ; extra == "all" or extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0) ; extra == "meilisearch"
 Requires-Dist: metaphor-python (>=0.1.23,<0.2.0) ; extra == "all" or extra == "metaphor"
-Requires-Dist: momento (>=1.10.2,<
+Requires-Dist: momento (>=1.10.2,<1.21) ; extra == "momento"
 Requires-Dist: neo4j (>=5.14.1,<6.0.0) ; extra == "all" or extra == "neo4j"
 Requires-Dist: nest-asyncio (>=1.6.0,<2.0.0)
 Requires-Dist: nltk (>=3.8.1,<4.0.0)

@@ -233,6 +235,8 @@ teacher_task.run()
 <details>
 <summary> <b>Click to expand</b></summary>

+- **Jul 2024:**
+  - **[0.3.0](https://github.com/langroid/langroid/releases/tag/0.3.0)**: Added [FastEmbed](https://qdrant.github.io/fastembed/qdrant/Usage_With_Qdrant/) embeddings from Qdrant
 - **Jun 2024:**
   - **0.2.0:** Improved lineage tracking, granular sub-task configs, and a new tool, `RewindTool`,
     that lets an agent "rewind and redo" a past message (and all dependent messages are cleared out
{langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/RECORD
CHANGED

@@ -33,6 +33,7 @@ langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GS
 langroid/agent/special/sql/utils/tools.py,sha256=vFYysk6Vi7HJjII8B4RitA3pt_z3gkSglDNdhNVMiFc,1332
 langroid/agent/special/table_chat_agent.py,sha256=d9v2wsblaRx7oMnKhLV7uO_ujvk9gh59pSGvBXyeyNc,9659
 langroid/agent/task.py,sha256=vKM2dmRYSH4i_VA0lf2axUtZcTGU44rVHz6EyxI4kG0,73990
+langroid/agent/team.py,sha256=88VNRSmK35WEl620GfBzuIrBASXYSeBZ8yDKX-nP_Bo,75778
 langroid/agent/tool_message.py,sha256=wIyZnUcZpxkiRPvM9O3MO3b5BBAdLEEan9kqPbvtApc,9743
 langroid/agent/tools/__init__.py,sha256=e-63cfwQNk_ftRKQwgDAJQK16QLbRVWDBILeXIc7wLk,402
 langroid/agent/tools/duckduckgo_search_tool.py,sha256=NhsCaGZkdv28nja7yveAhSK_w6l_Ftym8agbrdzqgfo,1935

@@ -52,9 +53,9 @@ langroid/cachedb/base.py,sha256=ztVjB1DtN6pLCujCWnR6xruHxwVj3XkYniRTYAKKqk0,1354
 langroid/cachedb/momento_cachedb.py,sha256=YEOJ62hEcV6iIeMr5aGgRYgWQqFYaej9gEDEcY0sm7M,3172
 langroid/cachedb/redis_cachedb.py,sha256=7kgnbf4b5CKsCrlL97mHWKvdvlLt8zgn7lc528jEpiE,5141
 langroid/embedding_models/__init__.py,sha256=lsu8qxCjfGujXGueJWU-VI3LMZYGjLSYgqUKDd4F3Qo,715
-langroid/embedding_models/base.py,sha256=
+langroid/embedding_models/base.py,sha256=3dK0nW3XNjK3Vyh2kxhIffzDuUYumVVkCIimB3UPHeU,2009
 langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWjucZR_Egtp-A,6684
-langroid/embedding_models/models.py,sha256
+langroid/embedding_models/models.py,sha256=NQ1Cfw5MmeR69fzthIWjnkSAQuWcxnUecS5z_U2IPNs,8900
 langroid/embedding_models/protoc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/embedding_models/protoc/embeddings.proto,sha256=_O-SgFpTaylQeOTgSpxhEJ7CUw7PeCQQJLaPqpPYKJg,321
 langroid/embedding_models/protoc/embeddings_pb2.py,sha256=4Q57PhOunv-uZNJrxYrWBXAI0ZtfnVZXFRhRj5JuRSg,1662

@@ -63,11 +64,11 @@ langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEje
 langroid/embedding_models/remote_embeds.py,sha256=6_kjXByVbqhY9cGwl9R83ZcYC2km-nGieNNAo1McHaY,5151
 langroid/exceptions.py,sha256=w_Cr41nPAmsa6gW5nNFaO9yDcBCWdQqRspL1jYvZf5w,2209
 langroid/language_models/__init__.py,sha256=1sUGobooTqq77XC7LxKsvME0RgSd5GGmeyrPo9SMh4U,940
-langroid/language_models/azure_openai.py,sha256=
-langroid/language_models/base.py,sha256=
+langroid/language_models/azure_openai.py,sha256=G4le3j4YLHV7IwgB2C37hO3MKijZ1KjynbYlEvpIF7Y,6214
+langroid/language_models/base.py,sha256=nhY-AdSkfqaW4hzeIekxxZs29AWLd7X7GYhRygU9L74,17527
 langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
 langroid/language_models/mock_lm.py,sha256=qdgj-wtbQBXlibo_0rIRfCt0hGTPRoxy1C4VjN6quI4,2707
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=JOgENlOGBTg9r94hjvuqB2OteHRbX2JtMkrApoNu-jc,52257
 langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=TFL6ppmeQWnzr6CKQzRZFYY810zE1mr8DZnhw6i85ok,5217

@@ -82,7 +83,7 @@ langroid/parsing/config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/parsing/document_parser.py,sha256=WGnA5ADwMHliGJt6WW9rc4RiFXQcKU33b5zdPiGrtEY,24265
 langroid/parsing/image_text.py,sha256=sbLIQ5nHe2UnYUksBaQsmZGaX-X0qgEpPd7CEzi_z5M,910
 langroid/parsing/para_sentence_split.py,sha256=AJBzZojP3zpB-_IMiiHismhqcvkrVBQ3ZINoQyx_bE4,2000
-langroid/parsing/parse_json.py,sha256=
+langroid/parsing/parse_json.py,sha256=Rcy0iCyMMICk2QstrlJtW7zDPs9lOZ3N_mlaJGWkC40,5033
 langroid/parsing/parser.py,sha256=AgtmlVUvrkSG1l7-YZPX8rlldgXjh_HqXAMqpXkBxUo,11746
 langroid/parsing/repo_loader.py,sha256=3GjvPJS6Vf5L6gV2zOU8s-Tf1oq_fZm-IB_RL_7CTsY,29373
 langroid/parsing/routing.py,sha256=-FcnlqldzL4ZoxuDwXjQPNHgBe9F9-F4R6q7b_z9CvI,1232

@@ -126,11 +127,11 @@ langroid/vector_store/base.py,sha256=tuEPaxJcuU_39sRnUjjNd8D8n8IjP6jrbwQv_ecNpSw
 langroid/vector_store/chromadb.py,sha256=bZ5HjwgKgfJj1PUHsatYsrHv-v0dpOfMR2l0tJ2H0_A,7890
 langroid/vector_store/lancedb.py,sha256=9x7e_5zo7nLhMbhjYby2ZpBJ-vyawcC0_XAuatfHJf8,20517
 langroid/vector_store/meilisearch.py,sha256=6frB7GFWeWmeKzRfLZIvzRjllniZ1cYj3HmhHQICXLs,11663
-langroid/vector_store/momento.py,sha256=
+langroid/vector_store/momento.py,sha256=qR-zBF1RKVHQZPZQYW_7g-XpTwr46p8HJuYPCkfJbM4,10534
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
-langroid/vector_store/qdrantdb.py,sha256=
-pyproject.toml,sha256=
-langroid-0.
-langroid-0.
-langroid-0.
-langroid-0.
+langroid/vector_store/qdrantdb.py,sha256=HkcK6jOf-FEDoOiG94MpsYDJr98T7vZkDyG__1BlnWI,17354
+pyproject.toml,sha256=x0YGXi9ennkubMYlFO-Eeyp6h2YE_aOBbeRJrUtTm34,7063
+langroid-0.3.1.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.3.1.dist-info/METADATA,sha256=9WLpuCfOtRfjB30PZa2jwGmnlotxXRZgHqt6UWiNh4E,54402
+langroid-0.3.1.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.3.1.dist-info/RECORD,,
pyproject.toml
CHANGED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langroid"
-version = "0.2.12"
+version = "0.3.1"
 description = "Harness LLMs with Multi-Agent Programming"
 authors = ["Prasad Chalasani <pchalasani@gmail.com>"]
 readme = "README.md"

@@ -10,11 +10,11 @@ include = ["pyproject.toml"]

 # =============== MAIN DEPS ==============
 [tool.poetry.dependencies]
-python = "
+python = ">=3.10,<3.13"

 # =========== OPTIONALS ==============================
 chromadb = {version=">=0.4.21, <=0.4.23", optional=true}
-momento = {version="
+momento = {version=">=1.10.2, < 1.21", optional=true}
 unstructured = {extras = ["docx", "pptx", "pdf"], version = ">=0.10.16,<0.10.18", optional=true}
 sentence-transformers = {version="^2.2.2", optional=true}
 torch = {version="^2.0.0", optional=true}

@@ -39,6 +39,7 @@ pyarrow = {version="15.0.0", optional=true}
 pdfplumber = {version="^0.10.2", optional=true}
 python-docx = {version="^1.1.0", optional=true}
 scrapy = {version="^2.11.0", optional=true}
+fastembed = {version="^0.3.1", optional=true}

 # ====CORE================================
 pyyaml = "^6.0.1"

@@ -111,6 +112,7 @@ all = [
   "metaphor-python", "neo4j",
   "litellm",
   "chainlit", "python-socketio",
+  "fastembed"
 ]
 # more granular groupings
 lancedb = ["lancedb", "tantivy", "pyarrow"]

@@ -135,6 +137,7 @@ mkdocs = [
 ]
 meilisearch = ["meilisearch-python-sdk"]
 momento = ["momento"]
+fastembed = ["fastembed"]


 # ================= DEV DEPS =================
{langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/LICENSE
File without changes

{langroid-0.2.12.dist-info → langroid-0.3.1.dist-info}/WHEEL
File without changes