langroid 0.1.143__py3-none-any.whl → 0.1.145__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that registry.
- langroid/agent/base.py +0 -6
- langroid/agent/chat_agent.py +14 -8
- langroid/agent/special/recipient_validator_agent.py +7 -0
- langroid/agent/special/relevance_extractor_agent.py +1 -1
- langroid/agent/special/retriever_agent.py +1 -1
- langroid/agent/special/table_chat_agent.py +2 -0
- langroid/agent/task.py +42 -8
- langroid/language_models/openai_gpt.py +4 -2
- langroid/mytypes.py +3 -3
- langroid/parsing/parser.py +2 -2
- langroid/vector_store/chromadb.py +3 -1
- langroid/vector_store/lancedb.py +9 -25
- langroid/vector_store/meilisearch.py +1 -1
- {langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/METADATA +6 -5
- {langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/RECORD +17 -17
- {langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/LICENSE +0 -0
- {langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/WHEEL +0 -0
langroid/agent/base.py
CHANGED
@@ -632,12 +632,6 @@ class Agent(ABC):

         str_doc_results = [r for r in results_list if isinstance(r, str)]
         final = "\n".join(str_doc_results)
-        if final == "":
-            logger.warning(
-                """final result from a tool handler should not be empty str, since
-                it would be considered an invalid result and other responders
-                will be tried, and we may not necessarily want that"""
-            )
         return final

     def handle_message_fallback(
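The removed warning documented a behavior that still holds: a tool handler that returns an empty string produces an invalid result, and other responders are then tried. A minimal, hypothetical sketch of a handler that avoids this by returning a non-empty sentinel (the tool and agent names are illustrative, not part of langroid):

```python
from langroid.agent.chat_agent import ChatAgent
from langroid.agent.tool_message import ToolMessage

class SQLQueryTool(ToolMessage):  # hypothetical tool, for illustration only
    request: str = "sql_query"
    purpose: str = "Run a read-only SQL query"
    query: str

class DBAgent(ChatAgent):
    # By convention, the handler method is named after the tool's `request` field.
    def sql_query(self, msg: SQLQueryTool) -> str:
        rows: list[str] = []  # stand-in for real query results
        if not rows:
            # Return a non-empty sentinel: an empty string would be treated as
            # an invalid result, and other responders would then be tried.
            return "EMPTY: the query returned no rows"
        return "\n".join(rows)
```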
langroid/agent/chat_agent.py
CHANGED
@@ -465,6 +465,8 @@ class ChatAgent(Agent):
         if self.llm is None:
             return None
         hist, output_len = self._prep_llm_messages(message)
+        if len(hist) == 0:
+            return None
         with StreamingIfAllowed(self.llm, self.llm.get_stream()):
             response = self.llm_response_messages(hist, output_len)
         # TODO - when response contains function_call we should include
@@ -787,12 +789,14 @@ class ChatAgent(Agent):
         """
         # explicitly call THIS class's respond method,
         # not a derived class's (or else there would be infinite recursion!)
+        n_msgs = len(self.message_history)
         with StreamingIfAllowed(self.llm, self.llm.get_stream()):  # type: ignore
             response = cast(ChatDocument, ChatAgent.llm_response(self, message))
-        #
-        # user message and the
-
-        self.message_history.pop()
+        # If there is a response, then we will have two additional
+        # messages in the message history, i.e. the user message and the
+        # assistant response. We want to (carefully) remove these two messages.
+        self.message_history.pop() if len(self.message_history) > n_msgs else None
+        self.message_history.pop() if len(self.message_history) > n_msgs else None
         return response

     async def llm_response_forget_async(self, message: str) -> ChatDocument:
@@ -801,14 +805,16 @@ class ChatAgent(Agent):
         """
         # explicitly call THIS class's respond method,
         # not a derived class's (or else there would be infinite recursion!)
+        n_msgs = len(self.message_history)
         with StreamingIfAllowed(self.llm, self.llm.get_stream()):  # type: ignore
             response = cast(
                 ChatDocument, await ChatAgent.llm_response_async(self, message)
             )
-        #
-        # user message and the
-
-        self.message_history.pop()
+        # If there is a response, then we will have two additional
+        # messages in the message history, i.e. the user message and the
+        # assistant response. We want to (carefully) remove these two messages.
+        self.message_history.pop() if len(self.message_history) > n_msgs else None
+        self.message_history.pop() if len(self.message_history) > n_msgs else None
         return response

     def chat_num_tokens(self, messages: Optional[List[LLMMessage]] = None) -> int:
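The `llm_response_forget` changes record the history length before the call and guard each `pop()` against it, so nothing is removed when the LLM produced no response (and therefore appended no messages). A standalone sketch of the same pattern, independent of langroid's classes:

```python
from typing import Callable, List, Optional

def forgetful_call(
    history: List[str], respond: Callable[[List[str]], Optional[str]]
) -> Optional[str]:
    """Call `respond`, then restore `history` to its prior length."""
    n_msgs = len(history)
    response = respond(history)
    # Only remove entries the call actually appended (user msg + reply);
    # if nothing was appended, leave the history untouched.
    while len(history) > n_msgs:
        history.pop()
    return response
```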
langroid/agent/special/recipient_validator_agent.py
CHANGED
@@ -63,6 +63,13 @@ class RecipientValidator(ChatAgent):
         self.config: RecipientValidatorConfig = config
         self.llm = None
         self.vecdb = None
+        logger.warning(
+            """
+            RecipientValidator is deprecated. Use RecipientTool instead:
+            See code at langroid/agent/tools/recipient_tool.py, and usage examples in
+            tests/main/test_multi_agent_complex.py and
+            """
+        )

     def user_response(
         self,
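The deprecation points to `RecipientTool` at the path named in the warning. A hedged sketch of the replacement usage, assuming the usual `enable_message` mechanism for attaching tools to a `ChatAgent`:

```python
from langroid.agent.chat_agent import ChatAgent, ChatAgentConfig
from langroid.agent.tools.recipient_tool import RecipientTool

agent = ChatAgent(ChatAgentConfig(name="Coordinator"))
# Instead of wrapping the task with a RecipientValidator agent, enable the
# tool so the LLM must explicitly name an intended recipient in its messages.
agent.enable_message(RecipientTool)
```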
langroid/agent/special/relevance_extractor_agent.py
CHANGED
@@ -102,7 +102,7 @@ class RelevanceExtractorAgent(ChatAgent):
         spec = msg.segment_list
         if len(self.message_history) == 0:
             return NO_ANSWER
-        if spec is None or spec.strip()
+        if spec is None or spec.strip() in ["", NO_ANSWER]:
             return NO_ANSWER
         assert self.numbered_passage is not None, "No numbered passage"
         # assume this has numbered segments
langroid/agent/special/table_chat_agent.py
CHANGED
@@ -49,6 +49,8 @@ Once you have the answer to the question, say DONE and show me the answer.
 If you receive an error message, try using the `run_code` tool/function
 again with the corrected code.

+VERY IMPORTANT: When using the `run_code` tool/function, DO NOT EXPLAIN ANYTHING,
+SIMPLY USE THE TOOL, with the CODE.
 Start by asking me what I want to know about the data.
 """

langroid/agent/task.py
CHANGED
@@ -66,6 +66,7 @@ class Task:
         interactive: bool = True,
         only_user_quits_root: bool = True,
         erase_substeps: bool = False,
+        allow_null_result: bool = True,
     ):
         """
         A task to be performed by an agent.
@@ -96,6 +97,9 @@ class Task:
                 erase all subtask agents' `message_history`.
                 Note: erasing can reduce prompt sizes, but results in repetitive
                 sub-task delegation.
+            allow_null_result (bool): if true, allow null (empty or NO_ANSWER)
+                as the result of a step or overall task result.
+                Optional, default is True.
         """
         if agent is None:
             agent = ChatAgent()
@@ -115,6 +119,8 @@ class Task:
         self.tsv_logger: None | logging.Logger = None
         self.color_log: bool = False if settings.notebook else True
         self.agent = agent
+        self.step_progress = False
+        self.task_progress = False
         self.name = name or agent.config.name
         self.default_human_response = default_human_response
         self.interactive = interactive
@@ -129,6 +135,7 @@ class Task:
         # just the first outgoing message and last incoming message.
         # Note this also completely erases sub-task agents' message_history.
         self.erase_substeps = erase_substeps
+        self.allow_null_result = allow_null_result

         agent_entity_responders = agent.entity_responders()
         agent_entity_responders_async = agent.entity_responders_async()
@@ -300,7 +307,7 @@ class Task:
     ) -> Optional[ChatDocument]:
         """Synchronous version of `run_async()`.
         See `run_async()` for details."""
-
+        self.task_progress = False
         assert (
             msg is None or isinstance(msg, str) or isinstance(msg, ChatDocument)
         ), f"msg arg in Task.run() must be None, str, or ChatDocument, not {type(msg)}"
@@ -364,7 +371,7 @@ class Task:
         # have come from another LLM), as far as this agent is concerned, the initial
         # message can be considered to be from the USER
         # (from the POV of this agent's LLM).
-
+        self.task_progress = False
         if (
             isinstance(msg, ChatDocument)
             and msg.metadata.recipient != ""
@@ -463,6 +470,7 @@ class Task:
         Synchronous version of `step_async()`. See `step_async()` for details.
         """
         result = None
+        self.step_progress = False
         parent = self.pending_message
         recipient = (
             ""
@@ -533,6 +541,7 @@ class Task:
         different context.
         """
         result = None
+        self.step_progress = False
         parent = self.pending_message
         recipient = (
             ""
@@ -615,18 +624,34 @@ class Task:
             if result.attachment is None:
                 self.pending_message.attachment = old_attachment
             self.log_message(self.pending_sender, result, mark=True)
+            self.step_progress = True
+            self.task_progress = True
             return True
         else:
             self.log_message(r, result)
             return False

     def _process_invalid_step_result(self, parent: ChatDocument | None) -> None:
-
-
-
-
-
-
+        """
+        Since step had no valid result, decide whether to update the
+        self.pending_message to a NO_ANSWER message from the opposite entity,
+        or leave it as is.
+        Args:
+            parent (ChatDocument|None): parent message of the current message
+        """
+        if not self.task_progress or self.allow_null_result:
+            # There has been no progress at all in this task, so we
+            # update the pending_message to a dummy NO_ANSWER msg
+            # from the entity 'opposite' to the current pending_sender,
+            # so we show "progress" and avoid getting stuck in an infinite loop.
+            responder = (
+                Entity.LLM if self.pending_sender == Entity.USER else Entity.USER
+            )
+            self.pending_message = ChatDocument(
+                content=NO_ANSWER,
+                metadata=ChatDocMetaData(sender=responder, parent=parent),
+            )
+            self.pending_sender = responder
         self.log_message(self.pending_sender, self.pending_message, mark=True)

     def _show_pending_message_if_debug(self) -> None:
@@ -745,6 +770,15 @@ class Task:
             # for top-level task, only user can quit out
             return user_quit

+        if (
+            not self.step_progress
+            and self.pending_sender == Entity.LLM
+            and not self.llm_delegate
+        ):
+            # LLM is NOT driving the task, and no progress in latest step,
+            # and it is NOT the LLM's turn to respond, so we are done.
+            return True
+
         return (
             # no valid response from any entity/agent in current turn
             self.pending_message is None
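Together, `allow_null_result` and the new `step_progress`/`task_progress` flags let a task substitute a dummy NO_ANSWER message when a step yields nothing, instead of stalling or looping. A hedged usage sketch based only on the constructor parameters visible in the diff above:

```python
from langroid.agent.chat_agent import ChatAgent
from langroid.agent.task import Task

agent = ChatAgent()
# allow_null_result=True (the default) lets a step or the overall task
# produce an empty / NO_ANSWER result; setting it to False means the dummy
# NO_ANSWER substitution only happens when the task has made no progress at all.
task = Task(
    agent,
    name="assistant",
    interactive=False,
    allow_null_result=False,
)
# result = task.run("What is 2 + 3?")  # requires a configured LLM to actually run
```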
langroid/language_models/openai_gpt.py
CHANGED
@@ -469,6 +469,7 @@ class OpenAIGPT(LanguageModel):
             return True, has_function, function_name, function_args, completion
         return False, has_function, function_name, function_args, completion

+    @retry_with_exponential_backoff
     def _stream_response(  # type: ignore
         self, response, chat: bool = False
     ) -> Tuple[LLMResponse, Dict[str, Any]]:
@@ -520,6 +521,7 @@ class OpenAIGPT(LanguageModel):
             is_async=False,
         )

+    @async_retry_with_exponential_backoff
     async def _stream_response_async(  # type: ignore
         self, response, chat: bool = False
     ) -> Tuple[LLMResponse, Dict[str, Any]]:
@@ -1084,7 +1086,7 @@ class OpenAIGPT(LanguageModel):
         if self.get_stream() and not cached:
             llm_response, openai_response = self._stream_response(response, chat=True)
             self._cache_store(hashed_key, openai_response)
-            return llm_response
+            return llm_response  # type: ignore
         if isinstance(response, dict):
             response_dict = response
         else:
@@ -1115,7 +1117,7 @@ class OpenAIGPT(LanguageModel):
                 response, chat=True
             )
             self._cache_store(hashed_key, openai_response)
-            return llm_response
+            return llm_response  # type: ignore
         if isinstance(response, dict):
             response_dict = response
         else:
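The two added decorators wrap the streaming readers in retries; the names suggest the standard exponential-backoff pattern. For reference, a generic sketch of such a decorator (not langroid's actual implementation):

```python
import random
import time
from functools import wraps
from typing import Any, Callable

def retry_with_exponential_backoff(
    func: Callable[..., Any],
    max_retries: int = 5,
    base_delay: float = 1.0,
) -> Callable[..., Any]:
    """Retry `func` on exception, doubling the delay (with jitter) each time."""
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> Any:
        delay = base_delay
        for attempt in range(max_retries):
            try:
                return func(*args, **kwargs)
            except Exception:
                if attempt == max_retries - 1:
                    raise  # out of retries: surface the original error
                time.sleep(delay * (1 + random.random()))  # backoff with jitter
                delay *= 2
        raise RuntimeError("unreachable")
    return wrapper
```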
langroid/mytypes.py
CHANGED
@@ -27,12 +27,12 @@ class DocMetaData(BaseModel):

     source: str = "context"
     is_chunk: bool = False  # if it is a chunk, don't split
-    id: str
+    id: str = ""  # unique id for the document
     window_ids: List[str] = []  # for RAG: ids of chunks around this one

-    def
+    def dict_bool_int(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
         """
-
+        Special dict method to convert bool fields to int, to appease some
         downstream libraries, e.g. Chroma which complains about bool fields in
         metadata.
         """
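`dict_bool_int` exists because, per its docstring, Chroma complains about boolean metadata fields; the method emits the usual Pydantic `dict()` output with bools coerced to ints. A minimal sketch of the idea (a stand-in model, not the exact langroid implementation):

```python
from typing import Any, Dict
from pydantic import BaseModel

class Meta(BaseModel):  # stand-in for DocMetaData, for illustration only
    source: str = "context"
    is_chunk: bool = False
    id: str = ""

    def dict_bool_int(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
        # Same as .dict(), but with bool fields converted to 0/1 so that
        # stores like Chroma accept the metadata.
        d = self.dict(*args, **kwargs)
        return {k: (int(v) if isinstance(v, bool) else v) for k, v in d.items()}

print(Meta(is_chunk=True).dict_bool_int())
# {'source': 'context', 'is_chunk': 1, 'id': ''}
```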
langroid/parsing/parser.py
CHANGED
@@ -65,8 +65,8 @@ class Parser:
         orig_id_to_ids: Dict[str, List[str]] = {}
         for orig_id, id in zip(orig_ids, ids):
             if orig_id not in orig_id_to_ids:
-                orig_id_to_ids[orig_id] = []
-                orig_id_to_ids[orig_id].append(id)
+                orig_id_to_ids[orig_id] = []
+            orig_id_to_ids[orig_id].append(id)

         # now each orig_id maps to a sequence of ids within a single doc

langroid/vector_store/chromadb.py
CHANGED
@@ -114,7 +114,9 @@ class ChromaDB(VectorStore):
             return
         contents: List[str] = [document.content for document in documents]
         # convert metadatas to dicts so chroma can handle them
-        metadata_dicts: List[dict[str, Any]] = [
+        metadata_dicts: List[dict[str, Any]] = [
+            d.metadata.dict_bool_int() for d in documents
+        ]
         for m in metadata_dicts:
             # chroma does not handle non-atomic types in metadata
             m["window_ids"] = ",".join(m["window_ids"])
langroid/vector_store/lancedb.py
CHANGED
@@ -14,9 +14,7 @@ from langroid.embedding_models.models import OpenAIEmbeddingsConfig
 from langroid.mytypes import Document, EmbeddingFunction
 from langroid.utils.configuration import settings
 from langroid.utils.pydantic_utils import (
-    flatten_pydantic_instance,
     flatten_pydantic_model,
-    nested_dict_from_flat,
 )
 from langroid.vector_store.base import VectorStore, VectorStoreConfig

@@ -191,7 +189,7 @@ class LanceDB(VectorStore):
         else:
             logger.warning("Recreating fresh collection")
             tbl = self.client.create_table(
-                collection_name, schema=self.
+                collection_name, schema=self.schema, mode="overwrite"
             )
         if settings.debug:
             level = logger.getEffectiveLevel()
@@ -217,12 +215,10 @@ class LanceDB(VectorStore):
         def make_batches() -> Generator[List[Dict[str, Any]], None, None]:
             for i in range(0, len(ids), b):
                 yield [
-
-
-
-
-                        payload=doc,
-                    )
+                    self.schema(  # type: ignore
+                        id=ids[i],
+                        vector=embedding_vecs[i],
+                        payload=doc,
                     )
                     for i, doc in enumerate(documents[i : i + b])
                 ]
@@ -238,12 +234,8 @@ class LanceDB(VectorStore):
             raise ValueError("No collection name set, cannot retrieve docs")
         tbl = self.client.open_table(self.config.collection_name)
         records = tbl.search(None).to_arrow().to_pylist()
-
-
-                **(nested_dict_from_flat(rec, sub_dict="payload"))
-            )
-            for rec in records
-        ]
+        doc_cls = self.config.document_class
+        docs = [doc_cls(**rec["payload"]) for rec in records]
         return docs

     def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
@@ -256,10 +248,7 @@ class LanceDB(VectorStore):
             for _id in _ids
         ]
         doc_cls = self.config.document_class
-        docs = [
-            doc_cls(**(nested_dict_from_flat(rec, sub_dict="payload")))
-            for rec in records
-        ]
+        docs = [doc_cls(**rec["payload"]) for rec in records]
         return docs

     def similar_texts_with_scores(
@@ -281,12 +270,7 @@ class LanceDB(VectorStore):

         # note _distance is 1 - cosine
         scores = [1 - rec["_distance"] for rec in records]
-        docs = [
-            self.config.document_class(
-                **(nested_dict_from_flat(rec, sub_dict="payload"))
-            )
-            for rec in records
-        ]
+        docs = [self.config.document_class(**rec["payload"]) for rec in records]
         if len(docs) == 0:
             logger.warning(f"No matches found for {text}")
             return []
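The LanceDB store now keeps each document as a nested `payload` field of the table's schema model instead of flattening it, so reads become a plain reconstruction from `rec["payload"]`. A hedged sketch of that round trip, using stand-in Pydantic models rather than LanceDB itself:

```python
from typing import Any, Dict, List
from pydantic import BaseModel

class Payload(BaseModel):   # stand-in for the configured document class
    content: str
    source: str = "context"

class Record(BaseModel):    # stand-in for the LanceDB table schema model
    id: str
    vector: List[float]
    payload: Payload

# Write side: build records with the document nested under `payload`.
rec = Record(id="1", vector=[0.1, 0.2], payload=Payload(content="hello"))
row: Dict[str, Any] = rec.dict()

# Read side: no un-flattening needed, just rebuild the document class.
doc = Payload(**row["payload"])
print(doc.content)  # hello
```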
langroid/vector_store/meilisearch.py
CHANGED
@@ -165,7 +165,7 @@ class MeiliSearch(VectorStore):
         async with self.client() as client:
             index = client.index(collection_name)
             await index.add_documents_in_batches(
-                documents=documents,
+                documents=documents,
                 batch_size=self.config.batch_size,
                 primary_key=self.config.primary_key,
             )
{langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.143
+Version: 0.1.145
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -29,11 +29,11 @@ Requires-Dist: flake8 (>=6.0.0,<7.0.0)
 Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
-Requires-Dist: lancedb (>=0.3.
+Requires-Dist: lancedb (>=0.3.6,<0.4.0)
 Requires-Dist: litellm (>=1.0.0,<2.0.0) ; extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
-Requires-Dist: meilisearch-python-sdk (>=2.
+Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
 Requires-Dist: mkdocs (>=1.4.2,<2.0.0)
 Requires-Dist: mkdocs-awesome-pages-plugin (>=2.8.0,<3.0.0)
 Requires-Dist: mkdocs-gen-files (>=0.4.0,<0.5.0)
@@ -137,8 +137,9 @@ into simplifying the developer experience; it does not use `Langchain`.
 We welcome contributions -- See the [contributions](./CONTRIBUTING.md) document
 for ideas on what to contribute.

-
-
+Are you building LLM Applications, or want help with Langroid for your company,
+or want to prioritize Langroid features for your company use-cases?
+[Prasad Chalasani](https://www.linkedin.com/in/pchalasani/) is available for consulting
 (advisory/development): pchalasani at gmail dot com.

 Sponsorship is also accepted via [GitHub Sponsors](https://github.com/sponsors/langroid)
{langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 langroid/__init__.py,sha256=-T6zCLCy-0U_h4iDmC2d6OUXjqopFe0XGrclYgbnBZk,465
 langroid/agent/__init__.py,sha256=ZqDw3Ktw7XGDl6mC8DN61F71V4ckf0rBoEOydH9l6C4,428
-langroid/agent/base.py,sha256=
+langroid/agent/base.py,sha256=2xnNig9pRv5IJR-_up5QwRrX7UYMeP56BYf1MDDY0n0,31982
 langroid/agent/batch.py,sha256=Cg7Qv1yGi_M9rMl38_4-hjXPsoLlZrOSXDhbOFqUcKY,5593
-langroid/agent/chat_agent.py,sha256=
+langroid/agent/chat_agent.py,sha256=yK56EAuT-RxRWBbfVaevl0qW1h8neWL0lktPQc34uUc,35626
 langroid/agent/chat_document.py,sha256=dw0m_00qJgOhbzCkEsVBAiktM6BJ62mV8_piu5GruXM,7008
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
 langroid/agent/openai_assistant.py,sha256=8YDzpvLWWsa_l27F5136rV-TE3N4oKDjYiTfxMCsteI,32328
 langroid/agent/special/__init__.py,sha256=Iex-fxO7sTM2LKAOk-JQLzovk-zlzqi5PwxiMBHDz6o,681
 langroid/agent/special/doc_chat_agent.py,sha256=ERZs1phXGA_18114d7qv_Zl4-TPte-z2v6dgA75RAq0,37264
-langroid/agent/special/recipient_validator_agent.py,sha256=
-langroid/agent/special/relevance_extractor_agent.py,sha256=
-langroid/agent/special/retriever_agent.py,sha256=
+langroid/agent/special/recipient_validator_agent.py,sha256=WmLnQUdkrvWLDfFeS3Q7CnrfkDk0-trQxitQSuR7fpE,6315
+langroid/agent/special/relevance_extractor_agent.py,sha256=19LwSZ09q8LyiU-iKrOOuG0uboyoUBVL3zXeteHKiLs,4162
+langroid/agent/special/retriever_agent.py,sha256=wUCm0PAzHNn4aGb2Q-7dwYIAxX2_RwrZQenwDeGxOIg,6564
 langroid/agent/special/sql/__init__.py,sha256=3kR5nC0wnYIzmMrr9L8RJa7JAJpbwBLx7KKygiwz0v0,111
 langroid/agent/special/sql/sql_chat_agent.py,sha256=Ua_gfK_1k5ct59Zkbe78bzs-2jabtFkEVx76a0pGs9Y,12867
 langroid/agent/special/sql/utils/__init__.py,sha256=_IBHt3iNXvPqxvDrs5_T86qdj0gPugVGnGNi6Cx7F-I,238
@@ -19,8 +19,8 @@ langroid/agent/special/sql/utils/description_extractors.py,sha256=GcQ82IhKPInS_3
 langroid/agent/special/sql/utils/populate_metadata.py,sha256=zRjw31a1ZXvpx9bcmbtC2mngdHl-bp1ZNHStcPG8_Qk,2712
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
-langroid/agent/special/table_chat_agent.py,sha256=
-langroid/agent/task.py,sha256=
+langroid/agent/special/table_chat_agent.py,sha256=G7QIhZZmJwbM4d51C4cYTvfvtSKfBX0qKFFisuS2xSg,7807
+langroid/agent/task.py,sha256=ie_8mz0NWzhO4tTrPE2uOK5vcKcl37-9OxSmxscPCgY,38004
 langroid/agent/tool_message.py,sha256=b8qcO2fwkmUR49dgMzpXC9MnXdYh86I2VHFH7bv7D0w,6250
 langroid/agent/tools/__init__.py,sha256=6le5y_iPEHwh7Tli_0MtwCGOjy3tPQfAdfDC7WBg2e0,172
 langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsWMnpaZpw,3789
@@ -43,12 +43,12 @@ langroid/language_models/azure_openai.py,sha256=_OOEoZOziI3NDOH_8t3qmh8IDWoHESQe
 langroid/language_models/base.py,sha256=jUEUqDWJBVxIxmG6U4Ysg2QKGOnP_CLmRuEMicsSwUw,20596
 langroid/language_models/config.py,sha256=PXcmEUq52GCDj2sekt8F9E1flWyyNjP2S0LTRs7T6Kg,269
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=K3UtPVrd6OdpsGnYfb2EwMiS6XqpHFivwb3oATbus8k,42204
 langroid/language_models/prompt_formatter/__init__.py,sha256=wj2e6j7R9d3m63HCbSDY1vosjFuhHLQVlgBrq8iqF38,197
 langroid/language_models/prompt_formatter/base.py,sha256=2y_GcwhstvB5ih3haS7l5Fv79jVnFJ_vEw1jqWJzB9k,1247
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
 langroid/language_models/utils.py,sha256=3stQOt3sAdbGT70thurlxfnQ4xPxH75YjFm-jaRuXlg,4474
-langroid/mytypes.py,sha256
+langroid/mytypes.py,sha256=-0q-SyicLbdjFSIEcwt5u-EwhRcMWllSoWYE3OWk78M,2501
 langroid/parsing/__init__.py,sha256=_EZ8iuixxU39zuaydtfjyap8g9C_c1dnrCQ0QR81U2E,340
 langroid/parsing/agent_chats.py,sha256=sbZRV9ujdM5QXvvuHVjIi2ysYSYlap-uqfMMUKulrW0,1068
 langroid/parsing/code-parsing.md,sha256=--cyyNiSZSDlIwcjAV4-shKrSiRe2ytF3AdSoS_hD2g,3294
@@ -57,7 +57,7 @@ langroid/parsing/config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/parsing/document_parser.py,sha256=YC3IXQ9ErpBGBZh6Be9gfJWHcTwGTSMfNQMT5ARrj5g,14615
 langroid/parsing/json.py,sha256=MVqBUfInALQm1QKbcfEvLzWxBz_UztCIyGk7AK5uFPo,1650
 langroid/parsing/para_sentence_split.py,sha256=AJBzZojP3zpB-_IMiiHismhqcvkrVBQ3ZINoQyx_bE4,2000
-langroid/parsing/parser.py,sha256=
+langroid/parsing/parser.py,sha256=BwVJboobG71N08w5LC7Tu36LI4pEJoSgAdiBSLChWGY,10251
 langroid/parsing/repo_loader.py,sha256=4qCyRRHCKIYd8F1ghT-D8ko1C2sXpF7UYP1L5Im1hRE,27705
 langroid/parsing/search.py,sha256=xmQdAdTIwZ0REEUeQVFlGZlqf7k8Poah7-ALuyW7Ov0,8440
 langroid/parsing/spider.py,sha256=w_mHR1B4KOmxsBLoVI8kMkMTEbwTzeK3ath9fOMJrTk,3043
@@ -91,13 +91,13 @@ langroid/utils/web/login.py,sha256=1iz9eUAHa87vpKIkzwkmFa00avwFWivDSAr7QUhK7U0,2
 langroid/utils/web/selenium_login.py,sha256=mYI6EvVmne34N9RajlsxxRqJQJvV-WG4LGp6sEECHPw,1156
 langroid/vector_store/__init__.py,sha256=NhAXOCKX_x2whfghOn44e0O3-vV0nJRz6ZLsCBqYFyQ,242
 langroid/vector_store/base.py,sha256=60XkAcX7OLkseYE66p3XOcXs7KB9udJbOV5IBfzNBDs,12188
-langroid/vector_store/chromadb.py,sha256=
-langroid/vector_store/lancedb.py,sha256=
-langroid/vector_store/meilisearch.py,sha256=
+langroid/vector_store/chromadb.py,sha256=tQ_qNWgboLpZs4MG21za9QYIUc6fNQe2XmTBjSn6_ak,7125
+langroid/vector_store/lancedb.py,sha256=NWG2OS51jL141ATu6L4AVPECShBiR4GD2BW0ZQVnzf0,10919
+langroid/vector_store/meilisearch.py,sha256=kxPtvZ3_fM8NxILps1nI50YbpJP6KqhAfY3idwcblh8,11238
 langroid/vector_store/momento.py,sha256=91Ep3OVkGzDM60aPT1Y_Da5SS1_1lrR9no-CNzHrBoY,10027
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=CcIAGale7LO7V_4CeRihIUKqlwi4LxUldtwPKVOvgUg,11848
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
+langroid-0.1.145.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.145.dist-info/METADATA,sha256=YNfr4Ceiwock4mAgZorQouU6oUhPgxczKleXWn36GnY,41526
+langroid-0.1.145.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.145.dist-info/RECORD,,
{langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/LICENSE
File without changes
{langroid-0.1.143.dist-info → langroid-0.1.145.dist-info}/WHEEL
File without changes