langroid 0.1.178__py3-none-any.whl → 0.1.181__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/base.py +13 -0
- langroid/agent/batch.py +3 -3
- langroid/agent/task.py +29 -6
- langroid/language_models/openai_gpt.py +9 -5
- langroid/language_models/prompt_formatter/hf_formatter.py +10 -0
- langroid/parsing/utils.py +48 -0
- {langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/METADATA +2 -2
- {langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/RECORD +10 -10
- {langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/LICENSE +0 -0
- {langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/WHEEL +0 -0
langroid/agent/base.py
CHANGED
@@ -536,6 +536,19 @@ class Agent(ABC):
         cdoc.metadata.tool_ids = [] if isinstance(msg, str) else msg.metadata.tool_ids
         return cdoc
 
+    def has_tool_message_attempt(self, msg: str | ChatDocument | None) -> bool:
+        """Check whether msg contains a Tool/fn-call attempt (by the LLM)"""
+        if msg is None:
+            return False
+        try:
+            tools = self.get_tool_messages(msg)
+            return len(tools) > 0
+        except ValidationError:
+            # there is a tool/fn-call attempt but had a validation error,
+            # so we still consider this a tool message "attempt"
+            return True
+        return False
+
     def get_tool_messages(self, msg: str | ChatDocument) -> List[ToolMessage]:
         if isinstance(msg, str):
             return self.get_json_tool_messages(msg)
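The key behavior in the hunk above is that a tool call which fails validation still counts as an "attempt". Below is a minimal standalone sketch of that semantics, not langroid code: SquareTool and the JSON parsing are hypothetical stand-ins for langroid's ToolMessage machinery.

import json
from typing import List, Optional

from pydantic import BaseModel, ValidationError


class SquareTool(BaseModel):  # stand-in for a langroid ToolMessage subclass
    request: str
    number: int


def get_tool_messages(msg: str) -> List[SquareTool]:
    """Parse a JSON tool call; raises ValidationError on malformed fields."""
    try:
        payload = json.loads(msg)
    except json.JSONDecodeError:
        return []  # no tool syntax at all -> no attempt
    if not isinstance(payload, dict):
        return []
    return [SquareTool(**payload)]  # may raise ValidationError


def has_tool_message_attempt(msg: Optional[str]) -> bool:
    if msg is None:
        return False
    try:
        return len(get_tool_messages(msg)) > 0
    except ValidationError:
        # malformed tool call: still an "attempt", so the agent should handle it
        return True


print(has_tool_message_attempt('{"request": "square", "number": "not-an-int"}'))  # True
print(has_tool_message_attempt("just plain text"))  # False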
langroid/agent/batch.py
CHANGED
@@ -49,9 +49,9 @@ def run_batch_tasks(
 
     async def _do_task(input: str | ChatDocument, i: int) -> Any:
         task_i = task.clone(i)
-        if task_i.agent.…
-            task_i.agent.…
-
+        if task_i.agent.llm is not None:
+            task_i.agent.llm.set_stream(False)
+        task_i.agent.config.show_stats = False
 
         result = await task_i.run_async(input)
         return output_map(result)
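An illustrative sketch (not the langroid implementation) of what the inner _do_task now does per input: clone the task, switch off streaming on the clone's LLM only if it has one, and silence per-run stats so concurrent runs don't interleave output. FakeLLM and FakeTask are hypothetical stand-ins.

import asyncio
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class FakeLLM:
    stream: bool = True

    def set_stream(self, on: bool) -> None:
        self.stream = on


@dataclass
class FakeTask:
    llm: Optional[FakeLLM] = field(default_factory=FakeLLM)
    show_stats: bool = True

    def clone(self, i: int) -> "FakeTask":
        return FakeTask()

    async def run_async(self, inp: str) -> str:
        await asyncio.sleep(0)  # stand-in for real agent work
        return inp.upper()


async def run_batch(task: FakeTask, inputs: List[str]) -> List[str]:
    async def _do_task(inp: str, i: int) -> str:
        task_i = task.clone(i)
        if task_i.llm is not None:  # mirrors the None-check added in the diff
            task_i.llm.set_stream(False)
        task_i.show_stats = False
        return await task_i.run_async(inp)

    return list(await asyncio.gather(*(_do_task(x, i) for i, x in enumerate(inputs))))


print(asyncio.run(run_batch(FakeTask(), ["a", "b", "c"])))  # ['A', 'B', 'C']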
langroid/agent/task.py
CHANGED
@@ -538,9 +538,21 @@ class Task:
             return error_doc
 
         responders: List[Responder] = self.non_human_responders.copy()
-
-
-
+
+        if (
+            Entity.USER in self.responders
+            and not self.human_tried
+            and not self.agent.has_tool_message_attempt(self.pending_message)
+        ):
+            # Give human first chance if they haven't been tried in last step,
+            # and the msg is not a tool-call attempt;
+            # This ensures human gets a chance to respond,
+            # other than to a LLM tool-call.
+            # When there's a tool msg attempt we want the
+            # Agent to be the next responder; this only makes a difference in an
+            # interactive setting: LLM generates tool, then we don't want user to
+            # have to respond, and instead let the agent_response handle the tool.
+
            responders.insert(0, Entity.USER)
 
         found_response = False
@@ -620,9 +632,20 @@ class Task:
             return error_doc
 
         responders: List[Responder] = self.non_human_responders_async.copy()
-
-
-
+
+        if (
+            Entity.USER in self.responders
+            and not self.human_tried
+            and not self.agent.has_tool_message_attempt(self.pending_message)
+        ):
+            # Give human first chance if they haven't been tried in last step,
+            # and the msg is not a tool-call attempt;
+            # This ensures human gets a chance to respond,
+            # other than to a LLM tool-call.
+            # When there's a tool msg attempt we want the
+            # Agent to be the next responder; this only makes a difference in an
+            # interactive setting: LLM generates tool, then we don't want user to
+            # have to respond, and instead let the agent_response handle the tool.
            responders.insert(0, Entity.USER)
 
         found_response = False
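A standalone sketch (not Task internals) of the ordering rule these two hunks add: the human responder is put first only when the user is enabled, was not just tried, and the pending message is not a tool-call attempt; otherwise the agent gets to handle the tool first. The Entity values mimic langroid's; the helper function is hypothetical.

from enum import Enum
from typing import List


class Entity(str, Enum):
    USER = "user"
    LLM = "llm"
    AGENT = "agent"


def order_responders(
    non_human: List[Entity],
    user_enabled: bool,
    human_tried: bool,
    has_tool_attempt: bool,
) -> List[Entity]:
    responders = non_human.copy()
    if user_enabled and not human_tried and not has_tool_attempt:
        responders.insert(0, Entity.USER)
    return responders


# LLM just emitted a tool call: the agent responds next, the human is not prompted.
print(order_responders([Entity.AGENT, Entity.LLM], True, False, True))
# Plain LLM text: the human gets the first chance to respond.
print(order_responders([Entity.AGENT, Entity.LLM], True, False, False))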
langroid/language_models/openai_gpt.py
CHANGED
@@ -212,6 +212,7 @@ class OpenAIGPTConfig(LLMConfig):
     # a string that roughly matches a HuggingFace chat_template,
     # e.g. "mistral-instruct-v0.2 (a fuzzy search is done to find the closest match)
     formatter: str | None = None
+    hf_formatter: HFFormatter | None = None
 
     def __init__(self, **kwargs) -> None:  # type: ignore
         local_model = "api_base" in kwargs and kwargs["api_base"] is not None
@@ -264,6 +265,7 @@ class OpenAIGPTConfig(LLMConfig):
             """
         )
         litellm.telemetry = False
+        litellm.drop_params = True  # drop un-supported params without crashing
         self.seed = None  # some local mdls don't support seed
         keys_dict = litellm.validate_environment(self.chat_model)
         missing_keys = keys_dict.get("missing_keys", [])
@@ -368,6 +370,11 @@ class OpenAIGPT(LanguageModel):
             # e.g. "local/localhost:8000/v1//mistral-instruct-v0.2"
             self.config.formatter = formatter
 
+        if self.config.formatter is not None:
+            self.config.hf_formatter = HFFormatter(
+                HFPromptFormatterConfig(model_name=self.config.formatter)
+            )
+
         # if model name starts with "litellm",
         # set the actual model name by stripping the "litellm/" prefix
         # and set the litellm flag to True
@@ -952,15 +959,12 @@ class OpenAIGPT(LanguageModel):
         )
         if self.config.use_completion_for_chat and not self.is_openai_chat_model():
             # only makes sense for non-OpenAI models
-            if self.config.formatter is None:
+            if self.config.formatter is None or self.config.hf_formatter is None:
                 raise ValueError(
                     """
                     `formatter` must be specified in config to use completion for chat.
                     """
                 )
-            formatter = HFFormatter(
-                HFPromptFormatterConfig(model_name=self.config.formatter)
-            )
             if isinstance(messages, str):
                 messages = [
                     LLMMessage(
@@ -968,7 +972,7 @@ class OpenAIGPT(LanguageModel):
                     ),
                     LLMMessage(role=Role.USER, content=messages),
                 ]
-            prompt = …
+            prompt = self.config.hf_formatter.format(messages)
             return self.generate(prompt=prompt, max_tokens=max_tokens)
         try:
             return self._chat(messages, max_tokens, functions, function_call)
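A toy sketch of the refactor in the last two hunks: the HuggingFace prompt formatter is now built once from the config when the model wrapper is created (config.hf_formatter) and reused by the chat-to-completion path, instead of being re-instantiated on every call. All classes below are stand-ins, not langroid's.

from dataclasses import dataclass
from typing import List, Optional


@dataclass
class ToyFormatter:  # stand-in for HFFormatter
    model_name: str

    def format(self, messages: List[str]) -> str:
        return "\n".join(f"[{self.model_name}] {m}" for m in messages)


@dataclass
class ToyConfig:  # stand-in for OpenAIGPTConfig
    formatter: Optional[str] = None
    hf_formatter: Optional[ToyFormatter] = None


class ToyLLM:  # stand-in for OpenAIGPT
    def __init__(self, config: ToyConfig) -> None:
        self.config = config
        if config.formatter is not None:
            # built once here; previously rebuilt inside every chat() call
            config.hf_formatter = ToyFormatter(config.formatter)

    def chat(self, messages: List[str]) -> str:
        if self.config.formatter is None or self.config.hf_formatter is None:
            raise ValueError("`formatter` must be specified to use completion for chat")
        return self.config.hf_formatter.format(messages)  # reuse the cached formatter


llm = ToyLLM(ToyConfig(formatter="mistral-instruct-v0.2"))
print(llm.chat(["You are a helpful assistant.", "Hello!"]))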
langroid/language_models/prompt_formatter/hf_formatter.py
CHANGED
@@ -63,6 +63,16 @@ class HFFormatter(PromptFormatter):
             raise ValueError(
                 f"Model {config.model_name} does not support chat template"
             )
+        else:
+            logger.warning(
+                f"""
+                Using HuggingFace {mdl.id} for prompt formatting:
+                This is the CHAT TEMPLATE. If this is not what you intended,
+                consider specifying a more complete model name for the formatter.
+
+                {self.tokenizer.chat_template}
+                """
+            )
 
     def format(self, messages: List[LLMMessage]) -> str:
         sys_msg, chat_msgs, user_msg = LanguageModel.get_chat_history_components(
langroid/parsing/utils.py
CHANGED
@@ -1,4 +1,5 @@
 import difflib
+import logging
 import random
 import re
 from functools import cache
@@ -8,9 +9,17 @@ from typing import Any, Iterable, List
 import nltk
 from faker import Faker
 
+from langroid.mytypes import Document
+from langroid.parsing.parser import Parser, ParsingConfig
+from langroid.parsing.repo_loader import RepoLoader
+from langroid.parsing.url_loader import URLLoader
+from langroid.parsing.urls import get_urls_and_paths
+
 Faker.seed(23)
 random.seed(43)
 
+logger = logging.getLogger(__name__)
+
 
 # Ensures the NLTK resource is available
 @cache
@@ -298,3 +307,42 @@ def extract_numbered_segments(s: str, specs: str) -> str:
         extracted_paragraphs.append(" ".join(extracted_segments))
 
     return "\n\n".join(extracted_paragraphs)
+
+
+def extract_content_from_path(
+    path: str | List[str], parsing: ParsingConfig
+) -> str | List[str]:
+    """
+    Extract the content from a file path or URL, or a list of file paths or URLs.
+
+    Args:
+        path (str | List[str]): The file path or URL, or a list of file paths or URLs.
+        parsing (ParsingConfig): The parsing configuration.
+
+    Returns:
+        str | List[str]: The extracted content if a single file path or URL is provided,
+            or a list of extracted contents if a
+            list of file paths or URLs is provided.
+    """
+    if isinstance(path, str):
+        path = [path]
+    elif isinstance(path, list) and len(path) == 0:
+        return ""
+    urls, path_list = get_urls_and_paths(path)
+    parser = Parser(parsing)
+    docs: List[Document] = []
+    try:
+        if len(urls) > 0:
+            loader = URLLoader(urls=urls, parser=parser)
+            docs = loader.load()
+        if len(path_list) > 0:
+            for p in path_list:
+                path_docs = RepoLoader.get_documents(p, parser=parser)
+                docs.extend(path_docs)
+    except Exception as e:
+        logger.warning(f"Error loading path {path}: {e}")
+        return ""
+    if len(docs) == 1:
+        return docs[0].content
+    else:
+        return [d.content for d in docs]
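A hedged usage sketch of the new helper, based only on the signature and docstring in the hunk above. The URLs and paths are hypothetical, network and file access is assumed, and ParsingConfig is assumed to be constructible with its defaults.

from langroid.parsing.parser import ParsingConfig
from langroid.parsing.utils import extract_content_from_path

cfg = ParsingConfig()

# A single URL (or file path) returns a single string of extracted content.
text = extract_content_from_path("https://example.com/article.html", cfg)

# A mixed list of URLs and local paths returns a list of extracted strings
# (or "" if loading fails, per the except branch above).
texts = extract_content_from_path(
    ["https://example.com/a.html", "docs/notes.pdf"],  # hypothetical inputs
    cfg,
)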
{langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.178
+Version: 0.1.181
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -35,7 +35,7 @@ Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
 Requires-Dist: halo (>=0.0.31,<0.0.32)
 Requires-Dist: jinja2 (>=3.1.2,<4.0.0)
 Requires-Dist: lancedb (>=0.4.1,<0.5.0)
-Requires-Dist: litellm (>=1.…
+Requires-Dist: litellm (>=1.20.6,<2.0.0) ; extra == "litellm"
 Requires-Dist: lxml (>=4.9.3,<5.0.0)
 Requires-Dist: meilisearch (>=0.28.3,<0.29.0)
 Requires-Dist: meilisearch-python-sdk (>=2.2.3,<3.0.0)
{langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 langroid/__init__.py,sha256=I9edNDkpmfd5C4WvTONaGaTFzIlvFyp5GpFEaMCAKMk,778
 langroid/agent/__init__.py,sha256=w2pap-rHrp41gMzdtzur2YY_m62LqQhF2Du-AmoIQi4,752
-langroid/agent/base.py,sha256=…
-langroid/agent/batch.py,sha256=…
+langroid/agent/base.py,sha256=dzPuQ0R1AH7fYQ9cM2gslJ0-dhOeDjXv_P2UP0u9oRs,33803
+langroid/agent/batch.py,sha256=8zHdM-863pRD3UoCXUPKEQ4Z4iqjkNVD2xXu1WspBak,6464
 langroid/agent/chat_agent.py,sha256=Rp2a_KZA58U8zexQuTUkVjzTsQmyG4qT3Vya-jSDTCQ,35374
 langroid/agent/chat_document.py,sha256=MRp2YCy5f3Q_yPoFXVyr1vGu48wz33UGxAUtMn7MJpo,7958
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -30,7 +30,7 @@ langroid/agent/special/sql/utils/populate_metadata.py,sha256=zRjw31a1ZXvpx9bcmbt
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
 langroid/agent/special/table_chat_agent.py,sha256=GEUTP-VdtMXq4CcPV80gDQrCEn-ZFb9IhuRMtLN5I1o,9030
-langroid/agent/task.py,sha256=…
+langroid/agent/task.py,sha256=V2cagppWMwpMy6d3o_0ljoo4k86cho3YpTHHonhMuDI,47589
 langroid/agent/tool_message.py,sha256=ngmWdiqMYbjF4Am0hsLyA9zK0Q9QF2ziec6FW0lPD90,7399
 langroid/agent/tools/__init__.py,sha256=q-maq3k2BXhPAU99G0H6-j_ozoRvx15I1RFpPVicQIU,304
 langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsWMnpaZpw,3789
@@ -55,10 +55,10 @@ langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAw
 langroid/language_models/base.py,sha256=N9Jn-veKBCd_ky9mhHVSmf1iRQ1RBdaxm45yrY9Njl0,20616
 langroid/language_models/config.py,sha256=5UF3DzO1a-Dfsc3vghE0XGq7g9t_xDsRCsuRiU4dgBg,366
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=…
+langroid/language_models/openai_gpt.py,sha256=vpAYgfo-Ezo9eqZa0ymy-e5hJ_XdtFFENYIOEI3Q7kQ,48101
 langroid/language_models/prompt_formatter/__init__.py,sha256=9JXFF22QNMmbQV1q4nrIeQVTtA3Tx8tEZABLtLBdFyc,352
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
-langroid/language_models/prompt_formatter/hf_formatter.py,sha256=…
+langroid/language_models/prompt_formatter/hf_formatter.py,sha256=iO2OVfRnq7iRxxXdD1T-_TcINt_4oHVQsWAm7fyKYcc,3951
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
 langroid/language_models/utils.py,sha256=3stQOt3sAdbGT70thurlxfnQ4xPxH75YjFm-jaRuXlg,4474
 langroid/mytypes.py,sha256=-0q-SyicLbdjFSIEcwt5u-EwhRcMWllSoWYE3OWk78M,2501
@@ -78,7 +78,7 @@ langroid/parsing/table_loader.py,sha256=qNM4obT_0Y4tjrxNBCNUYjKQ9oETCZ7FbolKBTcz
 langroid/parsing/url_loader.py,sha256=54c6yt9grfUyImauSdM5UM4_ulU4JEz0ehdugAkxKI4,2391
 langroid/parsing/url_loader_cookies.py,sha256=Lg4sNpRz9MByWq2mde6T0hKv68VZSV3mtMjNEHuFeSU,2327
 langroid/parsing/urls.py,sha256=Nv4yCWQLLBEjaiRdaZZVQNBEl_cfK_V6cVuPm91wGtU,7686
-langroid/parsing/utils.py,sha256=…
+langroid/parsing/utils.py,sha256=Ft0YytDQh2-S1xjk3FDA4IZI9Qp1odrIYm8cuK8H81s,11642
 langroid/parsing/web_search.py,sha256=sS6UPeVB_KSsIUNlv4VHHvc0uFywMExEbQ15k40TLcc,3951
 langroid/prompts/__init__.py,sha256=B0vpJzIJlMR3mFRtoQwyALsFzBHvLp9f92acD8xJA_0,185
 langroid/prompts/dialog.py,sha256=SpfiSyofSgy2pwD1YboHR_yHO3LEEMbv6j2sm874jKo,331
@@ -111,7 +111,7 @@ langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR2
 langroid/vector_store/momento.py,sha256=j6Eo6oIDN2fe7lsBOlCXJn3uvvERHHTFL5QJfeREeOM,10044
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=_egbsP9SWBwmI827EDYSSOqfIQSmwNsmJfFTxrLpWYE,13457
-langroid-0.1.…
-langroid-0.1.…
-langroid-0.1.…
-langroid-0.1.…
+langroid-0.1.181.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.181.dist-info/METADATA,sha256=K5aL_WW9QSsuTt-XKslR2WhBNxwj6LNwLnKbhhk6X8U,45219
+langroid-0.1.181.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.181.dist-info/RECORD,,
{langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/LICENSE
File without changes
{langroid-0.1.178.dist-info → langroid-0.1.181.dist-info}/WHEEL
File without changes