langroid 0.1.248__py3-none-any.whl → 0.1.249__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/base.py +6 -6
- langroid/agent/callbacks/chainlit.py +3 -2
- langroid/agent/chat_agent.py +54 -33
- langroid/agent/chat_document.py +1 -0
- langroid/agent/special/doc_chat_agent.py +44 -32
- langroid/agent/special/lance_rag/query_planner_agent.py +5 -1
- langroid/agent/special/table_chat_agent.py +16 -5
- langroid/language_models/base.py +4 -2
- langroid/language_models/openai_gpt.py +1 -1
- langroid/prompts/templates.py +12 -11
- langroid/utils/output/status.py +13 -6
- {langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/METADATA +1 -1
- {langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/RECORD +15 -15
- {langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/LICENSE +0 -0
- {langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/WHEEL +0 -0
langroid/agent/base.py
CHANGED
@@ -35,7 +35,7 @@ from langroid.language_models.base import (
     LLMTokenUsage,
     StreamingIfAllowed,
 )
-from langroid.language_models.openai_gpt import OpenAIGPTConfig
+from langroid.language_models.openai_gpt import OpenAIGPT, OpenAIGPTConfig
 from langroid.mytypes import Entity
 from langroid.parsing.parse_json import extract_top_level_json
 from langroid.parsing.parser import Parser, ParsingConfig
@@ -94,12 +94,12 @@ class Agent(ABC):
         self._indent = ""
         self.llm = LanguageModel.create(config.llm)
         self.vecdb = VectorStore.create(config.vecdb) if config.vecdb else None
-        # token_encoding_model is used to obtain the tokenizer,
-        # so we ensure that the tokenizer corresponding to the model is used.
         if config.parsing is not None and self.config.llm is not None:
-            … (3 removed lines not shown in this diff view)
+            # token_encoding_model is used to obtain the tokenizer,
+            # so in case it's an OpenAI model, we ensure that the tokenizer
+            # corresponding to the model is used.
+            if isinstance(self.llm, OpenAIGPT) and self.llm.is_openai_chat_model():
+                config.parsing.token_encoding_model = self.llm.config.chat_model
         self.parser: Optional[Parser] = (
             Parser(config.parsing) if config.parsing else None
         )
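A quick standalone sketch (not part of the diff) of why the parser's token_encoding_model is aligned with the chat model: different encodings can tokenize the same text into different counts, so chunking or context-budget math done with the wrong encoding misestimates sizes. Assumes tiktoken is installed; the model names are only illustrative.

    import tiktoken

    text = "Langroid agents coordinate LLMs, tools, and vector stores."
    for model in ("gpt-4", "text-davinci-003"):
        # encoding_for_model maps a model name to its tokenizer
        # (e.g. cl100k_base vs p50k_base), and counts can differ
        enc = tiktoken.encoding_for_model(model)
        print(f"{model}: encoding={enc.name}, tokens={len(enc.encode(text))}")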
langroid/agent/callbacks/chainlit.py
CHANGED
@@ -98,7 +98,7 @@ async def make_llm_settings_widgets(
         [
             cl.input_widget.TextInput(
                 id="chat_model",
-                label="Model Name (Default …
+                label="Model Name (Default GPT-4o)",
                 initial="",
                 placeholder="E.g. ollama/mistral or " "local/localhost:8000/v1",
             ),
@@ -337,6 +337,7 @@ class ChainlitAgentCallbacks:
         content: str,
         is_tool: bool = False,
         cached: bool = False,
+        language: str | None = None,
     ) -> None:
         """Show non-streaming LLM response."""
         step = cl.Step(
@@ -344,7 +345,7 @@
             name=self._entity_name("llm", tool=is_tool, cached=cached),
             type="llm",
             parent_id=self._get_parent_id(),
-            language="json" if is_tool else None,
+            language=language or ("json" if is_tool else None),
         )
         self.last_step = step
         self.curr_step = None
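The effect of the new parameter is easiest to see in isolation. A minimal sketch of the default-resolution rule; resolve_language is a hypothetical stand-in, not a langroid function:

    def resolve_language(is_tool: bool, language: str | None) -> str | None:
        # mirrors the changed line: an explicit language wins;
        # tool output still defaults to JSON; otherwise no highlighting
        return language or ("json" if is_tool else None)

    assert resolve_language(is_tool=True, language=None) == "json"
    assert resolve_language(is_tool=True, language="text") == "text"
    assert resolve_language(is_tool=False, language="text") == "text"
    assert resolve_language(is_tool=False, language=None) is None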
langroid/agent/chat_agent.py
CHANGED
@@ -15,6 +15,7 @@ from langroid.agent.tool_message import ToolMessage
 from langroid.language_models.base import (
     LLMFunctionSpec,
     LLMMessage,
+    LLMResponse,
     Role,
     StreamingIfAllowed,
 )
@@ -681,8 +682,11 @@ class ChatAgent(Agent):
             streamer = self.callbacks.start_llm_stream()
             self.llm.config.streamer = streamer
         with ExitStack() as stack:  # for conditionally using rich spinner
-            if not self.llm.get_stream():
+            if not self.llm.get_stream() and not settings.quiet:
                 # show rich spinner only if not streaming!
+                # (Why? b/c the intent of showing a spinner is to "show progress",
+                # and we don't need to do that when streaming, since
+                # streaming output already shows progress.)
                 cm = status(
                     "LLM responding to messages...",
                     log_if_quiet=False,
@@ -702,35 +706,22 @@
         self.callbacks.finish_llm_stream(
             content=str(response),
             is_tool=self.has_tool_message_attempt(
-                ChatDocument.from_LLMResponse(response, displayed=True)
+                ChatDocument.from_LLMResponse(response, displayed=True),
             ),
         )
         self.llm.config.streamer = noop_fn
         if response.cached:
             self.callbacks.cancel_llm_stream()
-
-        if not self.llm.get_stream() or response.cached:
-            # We would have already displayed the msg "live" ONLY if
-            # streaming was enabled, AND we did not find a cached response.
-            # If we are here, it means the response has not yet been displayed.
-            cached = f"[red]{self.indent}(cached)[/red]" if response.cached else ""
-            if not settings.quiet:
-                print(cached + "[green]" + escape(str(response)))
-            self.callbacks.show_llm_response(
-                content=str(response),
-                is_tool=self.has_tool_message_attempt(
-                    ChatDocument.from_LLMResponse(response, displayed=True)
-                ),
-                cached=response.cached,
-            )
+        self._render_llm_response(response)
         self.update_token_usage(
-            response,
+            response,  # .usage attrib is updated!
             messages,
             self.llm.get_stream(),
             chat=True,
             print_response_stats=self.config.show_stats and not settings.quiet,
         )
-        return ChatDocument.from_LLMResponse(response, displayed=True)
+        chat_doc = ChatDocument.from_LLMResponse(response, displayed=True)
+        return chat_doc
 
     async def llm_response_messages_async(
         self, messages: List[LLMMessage], output_len: Optional[int] = None
@@ -764,35 +755,65 @@
         self.callbacks.finish_llm_stream(
             content=str(response),
             is_tool=self.has_tool_message_attempt(
-                ChatDocument.from_LLMResponse(response, displayed=True)
+                ChatDocument.from_LLMResponse(response, displayed=True),
             ),
         )
         self.llm.config.streamer = noop_fn
         if response.cached:
             self.callbacks.cancel_llm_stream()
-        if not self.llm.get_stream() or response.cached:
+        self._render_llm_response(response)
+        self.update_token_usage(
+            response,  # .usage attrib is updated!
+            messages,
+            self.llm.get_stream(),
+            chat=True,
+            print_response_stats=self.config.show_stats and not settings.quiet,
+        )
+        chat_doc = ChatDocument.from_LLMResponse(response, displayed=True)
+        return chat_doc
+
+    def _render_llm_response(
+        self, response: ChatDocument | LLMResponse, citation_only: bool = False
+    ) -> None:
+        is_cached = (
+            response.cached
+            if isinstance(response, LLMResponse)
+            else response.metadata.cached
+        )
+        if self.llm is None:
+            return
+        if not citation_only and (not self.llm.get_stream() or is_cached):
+            # We expect response to be LLMResponse in this context
+            if not isinstance(response, LLMResponse):
+                raise ValueError(
+                    "Expected response to be LLMResponse, but got "
+                    f"{type(response)} instead."
+                )
             # We would have already displayed the msg "live" ONLY if
             # streaming was enabled, AND we did not find a cached response.
             # If we are here, it means the response has not yet been displayed.
-            cached = f"[red]{self.indent}(cached)[/red]" if response.cached else ""
+            cached = f"[red]{self.indent}(cached)[/red]" if is_cached else ""
             if not settings.quiet:
                 print(cached + "[green]" + escape(str(response)))
             self.callbacks.show_llm_response(
                 content=str(response),
                 is_tool=self.has_tool_message_attempt(
-                    ChatDocument.from_LLMResponse(response, displayed=True)
+                    ChatDocument.from_LLMResponse(response, displayed=True),
                 ),
-                cached=response.cached,
+                cached=is_cached,
             )
-
-        self.update_token_usage(
-            response,
-            messages,
-            self.llm.get_stream(),
-            chat=True,
-            print_response_stats=self.config.show_stats and not settings.quiet,
-        )
-        return ChatDocument.from_LLMResponse(response, displayed=True)
+        if isinstance(response, LLMResponse):
+            # we are in the context immediately after an LLM responded,
+            # we won't have citations yet, so we're done
+            return
+        if response.metadata.has_citation and not settings.quiet:
+            print("[grey37]SOURCES:\n" + response.metadata.source + "[/grey37]")
+            self.callbacks.show_llm_response(
+                content=str(response.metadata.source),
+                is_tool=False,
+                cached=False,
+                language="text",
+            )
 
     def _llm_response_temp_context(self, message: str, prompt: str) -> ChatDocument:
         """
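The display branch that _render_llm_response centralizes is small but easy to get wrong. A standalone sketch of just the decision; needs_display is a hypothetical name, not langroid code:

    def needs_display(streaming: bool, cached: bool) -> bool:
        # mirrors `not self.llm.get_stream() or is_cached` above: a response
        # must be printed now unless it was already shown live while streaming
        return not streaming or cached

    assert needs_display(streaming=False, cached=False)     # non-streaming: show it
    assert needs_display(streaming=True, cached=True)       # cache hit: never streamed
    assert not needs_display(streaming=True, cached=False)  # already displayed live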
langroid/agent/chat_document.py
CHANGED
langroid/agent/special/doc_chat_agent.py
CHANGED
@@ -14,7 +14,7 @@ pip install "langroid[hf-embeddings]"
 """
 
 import logging
-
+import re
 from functools import cache
 from typing import Any, Dict, List, Optional, Set, Tuple, no_type_check
 
@@ -49,7 +49,6 @@ from langroid.parsing.urls import get_list_from_user, get_urls_paths_bytes_indic
 from langroid.parsing.utils import batched
 from langroid.prompts.prompts_config import PromptsConfig
 from langroid.prompts.templates import SUMMARY_ANSWER_PROMPT_GPT4
-from langroid.utils.configuration import settings
 from langroid.utils.constants import NO_ANSWER
 from langroid.utils.output import show_if_debug, status
 from langroid.utils.pydantic_utils import dataframe_to_documents, extract_fields
@@ -83,6 +82,23 @@ except ImportError:
     pass
 
 
+def extract_citations(text: str) -> List[int]:
+    # Find all patterns that match [[<numbers>]]
+    matches = re.findall(r"\[\[([\d,]+)\]\]", text)
+
+    # Initialize a set to hold distinct citation numbers
+    citations: Set[int] = set()
+
+    # Process each match
+    for match in matches:
+        # Split numbers by comma and convert to integers
+        numbers = match.split(",")
+        citations.update(int(number) for number in numbers)
+
+    # Return a sorted list of unique citations
+    return sorted(citations)
+
+
 class DocChatAgentConfig(ChatAgentConfig):
     system_message: str = DEFAULT_DOC_CHAT_SYSTEM_MESSAGE
     user_message: str = DEFAULT_DOC_CHAT_INSTRUCTIONS
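A quick usage check of the citation extractor above, restated compactly so the snippet runs on its own; the sample answer text is invented:

    import re
    from typing import List

    def extract_citations(text: str) -> List[int]:
        # same logic as the diff: collect numbers from [[i]] / [[i,j,...]] groups
        matches = re.findall(r"\[\[([\d,]+)\]\]", text)
        return sorted({int(n) for m in matches for n in m.split(",")})

    answer = "Beethoven finished the 9th in 1824. [[1]] He was deaf by then. [[2,5]] Sources agree. [[2]]"
    print(extract_citations(answer))  # -> [1, 2, 5]

Note that duplicates collapse ([[2]] and [[2,5]] yield a single 2) and the result is sorted, which is what lets the caller index passages deterministically.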
@@ -658,8 +674,7 @@ class DocChatAgent(ChatAgent):
         query_str = query_str[1:] if query_str is not None else None
         if self.llm is None:
             raise ValueError("LLM not set")
-
-        response = super().llm_response(query_str)
+        response = super().llm_response(query_str)
         if query_str is not None:
             self.update_dialog(
                 query_str, "" if response is None else response.content
@@ -676,6 +691,10 @@ class DocChatAgent(ChatAgent):
         else:
             self.callbacks.show_start_response(entity="llm")
             response = self.answer_from_docs(query_str)
+            # Citation details (if any) are NOT generated by LLM
+            # (We extract these from LLM's numerical citations),
+            # so render them here
+            self._render_llm_response(response, citation_only=True)
         return ChatDocument(
             content=response.content,
             metadata=ChatDocMetaData(
@@ -701,8 +720,7 @@ class DocChatAgent(ChatAgent):
         query_str = query_str[1:] if query_str is not None else None
         if self.llm is None:
             raise ValueError("LLM not set")
-
-        response = await super().llm_response_async(query_str)
+        response = await super().llm_response_async(query_str)
         if query_str is not None:
             self.update_dialog(
                 query_str, "" if response is None else response.content
@@ -719,6 +737,7 @@ class DocChatAgent(ChatAgent):
         else:
             self.callbacks.show_start_response(entity="llm")
             response = self.answer_from_docs(query_str)
+            self._render_llm_response(response, citation_only=True)
         return ChatDocument(
             content=response.content,
             metadata=ChatDocMetaData(
@@ -742,10 +761,11 @@
         return "\n".join(
             [
                 f"""
+                [{i+1}]
                 {content}
                 {source}
                 """
-                for (content, source) in zip(contents, sources)
+                for i, (content, source) in enumerate(zip(contents, sources))
             ]
         )
 
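What the numbering change produces, as a simplified standalone snippet (the contents and sources are made up, and the diff's f-string indentation is flattened here):

    contents = ["Oak trees can live up to 1000 years.", "Oaks symbolize strength."]
    sources = ["SOURCE: trees.txt", "SOURCE: folklore.txt"]
    numbered = "\n".join(
        f"\n[{i+1}]\n{content}\n{source}\n"
        for i, (content, source) in enumerate(zip(contents, sources))
    )
    print(numbered)  # each passage now carries a [1], [2], ... label the LLM can cite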
@@ -769,7 +789,7 @@
         # Substitute Q and P into the templatized prompt
 
         final_prompt = self.config.summarize_prompt.format(
-            question=…
+            question=question, extracts=passages_str
         )
         show_if_debug(final_prompt, "SUMMARIZE_PROMPT= ")
 
@@ -788,24 +808,24 @@
         final_answer = answer_doc.content.strip()
         show_if_debug(final_answer, "SUMMARIZE_RESPONSE= ")
 
-        … (12 removed lines not shown in this diff view)
-        sources = ""
+        citations = extract_citations(final_answer)
+
+        citations_str = ""
+        if len(citations) > 0:
+            # append [i] source, content for each citation
+            citations_str = "\n".join(
+                [
+                    f"[{c}] {passages[c-1].metadata.source}\n{passages[c-1].content}"
+                    for c in citations
+                ]
+            )
+
         return ChatDocument(
-            content=…
+            content=final_answer,  # does not contain citations
             metadata=ChatDocMetaData(
-                source=…
+                source=citations_str,  # only the citations
                 sender=Entity.LLM,
+                has_citation=len(citations) > 0,
                 cached=getattr(answer_doc.metadata, "cached", False),
             ),
         )
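The citation numbers are 1-indexed into the passage list, so [[2]] maps to passages[1]. A self-contained sketch of that lookup; Passage here is a made-up stand-in for langroid's document type, with the metadata level flattened away:

    from dataclasses import dataclass

    @dataclass
    class Passage:
        content: str
        source: str

    passages = [
        Passage("Oaks can live 1000 years.", "trees.txt"),
        Passage("Oaks symbolize strength.", "folklore.txt"),
    ]
    citations = [2]  # as returned by the citation extractor; 1-indexed
    citations_str = "\n".join(
        f"[{c}] {passages[c - 1].source}\n{passages[c - 1].content}" for c in citations
    )
    print(citations_str)  # -> "[2] folklore.txt" then "Oaks symbolize strength."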
@@ -1277,15 +1297,7 @@
             content="\n\n".join([e.content for e in extracts]),
             metadata=ChatDocMetaData(**meta),
         )
-
-        # conditionally use Streaming or rich console context
-        cm = (
-            StreamingIfAllowed(self.llm)
-            if settings.stream
-            else (status("LLM Generating final answer..."))
-        )
-        stack.enter_context(cm)  # type: ignore
-        response = self.get_summary_answer(query, extracts)
+        response = self.get_summary_answer(query, extracts)
 
         self.update_dialog(query, response.content)
         self.response = response  # save last response
langroid/agent/special/lance_rag/query_planner_agent.py
CHANGED
@@ -58,7 +58,7 @@ class LanceQueryPlanAgentConfig(ChatAgentConfig):
     - a possibly REPHRASED QUERY to be answerable given the FILTER.
       Keep in mind that the ASSISTANT does NOT know anything about the FILTER fields,
       so the REPHRASED QUERY should NOT mention ANY FILTER fields.
-      The …
+      The assistant will answer based on documents whose CONTENTS match the QUERY,
       possibly REPHRASED.
     - an OPTIONAL SINGLE-LINE Pandas-dataframe calculation/aggregation string
       that can be used to calculate the answer to the original query,
@@ -70,6 +70,10 @@ class LanceQueryPlanAgentConfig(ChatAgentConfig):
 
     IMPORTANT: The DataFrame `df` in this calculation is the result of
     applying the FILTER AND REPHRASED QUERY to the documents.
+
+    WATCH OUT!! When deciding the dataframe calc, if any, CAREFULLY
+    note what the query is asking, and ensure that the result of your
+    dataframe calc expression would answer the query.
 
 
     EXAMPLE:
langroid/agent/special/table_chat_agent.py
CHANGED
@@ -25,7 +25,7 @@ from langroid.agent.tool_message import ToolMessage
 from langroid.language_models.openai_gpt import OpenAIChatModel, OpenAIGPTConfig
 from langroid.parsing.table_loader import read_tabular_data
 from langroid.prompts.prompts_config import PromptsConfig
-from langroid.utils.constants import DONE
+from langroid.utils.constants import DONE, PASS
 from langroid.vector_store.base import VectorStoreConfig
 
 logger = logging.getLogger(__name__)
@@ -52,7 +52,8 @@ in your code, and try another way, or use `pandas_eval` to explore the dataframe
 before submitting your final code.
 
 Once you have the answer to the question, possibly after a few steps,
-say {DONE} and …
+say {DONE} and PRESENT THE ANSWER TO ME; do not just say {DONE}.
+If you receive an error message,
 try using the `pandas_eval` tool/function again with the corrected code.
 
 VERY IMPORTANT: When using the `pandas_eval` tool/function, DO NOT EXPLAIN ANYTHING,
@@ -238,12 +239,22 @@ class TableChatAgent(ChatAgent):
     def handle_message_fallback(
         self, msg: str | ChatDocument
     ) -> str | ChatDocument | None:
-        """Handle …
-        forgets to use pandas_eval"""
+        """Handle various LLM deviations"""
         if isinstance(msg, ChatDocument) and msg.metadata.sender == lr.Entity.LLM:
+            if msg.content.strip() == DONE and self.sent_expression:
+                # LLM sent an expression (i.e. used the `pandas_eval` tool)
+                # but upon receiving the results, simply said DONE without
+                # narrating the result as instructed.
+                return """
+                    You forgot to PRESENT the answer to the user's query
+                    based on the results from `pandas_eval` tool.
+                    """
             if self.sent_expression:
-                …
+                # LLM forgot to say DONE
+                self.sent_expression = False
+                return DONE + " " + PASS
             else:
+                # LLM forgot to use the `pandas_eval` tool
                 return """
                     You forgot to use the `pandas_eval` tool/function
                     to find the answer.
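A standalone sketch of the three-way fallback ladder above. DONE and PASS are stand-in string values here (the real constants live in langroid.utils.constants), and the real handler also works with ChatDocument state; this only shows the branch order:

    DONE, PASS = "DONE", "PASS"  # stand-ins for langroid.utils.constants values

    def fallback(content: str, sent_expression: bool) -> str:
        if content.strip() == DONE and sent_expression:
            # used pandas_eval, then said bare DONE: nudge it to present the answer
            return "You forgot to PRESENT the answer based on the `pandas_eval` results."
        if sent_expression:
            # narrated an answer but forgot DONE: finish on its behalf
            return DONE + " " + PASS
        # never used the tool at all
        return "You forgot to use the `pandas_eval` tool/function to find the answer."

    print(fallback("DONE", sent_expression=True))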
langroid/language_models/base.py
CHANGED
@@ -475,10 +475,12 @@ class LanguageModel(ABC):
         history = collate_chat_history(chat_history)
 
         prompt = f"""
-        Given the …
-        question as a …
+        Given the CHAT HISTORY below, and a follow-up QUESTION or SEARCH PHRASE,
+        rephrase the follow-up question/phrase as a STANDALONE QUESTION that
+        can be understood without the context of the chat history.
 
         Chat history: {history}
+
         Follow-up question: {question}
         """.strip()
         show_if_debug(prompt, "FOLLOWUP->STANDALONE-PROMPT= ")
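Rendering the revised prompt standalone makes the rewording concrete; the history and question values below are invented:

    history = "User: Who wrote Faust?\nAssistant: Goethe wrote Faust."
    question = "When did he die?"
    prompt = f"""
    Given the CHAT HISTORY below, and a follow-up QUESTION or SEARCH PHRASE,
    rephrase the follow-up question/phrase as a STANDALONE QUESTION that
    can be understood without the context of the chat history.

    Chat history: {history}

    Follow-up question: {question}
    """.strip()
    print(prompt)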
langroid/prompts/templates.py
CHANGED
@@ -50,26 +50,27 @@ EXTRACTION_PROMPT = f"""
 
 SUMMARY_ANSWER_PROMPT_GPT4 = f"""
 
-Use the provided extracts (with sources) to answer the …
+Use the provided NUMBERED extracts (with sources) to answer the QUESTION.
 If there's not enough information, respond with {NO_ANSWER}. Use only the
 information in these extracts, even if your answer is factually incorrect,
 and even if the answer contradicts other parts of the document. The only
 important thing is that your answer is consistent with and supported by the
-extracts. Compose your complete answer …
-… (6 removed lines not shown in this diff view)
+extracts. Compose your complete answer, inserting CITATIONS
+in the format [[i,j,...]] where i,j,... are the extract NUMBERS you are citing.
+For example your answer might look like this (NOTE HOW multiple citations
+are grouped as [[2,5]]):
+
+Beethoven composed the 9th symphony in 1824. [[1]] After that he became deaf
+and could not hear his own music. [[2,5]] He was a prolific composer and
+wrote many famous pieces.
 
-
-EXTRACTS: "The oak trees ... longevity and strength."
+NUMBERED EXTRACTS:
 
 {{extracts}}
 
+QUESTION:
 {{question}}
-
+
 """.strip()
 
 ANSWER_PROMPT_USE_HISTORY_GPT4 = f"""
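Because the template escapes its placeholders as {{extracts}} and {{question}}, it is filled with str.format, exactly as doc_chat_agent.py does in the hunk above. A minimal sketch, assuming langroid 0.1.249 is installed, with invented extract text:

    from langroid.prompts.templates import SUMMARY_ANSWER_PROMPT_GPT4

    extracts = "[1]\nOak trees can live up to 1000 years.\nSOURCE: trees.txt"
    question = "How long do oak trees live?"
    print(SUMMARY_ANSWER_PROMPT_GPT4.format(extracts=extracts, question=question))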
langroid/utils/output/status.py
CHANGED
@@ -3,6 +3,7 @@ from contextlib import AbstractContextManager, ExitStack
 from typing import Any
 
 from rich.console import Console
+from rich.errors import LiveError
 
 from langroid.utils.configuration import quiet_mode, settings
 
@@ -19,15 +20,21 @@ def status(
     Displays a rich spinner if not in quiet mode, else optionally logs the message.
     """
     stack = ExitStack()
-
-    if settings.quiet:
-        if log_if_quiet:
-            logger.info(msg)
+    logged = False
     if settings.quiet and log_if_quiet:
+        logged = True
         logger.info(msg)
-    else:
-        stack.enter_context(console.status(msg))
 
+    if not settings.quiet:
+        try:
+            stack.enter_context(console.status(msg))
+        except LiveError:
+            if not logged:
+                logger.info(msg)
+
+    # When using rich spinner, we enforce quiet mode
+    # (since output will be messy otherwise);
+    # We make an exception to this when debug is enabled.
     stack.enter_context(quiet_mode(not settings.debug))
 
     return stack
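The failure mode the new try/except guards against can be reproduced directly with rich: starting a second live display on a console that already has one raises LiveError. A minimal sketch (the messages are invented):

    from rich.console import Console

    console = Console()
    with console.status("outer spinner..."):
        try:
            with console.status("inner spinner..."):  # second live display
                pass
        except Exception as e:
            # rich raises LiveError ("Only one live display may be active at once");
            # status() now catches this and logs the message instead of crashing
            print(type(e).__name__)

With this change, a nested langroid status(...) call degrades to a logger.info line rather than propagating the error.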
{langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/RECORD
CHANGED
@@ -1,21 +1,21 @@
 langroid/__init__.py,sha256=zsYpGiAUsvyzZzjm964NUamsJImrXSJPVGz9a2jE_uY,1679
 langroid/agent/__init__.py,sha256=_D8dxnfdr92ch1CIrUkKjrB5HVvsQdn62b1Fb2kBxV8,785
-langroid/agent/base.py,sha256=…
+langroid/agent/base.py,sha256=5HQ9fAFTQL771rhx7UkVkL90bjxSVA1DjtvxV8_1RJA,35652
 langroid/agent/batch.py,sha256=feRA_yRG768ElOQjrKEefcRv6Aefd_yY7qktuYUQDwc,10040
 langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/agent/callbacks/chainlit.py,sha256=…
-langroid/agent/chat_agent.py,sha256=…
-langroid/agent/chat_document.py,sha256=…
+langroid/agent/callbacks/chainlit.py,sha256=LboE3zlLLzClKxpBkzHX4XU6fW4lNZW97zwwN97uuaU,21067
+langroid/agent/chat_agent.py,sha256=YwlRMWoK_7vEl9pxgMeUjQAgsukrnZjDGJR0WVomSuQ,39592
+langroid/agent/chat_document.py,sha256=uwCq53SHRyxQw6qyhjzPYuJG48VHBgOf2122Ew3fk6c,9316
 langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
 langroid/agent/openai_assistant.py,sha256=kIVDI4r-xGvplLU5s0nShPVHs6Jq-wOsfWE0kcMhAdQ,33056
 langroid/agent/special/__init__.py,sha256=NG0JkB5y4K0bgnd9Q9UIvFExun3uTfVOWEVLVymff1M,1207
-langroid/agent/special/doc_chat_agent.py,sha256=…
+langroid/agent/special/doc_chat_agent.py,sha256=SBatLDoa2_Ju_Gk_El8FmlMekgHmgpBU1ihx26yFIvc,54008
 langroid/agent/special/lance_doc_chat_agent.py,sha256=USp0U3eTaJzwF_3bdqE7CedSLbaqAi2tm-VzygcyLaA,10175
 langroid/agent/special/lance_rag/__init__.py,sha256=QTbs0IVE2ZgDg8JJy1zN97rUUg4uEPH7SLGctFNumk4,174
 langroid/agent/special/lance_rag/critic_agent.py,sha256=OsOcpcU_AmU2MagpZ5X5yxFeXyteKN9QJMzJGqIITig,6871
 langroid/agent/special/lance_rag/lance_rag_task.py,sha256=l_HQgrYY-CX2FwIsS961aEF3bYog3GDYo98fj0C0mSk,2889
-langroid/agent/special/lance_rag/query_planner_agent.py,sha256=…
+langroid/agent/special/lance_rag/query_planner_agent.py,sha256=U_2V8l3M44R3mX-El3wG1k-u2lTToU-HGfKvRkEWoEA,9816
 langroid/agent/special/lance_tools.py,sha256=btMwKdcT8RdwAjmzbtN1xxm3s1H7ipO9GSpUamryYx8,1456
 langroid/agent/special/neo4j/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/neo4j/csv_kg_chat.py,sha256=koL3sKtHm3aRkLTiARs54ngrcU3lOR1WaLLc_i8rWOU,6374
@@ -31,7 +31,7 @@ langroid/agent/special/sql/utils/description_extractors.py,sha256=RZ2R3DmASxB1ij
 langroid/agent/special/sql/utils/populate_metadata.py,sha256=x2OMKfmIBnJESBG3qKt6gvr3H3L4ZQcoxHfNdWfHjZs,2987
 langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GSLUYUFASZU8x-ybV67cB68,1885
 langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
-langroid/agent/special/table_chat_agent.py,sha256=…
+langroid/agent/special/table_chat_agent.py,sha256=xz4nWndTYTykET_oaveHcZUQ8IEpmA5yB8QGTXfOifw,9624
 langroid/agent/task.py,sha256=b_d46txohISETxXJoWpmIX0hinvt1wjHbK08LZRBEz8,54020
 langroid/agent/tool_message.py,sha256=2kPsQUwi3ZzINTUNj10huKnZLjLp5SXmefacTHx8QDc,8304
 langroid/agent/tools/__init__.py,sha256=q-maq3k2BXhPAU99G0H6-j_ozoRvx15I1RFpPVicQIU,304
@@ -60,10 +60,10 @@ langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEje
 langroid/embedding_models/remote_embeds.py,sha256=6_kjXByVbqhY9cGwl9R83ZcYC2km-nGieNNAo1McHaY,5151
 langroid/language_models/__init__.py,sha256=5L9ndEEC8iLJHjDJmYFTnv6-2-3xsxWUMHcugR8IeDs,821
 langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
-langroid/language_models/base.py,sha256=…
+langroid/language_models/base.py,sha256=2JhacnbQ-DJzLbOaJqyZPnl867xyiz_W-ODiAlEcp98,21131
 langroid/language_models/config.py,sha256=5UF3DzO1a-Dfsc3vghE0XGq7g9t_xDsRCsuRiU4dgBg,366
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=…
+langroid/language_models/openai_gpt.py,sha256=5O-roChpUDZ6OmRTOkR9e2AHHZL8w12Yzs9Q_xviDZ0,50668
 langroid/language_models/prompt_formatter/__init__.py,sha256=9JXFF22QNMmbQV1q4nrIeQVTtA3Tx8tEZABLtLBdFyc,352
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
 langroid/language_models/prompt_formatter/hf_formatter.py,sha256=TFL6ppmeQWnzr6CKQzRZFYY810zE1mr8DZnhw6i85ok,5217
@@ -93,7 +93,7 @@ langroid/prompts/__init__.py,sha256=B0vpJzIJlMR3mFRtoQwyALsFzBHvLp9f92acD8xJA_0,
 langroid/prompts/chat-gpt4-system-prompt.md,sha256=Q3uLCJTPQvmUkZN2XDnkBC7M2K3X0F3C3GIQBaFvYvw,5329
 langroid/prompts/dialog.py,sha256=SpfiSyofSgy2pwD1YboHR_yHO3LEEMbv6j2sm874jKo,331
 langroid/prompts/prompts_config.py,sha256=XRQHzod7KBnoKn3B_V878jZiqBA7rcn-CtGPkuAe_yM,131
-langroid/prompts/templates.py,sha256=…
+langroid/prompts/templates.py,sha256=NxMyPIhDjmL3pNXBaNLrIsebETPQHr6VG5NWO_93NeA,6303
 langroid/prompts/transforms.py,sha256=GsQo1klGxUy0fACh6j0lTblk6XEl2erRnhRWlN2M4-c,2706
 langroid/utils/__init__.py,sha256=ARx5To4Hsv1K5QAzK4uUqdEoB_iq5HK797vae1AcMBI,300
 langroid/utils/algorithms/__init__.py,sha256=WylYoZymA0fnzpB4vrsH_0n7WsoLhmuZq8qxsOCjUpM,41
@@ -107,7 +107,7 @@ langroid/utils/llms/strings.py,sha256=CSAX9Z6FQOLXOzbLMe_Opqtc3ruDAKTTk7cPqc6Blh
 langroid/utils/logging.py,sha256=R8TN-FqVpwZ4Ajgls9TDMthLvPpQd0QVNXK-PJDj1Z8,3917
 langroid/utils/output/__init__.py,sha256=4X8Hdo1SEm06NUnggMJrLtW8i1owdDQPrS7J08BaTec,341
 langroid/utils/output/printing.py,sha256=5EsYB1O4qKhocW19aebOUzK82RD9U5nygbY21yo8gfg,2872
-langroid/utils/output/status.py,sha256=…
+langroid/utils/output/status.py,sha256=rzbE7mDJcgNNvdtylCseQcPGCGghtJvVq3lB-OPJ49E,1049
 langroid/utils/pandas_utils.py,sha256=UctS986Jtl_MvU5rA7-GfrjEHXP7MNu8ePhepv0bTn0,755
 langroid/utils/pydantic_utils.py,sha256=yb-ghaQYL7EIYeiZ0tailvZvAuJZNF7UBXkd3z35OYc,21728
 langroid/utils/system.py,sha256=tWoEbzHzJ6ywdsoa9EwsQrZfGk2t7q87_zKNwau2C8s,4546
@@ -121,7 +121,7 @@ langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR2
 langroid/vector_store/momento.py,sha256=9cui31TTrILid2KIzUpBkN2Ey3g_CZWOQVdaFsA4Ors,10045
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=sk5Qb2ZNbooi0rorsMuqIMokF7WADw6PJ0D6goM2XBw,16802
-langroid-0.1.248.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
-langroid-0.1.248.dist-info/METADATA,sha256=…
-langroid-0.1.248.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-langroid-0.1.248.dist-info/RECORD,,
+langroid-0.1.249.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.249.dist-info/METADATA,sha256=1md_MzQhAHs9J7_OEEOfiL8C4N8GlWj5BBQGK4HrkGw,49426
+langroid-0.1.249.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.249.dist-info/RECORD,,
{langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/LICENSE
File without changes
{langroid-0.1.248.dist-info → langroid-0.1.249.dist-info}/WHEEL
File without changes