langroid 0.1.157__py3-none-any.whl → 0.1.159__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/special/doc_chat_agent.py +41 -2
- langroid/agent/special/lance_doc_chat_agent.py +23 -1
- langroid/agent/special/lance_rag/critic_agent.py +42 -35
- langroid/agent/special/lance_rag/query_planner_agent.py +1 -0
- langroid/language_models/openai_gpt.py +1 -1
- langroid/parsing/table_loader.py +25 -12
- langroid/utils/logging.py +2 -5
- langroid/utils/pandas_utils.py +4 -2
- {langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/METADATA +4 -1
- {langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/RECORD +12 -12
- {langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/LICENSE +0 -0
- {langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/WHEEL +0 -0
langroid/agent/special/doc_chat_agent.py
CHANGED
@@ -14,7 +14,7 @@ pip install "langroid[hf-embeddings]"
 """
 import logging
 from contextlib import ExitStack
-from typing import List, Optional, Tuple, no_type_check
+from typing import Dict, List, Optional, Set, Tuple, no_type_check

 import numpy as np
 import pandas as pd
@@ -77,6 +77,7 @@ class DocChatAgentConfig(ChatAgentConfig):
     # extra fields to include in content as key=value pairs
     # (helps retrieval for table-like data)
     add_fields_to_content: List[str] = []
+    filter_fields: List[str] = []  # fields usable in filter
     retrieve_only: bool = False  # only retr relevant extracts, don't gen summary answer
     extraction_granularity: int = 1  # granularity (in sentences) for relev extraction
     filter: str | None = (
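The new `filter_fields` option complements `add_fields_to_content`: it names the metadata fields whose values may appear in SQL-like filters. A minimal sketch of a config that uses both (the field names `genre` and `certificate` are illustrative, not part of the package):

```python
from langroid.agent.special.doc_chat_agent import DocChatAgentConfig

# Hypothetical movie-review collection: fold two metadata fields into the
# `content` text to help retrieval, and also allow filtering on them.
config = DocChatAgentConfig(
    add_fields_to_content=["genre", "certificate"],  # illustrative field names
    filter_fields=["genre", "certificate"],          # fields usable in filters
)
```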
@@ -335,7 +336,9 @@ class DocChatAgent(ChatAgent):
         Ingest a dataframe into vecdb.
         """
         self.from_dataframe = True
-        self.df_description = describe_dataframe(
+        self.df_description = describe_dataframe(
+            df, filter_fields=self.config.filter_fields, n_vals=5
+        )
         df, metadata = DocChatAgent.document_compatible_dataframe(df, content, metadata)
         docs = dataframe_to_documents(df, content="content", metadata=metadata)
         # When ingesting a dataframe we will no longer do any chunking,
@@ -374,6 +377,42 @@ class DocChatAgent(ChatAgent):
             for d in self.chunked_docs
         ]

+    def get_field_values(self, fields: list[str]) -> Dict[str, str]:
+        """Get string-listing of possible values of each filterable field,
+        e.g.
+        {
+            "genre": "crime, drama, mystery, ... (10 more)",
+            "certificate": "R, PG-13, PG, R",
+        }
+        """
+        field_values: Dict[str, Set[str]] = {}
+        # make empty set for each field
+        for f in fields:
+            field_values[f] = set()
+        if self.vecdb is None:
+            raise ValueError("VecDB not set")
+        # get all documents and accumulate possible values of each field until 10
+        docs = self.vecdb.get_all_documents()  # only works for vecdbs that support this
+        for d in docs:
+            # extract fields from d
+            doc_field_vals = extract_fields(d, fields)
+            for field, val in doc_field_vals.items():
+                field_values[field].add(val)
+        # For each field make a string showing list of possible values,
+        # truncate to 20 values, and if there are more, indicate how many
+        # more there are, e.g. Genre: crime, drama, mystery, ... (20 more)
+        field_values_list = {}
+        for f in fields:
+            vals = list(field_values[f])
+            n = len(vals)
+            remaining = n - 20
+            vals = vals[:20]
+            if n > 20:
+                vals.append(f"(...{remaining} more)")
+            # make a string of the values, ensure they are strings
+            field_values_list[f] = ", ".join(str(v) for v in vals)
+        return field_values_list
+
     def doc_length(self, docs: List[Document]) -> int:
         """
         Calc token-length of a list of docs
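The method above caps the listing at 20 values per field and appends a "(...N more)" marker; that summarization step can be exercised on its own. A standalone sketch, independent of any vector store (the helper name here is invented):

```python
from typing import Dict, List, Set

def summarize_field_values(field_values: Dict[str, Set[str]], cap: int = 20) -> Dict[str, str]:
    # Mirrors the truncation logic in get_field_values: list at most `cap`
    # values per field and note how many were left out.
    summary: Dict[str, str] = {}
    for field, values in field_values.items():
        vals: List[str] = [str(v) for v in values]
        if len(vals) > cap:
            vals = vals[:cap] + [f"(...{len(vals) - cap} more)"]
        summary[field] = ", ".join(vals)
    return summary

print(summarize_field_values({"genre": {"crime", "drama", "mystery"}}, cap=2))
# e.g. {'genre': 'crime, drama, (...1 more)'}  (set order may vary)
```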
langroid/agent/special/lance_doc_chat_agent.py
CHANGED
@@ -44,6 +44,24 @@ class LanceDocChatAgent(DocChatAgent):
             self.vecdb.schema,
             excludes=["id", "vector"],
         )
+        # intersect config.filter_fields with schema_dict.keys() in case
+        # there are extraneous fields in config.filter_fields
+        filter_fields_set = set(
+            self.config.filter_fields or schema_dict.keys()
+        ).intersection(schema_dict.keys())
+
+        # possible values of filterable fields
+        filter_field_values = self.get_field_values(list(filter_fields_set))
+
+        # add field values to schema_dict as another field `values` for each field
+        for field, values in filter_field_values.items():
+            if field in schema_dict:
+                schema_dict[field]["values"] = values
+        # if self.config.filter_fields is set, restrict to these:
+        if len(self.config.filter_fields) > 0:
+            schema_dict = {
+                k: v for k, v in schema_dict.items() if k in self.config.filter_fields
+            }
         schema = json.dumps(schema_dict, indent=4)
         if len(fields := self.config.add_fields_to_content) > 0:
             schema += f"""
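The intersection step guards against typos or stale names in `config.filter_fields`: only fields that actually exist in the LanceDB schema survive, and an empty `filter_fields` list means every schema field is treated as filterable. A small sketch of just that set logic, with made-up field names:

```python
schema_dict = {"genre": {"type": "str"}, "year": {"type": "int"}}  # illustrative schema
filter_fields = ["genre", "rating"]  # "rating" is extraneous: not in the schema

filter_fields_set = set(filter_fields or schema_dict.keys()).intersection(schema_dict.keys())
print(filter_fields_set)  # {'genre'} -- the unknown "rating" field is dropped

# An empty filter_fields list falls back to the full schema:
print(set([] or schema_dict.keys()).intersection(schema_dict.keys()))
# {'genre', 'year'} (set order may vary)
```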
@@ -122,7 +140,11 @@ class LanceDocChatAgent(DocChatAgent):
         )

         df, metadata = DocChatAgent.document_compatible_dataframe(df, content, metadata)
-        self.df_description = describe_dataframe(
+        self.df_description = describe_dataframe(
+            df,
+            filter_fields=self.config.filter_fields,
+            n_vals=10,
+        )
         self.vecdb.add_dataframe(df, content="content", metadata=metadata)

         tbl = self.vecdb.client.open_table(self.vecdb.config.collection_name)
langroid/agent/special/lance_rag/critic_agent.py
CHANGED
@@ -27,59 +27,66 @@ from langroid.agent.special.lance_rag.query_planner_agent import (
     LanceQueryPlanAgentConfig,
 )
 from langroid.mytypes import Entity
-from langroid.utils.constants import DONE, PASS
+from langroid.utils.constants import DONE, NO_ANSWER, PASS

 logger = logging.getLogger(__name__)


 class QueryPlanCriticConfig(LanceQueryPlanAgentConfig):
     name = "QueryPlanCritic"
-    system_message = """
+    system_message = f"""
     You are an expert at carefully planning a query that needs to be answered
     based on a large collection of documents. These docs have a special `content` field
     and additional FILTERABLE fields in the SCHEMA below:

-    {doc_schema}
-
-    You will receive a QUERY PLAN consisting of a
-    ORIGINAL QUERY, SQL-Like FILTER, REPHRASED QUERY,
-    a DATAFRAME CALCULATION, and an ANSWER which is the
-    answer received from an assistant that used this QUERY PLAN.
+    {{doc_schema}}

+    You will receive a QUERY PLAN consisting of:
+    - ORIGINAL QUERY,
+    - SQL-Like FILTER, WHICH CAN BE EMPTY (and it's fine if results sound reasonable)
+      FILTER SHOULD ONLY BE USED IF EXPLICITLY REQUIRED BY THE QUERY.
+    - REPHRASED QUERY that will be used to match against the CONTENT (not filterable)
+      of the documents.
+      In general the REPHRASED QUERY should be relied upon to match the CONTENT
+      of the docs. Thus the REPHRASED QUERY itself acts like a
+      SEMANTIC/LEXICAL/FUZZY FILTER since the Assistant is able to use it to match
+      the CONTENT of the docs in various ways (semantic, lexical, fuzzy, etc.).
+
+    - DATAFRAME CALCULATION, and
+    - ANSWER recieved from an assistant that used this QUERY PLAN.
+
+    In addition to the above SCHEMA fields there is a `content` field which:
+    - CANNOT appear in a FILTER,
+    - CAN appear in the DATAFRAME CALCULATION.
+    THERE ARE NO OTHER FIELDS IN THE DOCUMENTS or in the RESULTING DATAFRAME.
+
     Your job is to act as a CRITIC and provide feedback,
     ONLY using the `query_plan_feedback` tool, and DO NOT SAY ANYTHING ELSE.
-    You must take `answer` field into account
-    and judge whether it is a reasonable answer, and accordingly give your feedback.

-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    Here is how you must examine the QUERY PLAN + ANSWER:
+    - If the ANSWER is in the expected form, then the QUERY PLAN is likely VALID,
+      and your feedback should be EMPTY.
+    - If the ANSWER is {NO_ANSWER} or of the wrong form,
+      then try to DIAGNOSE the problem IN THE FOLLOWING ORDER:
+      - DATAFRAME CALCULATION -- is it doing the right thing?
+        Is it finding the Index of a row instead of the value in a column?
+        Or another example: mmaybe it is finding the maximum population
+        rather than the CITY with the maximum population?
+        If you notice a problem with the DATAFRAME CALCULATION, then
+        ONLY SUBMIT FEEDBACK ON THE DATAFRAME CALCULATION, and DO NOT
+        SUGGEST ANYTHING ELSE.
+      - If the DATAFRAME CALCULATION looks correct, then check if
+        the REPHRASED QUERY makes sense given the ORIGINAL QUERY and FILTER.
+        If this is the problem, then ONLY SUBMIT FEEDBACK ON THE REPHRASED QUERY,
+        and DO NOT SUGGEST ANYTHING ELSE.
+      - If the REPHRASED QUERY looks correct, then check if the FILTER makes sense.
+        REMEMBER: A filter should ONLY be used if EXPLICITLY REQUIRED BY THE QUERY.

-    Keep these in mind:
-    * The FILTER must only use fields in the SCHEMA above, EXCEPT `content`
-    * The FILTER can be improved by RELAXING it, e.g. using "LIKE" instead of "=",
-      e.g. "CEO LIKE '%Jobs%'" instead of "CEO = 'Steve Jobs'"
-    * The DATAFRAME CALCULATION must only use fields in the SCHEMA above.
-    * The REPHRASED QUERY should NOT refer to any FILTER fields, and should
-      make sense with respect to the intended purpose, i.e. to be used to
-      MATCH the CONTENT of the docs.
-    * The ASSISTANT does NOT know anything about the FILTER fields
-    * The DATAFRAME CALCULATION, if any, should be suitable to answer
-      the user's ORIGINAL QUERY.

     ALWAYS use `query_plan_feedback` tool/fn to present your feedback!
     and DO NOT SAY ANYTHING ELSE OUTSIDE THE TOOL/FN.
+    IF NO REVISION NEEDED, simply give EMPTY FEEBACK, SAY NOTHING ELSE
+    and DO NOT EXPLAIN YOURSELF.

     """

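Note the switch from a plain triple-quoted string to an f-string for `system_message`: `{NO_ANSWER}` is now interpolated when the class is defined, while the doubled braces in `{{doc_schema}}` escape to a literal `{doc_schema}` placeholder, presumably filled in later once the actual schema is known. A tiny illustration of that escaping behavior (the constant value below is a stand-in):

```python
NO_ANSWER = "DO-NOT-KNOW"  # stand-in for langroid.utils.constants.NO_ANSWER

template = f"""
- If the ANSWER is {NO_ANSWER} or of the wrong form, ...
{{doc_schema}}
"""
print(NO_ANSWER in template)       # True: interpolated when the f-string is built
print("{doc_schema}" in template)  # True: doubled braces leave a literal placeholder
```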
langroid/agent/special/lance_rag/query_planner_agent.py
CHANGED
@@ -50,6 +50,7 @@ class LanceQueryPlanAgentConfig(ChatAgentConfig):
     - a FILTER (can be empty string) that would help the ASSISTANT to answer the query.
       Remember the FILTER can only refer ANY fields in the above SCHEMA
       EXCEPT the `content` field of the documents.
+      ONLY USE A FILTER IF EXPLICITLY MENTIONED IN THE QUERY.
       TO get good results, for STRING MATCHES, consider using LIKE instead of =, e.g.
       "CEO LIKE '%Jobs%'" instead of "CEO = 'Steve Jobs'"
     - a possibly REPHRASED QUERY to be answerable given the FILTER.
langroid/language_models/openai_gpt.py
CHANGED
@@ -169,7 +169,7 @@ class OpenAIGPTConfig(LLMConfig):
         local_model = "api_base" in kwargs and kwargs["api_base"] is not None

         chat_model = kwargs.get("chat_model", "")
-        if chat_model.startswith("litellm") or chat_model.startswith("local"):
+        if chat_model.startswith("litellm/") or chat_model.startswith("local/"):
             local_model = True

         warn_gpt_3_5 = (
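This one-character-per-prefix change tightens local-model detection: the check now requires the `litellm/` or `local/` path-style separator, so a chat model whose name merely begins with those letters no longer trips the local-model branch. A minimal sketch of the check in isolation (the model names are made up):

```python
def looks_local(chat_model: str) -> bool:
    # Same prefix test as in OpenAIGPTConfig above: the trailing slash matters.
    return chat_model.startswith("litellm/") or chat_model.startswith("local/")

print(looks_local("litellm/ollama/llama2"))  # True  -- routed via litellm
print(looks_local("local/localhost:8000"))   # True  -- a local endpoint spec
print(looks_local("localmodelname"))         # False -- no longer misclassified
```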
langroid/parsing/table_loader.py
CHANGED
@@ -1,4 +1,5 @@
 from csv import Sniffer
+from typing import List

 import pandas as pd

@@ -50,32 +51,44 @@ def read_tabular_data(path_or_url: str, sep: None | str = None) -> pd.DataFrame:
     )


-def describe_dataframe(
+def describe_dataframe(
+    df: pd.DataFrame, filter_fields: List[str] = [], n_vals: int = 10
+) -> str:
     """
-    Generates a description of the columns in the dataframe,
+    Generates a description of the columns in the dataframe,
+    along with a listing of up to `n_vals` unique values for each column.
     Intended to be used to insert into an LLM context so it can generate
     appropriate queries or filters on the df.

     Args:
-
-
+        df (pd.DataFrame): The dataframe to describe.
+        filter_fields (list): A list of fields that can be used for filtering.
+            When non-empty, the values-list will be restricted to these.
+        n_vals (int): How many unique values to show for each column.

     Returns:
-
+        str: A description of the dataframe.
     """
     description = []
-    for column in df.columns:
-
-
-
-
+    for column in df.columns.to_list():
+        unique_values = df[column].dropna().unique()
+        unique_count = len(unique_values)
+        if column not in filter_fields:
+            values_desc = f"{unique_count} unique values"
+        else:
+            if unique_count > n_vals:
+                displayed_values = unique_values[:n_vals]
+                more_count = unique_count - n_vals
+                values_desc = f" Values - {displayed_values}, ... {more_count} more"
+            else:
+                values_desc = f" Values - {unique_values}"
         col_type = "string" if df[column].dtype == "object" else df[column].dtype
-        col_desc = f"* {column} ({col_type})
+        col_desc = f"* {column} ({col_type}); {values_desc}"
         description.append(col_desc)

     all_cols = "\n".join(description)

     return f"""
-    Name of each field, its type and
+    Name of each field, its type and unique values (up to {n_vals}):
     {all_cols}
     """
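A quick way to see the new output is to call the revised `describe_dataframe` on a toy frame; the column names below are invented for illustration:

```python
import pandas as pd
from langroid.parsing.table_loader import describe_dataframe

df = pd.DataFrame(
    {
        "genre": ["crime", "drama", "drama", "mystery"],
        "rating": [7.1, 8.3, 6.9, 7.7],
    }
)
# Only `genre` is declared filterable, so only its values are listed;
# `rating` is summarized as a count of unique values.
print(describe_dataframe(df, filter_fields=["genre"], n_vals=3))
```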
langroid/utils/logging.py
CHANGED
@@ -72,12 +72,9 @@ def setup_file_logger(
     propagate: bool = False,
 ) -> logging.Logger:
     os.makedirs(os.path.dirname(filename), exist_ok=True)
-    if
-        if os.path.exists(filename):
-            os.remove(filename)
-
+    file_mode = "a" if append else "w"
     logger = setup_logger(name)
-    handler = logging.FileHandler(filename)
+    handler = logging.FileHandler(filename, mode=file_mode)
     handler.setLevel(logging.INFO)
     if log_format:
         formatter = logging.Formatter(
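The rewrite drops the delete-then-recreate dance: instead of removing an existing log file, the handler now opens it in `"w"` mode (which truncates) unless the function's `append` flag is set, in which case `"a"` preserves prior contents. A minimal sketch of just that choice (the helper name here is hypothetical):

```python
import logging

def open_log_handler(filename: str, append: bool) -> logging.FileHandler:
    # "a" appends to an existing log; "w" truncates it on open,
    # so there is no need to os.remove() the file beforehand.
    file_mode = "a" if append else "w"
    return logging.FileHandler(filename, mode=file_mode)
```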
langroid/utils/pandas_utils.py
CHANGED
@@ -8,7 +8,7 @@ def stringify(x: Any) -> str:
     if isinstance(x, pd.Series):
         df = x.to_frame()
     elif not isinstance(x, pd.DataFrame):
-
+        return str(x)
     else:
         df = x

@@ -16,7 +16,9 @@ def stringify(x: Any) -> str:
     for col in df.columns:
         if df[col].dtype == object:
             df[col] = df[col].apply(
-                lambda
+                lambda item: (item[:1000] + "...")
+                if isinstance(item, str) and len(item) > 1000
+                else item
             )

     # Limit to 10 rows
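The completed lambda truncates any object-dtype cell that is a string longer than 1000 characters and leaves everything else untouched. A standalone sketch of that truncation on a toy column (the threshold mirrors the code above):

```python
import pandas as pd

df = pd.DataFrame({"notes": ["short text", "x" * 1500, 42]})  # mixed object column
df["notes"] = df["notes"].apply(
    lambda item: (item[:1000] + "...")
    if isinstance(item, str) and len(item) > 1000
    else item
)
print(df["notes"].map(lambda v: len(v) if isinstance(v, str) else v).tolist())
# [10, 1003, 42] -- the long string is cut to 1000 chars plus "..."
```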
{langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.157
+Version: 0.1.159
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
@@ -205,6 +205,9 @@ teacher_task.run()
 <summary> <b>Click to expand</b></summary>

 - **Jan 2024:**
+  - **[0.1.157](https://github.com/langroid/langroid/releases/tag/0.1.157):** `DocChatAgentConfig`
+    has a new param: `add_fields_to_content`, to specify additional document fields to insert into
+    the main `content` field, to help improve retrieval.
   - **[0.1.156](https://github.com/langroid/langroid/releases/tag/0.1.156):** New Task control signals
     PASS_TO, SEND_TO; VectorStore: Compute Pandas expression on documents; LanceRAGTaskCreator creates 3-agent RAG system with Query Planner, Critic and RAG Agent.
 - **Dec 2023:**
{langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/RECORD
CHANGED
@@ -8,13 +8,13 @@ langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
 langroid/agent/openai_assistant.py,sha256=yBtxis64XOnxtJzlkwUoTm-wCyvKr4DGo9-laXYMok0,32654
 langroid/agent/special/__init__.py,sha256=PLGt63iwNnSCCigRYcqicD8AUXwdV_HRUWffRHllUC4,510
-langroid/agent/special/doc_chat_agent.py,sha256=
-langroid/agent/special/lance_doc_chat_agent.py,sha256=
+langroid/agent/special/doc_chat_agent.py,sha256=0XMUTyIAmL-looB_Xb6TsljoywiiDfF6GjgldlL2K3E,44585
+langroid/agent/special/lance_doc_chat_agent.py,sha256=xZvB4adgGBp8L79wi1Z77kGkZmTTJg8dZU60heeO5cc,7570
 langroid/agent/special/lance_rag/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langroid/agent/special/lance_rag/critic_agent.py,sha256=
+langroid/agent/special/lance_rag/critic_agent.py,sha256=9izW4keCxVZEqrFOgyVUHD7N1vTXLkRynXYYd1Vpwzw,5785
 langroid/agent/special/lance_rag/lance_rag_task.py,sha256=l_HQgrYY-CX2FwIsS961aEF3bYog3GDYo98fj0C0mSk,2889
 langroid/agent/special/lance_rag/lance_tools.py,sha256=WypIS-3ZMDqY_PZEGB2K80-o4RfS43_OnER0dyFlsDY,1339
-langroid/agent/special/lance_rag/query_planner_agent.py,sha256=
+langroid/agent/special/lance_rag/query_planner_agent.py,sha256=Tf0-70Di4mw1B6efSp6dZvGcUPZO2lXjHkwEheSd_gQ,7880
 langroid/agent/special/relevance_extractor_agent.py,sha256=Z0OJmpCOESRX0Viar3JRcjjnDBhSA8useD1ZDflIo0s,4571
 langroid/agent/special/retriever_agent.py,sha256=uu6vqFg85uCVM-_DrXesYe2gH_-WcoHhlsKRlLuZPXk,1867
 langroid/agent/special/sql/__init__.py,sha256=3kR5nC0wnYIzmMrr9L8RJa7JAJpbwBLx7KKygiwz0v0,111
@@ -48,7 +48,7 @@ langroid/language_models/azure_openai.py,sha256=_OOEoZOziI3NDOH_8t3qmh8IDWoHESQe
 langroid/language_models/base.py,sha256=jUEUqDWJBVxIxmG6U4Ysg2QKGOnP_CLmRuEMicsSwUw,20596
 langroid/language_models/config.py,sha256=PXcmEUq52GCDj2sekt8F9E1flWyyNjP2S0LTRs7T6Kg,269
 langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_1oCPtqX2u1wISCj8,36
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/openai_gpt.py,sha256=EF10UqGuAj-fZC6eAnssWGwLtA_pgW4e4ih64EP_LLA,42076
 langroid/language_models/prompt_formatter/__init__.py,sha256=wj2e6j7R9d3m63HCbSDY1vosjFuhHLQVlgBrq8iqF38,197
 langroid/language_models/prompt_formatter/base.py,sha256=2y_GcwhstvB5ih3haS7l5Fv79jVnFJ_vEw1jqWJzB9k,1247
 langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
@@ -66,7 +66,7 @@ langroid/parsing/parser.py,sha256=BwVJboobG71N08w5LC7Tu36LI4pEJoSgAdiBSLChWGY,10
 langroid/parsing/repo_loader.py,sha256=hhMfQBBSo-HvsZDQEcgmk_idKQQAeDQ_MMPd38x2ACU,29338
 langroid/parsing/search.py,sha256=xmQdAdTIwZ0REEUeQVFlGZlqf7k8Poah7-ALuyW7Ov0,8440
 langroid/parsing/spider.py,sha256=w_mHR1B4KOmxsBLoVI8kMkMTEbwTzeK3ath9fOMJrTk,3043
-langroid/parsing/table_loader.py,sha256=
+langroid/parsing/table_loader.py,sha256=qNM4obT_0Y4tjrxNBCNUYjKQ9oETCZ7FbolKBTcz-GM,3410
 langroid/parsing/url_loader.py,sha256=RZCX1RJuQpTatJjBOU74_gJ5Ab7xwarRmFh5ON4n_G4,2279
 langroid/parsing/url_loader_cookies.py,sha256=Lg4sNpRz9MByWq2mde6T0hKv68VZSV3mtMjNEHuFeSU,2327
 langroid/parsing/urls.py,sha256=Nv4yCWQLLBEjaiRdaZZVQNBEl_cfK_V6cVuPm91wGtU,7686
@@ -86,10 +86,10 @@ langroid/utils/docker.py,sha256=kJQOLTgM0x9j9pgIIqp0dZNZCTvoUDhp6i8tYBq1Jr0,1105
 langroid/utils/globals.py,sha256=VkTHhlqSz86oOPq65sjul0XU8I52UNaFC5vwybMQ74w,1343
 langroid/utils/llms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/utils/llms/strings.py,sha256=CSAX9Z6FQOLXOzbLMe_Opqtc3ruDAKTTk7cPqc6Blh0,263
-langroid/utils/logging.py,sha256=
+langroid/utils/logging.py,sha256=R8TN-FqVpwZ4Ajgls9TDMthLvPpQd0QVNXK-PJDj1Z8,3917
 langroid/utils/output/__init__.py,sha256=IpfqnCkfXa4HaOx39EMUhXuA7GPZFd7N_QMm1n43C_I,174
 langroid/utils/output/printing.py,sha256=5EsYB1O4qKhocW19aebOUzK82RD9U5nygbY21yo8gfg,2872
-langroid/utils/pandas_utils.py,sha256=
+langroid/utils/pandas_utils.py,sha256=nSA1tIgOUTkRDn-IKq7HP8XGJcL6bA110LcPfRF7h8I,707
 langroid/utils/pydantic_utils.py,sha256=E6miRLZRU61nkGL1Y55eXE79auCdXMji5QRsPfvwEt8,16554
 langroid/utils/system.py,sha256=x9204H1-6EubDe8-9yX87KZSmRCkf0puXQv91QOetF4,3326
 langroid/utils/web/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -103,7 +103,7 @@ langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR2
 langroid/vector_store/momento.py,sha256=j6Eo6oIDN2fe7lsBOlCXJn3uvvERHHTFL5QJfeREeOM,10044
 langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
 langroid/vector_store/qdrantdb.py,sha256=qt7Dye6rcgoe0551WzmOxRGIlJfL87D4MX7HdqxuEok,13393
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
-langroid-0.1.
+langroid-0.1.159.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.1.159.dist-info/METADATA,sha256=-qIZRl58PZFOsLnKyeFFktfjVedKKQufuogeH6TN2qw,42701
+langroid-0.1.159.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+langroid-0.1.159.dist-info/RECORD,,
{langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/LICENSE
File without changes
{langroid-0.1.157.dist-info → langroid-0.1.159.dist-info}/WHEEL
File without changes