langroid 0.51.0__py3-none-any.whl → 0.51.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langroid/agent/chat_agent.py +17 -8
- langroid/agent/chat_document.py +4 -6
- langroid/agent/special/doc_chat_agent.py +2 -2
- langroid/agent/special/table_chat_agent.py +2 -2
- langroid/language_models/base.py +5 -3
- langroid/language_models/model_info.py +33 -1
- langroid/language_models/openai_gpt.py +5 -2
- langroid/parsing/document_parser.py +34 -19
- langroid/parsing/parser.py +1 -0
- {langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/METADATA +2 -2
- {langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/RECORD +13 -13
- {langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/WHEEL +0 -0
- {langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/licenses/LICENSE +0 -0
langroid/agent/chat_agent.py
CHANGED
```diff
@@ -1511,12 +1511,14 @@ class ChatAgent(Agent):
         output_len = self.config.llm.model_max_output_tokens
         if (
             truncate
-            and self.chat_num_tokens(hist)
-            > self.llm.chat_context_length() - self.config.llm.model_max_output_tokens
+            and output_len > self.llm.chat_context_length() - self.chat_num_tokens(hist)
         ):
             # chat + output > max context length,
-            # so first try to shorten requested output len to fit
-
+            # so first try to shorten requested output len to fit;
+            # use an extra margin of 300 tokens in case our calcs are off
+            output_len = (
+                self.llm.chat_context_length() - self.chat_num_tokens(hist) - 300
+            )
             if output_len < self.config.llm.min_output_tokens:
                 # unacceptably small output len, so drop early parts of conv history
                 # if output_len is still too long, then drop early parts of conv history
@@ -1534,10 +1536,17 @@ class ChatAgent(Agent):
                 # and last message (user msg).
                 raise ValueError(
                     """
-                The message history is longer than the
-                length
-
-
+                The (message history + max_output_tokens) is longer than the
+                max chat context length of this model, and we have tried
+                reducing the requested max output tokens, as well as dropping
+                early parts of the message history, to accommodate the model
+                context length, but we have run out of msgs to drop.
+
+                HINT: In the `llm` field of your `ChatAgentConfig` object,
+                which is of type `LLMConfig/OpenAIGPTConfig`, try
+                - increasing `chat_context_length`
+                    (if accurate for the model), or
+                - decreasing `max_output_tokens`
                     """
                 )
                 # drop the second message, i.e. first msg after the sys msg
```
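The two hunks above implement a single policy: when the history plus the requested output would overflow the model's context window, the requested output length is shrunk first (with a 300-token safety margin), and early messages are dropped only if that is not enough. A minimal standalone sketch of that logic; the function name, `MIN_OUTPUT_TOKENS` value, and all token counts here are hypothetical, not langroid APIs:

```python
MARGIN = 300             # same safety margin as in the diff above
MIN_OUTPUT_TOKENS = 100  # stand-in for config.llm.min_output_tokens

def fit_output_len(context_length: int, history_tokens: int, requested: int) -> int:
    """Shrink the output budget before resorting to dropping history."""
    if requested > context_length - history_tokens:
        # history + output would overflow: shrink the output budget first
        requested = context_length - history_tokens - MARGIN
    if requested < MIN_OUTPUT_TOKENS:
        # at this point the real code starts dropping early messages
        raise ValueError("output budget too small; drop early messages")
    return requested

# 16_384-token context, 15_000 tokens of history, 8_192 tokens requested:
print(fit_output_len(16_384, 15_000, 8_192))  # -> 1084
```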
langroid/agent/chat_document.py
CHANGED
```diff
@@ -356,12 +356,8 @@ class ChatDocument(Document):
         Returns:
             List[LLMMessage]: list of LLMMessages corresponding to this ChatDocument.
         """
-
+
         sender_role = Role.USER
-        fun_call = None
-        oai_tool_calls = None
-        tool_id = ""  # for OpenAI Assistant
-        chat_document_id: str = ""
         if isinstance(message, str):
             message = ChatDocument.from_str(message)
         content = message.content or to_string(message.content_any) or ""
@@ -381,6 +377,8 @@ class ChatDocument(Document):
             # same reasoning as for function-call above
             content += " " + "\n\n".join(str(tc) for tc in oai_tool_calls)
             oai_tool_calls = None
+        # some LLM APIs (e.g. gemini) don't like empty msg
+        content = content or " "
         sender_name = message.metadata.sender_name
         tool_ids = message.metadata.tool_ids
         tool_id = tool_ids[-1] if len(tool_ids) > 0 else ""
@@ -437,7 +435,7 @@ class ChatDocument(Document):
                 LLMMessage(
                     role=Role.TOOL,
                     tool_call_id=tool_id,
-                    content=result,
+                    content=result or " ",
                     chat_document_id=chat_document_id,
                 )
                 for tool_id, result in message.oai_tool_id2result.items()
```
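Besides dropping now-unused local defaults in the first hunk, the changes above guard against sending empty message content: blank message and tool-result content is replaced with a single space, since (per the inline comment) some LLM APIs such as Gemini reject empty messages. The guard is just Python's `or` idiom:

```python
# Sketch of the guard added above: empty or None content becomes " ".
def safe_content(content: str | None) -> str:
    return content or " "

assert safe_content("") == " "
assert safe_content(None) == " "
assert safe_content("hello") == "hello"
```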
langroid/agent/special/doc_chat_agent.py
CHANGED
```diff
@@ -204,8 +204,8 @@ class DocChatAgentConfig(ChatAgentConfig):
 
     llm: OpenAIGPTConfig = OpenAIGPTConfig(
         type="openai",
-        chat_model=OpenAIChatModel.
-        completion_model=OpenAIChatModel.
+        chat_model=OpenAIChatModel.GPT4o,
+        completion_model=OpenAIChatModel.GPT4o,
         timeout=40,
     )
     prompts: PromptsConfig = PromptsConfig(
```
langroid/agent/special/table_chat_agent.py
CHANGED
```diff
@@ -118,8 +118,8 @@ class TableChatAgentConfig(ChatAgentConfig):
     vecdb: None | VectorStoreConfig = None
     llm: OpenAIGPTConfig = OpenAIGPTConfig(
         type="openai",
-        chat_model=OpenAIChatModel.
-        completion_model=OpenAIChatModel.
+        chat_model=OpenAIChatModel.GPT4o,
+        completion_model=OpenAIChatModel.GPT4o,
     )
     prompts: PromptsConfig = PromptsConfig(
         max_tokens=1000,
```
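Both `DocChatAgentConfig` and `TableChatAgentConfig` now default their chat and completion models to `GPT4o`. A short sketch of overriding that default; it assumes the same imports these modules use and is illustrative only:

```python
# Hypothetical override of the new GPT4o defaults shown above.
from langroid.language_models.model_info import OpenAIChatModel
from langroid.language_models.openai_gpt import OpenAIGPTConfig

llm_config = OpenAIGPTConfig(
    type="openai",
    chat_model=OpenAIChatModel.GPT4o_MINI,  # override the GPT4o default
    timeout=40,
)
```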
langroid/language_models/base.py
CHANGED
```diff
@@ -63,7 +63,8 @@ class LLMConfig(BaseSettings):
     streamer_async: Optional[Callable[..., Awaitable[None]]] = async_noop_fn
     api_base: str | None = None
     formatter: None | str = None
-
+    # specify None if you want to use the full max output tokens of the model
+    max_output_tokens: int | None = 8192
     timeout: int = 20  # timeout for API requests
     chat_model: str = ""
     completion_model: str = ""
@@ -89,8 +90,9 @@ class LLMConfig(BaseSettings):
 
     @property
     def model_max_output_tokens(self) -> int:
-        return (
-            self.max_output_tokens or get_model_info(self.chat_model).max_output_tokens
+        return min(
+            self.max_output_tokens or get_model_info(self.chat_model).max_output_tokens,
+            get_model_info(self.chat_model).max_output_tokens,
         )
 
 
```
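The new `model_max_output_tokens` property caps whatever the user configured at the model's own output limit, and treats `None` as "use the model's maximum". A standalone sketch with a hypothetical model limit in place of `get_model_info()`:

```python
MODEL_LIMIT = 4_096  # hypothetical stand-in for get_model_info(...).max_output_tokens

def model_max_output_tokens(configured: int | None) -> int:
    # mirrors the min(...) added in the diff above
    return min(configured or MODEL_LIMIT, MODEL_LIMIT)

assert model_max_output_tokens(8_192) == 4_096  # now capped at the model limit
assert model_max_output_tokens(1_024) == 1_024  # smaller requests pass through
assert model_max_output_tokens(None) == 4_096   # None means "use model max"
```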
langroid/language_models/model_info.py
CHANGED
```diff
@@ -24,13 +24,16 @@ class OpenAIChatModel(ModelName):
     """Enum for OpenAI Chat models"""
 
     GPT3_5_TURBO = "gpt-3.5-turbo-1106"
-    GPT4 = "gpt-4"
+    GPT4 = "gpt-4o"  # avoid deprecated gpt-4
     GPT4_TURBO = "gpt-4-turbo"
     GPT4o = "gpt-4o"
     GPT4o_MINI = "gpt-4o-mini"
     O1 = "o1"
     O1_MINI = "o1-mini"
     O3_MINI = "o3-mini"
+    GPT4_1 = "gpt-4.1"
+    GPT4_1_MINI = "gpt-4.1-mini"
+    GPT4_1_NANO = "gpt-4.1-nano"
 
 
 class OpenAICompletionModel(str, Enum):
```
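Note that `GPT4` now shares the value `"gpt-4o"` with `GPT4o`, so under Python's enum semantics one becomes an alias of the other. A quick check, assuming the module path shown in the RECORD section below:

```python
from langroid.language_models.model_info import OpenAIChatModel

assert OpenAIChatModel.GPT4.value == OpenAIChatModel.GPT4o.value == "gpt-4o"
assert OpenAIChatModel.GPT4_1_NANO.value == "gpt-4.1-nano"
```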
```diff
@@ -44,6 +47,7 @@ class AnthropicModel(ModelName):
     """Enum for Anthropic models"""
 
     CLAUDE_3_5_SONNET = "claude-3-5-sonnet-latest"
+    CLAUDE_3_7_SONNET = "claude-3-7-sonnet-latest"
     CLAUDE_3_OPUS = "claude-3-opus-latest"
     CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
     CLAUDE_3_HAIKU = "claude-3-haiku-20240307"
@@ -63,6 +67,7 @@ class GeminiModel(ModelName):
     GEMINI_1_5_FLASH = "gemini-1.5-flash"
     GEMINI_1_5_FLASH_8B = "gemini-1.5-flash-8b"
     GEMINI_1_5_PRO = "gemini-1.5-pro"
+    GEMINI_2_5_PRO = "gemini-2.5-pro-exp-02-05"
     GEMINI_2_PRO = "gemini-2.0-pro-exp-02-05"
     GEMINI_2_FLASH = "gemini-2.0-flash"
     GEMINI_2_FLASH_LITE = "gemini-2.0-flash-lite-preview"
@@ -160,6 +165,33 @@ MODEL_INFO: Dict[str, ModelInfo] = {
         output_cost_per_million=30.0,
         description="GPT-4 Turbo",
     ),
+    OpenAIChatModel.GPT4_1_NANO.value: ModelInfo(
+        name=OpenAIChatModel.GPT4_1_NANO.value,
+        provider=ModelProvider.OPENAI,
+        context_length=1_047_576,
+        max_output_tokens=32_768,
+        input_cost_per_million=0.10,
+        output_cost_per_million=0.40,
+        description="GPT-4.1",
+    ),
+    OpenAIChatModel.GPT4_1_MINI.value: ModelInfo(
+        name=OpenAIChatModel.GPT4_1_MINI.value,
+        provider=ModelProvider.OPENAI,
+        context_length=1_047_576,
+        max_output_tokens=32_768,
+        input_cost_per_million=0.40,
+        output_cost_per_million=1.60,
+        description="GPT-4.1 Mini",
+    ),
+    OpenAIChatModel.GPT4_1.value: ModelInfo(
+        name=OpenAIChatModel.GPT4_1.value,
+        provider=ModelProvider.OPENAI,
+        context_length=1_047_576,
+        max_output_tokens=32_768,
+        input_cost_per_million=2.00,
+        output_cost_per_million=8.00,
+        description="GPT-4.1",
+    ),
     OpenAIChatModel.GPT4o.value: ModelInfo(
         name=OpenAIChatModel.GPT4o.value,
         provider=ModelProvider.OPENAI,
```
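The new `ModelInfo` entries give the GPT-4.1 family a ~1M-token context window, a 32,768-token output limit, and per-million-token prices. A sketch of estimating a request's cost from the `gpt-4.1` prices above; the helper is hypothetical, not a langroid API:

```python
GPT4_1_INPUT_PER_M = 2.00   # $ per 1M input tokens, from MODEL_INFO above
GPT4_1_OUTPUT_PER_M = 8.00  # $ per 1M output tokens

def request_cost(input_tokens: int, output_tokens: int) -> float:
    return (
        input_tokens * GPT4_1_INPUT_PER_M + output_tokens * GPT4_1_OUTPUT_PER_M
    ) / 1_000_000

print(f"${request_cost(100_000, 5_000):.2f}")  # -> $0.24
```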
langroid/language_models/openai_gpt.py
CHANGED
```diff
@@ -91,10 +91,13 @@ LLAMACPP_API_KEY = os.environ.get("LLAMA_API_KEY", DUMMY_API_KEY)
 
 openai_chat_model_pref_list = [
     OpenAIChatModel.GPT4o,
+    OpenAIChatModel.GPT4_1_NANO,
+    OpenAIChatModel.GPT4_1_MINI,
+    OpenAIChatModel.GPT4_1,
     OpenAIChatModel.GPT4o_MINI,
     OpenAIChatModel.O1_MINI,
+    OpenAIChatModel.O3_MINI,
     OpenAIChatModel.O1,
-    OpenAIChatModel.GPT3_5_TURBO,
 ]
 
 openai_completion_model_pref_list = [
```
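The preference list now favors the GPT-4.1 family right after `GPT4o`, adds `O3_MINI`, and drops the deprecated `GPT3_5_TURBO`. One common way such a list is consumed is "first preferred model that is actually available"; this is an illustrative sketch, not langroid's exact selection logic:

```python
from typing import Iterable, Optional, Set

def first_available(pref: Iterable[str], available: Set[str]) -> Optional[str]:
    # walk the preference list in order; None if nothing matches
    return next((m for m in pref if m in available), None)

pref = ["gpt-4o", "gpt-4.1-nano", "gpt-4.1-mini", "gpt-4.1"]
print(first_available(pref, {"gpt-4.1-nano", "gpt-4.1"}))  # -> gpt-4.1-nano
```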
```diff
@@ -1879,7 +1882,7 @@ class OpenAIGPT(LanguageModel):
                 m.api_dict(has_system_role=self.info().allows_system_message)
                 for m in (llm_messages)
             ],
-
+            max_completion_tokens=max_tokens,
             stream=self.get_stream(),
         )
         if self.get_stream():
```
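The request now passes `max_completion_tokens`, the newer OpenAI chat-completions parameter name (the older `max_tokens` is deprecated for newer models). A sketch of the equivalent raw call, assuming the `openai` Python package and an `OPENAI_API_KEY` in the environment:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4.1-mini",
    messages=[{"role": "user", "content": "Say hello in five words."}],
    max_completion_tokens=50,  # newer name for the output-token cap
)
print(response.choices[0].message.content)
```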
langroid/parsing/document_parser.py
CHANGED
```diff
@@ -31,7 +31,7 @@ if TYPE_CHECKING:
     from PIL import Image
 
     from langroid.mytypes import DocMetaData, Document
-    from langroid.parsing.parser import Parser, ParsingConfig
+    from langroid.parsing.parser import LLMPdfParserConfig, Parser, ParsingConfig
 
 logger = logging.getLogger(__name__)
 
@@ -1040,7 +1040,8 @@ class LLMPdfParser(DocumentParser):
             raise ValueError(
                 "LLMPdfParser requires a llm-based config in pdf parsing config"
             )
-        self.
+        self.llm_parser_config: LLMPdfParserConfig = config.pdf.llm_parser_config
+        self.model_name = self.llm_parser_config.model_name
 
         # Ensure output directory exists
         self.OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
@@ -1059,9 +1060,7 @@ class LLMPdfParser(DocumentParser):
         temp_file.close()
         self.output_filename = Path(temp_file.name)
 
-        self.max_tokens = (
-            config.pdf.llm_parser_config.max_tokens or self.DEFAULT_MAX_TOKENS
-        )
+        self.max_tokens = self.llm_parser_config.max_tokens or self.DEFAULT_MAX_TOKENS
 
         """
         If True, each PDF page is processed as a separate chunk,
@@ -1069,12 +1068,12 @@ class LLMPdfParser(DocumentParser):
         grouped into chunks based on `max_token_limit` before being sent
         to the LLM.
         """
-        self.split_on_page =
+        self.split_on_page = self.llm_parser_config.split_on_page or False
 
         # Rate limiting parameters
         import asyncio
 
-        self.requests_per_minute =
+        self.requests_per_minute = self.llm_parser_config.requests_per_minute or 5
 
         """
         A semaphore to control the number of concurrent requests to the LLM,
```
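The constructor now reads `split_on_page` and `requests_per_minute` from the shared `llm_parser_config`, and the surrounding docstrings describe a semaphore that bounds concurrent LLM calls. A minimal sketch of that rate-limiting pattern; it is illustrative, not the parser's actual implementation:

```python
import asyncio

REQUESTS_PER_MINUTE = 5  # same default as in the diff above

async def call_llm(i: int, sem: asyncio.Semaphore) -> None:
    async with sem:
        # hold the slot long enough that at most
        # REQUESTS_PER_MINUTE requests start per minute
        print(f"request {i} sent")
        await asyncio.sleep(60 / REQUESTS_PER_MINUTE)

async def main() -> None:
    sem = asyncio.Semaphore(REQUESTS_PER_MINUTE)
    await asyncio.gather(*(call_llm(i, sem) for i in range(8)))

asyncio.run(main())
```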
```diff
@@ -1201,6 +1200,20 @@ class LLMPdfParser(DocumentParser):
             pdf_chunks = pool.map(self._merge_pages_into_pdf_with_metadata, chunks)
             return pdf_chunks
 
+    @staticmethod
+    def _page_num_str(page_numbers: Any) -> str:
+        """
+        Converts page numbers to a formatted string.
+        """
+        if isinstance(page_numbers, list):
+            if len(page_numbers) == 0:
+                return ""
+            return str(page_numbers[0]) + "-" + str(page_numbers[-1])
+        elif isinstance(page_numbers, int):
+            return str(page_numbers)
+        else:
+            return str(page_numbers).replace(" ", "-")
+
     async def _send_chunk_to_llm(self, chunk: Dict[str, Any]) -> str:
         """
         Sends a PDF chunk to the LLM API and returns the response text.
```
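The new `_page_num_str` helper normalizes whatever `page_numbers` value a chunk carries into a short label, used below for filenames and error messages. Its logic, copied from the diff above and exercised standalone:

```python
from typing import Any

def page_num_str(page_numbers: Any) -> str:
    if isinstance(page_numbers, list):
        if len(page_numbers) == 0:
            return ""
        return str(page_numbers[0]) + "-" + str(page_numbers[-1])
    elif isinstance(page_numbers, int):
        return str(page_numbers)
    else:
        return str(page_numbers).replace(" ", "-")

assert page_num_str([3, 4, 5]) == "3-5"
assert page_num_str([]) == ""
assert page_num_str(7) == "7"
assert page_num_str("7 9") == "7-9"
```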
```diff
@@ -1217,8 +1230,10 @@ class LLMPdfParser(DocumentParser):
         llm_config = OpenAIGPTConfig(
             chat_model=self.model_name,
             max_output_tokens=self.max_tokens,
+            timeout=self.llm_parser_config.timeout,
         )
         llm = OpenAIGPT(config=llm_config)
+        page_nums = self._page_num_str(chunk.get("page_numbers", "?"))
         base64_string = base64.b64encode(chunk["pdf_bytes"]).decode("utf-8")
         data_uri = f"data:application/pdf;base64,{base64_string}"
         if "gemini" in self.model_name.lower():
@@ -1226,7 +1241,9 @@ class LLMPdfParser(DocumentParser):
                 type="image_url",
                 image_url=dict(url=data_uri),
             )
-        elif "claude" in self.model_name.lower():
+        elif "claude" in self.model_name.lower():
+            # optimistrally try this: some API proxies like litellm
+            # support this, and others may not.
             file_content = dict(
                 type="file",
                 file=dict(
@@ -1234,18 +1251,11 @@ class LLMPdfParser(DocumentParser):
                 ),
             )
         else:
-
-            logger.warning(
-                f"""
-                File uploads may not be supported for this model
-                {self.model_name}. But attempting to
-                use OpenAI-like file upload.
-                """,
-            )
+            # fallback: assume file upload is similar to OpenAI API
             file_content = dict(
                 type="file",
                 file=dict(
-                    filename="
+                    filename=f"pages-{page_nums}.pdf",
                     file_data=data_uri,
                 ),
             )
@@ -1304,9 +1314,14 @@ class LLMPdfParser(DocumentParser):
                     await asyncio.sleep(delay)
                 else:
                     # Log failure after max retries
+                    page_nums = chunk.get("page_numbers", "Unknown")
                     logging.error(
-                        "
-
+                        f"""
+                        Max retries reached for pages {page_nums}.
+                        It is possible your LLM API provider for
+                        the model {self.model_name} does not support
+                        file uploads via an OpenAI-compatible API.
+                        """,
                     )
                     break
         return ""  # Return empty string if all retries fail
```
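Taken together, these hunks build a model-specific message part for each PDF chunk: a base64 `image_url` data URI for Gemini, a `file` part for Claude-style endpoints (the comment notes that proxies such as litellm may accept it), and an OpenAI-style `file` part, now with a page-range filename, as the fallback; the old warning becomes a clearer max-retries error. A sketch of the two part shapes, with dummy bytes:

```python
import base64

pdf_bytes = b"%PDF-1.4 ..."  # dummy bytes, not a real PDF
data_uri = "data:application/pdf;base64," + base64.b64encode(pdf_bytes).decode("utf-8")

# Gemini-style part (the PDF is sent as an image_url data URI):
gemini_part = dict(type="image_url", image_url=dict(url=data_uri))

# OpenAI-style fallback part, with the page-range filename from _page_num_str:
openai_part = dict(
    type="file",
    file=dict(filename="pages-3-5.pdf", file_data=data_uri),
)
```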
langroid/parsing/parser.py
CHANGED
{langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langroid
-Version: 0.51.0
+Version: 0.51.2
 Summary: Harness LLMs with Multi-Agent Programming
 Author-email: Prasad Chalasani <pchalasani@gmail.com>
 License: MIT
@@ -846,7 +846,7 @@ import langroid.language_models as lm
 
 mdl = lm.OpenAIGPT(
     lm.OpenAIGPTConfig(
-        chat_model=lm.OpenAIChatModel.
+        chat_model=lm.OpenAIChatModel.GPT4o,  # or, e.g. "ollama/qwen2.5"
     ),
 )
 
```
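The quick-start snippet in the package README (shipped in METADATA) now pins the example to `GPT4o`. Completed in the README's own style; the chat call is adapted from langroid's quick-start and should be treated as a sketch:

```python
import langroid.language_models as lm

mdl = lm.OpenAIGPT(
    lm.OpenAIGPTConfig(
        chat_model=lm.OpenAIChatModel.GPT4o,  # or, e.g. "ollama/qwen2.5"
    ),
)
response = mdl.chat("What is the capital of Ontario?", max_tokens=10)
print(response.message)
```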
{langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/RECORD
CHANGED
```diff
@@ -5,8 +5,8 @@ langroid/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/__init__.py,sha256=ll0Cubd2DZ-fsCMl7e10hf9ZjFGKzphfBco396IKITY,786
 langroid/agent/base.py,sha256=bs5OLCf534mhsdR7Rgf27GqVNuSV2bOVbD46Y86mGFA,79829
 langroid/agent/batch.py,sha256=vi1r5i1-vN80WfqHDSwjEym_KfGsqPGUtwktmiK1nuk,20635
-langroid/agent/chat_agent.py,sha256=
-langroid/agent/chat_document.py,sha256=
+langroid/agent/chat_agent.py,sha256=mIkf3kq5m1RPXeBb6U52pXB9itum0ChcpXABC_g-Xfs,85082
+langroid/agent/chat_document.py,sha256=PW445VpZ_MLkC-dllsLw_kpGtsgviX9zyYUV1mnbgL8,17760
 langroid/agent/openai_assistant.py,sha256=JkAcs02bIrgPNVvUWVR06VCthc5-ulla2QMBzux_q6o,34340
 langroid/agent/task.py,sha256=HB6N-Jn80HFqCf0ZYOC1v3Bn3oO7NLjShHQJJFwW0q4,90557
 langroid/agent/tool_message.py,sha256=BhjP-_TfQ2tgxuY4Yo_JHLOwwt0mJ4BwjPnREvEY4vk,14744
@@ -14,13 +14,13 @@ langroid/agent/xml_tool_message.py,sha256=6SshYZJKIfi4mkE-gIoSwjkEYekQ8GwcSiCv7a
 langroid/agent/callbacks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/callbacks/chainlit.py,sha256=UHB6P_J40vsVnssosqkpkOVWRf9NK4TOY0_G2g_Arsg,20900
 langroid/agent/special/__init__.py,sha256=gik_Xtm_zV7U9s30Mn8UX3Gyuy4jTjQe9zjiE3HWmEo,1273
-langroid/agent/special/doc_chat_agent.py,sha256=
+langroid/agent/special/doc_chat_agent.py,sha256=ALp2rv12J-ChRCxVtflDwz6n0qIbUAymldIy8qpsvrg,65236
 langroid/agent/special/doc_chat_task.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/lance_doc_chat_agent.py,sha256=s8xoRs0gGaFtDYFUSIRchsgDVbS5Q3C2b2mr3V1Fd-Q,10419
 langroid/agent/special/lance_tools.py,sha256=qS8x4wi8mrqfbYV2ztFzrcxyhHQ0ZWOc-zkYiH7awj0,2105
 langroid/agent/special/relevance_extractor_agent.py,sha256=zIx8GUdVo1aGW6ASla0NPQjYYIpmriK_TYMijqAx3F8,4796
 langroid/agent/special/retriever_agent.py,sha256=o2UfqiCGME0t85SZ6qjK041_WZYqXSuV1SeH_3KtVuc,1931
-langroid/agent/special/table_chat_agent.py,sha256=
+langroid/agent/special/table_chat_agent.py,sha256=ii-xd7pRLLfRhamFZ04zpSkRO4xPn6Rm5qmA4z4N0HA,9661
 langroid/agent/special/arangodb/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langroid/agent/special/arangodb/arangodb_agent.py,sha256=12Y54c84c9qXV-YXRBcI5HaqyiY75JR4TmqlURYKJAM,25851
 langroid/agent/special/arangodb/system_messages.py,sha256=udwfLleTdyz_DuxHuoiv2wHEZoAPBPbwdF_ivjIfP5c,6867
@@ -69,11 +69,11 @@ langroid/embedding_models/protoc/embeddings_pb2.pyi,sha256=UkNy7BrNsmQm0vLb3NtGX
 langroid/embedding_models/protoc/embeddings_pb2_grpc.py,sha256=9dYQqkW3JPyBpSEjeGXTNpSqAkC-6FPtBHyteVob2Y8,2452
 langroid/language_models/__init__.py,sha256=3aD2qC1lz8v12HX4B-dilv27gNxYdGdeu1QvDlkqqHs,1095
 langroid/language_models/azure_openai.py,sha256=SW0Fp_y6HpERr9l6TtF6CYsKgKwjUf_hSL_2mhTV4wI,5034
-langroid/language_models/base.py,sha256=
+langroid/language_models/base.py,sha256=KJt1O9q1mLC_n8MUjNTCVc51I6WjBh1Rx0wNJ762cPg,27852
 langroid/language_models/config.py,sha256=9Q8wk5a7RQr8LGMT_0WkpjY8S4ywK06SalVRjXlfCiI,378
 langroid/language_models/mock_lm.py,sha256=5BgHKDVRWFbUwDT_PFgTZXz9-k8wJSA2e3PZmyDgQ1k,4022
-langroid/language_models/model_info.py,sha256=
-langroid/language_models/openai_gpt.py,sha256=
+langroid/language_models/model_info.py,sha256=0NE1zWNUHJwcM5jhwNxUqGjbpek-Nq7ljGdWpM8R3RQ,13380
+langroid/language_models/openai_gpt.py,sha256=hUEj97dUPkwOpByLAIWyVR2AI9BiEweSvhUhIYidVQ8,84831
 langroid/language_models/utils.py,sha256=hC5p61P_Qlrowkm5wMap1A1b5ZUCwK_XhPIzAQk1T1s,5483
 langroid/language_models/prompt_formatter/__init__.py,sha256=2-5cdE24XoFDhifOLl8yiscohil1ogbP1ECkYdBlBsk,372
 langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
@@ -82,11 +82,11 @@ langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeu
 langroid/parsing/__init__.py,sha256=2oUWJJAxIavq9Wtw5RGlkXLq3GF3zgXeVLLW4j7yeb8,1138
 langroid/parsing/agent_chats.py,sha256=sbZRV9ujdM5QXvvuHVjIi2ysYSYlap-uqfMMUKulrW0,1068
 langroid/parsing/code_parser.py,sha256=5ze0MBytrGGkU69pA_bJDjRm6QZz_QYfPcIwkagUa7U,3796
-langroid/parsing/document_parser.py,sha256=
+langroid/parsing/document_parser.py,sha256=CdQlsOlcreKK6NHkKboZyv9qm1E0s8wK5E9eBeVolTw,57521
 langroid/parsing/md_parser.py,sha256=JUgsUpCaeAuBndmtDaJR9HMZaje1gmtXtaLXJHst3i8,21340
 langroid/parsing/para_sentence_split.py,sha256=AJBzZojP3zpB-_IMiiHismhqcvkrVBQ3ZINoQyx_bE4,2000
 langroid/parsing/parse_json.py,sha256=aADo38bAHQhC8on4aWZZzVzSDy-dK35vRLZsFI2ewh8,4756
-langroid/parsing/parser.py,sha256=
+langroid/parsing/parser.py,sha256=dUSWf_HCroalamDGYtcfJ4ZgFRFdJwgj_P5tB0t0hjo,15459
 langroid/parsing/pdf_utils.py,sha256=rmNJ9UzuBgXTAYwj1TtRJcD8h53x7cizhgyYHKO88I4,1513
 langroid/parsing/repo_loader.py,sha256=NpysuyzRHvgL3F4BB_wGo5sCUnZ3FOlVCJmZ7CaUdbs,30202
 langroid/parsing/routing.py,sha256=-FcnlqldzL4ZoxuDwXjQPNHgBe9F9-F4R6q7b_z9CvI,1232
@@ -129,7 +129,7 @@ langroid/vector_store/pineconedb.py,sha256=otxXZNaBKb9f_H75HTaU3lMHiaR2NUp5MqwLZ
 langroid/vector_store/postgres.py,sha256=wHPtIi2qM4fhO4pMQr95pz1ZCe7dTb2hxl4VYspGZoA,16104
 langroid/vector_store/qdrantdb.py,sha256=O6dSBoDZ0jzfeVBd7LLvsXu083xs2fxXtPa9gGX3JX4,18443
 langroid/vector_store/weaviatedb.py,sha256=Yn8pg139gOy3zkaPfoTbMXEEBCiLiYa1MU5d_3UA1K4,11847
-langroid-0.51.
-langroid-0.51.
-langroid-0.51.
-langroid-0.51.
+langroid-0.51.2.dist-info/METADATA,sha256=feRMKJHHb0YSO9WURjmzR82-X9ggaHd4-tm10PnKV2M,63642
+langroid-0.51.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langroid-0.51.2.dist-info/licenses/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+langroid-0.51.2.dist-info/RECORD,,
```
{langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/WHEEL
File without changes
{langroid-0.51.0.dist-info → langroid-0.51.2.dist-info}/licenses/LICENSE
File without changes