langroid 0.1.197__py3-none-any.whl → 0.1.198__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
langroid/agent/special/doc_chat_agent.py CHANGED
@@ -135,7 +135,7 @@ class DocChatAgentConfig(ChatAgentConfig):
              # NOTE: PDF parsing is extremely challenging, and each library
              # has its own strengths and weaknesses.
              # Try one that works for your use case.
-             # or "haystack", "unstructured", "pdfplumber", "fitz", "pypdf"
+             # or "unstructured", "pdfplumber", "fitz", "pypdf"
              library="pdfplumber",
          ),
      )
@@ -156,7 +156,7 @@ class DocChatAgentConfig(ChatAgentConfig):
          collection_name="doc-chat-lancedb",
          replace_collection=True,
          storage_path=".lancedb/data/",
-         embedding=hf_embed_config,
+         embedding=oai_embed_config,
      )
      llm: OpenAIGPTConfig = OpenAIGPTConfig(
          type="openai",
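Net effect of this hunk: DocChatAgentConfig now defaults to OpenAI embeddings for its LanceDB vector store instead of the previous HuggingFace sentence-transformer default. A minimal sketch of restoring local embeddings, assuming the `vecdb.embedding` field shown above and the SentenceTransformerEmbeddingsConfig class that appears later in this diff (the model name is illustrative, not from this diff):

    from langroid.agent.special.doc_chat_agent import DocChatAgentConfig
    from langroid.embedding_models.models import SentenceTransformerEmbeddingsConfig

    cfg = DocChatAgentConfig()
    # swap the new OpenAI default back to a local sentence-transformer model
    cfg.vecdb.embedding = SentenceTransformerEmbeddingsConfig(
        model_type="sentence-transformer",
        model_name="BAAI/bge-large-en-v1.5",  # illustrative model choice
    )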
langroid/agent/tool_message.py CHANGED
@@ -82,6 +82,9 @@ class ToolMessage(ABC, BaseModel):
          ex = choice(cls.examples())
          return ex.json_example()
 
+     def to_json(self) -> str:
+         return self.json(indent=4, exclude={"result", "purpose"})
+
      def json_example(self) -> str:
          return self.json(indent=4, exclude={"result", "purpose"})
 
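The new `to_json()` is, for now, byte-for-byte the same serialization as `json_example()` under a more natural name. A usage sketch with a hypothetical tool subclass (not part of this diff):

    from langroid.agent.tool_message import ToolMessage

    class SquareTool(ToolMessage):  # hypothetical example tool
        request: str = "square"
        purpose: str = "To compute the square of a <number>"
        number: int

    # prints the tool-call JSON; the `result` and `purpose` fields are excluded
    print(SquareTool(number=5).to_json())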
langroid/embedding_models/models.py CHANGED
@@ -6,7 +6,6 @@ from dotenv import load_dotenv
  from openai import OpenAI
 
  from langroid.embedding_models.base import EmbeddingModel, EmbeddingModelsConfig
- from langroid.language_models.utils import retry_with_exponential_backoff
  from langroid.mytypes import Embeddings
  from langroid.parsing.utils import batched
 
@@ -26,6 +25,58 @@ class SentenceTransformerEmbeddingsConfig(EmbeddingModelsConfig):
      context_length: int = 512
 
 
+ class EmbeddingFunctionCallable:
+     """
+     A callable class designed to generate embeddings for a list of texts using
+     the OpenAI API, with automatic retries on failure.
+
+     Attributes:
+         model (OpenAIEmbeddings): An instance of OpenAIEmbeddings that provides
+             configuration and utilities for generating embeddings.
+
+     Methods:
+         __call__(input: List[str]) -> Embeddings: Generate embeddings for
+             a list of input texts.
+     """
+
+     def __init__(self, model: "OpenAIEmbeddings"):
+         """
+         Initialize the EmbeddingFunctionCallable with a specific model.
+
+         Args:
+             model (OpenAIEmbeddings): An instance of OpenAIEmbeddings to use for
+                 generating embeddings.
+         """
+         self.model = model
+
+     def __call__(self, input: List[str]) -> Embeddings:
+         """
+         Generate embeddings for a given list of input texts using the OpenAI API,
+         with retries on failure.
+
+         This method:
+         - Truncates each text in the input list to the model's maximum context length.
+         - Processes the texts in batches to generate embeddings efficiently.
+         - Automatically retries the embedding generation process with exponential
+           backoff in case of failures.
+
+         Args:
+             input (List[str]): A list of input texts to generate embeddings for.
+
+         Returns:
+             Embeddings: A list of embedding vectors corresponding to the input texts.
+         """
+         tokenized_texts = self.model.truncate_texts(input)
+         embeds = []
+         for batch in batched(tokenized_texts, 500):
+             result = self.model.client.embeddings.create(
+                 input=batch, model=self.model.config.model_name
+             )
+             batch_embeds = [d.embedding for d in result.data]
+             embeds.extend(batch_embeds)
+         return embeds
+
+
  class OpenAIEmbeddings(EmbeddingModel):
      def __init__(self, config: OpenAIEmbeddingsConfig = OpenAIEmbeddingsConfig()):
          super().__init__()
@@ -56,19 +107,7 @@ class OpenAIEmbeddings(EmbeddingModel):
      ]
 
      def embedding_fn(self) -> Callable[[List[str]], Embeddings]:
-         @retry_with_exponential_backoff
-         def fn(texts: List[str]) -> Embeddings:
-             tokenized_texts = self.truncate_texts(texts)
-             embeds = []
-             for batch in batched(tokenized_texts, 500):
-                 result = self.client.embeddings.create(
-                     input=batch, model=self.config.model_name
-                 )
-                 batch_embeds = [d.embedding for d in result.data]
-                 embeds.extend(batch_embeds)
-             return embeds
-
-         return fn
+         return EmbeddingFunctionCallable(self)
 
      @property
      def embedding_dims(self) -> int:
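The closure decorated with `retry_with_exponential_backoff` becomes a module-level callable class. A likely motivation (not stated in the diff) is the move to chromadb 0.4.x visible in the METADATA changes below, where embedding functions are expected to be callables taking a single `input` argument; a top-level class instance is also picklable, unlike a decorated local closure. Note that the new class's docstring still promises automatic retries even though the retry decorator is dropped by this change. A minimal usage sketch, assuming a valid OPENAI_API_KEY in the environment:

    from langroid.embedding_models.models import (
        OpenAIEmbeddings,
        OpenAIEmbeddingsConfig,
    )

    model = OpenAIEmbeddings(OpenAIEmbeddingsConfig())
    embed_fn = model.embedding_fn()  # now an EmbeddingFunctionCallable instance
    vectors = embed_fn(["hello world", "goodbye world"])
    print(len(vectors), len(vectors[0]))  # 2 vectors, one per input text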
langroid/language_models/prompt_formatter/hf_formatter.py CHANGED
@@ -6,11 +6,10 @@ models will have the same tokenizer, so we just use the first one.
  """
  import logging
  import re
- from typing import List, Set
+ from typing import Any, List, Set, Type
 
  from huggingface_hub import HfApi, ModelFilter
  from jinja2.exceptions import TemplateError
- from transformers import AutoTokenizer
 
  from langroid.language_models.base import LanguageModel, LLMMessage, Role
  from langroid.language_models.config import HFPromptFormatterConfig
@@ -19,6 +18,31 @@ from langroid.language_models.prompt_formatter.base import PromptFormatter
  logger = logging.getLogger(__name__)
 
 
+ def try_import_AutoTokenizer() -> Type[Any]:
+     """
+     Attempts to import the AutoTokenizer class from the transformers package.
+     Returns:
+         The AutoTokenizer class if successful.
+     Raises:
+         ImportError: If the transformers package is not installed.
+     """
+     try:
+         from transformers import AutoTokenizer
+
+         return AutoTokenizer  # type: ignore
+     except ImportError:
+         raise ImportError(
+             """
+             You are trying to use the HuggingFace transformers.AutoTokenizer,
+             but the `transformers` package is not installed
+             by default with Langroid. Please install langroid using the
+             `transformers` extra, like so:
+             pip install "langroid[transformers]"
+             or equivalent.
+             """
+         )
+
+
  def find_hf_formatter(model_name: str) -> str:
      hf_api = HfApi()
      # try to find a matching model, with progressively shorter prefixes of model_name
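This is the standard deferred-import pattern for optional dependencies: the module now imports cleanly without `transformers` installed, and only the code paths that actually need a tokenizer pay the import cost or raise. A sketch of using the returned class, assuming `transformers` is installed; `gpt2` is just a small illustrative checkpoint:

    AutoTokenizer = try_import_AutoTokenizer()
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    # find_hf_formatter() keys off this attribute; gpt2 defines no chat template
    print(tokenizer.chat_template)  # None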
@@ -37,6 +61,7 @@ def find_hf_formatter(model_name: str) -> str:
              mdl = next(models)
          except StopIteration:
              continue
+         AutoTokenizer = try_import_AutoTokenizer()
          tokenizer = AutoTokenizer.from_pretrained(mdl.id)
          if tokenizer.chat_template is not None:
              return str(mdl.id)
@@ -60,6 +85,7 @@ class HFFormatter(PromptFormatter):
              mdl = next(models)
          except StopIteration:
              raise ValueError(f"Model {config.model_name} not found on HuggingFace Hub")
+         AutoTokenizer = try_import_AutoTokenizer()
          self.tokenizer = AutoTokenizer.from_pretrained(mdl.id)
          if self.tokenizer.chat_template is None:
              raise ValueError(
langroid/parsing/document_parser.py CHANGED
@@ -11,7 +11,6 @@ import requests
 
  from langroid.mytypes import DocMetaData, Document
  from langroid.parsing.parser import Parser, ParsingConfig
- from langroid.parsing.urls import url_to_tempfile
 
  logger = logging.getLogger(__name__)
 
@@ -54,8 +53,6 @@ class DocumentParser(Parser):
              return PDFPlumberParser(source, config)
          elif config.pdf.library == "unstructured":
              return UnstructuredPDFParser(source, config)
-         elif config.pdf.library == "haystack":
-             return HaystackPDFParser(source, config)
          else:
              raise ValueError(
                  f"Unsupported PDF library specified: {config.pdf.library}"
@@ -301,59 +298,23 @@ class PDFPlumberParser(DocumentParser):
          return self.fix_text(page.extract_text())
 
 
- class HaystackPDFParser(DocumentParser):
-     """
-     Parser for processing PDFs using the `haystack` library.
-     """
-
-     def get_doc_chunks(self) -> List[Document]:
-         """
-         Overrides the base class method to use the `haystack` library.
-         See there for more details.
-         """
-
-         from haystack.nodes import PDFToTextConverter, PreProcessor
-
-         converter = PDFToTextConverter(
-             remove_numeric_tables=True,
-         )
-         path = self.source
-         if path.startswith(("http://", "https://")):
-             path = url_to_tempfile(path)
-         doc = converter.convert(file_path=path, meta=None)
-         # note self.config.chunk_size is in token units,
-         # and we use an approximation of 75 words per 100 tokens
-         # to convert to word units
-         preprocessor = PreProcessor(
-             clean_empty_lines=True,
-             clean_whitespace=True,
-             clean_header_footer=False,
-             split_by="word",
-             split_length=int(0.75 * self.config.chunk_size),
-             split_overlap=int(0.75 * self.config.overlap),
-             split_respect_sentence_boundary=True,
-             add_page_number=True,
-         )
-         chunks = preprocessor.process(doc)
-         return [
-             Document(
-                 content=chunk.content,
-                 metadata=DocMetaData(
-                     source=f"{self.source} page {chunk.meta['page']}",
-                     is_chunk=True,
-                 ),
-             )
-             for chunk in chunks
-         ]
-
-
  class UnstructuredPDFParser(DocumentParser):
      """
      Parser for processing PDF files using the `unstructured` library.
      """
 
      def iterate_pages(self) -> Generator[Tuple[int, Any], None, None]:  # type: ignore
-         from unstructured.partition.pdf import partition_pdf
+         try:
+             from unstructured.partition.pdf import partition_pdf
+         except ImportError:
+             raise ImportError(
+                 """
+                 The `unstructured` library is not installed by default with langroid.
+                 To include this library, please install langroid with the
+                 `unstructured` extra by running `pip install "langroid[unstructured]"`
+                 or equivalent.
+                 """
+             )
 
          # from unstructured.chunking.title import chunk_by_title
@@ -367,7 +328,7 @@ class UnstructuredPDFParser(DocumentParser):
              Please try a different library by setting the `library` field
              in the `pdf` section of the `parsing` field in the config file.
              Supported libraries are:
-             fitz, pypdf, pdfplumber, unstructured, haystack
+             fitz, pypdf, pdfplumber, unstructured
              """
          )
 
@@ -406,7 +367,17 @@ class UnstructuredDocxParser(DocumentParser):
      """
 
      def iterate_pages(self) -> Generator[Tuple[int, Any], None, None]:  # type: ignore
-         from unstructured.partition.docx import partition_docx
+         try:
+             from unstructured.partition.docx import partition_docx
+         except ImportError:
+             raise ImportError(
+                 """
+                 The `unstructured` library is not installed by default with langroid.
+                 To include this library, please install langroid with the
+                 `unstructured` extra by running `pip install "langroid[unstructured]"`
+                 or equivalent.
+                 """
+             )
 
          elements = partition_docx(file=self.doc_bytes, include_page_breaks=True)
 
@@ -447,7 +418,17 @@ class UnstructuredDocxParser(DocumentParser):
 
  class UnstructuredDocParser(UnstructuredDocxParser):
      def iterate_pages(self) -> Generator[Tuple[int, Any], None, None]:  # type: ignore
-         from unstructured.partition.doc import partition_doc
+         try:
+             from unstructured.partition.doc import partition_doc
+         except ImportError:
+             raise ImportError(
+                 """
+                 The `unstructured` library is not installed by default with langroid.
+                 To include this library, please install langroid with the
+                 `unstructured` extra by running `pip install "langroid[unstructured]"`
+                 or equivalent.
+                 """
+             )
 
          elements = partition_doc(filename=self.source, include_page_breaks=True)
 
langroid/parsing/json.py CHANGED
@@ -79,6 +79,26 @@ def replace_undefined(s: str, undefined_placeholder: str = '"<undefined>"') -> s
      return s
 
 
+ def repair_newlines(s: str) -> str:
+     """
+     Attempt to load as json, and if it fails, try with newlines replaced by space.
+     Intended to handle cases where weak LLMs produce JSON-like strings where
+     some string-values contain explicit newlines, e.g.:
+     {"text": "This is a text\n with a newline"}
+     These would not be valid JSON, so we try to clean them up here.
+     """
+     try:
+         json.loads(s)
+         return s
+     except Exception:
+         try:
+             s = s.replace("\n", " ")
+             json.loads(s)
+             return s
+         except Exception:
+             return s
+
+
  def extract_top_level_json(s: str) -> List[str]:
      """Extract all top-level JSON-formatted substrings from a given string.
 
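A quick demonstration of the repair, assuming the import path of this file (langroid/parsing/json.py); note the string contains a raw newline character, which is illegal inside a JSON string:

    import json

    from langroid.parsing.json import repair_newlines

    bad = '{"text": "This is a text\n with a newline"}'  # raw newline: invalid JSON
    fixed = repair_newlines(bad)
    print(json.loads(fixed)["text"])  # This is a text  with a newline

Replacing every newline also flattens legitimately pretty-printed JSON, but that is harmless: newlines between JSON tokens are insignificant whitespace.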
@@ -96,6 +116,7 @@ def extract_top_level_json(s: str) -> List[str]:
          for candidate in json_candidates
      ]
      candidates = [replace_undefined(candidate) for candidate in normalized_candidates]
+     candidates = [repair_newlines(candidate) for candidate in candidates]
      top_level_jsons = [
          candidate for candidate in candidates if is_valid_json(candidate)
      ]
langroid/parsing/parser.py CHANGED
@@ -19,9 +19,7 @@ class Splitter(str, Enum):
 
 
  class PdfParsingConfig(BaseSettings):
-     library: Literal[
-         "fitz", "pdfplumber", "pypdf", "unstructured", "haystack"
-     ] = "pdfplumber"
+     library: Literal["fitz", "pdfplumber", "pypdf", "unstructured"] = "pdfplumber"
 
 
  class DocxParsingConfig(BaseSettings):
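With "haystack" removed from the Literal, selecting it now fails at pydantic validation time instead of deep inside the parser factory. A configuration sketch, assuming ParsingConfig exposes the `pdf` field as elsewhere in this module:

    from langroid.parsing.parser import ParsingConfig, PdfParsingConfig

    parsing = ParsingConfig(
        pdf=PdfParsingConfig(library="fitz"),  # fitz, pdfplumber, pypdf, or unstructured
    )
    # PdfParsingConfig(library="haystack") would now raise a validation error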
langroid/vector_store/chromadb.py CHANGED
@@ -141,10 +141,16 @@ class ChromaDB(VectorStore):
          return self._docs_from_results(results)
 
      def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
-         results = self.collection.get(ids=ids, include=["documents", "metadatas"])
-         results["documents"] = [results["documents"]]
-         results["metadatas"] = [results["metadatas"]]
-         return self._docs_from_results(results)
+         # get them one by one since chroma mangles the order of the results
+         # when fetched from a list of ids.
+         results = [
+             self.collection.get(ids=[id], include=["documents", "metadatas"])
+             for id in ids
+         ]
+         final_results = {}
+         final_results["documents"] = [[r["documents"][0] for r in results]]
+         final_results["metadatas"] = [[r["metadatas"][0] for r in results]]
+         return self._docs_from_results(final_results)
 
      def delete_collection(self, collection_name: str) -> None:
          self.client.delete_collection(name=collection_name)
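The trade-off is one Chroma round-trip per id instead of a single batched `get`, in exchange for results that line up with the requested order (a batched `get` returns results in Chroma's own internal order). A sketch of the guarantee, assuming the ChromaDBConfig class from the same module and hypothetical ids on a populated store:

    from langroid.vector_store.chromadb import ChromaDB, ChromaDBConfig

    vecstore = ChromaDB(ChromaDBConfig(collection_name="demo"))
    # ... after adding documents with ids "id-1" .. "id-3" ...
    docs = vecstore.get_documents_by_ids(["id-3", "id-1", "id-2"])
    # docs now come back aligned with the requested ids: id-3, id-1, id-2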
langroid-0.1.197.dist-info/METADATA → langroid-0.1.198.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: langroid
- Version: 0.1.197
+ Version: 0.1.198
  Summary: Harness LLMs with Multi-Agent Programming
  License: MIT
  Author: Prasad Chalasani
@@ -18,20 +18,21 @@ Provides-Extra: mysql
  Provides-Extra: neo4j
  Provides-Extra: postgres
  Provides-Extra: sciphi
+ Provides-Extra: transformers
+ Provides-Extra: unstructured
  Requires-Dist: agent-search (>=0.0.7,<0.0.8) ; extra == "sciphi"
  Requires-Dist: aiohttp (>=3.9.1,<4.0.0)
  Requires-Dist: async-generator (>=1.10,<2.0)
  Requires-Dist: autopep8 (>=2.0.2,<3.0.0)
  Requires-Dist: black[jupyter] (>=23.3.0,<24.0.0)
  Requires-Dist: bs4 (>=0.0.1,<0.0.2)
- Requires-Dist: chainlit (>=1.0.200,<2.0.0) ; extra == "chainlit"
- Requires-Dist: chromadb (==0.3.21)
+ Requires-Dist: chainlit (>=1.0.301,<2.0.0) ; extra == "chainlit"
+ Requires-Dist: chromadb (>=0.4.21,<=0.4.23)
  Requires-Dist: colorlog (>=6.7.0,<7.0.0)
  Requires-Dist: docstring-parser (>=0.15,<0.16)
  Requires-Dist: duckduckgo-search (>=4.4,<5.0)
  Requires-Dist: faker (>=18.9.0,<19.0.0)
  Requires-Dist: fakeredis (>=2.12.1,<3.0.0)
- Requires-Dist: farm-haystack[file-conversion,ocr,pdf,preprocessing] (>=1.21.1,<2.0.0)
  Requires-Dist: fire (>=0.5.0,<0.6.0)
  Requires-Dist: flake8 (>=6.0.0,<7.0.0)
  Requires-Dist: google-api-python-client (>=2.95.0,<3.0.0)
@@ -95,7 +96,7 @@ Requires-Dist: trafilatura (>=1.5.0,<2.0.0)
  Requires-Dist: typer (>=0.9.0,<0.10.0)
  Requires-Dist: types-redis (>=4.5.5.2,<5.0.0.0)
  Requires-Dist: types-requests (>=2.31.0.1,<3.0.0.0)
- Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18)
+ Requires-Dist: unstructured[docx,pdf,pptx] (>=0.10.16,<0.10.18) ; extra == "unstructured"
  Requires-Dist: wget (>=3.2,<4.0)
  Description-Content-Type: text/markdown
 
langroid-0.1.197.dist-info/RECORD → langroid-0.1.198.dist-info/RECORD CHANGED
@@ -10,7 +10,7 @@ langroid/agent/helpers.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  langroid/agent/junk,sha256=LxfuuW7Cijsg0szAzT81OjWWv1PMNI-6w_-DspVIO2s,339
  langroid/agent/openai_assistant.py,sha256=yBtxis64XOnxtJzlkwUoTm-wCyvKr4DGo9-laXYMok0,32654
  langroid/agent/special/__init__.py,sha256=xj4TvQ_oQX_xYPySbhmQAi2CPhuy_3yQPqqwzb4wsc0,943
- langroid/agent/special/doc_chat_agent.py,sha256=X-mPVWAleQBQnBy2V1MFV8EsnA1IvUVIU_KhFoNzjLA,48993
+ langroid/agent/special/doc_chat_agent.py,sha256=YRKhA_je3Tze1kpjqiOPBliopFw6Gea4ARlww4RmzM8,48982
  langroid/agent/special/lance_doc_chat_agent.py,sha256=pAIJchnBOVZnd2fxTteF0KSyZHMzTLKDj8vziTRuUUk,10184
  langroid/agent/special/lance_rag/__init__.py,sha256=-pq--upe-8vycYoTwxoomBnuUqrcRFUukmW3uBL1cFM,219
  langroid/agent/special/lance_rag/critic_agent.py,sha256=9izW4keCxVZEqrFOgyVUHD7N1vTXLkRynXYYd1Vpwzw,5785
@@ -33,7 +33,7 @@ langroid/agent/special/sql/utils/system_message.py,sha256=qKLHkvQWRQodTtPLPxr1GS
  langroid/agent/special/sql/utils/tools.py,sha256=6uB2424SLtmapui9ggcEr0ZTiB6_dL1-JRGgN8RK9Js,1332
  langroid/agent/special/table_chat_agent.py,sha256=GEUTP-VdtMXq4CcPV80gDQrCEn-ZFb9IhuRMtLN5I1o,9030
  langroid/agent/task.py,sha256=BxMGmwH0ZYbU5lylfQtU9qLMd9D9Qd6qqO1U2V_B0WM,49705
- langroid/agent/tool_message.py,sha256=HXre9B8kVnwcGTv-czO0y-Z0hMDIuf6TKiS16_6djEQ,8207
+ langroid/agent/tool_message.py,sha256=2kPsQUwi3ZzINTUNj10huKnZLjLp5SXmefacTHx8QDc,8304
  langroid/agent/tools/__init__.py,sha256=q-maq3k2BXhPAU99G0H6-j_ozoRvx15I1RFpPVicQIU,304
  langroid/agent/tools/duckduckgo_search_tool.py,sha256=lgBFIPGdEffyxFuP6NUqRVBXyqypqHHSQBf-06xWsZE,2460
  langroid/agent/tools/extract_tool.py,sha256=u5lL9rKBzaLBOrRyLnTAZ97pQ1uxyLP39XsWMnpaZpw,3789
@@ -52,7 +52,7 @@ langroid/cachedb/redis_cachedb.py,sha256=uEjxephnxaL8OqPGDYZnM__fpcTsLb0WTNS_AFi
  langroid/embedding_models/__init__.py,sha256=6wCH_UTl0EVzEMq6L4nqCkAkoc3xr46vR6CLjvAUnEI,410
  langroid/embedding_models/base.py,sha256=XJ1UZuafbfImxxP6-M2zE2_lMxi-nJWWwjA9X8leOiI,1553
  langroid/embedding_models/clustering.py,sha256=tZWElUqXl9Etqla0FAa7og96iDKgjqWjucZR_Egtp-A,6684
- langroid/embedding_models/models.py,sha256=0bQ8u2ee2ODcopGPusz9WYWI_PjR5Gbdy47qcSU8gCo,4603
+ langroid/embedding_models/models.py,sha256=zQTOHmhd9b_fitWWi-erndkf2k2LFaGz46G6AZVxryo,5970
  langroid/language_models/__init__.py,sha256=5L9ndEEC8iLJHjDJmYFTnv6-2-3xsxWUMHcugR8IeDs,821
  langroid/language_models/azure_openai.py,sha256=ncRCbKooqLVOY-PWQUIo9C3yTuKEFbAwyngXT_M4P7k,5989
  langroid/language_models/base.py,sha256=oZskZ9oT-_4kEk1M2515jQ4VOpf31M8NFvPr5knDTEU,21008
@@ -61,7 +61,7 @@ langroid/language_models/openai_assistants.py,sha256=9K-DEAL2aSWHeXj2hwCo2RAlK9_
  langroid/language_models/openai_gpt.py,sha256=W2Cxj13qScqnfJCHvZJIqDM9YMNOFAFhnsIuBcnmDac,49327
  langroid/language_models/prompt_formatter/__init__.py,sha256=9JXFF22QNMmbQV1q4nrIeQVTtA3Tx8tEZABLtLBdFyc,352
  langroid/language_models/prompt_formatter/base.py,sha256=eDS1sgRNZVnoajwV_ZIha6cba5Dt8xjgzdRbPITwx3Q,1221
- langroid/language_models/prompt_formatter/hf_formatter.py,sha256=3MQhu8--p168qPWXqlp_nK4phi-SuAUMqahSVyLHIkA,4177
+ langroid/language_models/prompt_formatter/hf_formatter.py,sha256=PS8w6K7ON5ANw0rU8KDrCtSqf2klxbR7plLKP1M4iXY,5057
  langroid/language_models/prompt_formatter/llama2_formatter.py,sha256=YdcO88qyBeuMENVIVvVqSYuEpvYSTndUe_jd6hVTko4,2899
  langroid/language_models/utils.py,sha256=J1Y1HoYPkwS7L-kuLRAGzjcseqAj_S8u_MaaqlOA9uk,4265
  langroid/mytypes.py,sha256=opL488mtHKob1uJeK_h1-kNjU5GZwkgCfXhBQCsONWU,2614
@@ -70,10 +70,10 @@ langroid/parsing/agent_chats.py,sha256=sbZRV9ujdM5QXvvuHVjIi2ysYSYlap-uqfMMUKulr
  langroid/parsing/code-parsing.md,sha256=--cyyNiSZSDlIwcjAV4-shKrSiRe2ytF3AdSoS_hD2g,3294
  langroid/parsing/code_parser.py,sha256=BbDAzp35wkYQ9U1dpf1ARL0lVyi0tfqEc6_eox2C090,3727
  langroid/parsing/config.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- langroid/parsing/document_parser.py,sha256=SEW53fnEsOrsJbVUy9Fq5ygQzF_5UiGB5_Ogkte1u2Y,16697
- langroid/parsing/json.py,sha256=2eO-0-VAYyBjeUbeB3FNw-8PKUSmnyFWaRb0EzLxoZk,3859
+ langroid/parsing/document_parser.py,sha256=4SmvB24P7S_bYxBI6o6uN2cfNc5o61--mPOjn30ZiOA,16228
+ langroid/parsing/json.py,sha256=1N2O5l4_8NzkZRwhm4axIDnUAXl0-9QQCpQD2vXlPA0,4533
  langroid/parsing/para_sentence_split.py,sha256=AJBzZojP3zpB-_IMiiHismhqcvkrVBQ3ZINoQyx_bE4,2000
- langroid/parsing/parser.py,sha256=727QivWlZNlQiRFgkxTZpPoTMqB2yaltOkAGqLZGI_Q,10513
+ langroid/parsing/parser.py,sha256=w7MAbj27X7SLkzuxx2nvwjLdxKTmSnmdth9_j3INnac,10487
  langroid/parsing/repo_loader.py,sha256=52jTajXOkq_66NCRKLMNQoGKMJ59H-m2CZB9arMT7Wo,29346
  langroid/parsing/search.py,sha256=xmQdAdTIwZ0REEUeQVFlGZlqf7k8Poah7-ALuyW7Ov0,8440
  langroid/parsing/spider.py,sha256=w_mHR1B4KOmxsBLoVI8kMkMTEbwTzeK3ath9fOMJrTk,3043
@@ -109,13 +109,13 @@ langroid/utils/web/login.py,sha256=1iz9eUAHa87vpKIkzwkmFa00avwFWivDSAr7QUhK7U0,2
  langroid/utils/web/selenium_login.py,sha256=mYI6EvVmne34N9RajlsxxRqJQJvV-WG4LGp6sEECHPw,1156
  langroid/vector_store/__init__.py,sha256=qOa3_BLvf8tjdUBT4Zq7pSLTY9TD2Fgw62UHHJWNu8w,557
  langroid/vector_store/base.py,sha256=JNk-2f6t_WCavizU332tOoZcXHP73RpobRk88Aus52w,13706
- langroid/vector_store/chromadb.py,sha256=Y80k6an5sN0cRWtcl78Xr-Ht87nd_hBjvkSU5OdCyY8,7312
+ langroid/vector_store/chromadb.py,sha256=fPD0OwPBSSUgZaQQcQjApeUCOaw17eW0MQ7XzVNmz9k,7559
  langroid/vector_store/lancedb.py,sha256=Vl0nWKqFyczgPRmWXLzof9UgOB0OhVZIuczY_rSAF10,17985
  langroid/vector_store/meilisearch.py,sha256=d2huA9P-NoYRuAQ9ZeXJmMKr7ry8u90RUSR28k2ecQg,11340
  langroid/vector_store/momento.py,sha256=j6Eo6oIDN2fe7lsBOlCXJn3uvvERHHTFL5QJfeREeOM,10044
  langroid/vector_store/qdrant_cloud.py,sha256=3im4Mip0QXLkR6wiqVsjV1QvhSElfxdFSuDKddBDQ-4,188
  langroid/vector_store/qdrantdb.py,sha256=_egbsP9SWBwmI827EDYSSOqfIQSmwNsmJfFTxrLpWYE,13457
- langroid-0.1.197.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
- langroid-0.1.197.dist-info/METADATA,sha256=hr_8uh8gl747E4gOZE1EfoK5-B1sxPshYkK18ox7CX8,45876
- langroid-0.1.197.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- langroid-0.1.197.dist-info/RECORD,,
+ langroid-0.1.198.dist-info/LICENSE,sha256=EgVbvA6VSYgUlvC3RvPKehSg7MFaxWDsFuzLOsPPfJg,1065
+ langroid-0.1.198.dist-info/METADATA,sha256=WDlQMgWoIG02-PK5mk2GGeDXelCoqaGD9qLcjak7MCQ,45883
+ langroid-0.1.198.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ langroid-0.1.198.dist-info/RECORD,,