langroid 0.1.101__tar.gz → 0.1.102__tar.gz

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in that registry.
Files changed (101)
  1. {langroid-0.1.101 → langroid-0.1.102}/PKG-INFO +1 -1
  2. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/batch.py +2 -2
  3. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/doc_chat_agent.py +48 -3
  4. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/retriever_agent.py +1 -1
  5. {langroid-0.1.101 → langroid-0.1.102}/langroid/mytypes.py +10 -4
  6. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/document_parser.py +1 -0
  7. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/parser.py +62 -31
  8. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/search.py +54 -49
  9. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/utils.py +26 -0
  10. langroid-0.1.102/langroid/utils/algorithms/graph.py +49 -0
  11. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/configuration.py +13 -0
  12. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/pydantic_utils.py +3 -1
  13. langroid-0.1.102/langroid/vector_store/base.py +317 -0
  14. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/chromadb.py +12 -19
  15. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/meilisearch.py +1 -0
  16. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/momento.py +1 -0
  17. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/qdrantdb.py +10 -4
  18. {langroid-0.1.101 → langroid-0.1.102}/pyproject.toml +1 -1
  19. {langroid-0.1.101 → langroid-0.1.102}/setup.py +2 -1
  20. langroid-0.1.101/langroid/vector_store/base.py +0 -161
  21. {langroid-0.1.101 → langroid-0.1.102}/LICENSE +0 -0
  22. {langroid-0.1.101 → langroid-0.1.102}/README.md +0 -0
  23. {langroid-0.1.101 → langroid-0.1.102}/langroid/__init__.py +0 -0
  24. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/__init__.py +0 -0
  25. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/base.py +0 -0
  26. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/chat_agent.py +0 -0
  27. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/chat_document.py +0 -0
  28. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/helpers.py +0 -0
  29. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/junk +0 -0
  30. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/__init__.py +0 -0
  31. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/recipient_validator_agent.py +0 -0
  32. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/relevance_extractor_agent.py +0 -0
  33. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/__init__.py +0 -0
  34. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/sql_chat_agent.py +0 -0
  35. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/utils/__init__.py +0 -0
  36. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/utils/description_extractors.py +0 -0
  37. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/utils/populate_metadata.py +0 -0
  38. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/utils/system_message.py +0 -0
  39. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/sql/utils/tools.py +0 -0
  40. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/table_chat_agent.py +0 -0
  41. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/task.py +0 -0
  42. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tool_message.py +0 -0
  43. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/__init__.py +0 -0
  44. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/extract_tool.py +0 -0
  45. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/generator_tool.py +0 -0
  46. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/google_search_tool.py +0 -0
  47. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/recipient_tool.py +0 -0
  48. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent/tools/segment_extract_tool.py +0 -0
  49. {langroid-0.1.101 → langroid-0.1.102}/langroid/agent_config.py +0 -0
  50. {langroid-0.1.101 → langroid-0.1.102}/langroid/cachedb/__init__.py +0 -0
  51. {langroid-0.1.101 → langroid-0.1.102}/langroid/cachedb/base.py +0 -0
  52. {langroid-0.1.101 → langroid-0.1.102}/langroid/cachedb/momento_cachedb.py +0 -0
  53. {langroid-0.1.101 → langroid-0.1.102}/langroid/cachedb/redis_cachedb.py +0 -0
  54. {langroid-0.1.101 → langroid-0.1.102}/langroid/embedding_models/__init__.py +0 -0
  55. {langroid-0.1.101 → langroid-0.1.102}/langroid/embedding_models/base.py +0 -0
  56. {langroid-0.1.101 → langroid-0.1.102}/langroid/embedding_models/clustering.py +0 -0
  57. {langroid-0.1.101 → langroid-0.1.102}/langroid/embedding_models/models.py +0 -0
  58. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/__init__.py +0 -0
  59. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/azure_openai.py +0 -0
  60. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/base.py +0 -0
  61. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/config.py +0 -0
  62. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/openai_gpt.py +0 -0
  63. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/prompt_formatter/__init__.py +0 -0
  64. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/prompt_formatter/base.py +0 -0
  65. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/prompt_formatter/llama2_formatter.py +0 -0
  66. {langroid-0.1.101 → langroid-0.1.102}/langroid/language_models/utils.py +0 -0
  67. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/__init__.py +0 -0
  68. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/agent_chats.py +0 -0
  69. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/code-parsing.md +0 -0
  70. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/code_parser.py +0 -0
  71. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/config.py +0 -0
  72. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/json.py +0 -0
  73. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/para_sentence_split.py +0 -0
  74. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/repo_loader.py +0 -0
  75. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/spider.py +0 -0
  76. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/table_loader.py +0 -0
  77. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/url_loader.py +0 -0
  78. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/url_loader_cookies.py +0 -0
  79. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/urls.py +0 -0
  80. {langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/web_search.py +0 -0
  81. {langroid-0.1.101 → langroid-0.1.102}/langroid/prompts/__init__.py +0 -0
  82. {langroid-0.1.101 → langroid-0.1.102}/langroid/prompts/dialog.py +0 -0
  83. {langroid-0.1.101 → langroid-0.1.102}/langroid/prompts/prompts_config.py +0 -0
  84. {langroid-0.1.101 → langroid-0.1.102}/langroid/prompts/templates.py +0 -0
  85. {langroid-0.1.101 → langroid-0.1.102}/langroid/prompts/transforms.py +0 -0
  86. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/__init__.py +0 -0
  87. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/constants.py +0 -0
  88. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/docker.py +0 -0
  89. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/globals.py +0 -0
  90. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/llms/__init__.py +0 -0
  91. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/llms/strings.py +0 -0
  92. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/logging.py +0 -0
  93. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/output/__init__.py +0 -0
  94. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/output/printing.py +0 -0
  95. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/system.py +0 -0
  96. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/web/__init__.py +0 -0
  97. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/web/login.py +0 -0
  98. {langroid-0.1.101 → langroid-0.1.102}/langroid/utils/web/selenium_login.py +0 -0
  99. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/__init__.py +0 -0
  100. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/lancedb.py +0 -0
  101. {langroid-0.1.101 → langroid-0.1.102}/langroid/vector_store/qdrant_cloud.py +0 -0
{langroid-0.1.101 → langroid-0.1.102}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: langroid
-Version: 0.1.101
+Version: 0.1.102
 Summary: Harness LLMs with Multi-Agent Programming
 License: MIT
 Author: Prasad Chalasani
{langroid-0.1.101 → langroid-0.1.102}/langroid/agent/batch.py
@@ -9,7 +9,7 @@ from rich.console import Console
 from langroid.agent.base import Agent
 from langroid.agent.chat_document import ChatDocument
 from langroid.agent.task import Task
-from langroid.utils.configuration import Settings, settings, temporary_settings
+from langroid.utils.configuration import quiet_mode, settings
 from langroid.utils.logging import setup_colored_logging

 console = Console(quiet=settings.quiet)
@@ -53,7 +53,7 @@ def run_batch_tasks(
         return output_map(result)

     async def _do_all() -> List[Any]:
-        with temporary_settings(Settings(quiet=True)):
+        with quiet_mode():
             return await asyncio.gather(  # type: ignore
                 *(_do_task(input, i) for i, input in enumerate(inputs))
             )
{langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/doc_chat_agent.py
@@ -66,6 +66,10 @@ You are a helpful assistant, helping me understand a collection of documents.
 """


+class DocChunkMetqdata(DocMetaData):
+    id: str
+
+
 class DocChatAgentConfig(ChatAgentConfig):
     """
     Attributes:
@@ -95,6 +99,7 @@ class DocChatAgentConfig(ChatAgentConfig):
     # It is False by default; its benefits depends on the context.
     hypothetical_answer: bool = False
     n_query_rephrases: int = 0
+    n_neighbor_chunks: int = 0  # how many neighbors on either side of match to retrieve
     use_fuzzy_match: bool = True
     use_bm25_search: bool = True
     cross_encoder_reranking_model: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"
@@ -122,6 +127,7 @@ class DocChatAgentConfig(ChatAgentConfig):
         min_chunk_chars=200,
         discard_chunk_chars=5,  # discard chunks with fewer than this many chars
         n_similar_docs=3,
+        n_neighbor_ids=0,  # num chunk IDs to store on either side of each chunk
         pdf=PdfParsingConfig(
             # NOTE: PDF parsing is extremely challenging, and each library
             # has its own strengths and weaknesses.
@@ -195,6 +201,7 @@ class DocChatAgent(ChatAgent):
         if self.vecdb is None:
             raise ValueError("VecDB not set")
         self.chunked_docs = self.vecdb.get_all_documents()
+        # used for lexical similarity e.g. keyword search (bm25 etc)
         self.chunked_docs_clean = [
             Document(content=preprocess_text(d.content), metadata=d.metadata)
             for d in self.chunked_docs
@@ -509,9 +516,13 @@ class DocChatAgent(ChatAgent):
         if self.chunked_docs is None:
             logger.warning("No chunked docs; cannot use fuzzy matching")
             return []
+        if self.chunked_docs_clean is None:
+            logger.warning("No cleaned chunked docs; cannot use fuzzy-search")
+            return []
         fuzzy_match_docs = find_fuzzy_matches_in_docs(
             query,
             self.chunked_docs,
+            self.chunked_docs_clean,
             k=self.config.parsing.n_similar_docs * multiple,
             words_before=1000,
             words_after=1000,
@@ -546,6 +557,36 @@ class DocChatAgent(ChatAgent):
         ]
         return passages

+    def add_context_window(
+        self,
+        docs_scores: List[Tuple[Document, float]],
+    ) -> List[Tuple[Document, float]]:
+        """
+        In each doc's metadata, there may be a window_ids field indicating
+        the ids of the chunks around the current chunk.
+        These window_ids may overlap, so we
+        - gather connected-components of overlapping windows,
+        - split each component into roughly equal parts,
+        - create a new document for each part, preserving metadata,
+
+        We may have stored a longer set of window_ids than we need.
+        We just want `neighbors` on each side of the center of window_ids.
+
+        Args:
+            docs (List[Document]): List of documents to add context window to.
+            scores (List[float]): List of match scores for each document.
+            neighbors (int, optional): Number of neighbors on "each side" of match to
+                retrieve. Defaults to 0.
+                "Each side" here means before and after the match,
+                in the original text.
+
+        Returns:
+            List[Tuple[Document, float]]: List of (Document, score) tuples.
+        """
+        if self.vecdb is None or self.config.n_neighbor_chunks == 0:
+            return docs_scores
+        return self.vecdb.add_context_window(docs_scores, self.config.n_neighbor_chunks)
+
     def get_relevant_chunks(
         self, query: str, query_proxies: List[str] = []
     ) -> List[Document]:
@@ -560,10 +601,11 @@ class DocChatAgent(ChatAgent):
             dynamically retrieved based on a window around a lexical match.

         These are the steps (some optional based on config):
-        - vector-embedding distance, from vecdb
-        - bm25-ranking (keyword similarity)
+        - semantic search based on vector-embedding distance, from vecdb
+        - lexical search using bm25-ranking (keyword similarity)
         - fuzzy matching (keyword similarity)
-        - re-ranking of doc-chunks using cross-encoder, pick top k
+        - re-ranking of doc-chunks by relevance to query, using cross-encoder,
+          and pick top k

         Args:
             query: original query (assumed to be in stand-alone form)
@@ -612,6 +654,9 @@ class DocChatAgent(ChatAgent):
         if len(passages) == 0:
             return []

+        passages_scores = [(p, 0.0) for p in passages]
+        passages_scores = self.add_context_window(passages_scores)
+        passages = [p for p, _ in passages_scores]
         # now passages can potentially have a lot of doc chunks,
         # so we re-rank them using a cross-encoder scoring model
         # https://www.sbert.net/examples/applications/retrieve_rerank
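To show how the two new neighbor-window settings fit together, here is a hedged sketch (not taken from the package; agent construction and ingestion are only indicated in comments). It relates parsing.n_neighbor_ids, which controls how many neighbor chunk IDs are stored at ingestion time, to n_neighbor_chunks, which controls how many neighbors are pulled back in at query time.

    # Sketch only: shows the two related settings; agent construction/ingestion omitted.
    from langroid.agent.special.doc_chat_agent import DocChatAgentConfig

    cfg = DocChatAgentConfig(
        n_neighbor_chunks=2,  # at query time: expand each match with up to 2 stored neighbors per side
    )
    cfg.parsing.n_neighbor_ids = 5  # at ingestion time: store up to 5 neighbor chunk IDs per side

    # With these set, retrieved passages are expanded via DocChatAgent.add_context_window()
    # (which delegates to the vector store) before cross-encoder re-ranking.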
{langroid-0.1.101 → langroid-0.1.102}/langroid/agent/special/retriever_agent.py
@@ -28,7 +28,7 @@ logger = logging.getLogger(__name__)


 class RecordMetadata(DocMetaData):
-    id: None | int | str = None
+    id: None | str = None


 class RecordDoc(Document):
{langroid-0.1.101 → langroid-0.1.102}/langroid/mytypes.py
@@ -26,6 +26,8 @@ class DocMetaData(BaseModel):

     source: str = "context"
     is_chunk: bool = False  # if it is a chunk, don't split
+    id: str | None = None  # unique id for the document
+    window_ids: List[str] = []  # for RAG: ids of chunks around this one

     def dict(self, *args: Any, **kwargs: Any) -> Dict[str, Any]:
         """
@@ -51,9 +53,10 @@ class Document(BaseModel):
     content: str
     metadata: DocMetaData

-    def _unique_hash_id(self) -> str:
+    @staticmethod
+    def hash_id(doc: str) -> str:
         # Encode the document as UTF-8
-        doc_utf8 = str(self).encode("utf-8")
+        doc_utf8 = str(doc).encode("utf-8")

         # Create a SHA256 hash object
         sha256_hash = hashlib.sha256()
@@ -69,8 +72,11 @@ class Document(BaseModel):

         return str(hash_uuid)

-    def id(self) -> Any:
-        if hasattr(self.metadata, "id"):
+    def _unique_hash_id(self) -> str:
+        return self.hash_id(str(self))
+
+    def id(self) -> str:
+        if hasattr(self.metadata, "id") and self.metadata.id is not None:
             return self.metadata.id
         else:
             return self._unique_hash_id()
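A small sketch (illustrative values only) of how the reworked id handling behaves after this change: an explicit metadata.id wins, otherwise the content-hash fallback is used, and hash_id is now callable on plain strings.

    # Sketch only; illustrates the new id resolution in langroid.mytypes.
    from langroid.mytypes import DocMetaData, Document

    d1 = Document(content="hello world", metadata=DocMetaData(source="test"))
    print(d1.id())  # no metadata.id set -> falls back to the SHA256-based hash id

    d2 = Document(content="hello world", metadata=DocMetaData(source="test", id="chunk-7"))
    print(d2.id())  # "chunk-7" -- the explicit id takes precedence

    # hash_id is now a staticmethod, so chunk ids can be computed from plain
    # strings without constructing a Document first:
    print(Document.hash_id("some chunk text"))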
{langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/document_parser.py
@@ -200,6 +200,7 @@ class DocumentParser(Parser):
                 ),
             )
         )
+        self.add_window_ids(docs)
         return docs

{langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/parser.py
@@ -1,6 +1,5 @@
 import logging
 from enum import Enum
-from functools import reduce
 from typing import List

 import tiktoken
@@ -36,6 +35,7 @@ class ParsingConfig(BaseSettings):
     min_chunk_chars: int = 350
     discard_chunk_chars: int = 5  # discard chunks with fewer than this many chars
     n_similar_docs: int = 4
+    n_neighbor_ids: int = 0  # window size to store around each chunk
     separators: List[str] = ["\n\n", "\n", " ", ""]
     token_encoding_model: str = "text-embedding-ada-002"
     pdf: PdfParsingConfig = PdfParsingConfig()
@@ -51,17 +51,42 @@ class Parser:
         tokens = self.tokenizer.encode(text)
         return len(tokens)

+    def add_window_ids(self, chunks: List[Document]) -> None:
+        """Chunks are consecutive parts of a single original document.
+        Add window_ids in metadata"""
+
+        # The original metadata.id (if any) is ignored since it will be same for all
+        # chunks and is useless. We want a distinct id for each chunk.
+        ids = [Document.hash_id(str(c)) for c in chunks]
+
+        k = self.config.n_neighbor_ids
+        n = len(ids)
+        window_ids = [ids[max(0, i - k) : min(n, i + k + 1)] for i in range(n)]
+        for i, c in enumerate(chunks):
+            if c.content.strip() == "":
+                continue
+            c.metadata.window_ids = window_ids[i]
+            c.metadata.id = ids[i]
+            c.metadata.is_chunk = True
+
     def split_simple(self, docs: List[Document]) -> List[Document]:
         if len(self.config.separators) == 0:
             raise ValueError("Must have at least one separator")
-        return [
-            Document(content=chunk.strip(), metadata=d.metadata)
-            for d in docs
-            for chunk in remove_extra_whitespace(d.content).split(
-                self.config.separators[0]
-            )
-            if chunk.strip() != ""
-        ]
+        final_docs = []
+        for d in docs:
+            if d.content.strip() == "":
+                continue
+            chunks = remove_extra_whitespace(d.content).split(self.config.separators[0])
+            chunk_docs = [
+                Document(
+                    content=c, metadata=d.metadata.copy(update=dict(is_chunk=True))
+                )
+                for c in chunks
+                if c.strip() != ""
+            ]
+            self.add_window_ids(chunk_docs)
+            final_docs += chunk_docs
+        return final_docs

     def split_para_sentence(self, docs: List[Document]) -> List[Document]:
         final_chunks = []
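To make the windowing scheme concrete, here is a small illustrative example (hypothetical short chunk ids in place of the real hash ids, with n_neighbor_ids assumed to be 1) of the window_ids that add_window_ids stores:

    # Hypothetical example: 4 consecutive chunks of one document, k = n_neighbor_ids = 1.
    ids = ["a", "b", "c", "d"]  # stand-ins for the SHA256-based chunk ids
    k, n = 1, len(ids)
    window_ids = [ids[max(0, i - k) : min(n, i + k + 1)] for i in range(n)]
    print(window_ids)
    # [['a', 'b'], ['a', 'b', 'c'], ['b', 'c', 'd'], ['c', 'd']]
    # Each chunk's metadata.window_ids thus records itself plus up to k neighbors
    # on each side, which add_context_window() can later stitch back together.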
@@ -95,28 +120,37 @@ class Parser:
         return final_chunks + chunks

     def _split_para_sentence_once(self, docs: List[Document]) -> List[Document]:
-        chunked_docs = [
-            [
-                Document(content=chunk.strip(), metadata=d.metadata)
-                for chunk in create_chunks(
-                    d.content, self.config.chunk_size, self.num_tokens
-                )
-                if chunk.strip() != ""
-            ]
-            for d in docs
-        ]
-        return reduce(lambda x, y: x + y, chunked_docs)
+        final_chunks = []
+        for d in docs:
+            if d.content.strip() == "":
+                continue
+            chunks = create_chunks(d.content, self.config.chunk_size, self.num_tokens)
+            chunk_docs = [
+                Document(
+                    content=c, metadata=d.metadata.copy(update=dict(is_chunk=True))
+                )
+                for c in chunks
+                if c.strip() != ""
+            ]
+            self.add_window_ids(chunk_docs)
+            final_chunks += chunk_docs
+
+        return final_chunks

     def split_chunk_tokens(self, docs: List[Document]) -> List[Document]:
-        chunked_docs = [
-            [
-                Document(content=chunk.strip(), metadata=d.metadata)
-                for chunk in self.chunk_tokens(d.content)
-                if chunk.strip() != ""
-            ]
-            for d in docs
-        ]
-        return reduce(lambda x, y: x + y, chunked_docs)
+        final_docs = []
+        for d in docs:
+            chunks = self.chunk_tokens(d.content)
+            chunk_docs = [
+                Document(
+                    content=c, metadata=d.metadata.copy(update=dict(is_chunk=True))
+                )
+                for c in chunks
+                if c.strip() != ""
+            ]
+            self.add_window_ids(chunk_docs)
+            final_docs += chunk_docs
+        return final_docs

     def chunk_tokens(
         self,
@@ -198,11 +232,8 @@ class Parser:
             # Increment the number of chunks
             num_chunks += 1

-        # Handle the remaining tokens
-        if tokens:
-            remaining_text = self.tokenizer.decode(tokens).replace("\n", " ").strip()
-            if len(remaining_text) > self.config.discard_chunk_chars:
-                chunks.append(remaining_text)
+        # There may be remaining tokens, but we discard them
+        # since we have already reached the maximum number of chunks

         return chunks

{langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/search.py
@@ -7,7 +7,6 @@ See tests for examples: tests/main/test_string_search.py
 """

 import difflib
-import re
 from typing import List, Tuple

 from nltk.corpus import stopwords
@@ -24,6 +23,7 @@ from .utils import download_nltk_resource
 def find_fuzzy_matches_in_docs(
     query: str,
     docs: List[Document],
+    docs_clean: List[Document],
     k: int,
     words_before: int | None = None,
     words_after: int | None = None,
@@ -49,45 +49,45 @@ def find_fuzzy_matches_in_docs(
         return []
     best_matches = process.extract(
         query,
-        [d.content for d in docs],
+        [d.content for d in docs_clean],
         limit=k,
         scorer=fuzz.partial_ratio,
     )

     real_matches = [m for m, score in best_matches if score > 50]
-
-    results = []
-    for match in real_matches:
-        words = match.split()
-        for doc in docs:
-            if match in doc.content:
-                words_in_text = doc.content.split()
-                first_word_idx = next(
-                    (
-                        i
-                        for i, word in enumerate(words_in_text)
-                        if word.startswith(words[0])
-                    ),
-                    -1,
-                )
-                if words_before is None:
-                    words_before = len(words_in_text)
-                if words_after is None:
-                    words_after = len(words_in_text)
-                if first_word_idx != -1:
-                    start_idx = max(0, first_word_idx - words_before)
-                    end_idx = min(
-                        len(words_in_text),
-                        first_word_idx + len(words) + words_after,
-                    )
-                    doc_match = Document(
-                        content=" ".join(words_in_text[start_idx:end_idx]),
-                        metadata=doc.metadata,
-                    )
-                    results.append(doc_match)
-                    break
+    # find the original docs that corresponding to the matches
+    orig_doc_matches = []
+    for i, m in enumerate(real_matches):
+        for j, doc_clean in enumerate(docs_clean):
+            if m in doc_clean.content:
+                orig_doc_matches.append(docs[j])
+                break
+    if words_after is None and words_before is None:
+        return orig_doc_matches
+
+    contextual_matches = []
+    for match in orig_doc_matches:
+        choice_text = match.content
+        contexts = []
+        while choice_text != "":
+            context, start_pos, end_pos = get_context(
+                query, choice_text, words_before, words_after
+            )
+            if context == "" or end_pos == 0:
+                break
+            contexts.append(context)
+            words = choice_text.split()
+            end_pos = min(end_pos, len(words))
+            choice_text = " ".join(words[end_pos:])
+        if len(contexts) > 0:
+            contextual_matches.append(
+                Document(
+                    content=" ... ".join(contexts),
+                    metadata=match.metadata,
+                )
+            )

-    return results
+    return contextual_matches


 def preprocess_text(text: str) -> str:
@@ -171,7 +171,7 @@ def get_context(
     text: str,
     words_before: int | None = 100,
     words_after: int | None = 100,
-) -> str:
+) -> Tuple[str, int, int]:
     """
     Returns a portion of text containing the best approximate match of the query,
     including b words before and a words after the match.
@@ -185,7 +185,9 @@ def get_context(
     Returns:
         str: A string containing b words before, the match, and a words after
             the best approximate match position of the query in the text. If no
-            match is found, returns "No match found".
+            match is found, returns empty string.
+        int: The start position of the match in the text.
+        int: The end position of the match in the text.

     Example:
     >>> get_context("apple", "The quick brown fox jumps over the apple.", 3, 2)
@@ -193,26 +195,29 @@ def get_context(
     """
     if words_after is None and words_before is None:
         # return entire text since we're not asked to return a bounded context
-        return text
+        return text, 0, 0
+
+    # make sure there is a good enough fu
+    if fuzz.partial_ratio(query, text) < 70:
+        return "", 0, 0

     sequence_matcher = difflib.SequenceMatcher(None, text, query)
     match = sequence_matcher.find_longest_match(0, len(text), 0, len(query))

     if match.size == 0:
-        return "No match found"
-
-    words = re.findall(r"\b\w+\b", text)
-    if words_after is None:
-        words_after = len(words)
-    if words_before is None:
-        words_before = len(words)
-    start_word_pos = len(re.findall(r"\b\w+\b", text[: match.a]))
-    start_pos = max(0, start_word_pos - words_before)
-    end_pos = min(
-        len(words), start_word_pos + words_after + len(re.findall(r"\b\w+\b", query))
-    )
+        return "", 0, 0
+
+    segments = text.split()
+    n_segs = len(segments)
+
+    start_segment_pos = len(text[: match.a].split())
+
+    words_before = words_before or n_segs
+    words_after = words_after or n_segs
+    start_pos = max(0, start_segment_pos - words_before)
+    end_pos = min(len(segments), start_segment_pos + words_after + len(query.split()))

-    return " ".join(words[start_pos:end_pos])
+    return " ".join(segments[start_pos:end_pos]), start_pos, end_pos


 def eliminate_near_duplicates(passages: List[str], threshold: float = 0.8) -> List[str]:
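Since get_context now returns a (context, start, end) tuple rather than a bare string, callers unpack three values. A minimal usage sketch of the new behavior (the exact window boundaries depend on the whitespace-split positions):

    # Sketch of the new get_context return convention.
    from langroid.parsing.search import get_context

    text = "The quick brown fox jumps over the lazy dog near the old apple tree."
    context, start, end = get_context("apple", text, words_before=3, words_after=2)
    print(context)     # a window of words around the fuzzy match of "apple"
    print(start, end)  # word positions of that window within `text`

    # When there is no adequate fuzzy match (partial_ratio below 70), the result
    # is ("", 0, 0) instead of the old "No match found" string:
    print(get_context("zebra", "completely unrelated text", 3, 2))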
{langroid-0.1.101 → langroid-0.1.102}/langroid/parsing/utils.py
@@ -165,6 +165,32 @@ def parse_number_range_list(specs: str) -> List[int]:
     return sorted(list(spec_indices))


+def strip_k(s: str, k: int = 2) -> str:
+    """
+    Strip any leading and trailing whitespaces from the input text beyond length k.
+    This is useful for removing leading/trailing whitespaces from a text while
+    preserving paragraph structure.
+
+    Args:
+        s (str): The input text.
+        k (int): The number of leading and trailing whitespaces to retain.
+
+    Returns:
+        str: The text with leading and trailing whitespaces removed beyond length k.
+    """
+
+    # Count leading and trailing whitespaces
+    leading_count = len(s) - len(s.lstrip())
+    trailing_count = len(s) - len(s.rstrip())
+
+    # Determine how many whitespaces to retain
+    leading_keep = min(leading_count, k)
+    trailing_keep = min(trailing_count, k)
+
+    # Use slicing to get the desired output
+    return s[leading_count - leading_keep : len(s) - (trailing_count - trailing_keep)]
+
+
 def clean_whitespace(text: str) -> str:
     """Remove extra whitespace from the input text, while preserving
     paragraph structure.
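A quick illustration of strip_k, using the function exactly as added above:

    from langroid.parsing.utils import strip_k

    s = "\n\n\n\n  hello\n\nworld   \n\n\n"
    print(repr(strip_k(s, k=2)))
    # Leading/trailing whitespace is trimmed to at most 2 characters on each side,
    # while the internal blank line between "hello" and "world" is left untouched.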
langroid-0.1.102/langroid/utils/algorithms/graph.py (new file)
@@ -0,0 +1,49 @@
+"""
+Graph algos.
+"""
+
+from typing import List, no_type_check
+
+import numpy as np
+
+
+@no_type_check
+def topological_sort(order: np.array) -> List[int]:
+    """
+    Given a directed adjacency matrix, return a topological sort of the nodes.
+    order[i,j] = -1 means there is an edge from i to j.
+    order[i,j] = 0 means there is no edge from i to j.
+    order[i,j] = 1 means there is an edge from j to i.
+
+    Args:
+        order (np.array): The adjacency matrix.
+
+    Returns:
+        List[int]: The topological sort of the nodes.
+
+    """
+    n = order.shape[0]
+
+    # Calculate the in-degrees
+    in_degree = [0] * n
+    for i in range(n):
+        for j in range(n):
+            if order[i, j] == -1:
+                in_degree[j] += 1
+
+    # Initialize the queue with nodes of in-degree 0
+    queue = [i for i in range(n) if in_degree[i] == 0]
+    result = []
+
+    while queue:
+        node = queue.pop(0)
+        result.append(node)
+
+        for i in range(n):
+            if order[node, i] == -1:
+                in_degree[i] -= 1
+                if in_degree[i] == 0:
+                    queue.append(i)
+
+    assert len(result) == n, "Cycle detected"
+    return result
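A short usage sketch for the new topological_sort helper, using the sign convention documented in its docstring (order[i, j] == -1 means an edge i -> j):

    import numpy as np
    from langroid.utils.algorithms.graph import topological_sort

    # Edges: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3 (a small DAG).
    n = 4
    order = np.zeros((n, n), dtype=int)
    for i, j in [(0, 1), (0, 2), (1, 3), (2, 3)]:
        order[i, j] = -1
        order[j, i] = 1  # mirror entry, per the documented convention

    print(topological_sort(order))  # [0, 1, 2, 3] -- node 0 first, node 3 last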
{langroid-0.1.101 → langroid-0.1.102}/langroid/utils/configuration.py
@@ -71,6 +71,19 @@ def temporary_settings(temp_settings: Settings) -> Iterator[None]:
         settings.__dict__.update(original_settings.__dict__)


+@contextmanager
+def quiet_mode() -> Iterator[None]:
+    """Temporarily set quiet=True in global settings and restore afterward."""
+    original_quiet = settings.quiet
+
+    set_global(Settings(quiet=True))
+
+    try:
+        yield
+    finally:
+        settings.quiet = original_quiet
+
+
 def set_env(settings: BaseSettings) -> None:
     """
     Set environment variables from a BaseSettings instance
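The batch-processing change earlier in this diff relies on this new context manager; a minimal usage sketch:

    from langroid.utils.configuration import quiet_mode, settings

    print(settings.quiet)   # whatever the current global setting is
    with quiet_mode():
        # inside the block, the global quiet flag is True, silencing console output
        assert settings.quiet is True
    print(settings.quiet)   # the quiet flag is restored to its previous value on exit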
{langroid-0.1.101 → langroid-0.1.102}/langroid/utils/pydantic_utils.py
@@ -79,7 +79,9 @@ def flatten_pydantic_model(
         current_model, current_prefix = models_to_process.pop()

         for name, field in current_model.__fields__.items():
-            if issubclass(field.outer_type_, BaseModel):
+            if isinstance(field.outer_type_, type) and issubclass(
+                field.outer_type_, BaseModel
+            ):
                 new_prefix = (
                     f"{current_prefix}{name}__" if current_prefix else f"{name}__"
                 )
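The added isinstance guard matters because field.outer_type_ is not always a class: for generic annotations such as List[str] it is a typing construct, and calling issubclass on it raises TypeError. A minimal sketch of the failure mode being guarded against (assumes pydantic v1, which this code targets; no langroid imports needed):

    from typing import List
    from pydantic import BaseModel

    class Inner(BaseModel):
        x: int

    class Outer(BaseModel):
        inner: Inner
        tags: List[str]

    for name, field in Outer.__fields__.items():
        t = field.outer_type_
        # issubclass(t, BaseModel) would raise TypeError for `tags` (t is List[str]),
        # so the subclass test is guarded by an isinstance check:
        is_nested_model = isinstance(t, type) and issubclass(t, BaseModel)
        print(name, is_nested_model)  # inner True, tags False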