langroid 0.46.0__tar.gz → 0.47.0__tar.gz
This diff shows the changes between these publicly released package versions, as they appear in their respective public registries, and is provided for informational purposes only.
- {langroid-0.46.0 → langroid-0.47.0}/PKG-INFO +3 -1
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/doc_chat_agent.py +13 -3
- langroid-0.47.0/langroid/parsing/url_loader.py +340 -0
- {langroid-0.46.0 → langroid-0.47.0}/pyproject.toml +4 -1
- langroid-0.46.0/langroid/parsing/url_loader.py +0 -120
- {langroid-0.46.0 → langroid-0.47.0}/.gitignore +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/LICENSE +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/README.md +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/batch.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/callbacks/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/callbacks/chainlit.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/chat_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/chat_document.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/openai_assistant.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/arangodb/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/arangodb/arangodb_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/arangodb/system_messages.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/arangodb/tools.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/arangodb/utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_doc_chat_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_rag/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_rag/critic_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_rag/lance_rag_task.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_rag/query_planner_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/lance_tools.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/neo4j/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/neo4j/csv_kg_chat.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/neo4j/neo4j_chat_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/neo4j/system_messages.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/neo4j/tools.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/relevance_extractor_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/retriever_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/sql_chat_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/utils/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/utils/description_extractors.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/utils/populate_metadata.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/utils/system_message.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/sql/utils/tools.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/table_chat_agent.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/task.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tool_message.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/duckduckgo_search_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/exa_search_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/file_tools.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/google_search_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/metaphor_search_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/orchestration.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/recipient_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/retrieval_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/rewind_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/segment_extract_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/tools/tavily_search_tool.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/agent/xml_tool_message.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/cachedb/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/cachedb/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/cachedb/momento_cachedb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/cachedb/redis_cachedb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/models.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/protoc/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/protoc/embeddings.proto +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/protoc/embeddings_pb2.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/protoc/embeddings_pb2.pyi +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/protoc/embeddings_pb2_grpc.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/embedding_models/remote_embeds.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/exceptions.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/azure_openai.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/config.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/mock_lm.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/model_info.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/openai_gpt.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/prompt_formatter/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/prompt_formatter/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/prompt_formatter/hf_formatter.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/prompt_formatter/llama2_formatter.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/language_models/utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/mytypes.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/agent_chats.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/code_parser.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/document_parser.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/para_sentence_split.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/parse_json.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/parser.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/pdf_utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/repo_loader.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/routing.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/search.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/spider.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/table_loader.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/urls.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/parsing/web_search.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/prompts/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/prompts/dialog.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/prompts/prompts_config.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/prompts/templates.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/py.typed +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/pydantic_v1/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/pydantic_v1/main.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/algorithms/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/algorithms/graph.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/configuration.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/constants.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/git_utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/globals.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/logging.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/object_registry.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/output/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/output/citations.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/output/printing.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/output/status.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/pandas_utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/pydantic_utils.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/system.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/utils/types.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/__init__.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/base.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/chromadb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/lancedb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/meilisearch.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/pineconedb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/postgres.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/qdrantdb.py +0 -0
- {langroid-0.46.0 → langroid-0.47.0}/langroid/vector_store/weaviatedb.py +0 -0
{langroid-0.46.0 → langroid-0.47.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langroid
-Version: 0.46.0
+Version: 0.47.0
 Summary: Harness LLMs with Multi-Agent Programming
 Author-email: Prasad Chalasani <pchalasani@gmail.com>
 License: MIT

@@ -121,6 +121,8 @@ Provides-Extra: exa
 Requires-Dist: exa-py>=1.8.7; extra == 'exa'
 Provides-Extra: fastembed
 Requires-Dist: fastembed<0.4.0,>=0.3.1; extra == 'fastembed'
+Provides-Extra: firecrawl
+Requires-Dist: firecrawl-py>=1.13.5; extra == 'firecrawl'
 Provides-Extra: google-genai
 Requires-Dist: google-genai>=1.0.0; extra == 'google-genai'
 Provides-Extra: google-generativeai
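In plain terms, the metadata changes are the version bump and one new optional dependency group: installing with pip install "langroid[firecrawl]" pulls in firecrawl-py>=1.13.5, while the base install is unaffected.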
{langroid-0.46.0 → langroid-0.47.0}/langroid/agent/special/doc_chat_agent.py

@@ -50,7 +50,7 @@ from langroid.parsing.search import (
     preprocess_text,
 )
 from langroid.parsing.table_loader import describe_dataframe
-from langroid.parsing.url_loader import URLLoader
+from langroid.parsing.url_loader import BaseCrawlerConfig, TrafilaturaConfig, URLLoader
 from langroid.parsing.urls import get_list_from_user, get_urls_paths_bytes_indices
 from langroid.prompts.prompts_config import PromptsConfig
 from langroid.prompts.templates import SUMMARY_ANSWER_PROMPT_GPT4
@@ -192,6 +192,7 @@ class DocChatAgentConfig(ChatAgentConfig):
             library="pymupdf4llm",
         ),
     )
+    crawler_config: Optional[BaseCrawlerConfig] = TrafilaturaConfig()

     # Allow vecdb to be None in case we want to explicitly set it later
     vecdb: Optional[VectorStoreConfig] = QdrantDBConfig(
@@ -336,11 +337,15 @@ class DocChatAgent(ChatAgent):
         urls_meta = {u: idx2meta[u] for u in url_idxs}
         paths_meta = {p: idx2meta[p] for p in path_idxs}
         docs: List[Document] = []
-        parser = Parser(self.config.parsing)
+        parser: Parser = Parser(self.config.parsing)
         if len(urls) > 0:
             for ui in url_idxs:
                 meta = urls_meta.get(ui, {})
-                loader = URLLoader(
+                loader = URLLoader(
+                    urls=[all_paths[ui]],
+                    parsing_config=self.config.parsing,
+                    crawler_config=self.config.crawler_config,
+                )  # type: ignore
                 url_docs = loader.load()
                 # update metadata of each doc with meta
                 for d in url_docs:
@@ -466,6 +471,11 @@ class DocChatAgent(ChatAgent):
         docs = docs[: self.config.parsing.max_chunks]
         # vecdb should take care of adding docs in batches;
         # batching can be controlled via vecdb.config.batch_size
+        if not docs:
+            logging.warning(
+                "No documents to ingest after processing. Skipping VecDB addition."
+            )
+            return 0  # Return 0 since no documents were added
         self.vecdb.add_documents(docs)
         self.original_docs_length = self.doc_length(docs)
         self.setup_documents(docs, filter=self.config.filter)
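The net effect of the doc_chat_agent.py changes: the crawler used for URL ingestion is now selected via config instead of being hard-wired to Trafilatura, and ingestion exits early (returning 0) when nothing survives parsing. A minimal sketch of the new knob, using only classes visible in this diff and assuming ingest_doc_paths (the URL/path ingestion method these hunks modify); the URL is a placeholder:

from langroid.agent.special.doc_chat_agent import DocChatAgent, DocChatAgentConfig
from langroid.parsing.url_loader import FirecrawlConfig

# Default is unchanged: crawler_config = TrafilaturaConfig().
# Here we swap in Firecrawl; its api_key can come from FIRECRAWL_API_KEY
# via the config's env_prefix.
cfg = DocChatAgentConfig(crawler_config=FirecrawlConfig(mode="scrape"))
agent = DocChatAgent(cfg)
agent.ingest_doc_paths(["https://example.com/article"])  # placeholder URL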
langroid-0.47.0/langroid/parsing/url_loader.py (new file)

@@ -0,0 +1,340 @@
+import logging
+import os
+from abc import ABC, abstractmethod
+from tempfile import NamedTemporaryFile
+from typing import TYPE_CHECKING, Any, Dict, List, Optional
+
+from dotenv import load_dotenv
+
+from langroid.exceptions import LangroidImportError
+from langroid.mytypes import DocMetaData, Document
+from langroid.parsing.document_parser import DocumentParser, ImagePdfParser
+from langroid.parsing.parser import Parser, ParsingConfig
+from langroid.pydantic_v1 import BaseSettings
+
+if TYPE_CHECKING:
+    from firecrawl import FirecrawlApp
+
+load_dotenv()
+
+logging.getLogger("url_loader").setLevel(logging.WARNING)
+
+
+# Base crawler config and specific configurations
+class BaseCrawlerConfig(BaseSettings):
+    """Base configuration for web crawlers."""
+
+    parser: Optional[Parser] = None
+
+
+class TrafilaturaConfig(BaseCrawlerConfig):
+    """Configuration for Trafilatura crawler."""
+
+    threads: int = 4
+
+
+class FirecrawlConfig(BaseCrawlerConfig):
+    """Configuration for Firecrawl crawler."""
+
+    api_key: str = ""
+    mode: str = "scrape"
+    params: Dict[str, Any] = {}
+    timeout: Optional[int] = None
+
+    class Config:
+        # Leverage Pydantic's BaseSettings to
+        # allow setting of fields via env vars,
+        # e.g. FIRECRAWL_MODE=scrape and FIRECRAWL_API_KEY=...
+        env_prefix = "FIRECRAWL_"
+
+
+class BaseCrawler(ABC):
+    """Abstract base class for web crawlers."""
+
+    def __init__(self, config: BaseCrawlerConfig):
+        """Initialize the base crawler.
+
+        Args:
+            config: Configuration for the crawler
+        """
+        self.parser = config.parser if self.needs_parser else None
+        self.config: BaseCrawlerConfig = config
+
+    @property
+    @abstractmethod
+    def needs_parser(self) -> bool:
+        """Indicates whether the crawler requires a parser."""
+        pass
+
+    @abstractmethod
+    def crawl(self, urls: List[str]) -> List[Document]:
+        pass
+
+    def _process_document(self, url: str) -> List[Document]:
+        if self.parser:
+            import requests
+            from requests.structures import CaseInsensitiveDict
+
+            if self._is_document_url(url):
+                try:
+                    doc_parser = DocumentParser.create(url, self.parser.config)
+                    new_chunks = doc_parser.get_doc_chunks()
+                    if not new_chunks:
+                        # If the document is empty, try to extract images
+                        img_parser = ImagePdfParser(url, self.parser.config)
+                        new_chunks = img_parser.get_doc_chunks()
+                    return new_chunks
+                except Exception as e:
+                    logging.error(f"Error parsing {url}: {e}")
+                    return []
+
+            else:
+                try:
+                    headers = requests.head(url).headers
+                except Exception as e:
+                    logging.warning(f"Error getting headers for {url}: {e}")
+                    headers = CaseInsensitiveDict()
+
+                content_type = headers.get("Content-Type", "").lower()
+                temp_file_suffix = None
+                if "application/pdf" in content_type:
+                    temp_file_suffix = ".pdf"
+                elif (
+                    "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
+                    in content_type
+                ):
+                    temp_file_suffix = ".docx"
+                elif "application/msword" in content_type:
+                    temp_file_suffix = ".doc"
+
+                if temp_file_suffix:
+                    try:
+                        response = requests.get(url)
+                        with NamedTemporaryFile(
+                            delete=False, suffix=temp_file_suffix
+                        ) as temp_file:
+                            temp_file.write(response.content)
+                            temp_file_path = temp_file.name
+                        doc_parser = DocumentParser.create(
+                            temp_file_path, self.parser.config
+                        )
+                        docs = doc_parser.get_doc_chunks()
+                        os.remove(temp_file_path)
+                        return docs
+                    except Exception as e:
+                        logging.error(f"Error downloading/parsing {url}: {e}")
+                        return []
+        return []
+
+    def _is_document_url(self, url: str) -> bool:
+        return any(url.lower().endswith(ext) for ext in [".pdf", ".docx", ".doc"])
+
+
+class CrawlerFactory:
+    """Factory for creating web crawlers."""
+
+    @staticmethod
+    def create_crawler(config: BaseCrawlerConfig) -> BaseCrawler:
+        """Create a crawler instance based on configuration type.
+
+        Args:
+            config: Configuration for the crawler
+
+        Returns:
+            A BaseCrawler instance
+
+        Raises:
+            ValueError: If config type is not supported
+        """
+        if isinstance(config, TrafilaturaConfig):
+            return TrafilaturaCrawler(config)
+        elif isinstance(config, FirecrawlConfig):
+            return FirecrawlCrawler(config)
+        else:
+            raise ValueError(f"Unsupported crawler configuration type: {type(config)}")
+
+
+class TrafilaturaCrawler(BaseCrawler):
+    """Crawler implementation using Trafilatura."""
+
+    def __init__(self, config: TrafilaturaConfig):
+        """Initialize the Trafilatura crawler.
+
+        Args:
+            config: Configuration for the crawler
+        """
+        super().__init__(config)
+        self.config: TrafilaturaConfig = config
+
+    @property
+    def needs_parser(self) -> bool:
+        return True
+
+    def crawl(self, urls: List[str]) -> List[Document]:
+        import trafilatura
+        from trafilatura.downloads import (
+            add_to_compressed_dict,
+            buffered_downloads,
+            load_download_buffer,
+        )
+
+        docs = []
+        dl_dict = add_to_compressed_dict(urls)
+
+        while not dl_dict.done:
+            buffer, dl_dict = load_download_buffer(dl_dict, sleep_time=5)
+            for url, result in buffered_downloads(buffer, self.config.threads):
+                parsed_doc = self._process_document(url)
+                if parsed_doc:
+                    docs.extend(parsed_doc)
+                else:
+                    text = trafilatura.extract(
+                        result, no_fallback=False, favor_recall=True
+                    )
+                    if text is None and result is not None and isinstance(result, str):
+                        text = result
+                    if text:
+                        docs.append(
+                            Document(content=text, metadata=DocMetaData(source=url))
+                        )
+
+        return docs
+
+
+class FirecrawlCrawler(BaseCrawler):
+    """Crawler implementation using Firecrawl."""
+
+    def __init__(self, config: FirecrawlConfig) -> None:
+        """Initialize the Firecrawl crawler.
+
+        Args:
+            config: Configuration for the crawler
+        """
+        super().__init__(config)
+        self.config: FirecrawlConfig = config
+
+    @property
+    def needs_parser(self) -> bool:
+        return False
+
+    def _return_save_incremental_results(
+        self, app: "FirecrawlApp", crawl_id: str, output_dir: str = "firecrawl_output"
+    ) -> List[Document]:
+        # Code used verbatim from firecrawl blog with few modifications
+        # https://www.firecrawl.dev/blog/mastering-the-crawl-endpoint-in-firecrawl
+        import json
+        import time
+        from pathlib import Path
+
+        from tqdm import tqdm
+
+        pbar = tqdm(desc="Pages saved", unit=" pages", dynamic_ncols=True)
+        Path(output_dir).mkdir(parents=True, exist_ok=True)
+        processed_urls: set[str] = set()
+        docs = []
+
+        while True:
+            # Check current status
+            status = app.check_crawl_status(crawl_id)
+            new_pages = 0
+
+            # Save new pages
+            for page in status["data"]:
+                url = page["metadata"]["url"]
+                if url not in processed_urls:
+                    content = page.get("markdown", "")
+                    filename = f"{output_dir}/{len(processed_urls)}.md"
+                    with open(filename, "w") as f:
+                        f.write(content)
+                    docs.append(
+                        Document(content=content, metadata=DocMetaData(source=url))
+                    )
+                    processed_urls.add(url)
+                    new_pages += 1
+            pbar.update(new_pages)  # Update progress bar with new pages
+
+            # Break if crawl is complete
+            if status["status"] == "completed":
+                print(f"Saved {len(processed_urls)} pages.")
+                with open(f"{output_dir}/full_results.json", "w") as f:
+                    json.dump(status, f, indent=2)
+                break
+
+            time.sleep(5)  # Wait before checking again
+        return docs
+
+    def crawl(self, urls: List[str]) -> List[Document]:
+        try:
+            from firecrawl import FirecrawlApp
+        except ImportError:
+            raise LangroidImportError("firecrawl", "firecrawl")
+
+        app = FirecrawlApp(api_key=self.config.api_key)
+        docs = []
+        params = self.config.params.copy()  # Create a copy of the existing params
+
+        if self.config.timeout is not None:
+            params["timeout"] = self.config.timeout  # Add/override timeout in params
+
+        if self.config.mode == "scrape":
+            for url in urls:
+                try:
+                    result = app.scrape_url(url, params=params)
+                    metadata = result.get(
+                        "metadata", {}
+                    )  # Default to empty dict if missing
+                    status_code = metadata.get("statusCode")
+
+                    if status_code == 200:
+                        docs.append(
+                            Document(
+                                content=result["markdown"],
+                                metadata=DocMetaData(source=url),
+                            )
+                        )
+                except Exception as e:
+                    logging.warning(
+                        f"Firecrawl encountered an error for {url}: {e}. "
+                        "Skipping but continuing."
+                    )
+        elif self.config.mode == "crawl":
+            if not isinstance(urls, list) or len(urls) != 1:
+                raise ValueError(
+                    "Crawl mode expects 'urls' to be a list containing a single URL."
+                )
+
+            # Start the crawl
+            crawl_status = app.async_crawl_url(url=urls[0], params=params)
+
+            # Save results incrementally
+            docs = self._return_save_incremental_results(app, crawl_status["id"])
+        return docs
+
+
+class URLLoader:
+    """Loads URLs and extracts text using a specified crawler."""
+
+    def __init__(
+        self,
+        urls: List[Any],
+        parsing_config: ParsingConfig = ParsingConfig(),
+        crawler_config: Optional[BaseCrawlerConfig] = None,
+    ):
+        """Initialize the URL loader.
+
+        Args:
+            urls: List of URLs to load
+            parsing_config: Configuration for parsing
+            crawler_config: Configuration for the crawler
+        """
+        self.urls = urls
+        self.parsing_config = parsing_config
+
+        if crawler_config is None:
+            crawler_config = TrafilaturaConfig(parser=Parser(parsing_config))
+
+        self.crawler = CrawlerFactory.create_crawler(crawler_config)
+
+    def load(self) -> List[Document]:
+        """Load the URLs using the specified crawler."""
+        return self.crawler.crawl(self.urls)
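For orientation, a minimal usage sketch of the new module, using only the classes added above (URLs are placeholders):

from langroid.parsing.url_loader import FirecrawlConfig, URLLoader

# Default path: Trafilatura. With no crawler_config, URLLoader builds
# TrafilaturaConfig(parser=Parser(parsing_config)) itself.
loader = URLLoader(urls=["https://example.com/article"])
docs = loader.load()

# Firecrawl path: api_key may come from FIRECRAWL_API_KEY (env_prefix);
# "crawl" mode requires exactly one seed URL and polls results incrementally.
fc_loader = URLLoader(
    urls=["https://example.com"],
    crawler_config=FirecrawlConfig(mode="crawl"),
)
fc_docs = fc_loader.load()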
{langroid-0.46.0 → langroid-0.47.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "langroid"
-version = "0.46.0"
+version = "0.47.0"
 authors = [
     {name = "Prasad Chalasani", email = "pchalasani@gmail.com"},
 ]

@@ -265,6 +265,9 @@ pinecone = [
 asyncio = [
     "asyncio>=3.4.3",
 ]
+firecrawl = [
+    "firecrawl-py>=1.13.5",
+]


 [dependency-groups]
langroid-0.46.0/langroid/parsing/url_loader.py (file removed)

@@ -1,120 +0,0 @@
-import logging
-import os
-from tempfile import NamedTemporaryFile
-from typing import List, no_type_check
-
-import requests
-
-from langroid.mytypes import DocMetaData, Document
-from langroid.parsing.document_parser import DocumentParser, ImagePdfParser
-from langroid.parsing.parser import Parser, ParsingConfig
-
-logging.getLogger("trafilatura").setLevel(logging.ERROR)
-
-
-class URLLoader:
-    """
-    Load a list of URLs and extract the text content.
-    Alternative approaches could use `bs4` or `scrapy`.
-
-    TODO - this currently does not handle cookie dialogs,
-    i.e. if there is a cookie pop-up, most/all of the extracted
-    content could be cookie policy text.
-    We could use `playwright` to simulate a user clicking
-    the "accept" button on the cookie dialog.
-    """
-
-    def __init__(self, urls: List[str], parser: Parser = Parser(ParsingConfig())):
-        self.urls = urls
-        self.parser = parser
-
-    @no_type_check
-    def load(self) -> List[Document]:
-        import trafilatura
-        from trafilatura.downloads import (
-            add_to_compressed_dict,
-            buffered_downloads,
-            load_download_buffer,
-        )
-
-        docs = []
-        threads = 4
-        # converted the input list to an internal format
-        dl_dict = add_to_compressed_dict(self.urls)
-        # processing loop
-        while not dl_dict.done:
-            buffer, dl_dict = load_download_buffer(
-                dl_dict,
-                sleep_time=5,
-            )
-            for url, result in buffered_downloads(buffer, threads):
-                if (
-                    url.lower().endswith(".pdf")
-                    or url.lower().endswith(".docx")
-                    or url.lower().endswith(".doc")
-                ):
-                    try:
-                        doc_parser = DocumentParser.create(
-                            url,
-                            self.parser.config,
-                        )
-                    except Exception as e:
-                        logging.error(f"Error parsing {url}: {e}")
-                        continue
-                    new_chunks = doc_parser.get_doc_chunks()
-                    if len(new_chunks) == 0:
-                        # If the document is empty, try to extract images
-                        img_parser = ImagePdfParser(url, self.parser.config)
-                        new_chunks = img_parser.get_doc_chunks()
-                    docs.extend(new_chunks)
-                else:
-                    # Try to detect content type and handle accordingly
-                    try:
-                        headers = requests.head(url).headers
-                    except Exception as e:
-                        logging.warning(f"Error getting headers for {url}: {e}")
-                        headers = {}
-                    content_type = headers.get("Content-Type", "").lower()
-                    temp_file_suffix = None
-                    if "application/pdf" in content_type:
-                        temp_file_suffix = ".pdf"
-                    elif (
-                        "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
-                        in content_type
-                    ):
-                        temp_file_suffix = ".docx"
-                    elif "application/msword" in content_type:
-                        temp_file_suffix = ".doc"
-
-                    if temp_file_suffix:
-                        # Download the document content
-                        response = requests.get(url)
-                        with NamedTemporaryFile(
-                            delete=False, suffix=temp_file_suffix
-                        ) as temp_file:
-                            temp_file.write(response.content)
-                            temp_file_path = temp_file.name
-                        # Process the downloaded document
-                        doc_parser = DocumentParser.create(
-                            temp_file_path, self.parser.config
-                        )
-                        docs.extend(doc_parser.get_doc_chunks())
-                        # Clean up the temporary file
-                        os.remove(temp_file_path)
-                    else:
-                        text = trafilatura.extract(
-                            result,
-                            no_fallback=False,
-                            favor_recall=True,
-                        )
-                        if (
-                            text is None
-                            and result is not None
-                            and isinstance(result, str)
-                        ):
-                            text = result
-                        if text is not None and text != "":
-                            docs.append(
-                                Document(content=text, metadata=DocMetaData(source=url))
-                            )
-        return docs
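For callers migrating from 0.46.0: URLLoader no longer accepts a parser= argument; pass parsing_config=ParsingConfig(...) and, optionally, a crawler_config (see the sketch after the new module above). The old behavior (direct handling of .pdf/.docx/.doc URLs, Content-Type sniffing for downloadable documents, and the trafilatura.extract fallback) is preserved in the new TrafilaturaCrawler path.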