spiderforce4ai 1.8.tar.gz → 1.9.tar.gz
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/PKG-INFO +1 -1
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/pyproject.toml +1 -1
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/setup.py +1 -1
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai/__init__.py +5 -5
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai.egg-info/PKG-INFO +1 -1
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/README.md +0 -0
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/setup.cfg +0 -0
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai.egg-info/SOURCES.txt +0 -0
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai.egg-info/dependency_links.txt +0 -0
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai.egg-info/requires.txt +0 -0
- {spiderforce4ai-1.8 → spiderforce4ai-1.9}/spiderforce4ai.egg-info/top_level.txt +0 -0
pyproject.toml:

```diff
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "spiderforce4ai"
-version = "1.8"
+version = "1.9"
 description = "Python wrapper for SpiderForce4AI HTML-to-Markdown conversion service"
 readme = "README.md"
 authors = [{name = "Piotr Tamulewicz", email = "pt@petertam.pro"}]
```
spiderforce4ai/__init__.py:

```diff
@@ -22,7 +22,7 @@ from multiprocessing import Pool
 
 console = Console()
 
-def extract_metadata_headers(markdown: str) -> str:
+def extract_metadata_headers(markdown: str, url: str = '') -> str:
     """Extract metadata and headers from markdown content with SEO formatting."""
     lines = markdown.split('\n')
     extracted = []
```
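The new `url` parameter has an empty-string default, so callers written against 1.8 that pass only the markdown keep working. A minimal sketch of both call styles, assuming spiderforce4ai 1.9 is installed; the markdown string and URL are placeholders, not package fixtures:

```python
from spiderforce4ai import extract_metadata_headers

markdown = "# Example Heading\n\nSome body text."

# Pre-1.9 call style still works because url defaults to ''.
summary = extract_metadata_headers(markdown)

# 1.9 call style: the crawled URL is emitted in the metadata block and used as a
# fallback when the page has no title or canonical URL of its own.
summary_with_url = extract_metadata_headers(markdown, "https://example.com/example-page/")
print(summary_with_url)
```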
spiderforce4ai/__init__.py:

```diff
@@ -62,10 +62,10 @@ def extract_metadata_headers(markdown: str) -> str:
             metadata['language'] = value
 
     # Add formatted metadata section with URL first
-    extracted.append(f"URL: {
-    extracted.append(f"Title: {metadata['title']}")
+    extracted.append(f"URL: {url}")  # Use the actual crawled URL
+    extracted.append(f"Title: {metadata['title'] or url.split('/')[-2].replace('-', ' ').title()}")  # Fallback to URL segment
     extracted.append(f"Description: {metadata['description']}")
-    extracted.append(f"CanonicalUrl: {metadata['canonical_url']}")
+    extracted.append(f"CanonicalUrl: {metadata['canonical_url'] or url}")  # Fallback to crawled URL
     extracted.append(f"Language: {metadata['language'] or 'en'}")  # Default to 'en' if not specified
     extracted.append("")  # Empty line after metadata
 
```
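The title fallback derives a readable title from the second-to-last path segment of the crawled URL, which works best when URLs end with a trailing slash. A standalone sketch of the expression from the diff, evaluated on illustrative URLs:

```python
# With a trailing slash, [-2] is the final path segment.
url = "https://example.com/blog/my-first-post/"
print(url.split('/')[-2].replace('-', ' ').title())  # -> "My First Post"

# Without a trailing slash, [-2] selects the parent segment instead.
url = "https://example.com/blog/my-first-post"
print(url.split('/')[-2].replace('-', ' ').title())  # -> "Blog"
```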
spiderforce4ai/__init__.py:

```diff
@@ -247,7 +247,7 @@ def _process_url_parallel(args: Tuple[str, str, CrawlConfig]) -> CrawlResult:
 
         # Handle combined markdown file
         if config.combine_to_one_markdown:
-            content = markdown if config.combine_to_one_markdown == 'full' else extract_metadata_headers(markdown)
+            content = markdown if config.combine_to_one_markdown == 'full' else extract_metadata_headers(markdown, url)
             combined_content = f"\n----PAGE----\n{url}\n\n{content}\n----PAGE END----\n"
 
             with open(config.combined_markdown_file, 'a', encoding='utf-8') as f:
```
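At the call site, the crawled `url` is now forwarded, so metadata-only combined output includes the actual page URL and can fall back to it for missing fields. A rough sketch of the per-page block appended to `config.combined_markdown_file`, based on the f-string shown above; the URL and metadata summary below are placeholder values:

```python
# Placeholder values standing in for a real crawl result.
url = "https://example.com/example-page/"
content = (
    "URL: https://example.com/example-page/\n"
    "Title: Example Page\n"
    "Description: \n"
    "CanonicalUrl: https://example.com/example-page/\n"
    "Language: en\n"
)

# Same framing as the diff: each page is wrapped in ----PAGE---- markers before
# being appended to the combined markdown file.
combined_content = f"\n----PAGE----\n{url}\n\n{content}\n----PAGE END----\n"
print(combined_content)
```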