spiderforce4ai 1.9.tar.gz → 2.1.tar.gz
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/PKG-INFO +1 -1
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/pyproject.toml +1 -1
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/setup.py +1 -1
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai/__init__.py +30 -29
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai.egg-info/PKG-INFO +1 -1
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/README.md +0 -0
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/setup.cfg +0 -0
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai.egg-info/SOURCES.txt +0 -0
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai.egg-info/dependency_links.txt +0 -0
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai.egg-info/requires.txt +0 -0
- {spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai.egg-info/top_level.txt +0 -0
{spiderforce4ai-1.9 → spiderforce4ai-2.1}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "spiderforce4ai"
-version = "1.9"
+version = "2.1"
 description = "Python wrapper for SpiderForce4AI HTML-to-Markdown conversion service"
 readme = "README.md"
 authors = [{name = "Piotr Tamulewicz", email = "pt@petertam.pro"}]
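Both `pyproject.toml` and `setup.py` (and the two PKG-INFO files) carry the same 1.9 → 2.1 version bump. If a downstream script needs to confirm which release is installed before relying on the 2.1 behavior shown below, a standard-library check along these lines should work (only the distribution name is taken from this diff; everything else is stock `importlib.metadata`):

```python
from importlib.metadata import PackageNotFoundError, version

try:
    print(version("spiderforce4ai"))  # expected to print "2.1" after upgrading
except PackageNotFoundError:
    print("spiderforce4ai is not installed")
```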
{spiderforce4ai-1.9 → spiderforce4ai-2.1}/spiderforce4ai/__init__.py

@@ -23,7 +23,7 @@ from multiprocessing import Pool
 console = Console()
 
 def extract_metadata_headers(markdown: str, url: str = '') -> str:
-    """Extract metadata and headers from markdown content with SEO formatting."""
+    """Extract metadata and headers from markdown content with enhanced SEO formatting."""
     lines = markdown.split('\n')
     extracted = []
     in_metadata = False
@@ -33,8 +33,9 @@ def extract_metadata_headers(markdown: str, url: str = '') -> str:
         'canonical_url': '',
         'language': ''
     }
+    first_paragraph = ''
 
-    # First pass - collect metadata
+    # First pass - collect metadata and first paragraph
     for i, line in enumerate(lines):
        # Check for metadata block boundaries
        if line.strip() == '---':
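To make the new `first_paragraph` tracking concrete: in the first pass, `---` lines toggle the metadata block, and the first non-empty, non-heading line outside that block is remembered as a fallback description source. A minimal standalone sketch of that toggle logic (an illustration, not the package's actual code):

```python
sample = """---
title: Example
---
This opening paragraph becomes the fallback description source.

# A heading
"""

in_metadata = False
first_paragraph = ''
for line in sample.split('\n'):
    if line.strip() == '---':
        in_metadata = not in_metadata  # entering or leaving the metadata block
        continue
    if not in_metadata and not first_paragraph and line.strip() and not line.startswith('#'):
        first_paragraph = line.strip()

print(first_paragraph)  # -> the opening paragraph, later trimmed to 160 chars
```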
@@ -48,47 +49,47 @@ def extract_metadata_headers(markdown: str, url: str = '') -> str:
         # Extract metadata within the block
         if in_metadata:
             if ':' in line:
-                key, value = line.split(':', 1)
-                key = key.strip().lower()
-
+                key, value = [part.strip() for part in line.split(':', 1)]
+                key = key.lower()
+
+                # Handle multi-line values
+                if value.startswith('>'):
+                    value = value[1:].strip()
+                    j = i + 1
+                    while j < len(lines) and lines[j].strip() and not lines[j].strip() == '---':
+                        value += ' ' + lines[j].strip()
+                        j += 1
 
                 if key == 'title':
                     metadata['title'] = value
-                elif key == 'description':
+                elif key in ['description', 'meta_description', 'og:description', 'meta-description']:
                     metadata['description'] = value
-                elif key == 'canonical_url':
+                elif key in ['canonical_url', 'canonical']:
                     metadata['canonical_url'] = value
-                elif key == 'language':
+                elif key in ['language', 'lang']:
                     metadata['language'] = value
+        elif not in_metadata and not first_paragraph and line.strip() and not line.startswith('#'):
+            first_paragraph = line.strip()
+
+    # Use first paragraph as fallback description if none found
+    if not metadata['description'] and first_paragraph:
+        metadata['description'] = first_paragraph[:160] + ('...' if len(first_paragraph) > 160 else '')
 
-    # Add formatted metadata section
-    extracted.append(f"URL: {url}")
-    extracted.append(f"Title: {metadata['title'] or url.split('/')[-2].replace('-', ' ').title()}")
+    # Add formatted metadata section
+    extracted.append(f"URL: {url}")
+    extracted.append(f"Title: {metadata['title'] or url.split('/')[-2].replace('-', ' ').title()}")
     extracted.append(f"Description: {metadata['description']}")
-    extracted.append(f"CanonicalUrl: {metadata['canonical_url'] or url}")
-    extracted.append(f"Language: {metadata['language'] or 'en'}")
+    extracted.append(f"CanonicalUrl: {metadata['canonical_url'] or url}")
+    extracted.append(f"Language: {metadata['language'] or 'en'}")
     extracted.append("") # Empty line after metadata
 
     # Second pass - process headers
     for line in lines:
         if line.strip().startswith('#'):
-            # Count the number of # symbols
             level = len(line) - len(line.lstrip('#'))
             text = line.lstrip('#').strip()
-
-
-            if level == 1:
-                extracted.append(f"H1: {text}")
-            elif level == 2:
-                extracted.append(f"H2: {text}")
-            elif level == 3:
-                extracted.append(f"H3: {text}")
-            elif level == 4:
-                extracted.append(f"H4: {text}")
-            elif level == 5:
-                extracted.append(f"H5: {text}")
-            elif level == 6:
-                extracted.append(f"H6: {text}")
+            if 1 <= level <= 6:
+                extracted.append(f"H{level}: {text}")
 
     return '\n'.join(extracted)
 
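Summarizing this hunk: 2.1 strips key/value pairs on split, folds YAML-style multi-line values introduced by `>`, accepts synonym keys (`meta_description`, `canonical`, `lang`, ...), falls back to the first paragraph trimmed to 160 characters (a typical search-snippet length) when no description is found, and collapses the six header branches into a single f-string. Since the diff does not show the whole function, here is a hedged standalone re-implementation of the 2.1 behavior for experimentation; treat it as an approximation, not the shipped code:

```python
def sketch_extract(markdown: str, url: str = '') -> str:
    """Approximation of spiderforce4ai 2.1's extract_metadata_headers()."""
    lines = markdown.split('\n')
    metadata = {'title': '', 'description': '', 'canonical_url': '', 'language': ''}
    first_paragraph = ''
    in_metadata = False

    # First pass - collect metadata and first paragraph
    for i, line in enumerate(lines):
        if line.strip() == '---':
            in_metadata = not in_metadata
            continue
        if in_metadata and ':' in line:
            key, value = [part.strip() for part in line.split(':', 1)]
            key = key.lower()
            if value.startswith('>'):  # YAML-style folded multi-line value
                value = value[1:].strip()
                j = i + 1
                while j < len(lines) and lines[j].strip() and lines[j].strip() != '---':
                    value += ' ' + lines[j].strip()
                    j += 1
            if key == 'title':
                metadata['title'] = value
            elif key in ['description', 'meta_description', 'og:description', 'meta-description']:
                metadata['description'] = value
            elif key in ['canonical_url', 'canonical']:
                metadata['canonical_url'] = value
            elif key in ['language', 'lang']:
                metadata['language'] = value
        elif not in_metadata and not first_paragraph and line.strip() and not line.startswith('#'):
            first_paragraph = line.strip()

    # Fallback description: first paragraph, trimmed to 160 characters
    if not metadata['description'] and first_paragraph:
        metadata['description'] = first_paragraph[:160] + ('...' if len(first_paragraph) > 160 else '')

    extracted = [
        f"URL: {url}",
        f"Title: {metadata['title'] or url.split('/')[-2].replace('-', ' ').title()}",
        f"Description: {metadata['description']}",
        f"CanonicalUrl: {metadata['canonical_url'] or url}",
        f"Language: {metadata['language'] or 'en'}",
        '',  # Empty line after metadata
    ]

    # Second pass - one branch instead of six
    for line in lines:
        if line.strip().startswith('#'):
            level = len(line) - len(line.lstrip('#'))
            if 1 <= level <= 6:
                extracted.append(f"H{level}: {line.lstrip('#').strip()}")
    return '\n'.join(extracted)


print(sketch_extract("""---
title: SpiderForce4AI
description: >
  Python wrapper for the SpiderForce4AI
  HTML-to-Markdown conversion service.

lang: en
---

# Overview
## Quick start
""", url='https://example.com/docs/'))
```

Note one quirk faithful to the diff: the folded-value loop absorbs following lines until a blank line or `---`, so a folded value must be terminated by a blank line or it will swallow the next `key: value` pair.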
@@ -310,7 +311,7 @@ class SpiderForce4AI:
 
         # Handle combined markdown file
         if self.config.combine_to_one_markdown:
-            content = markdown if self.config.combine_to_one_markdown == 'full' else extract_metadata_headers(markdown, url)
+            content = markdown if config.combine_to_one_markdown == 'full' else extract_metadata_headers(markdown, url)
             combined_content = f"\n----PAGE----\n{url}\n\n{content}\n----PAGE END----\n"
 
             async with aiofiles.open(self.config.combined_markdown_file, 'a', encoding='utf-8') as f:
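The combined file wraps each page in `----PAGE----` / `----PAGE END----` markers with the URL on the first line. For consumers of that file, a small sketch (assuming only the marker format visible in this hunk) that splits it back into (url, content) pairs:

```python
import re

# Mirrors the f-string above: marker, URL line, blank line, body, end marker.
PAGE_RE = re.compile(
    r"----PAGE----\n(?P<url>[^\n]+)\n\n(?P<content>.*?)\n----PAGE END----",
    re.DOTALL,
)

def split_combined_markdown(text: str):
    """Yield (url, content) pairs from a combined markdown file."""
    for match in PAGE_RE.finditer(text):
        yield match.group('url'), match.group('content')

sample = "\n----PAGE----\nhttps://example.com/a\n\n# Page A\n----PAGE END----\n"
print(list(split_combined_markdown(sample)))
# -> [('https://example.com/a', '# Page A')]
```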
|