spiderforce4ai 1.6.tar.gz → 1.7.tar.gz
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/PKG-INFO +1 -1
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/pyproject.toml +1 -1
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/setup.py +1 -1
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai/__init__.py +43 -14
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai.egg-info/PKG-INFO +1 -1
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/README.md +0 -0
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/setup.cfg +0 -0
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai.egg-info/SOURCES.txt +0 -0
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai.egg-info/dependency_links.txt +0 -0
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai.egg-info/requires.txt +0 -0
- {spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai.egg-info/top_level.txt +0 -0
{spiderforce4ai-1.6 → spiderforce4ai-1.7}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "spiderforce4ai"
-version = "1.6"
+version = "1.7"
 description = "Python wrapper for SpiderForce4AI HTML-to-Markdown conversion service"
 readme = "README.md"
 authors = [{name = "Piotr Tamulewicz", email = "pt@petertam.pro"}]
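The packaging change is just the version pin (PKG-INFO and setup.py carry the same one-line bump), so picking up the release is a single command, assuming the build is published to PyPI under the unchanged name:

pip install spiderforce4ai==1.7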
{spiderforce4ai-1.6 → spiderforce4ai-1.7}/spiderforce4ai/__init__.py
@@ -23,26 +23,55 @@ from multiprocessing import Pool
 console = Console()
 
 def extract_metadata_headers(markdown: str) -> str:
-    """Extract metadata and headers from markdown content."""
+    """Extract metadata and headers from markdown content with SEO formatting."""
     lines = markdown.split('\n')
     extracted = []
     in_metadata = False
+    metadata = {
+        'title': '',
+        'description': '',
+        'canonical_url': '',
+        'language': ''
+    }
 
+    # First pass - collect metadata
+    for line in lines:
+        if line.strip().startswith('title:'):
+            metadata['title'] = line.split(':', 1)[1].strip()
+        elif line.strip().startswith('description:'):
+            metadata['description'] = line.split(':', 1)[1].strip()
+        elif line.strip().startswith('canonical_url:'):
+            metadata['canonical_url'] = line.split(':', 1)[1].strip()
+        elif line.strip().startswith('language:'):
+            metadata['language'] = line.split(':', 1)[1].strip()
+
+    # Add formatted metadata section
+    extracted.append(f"Title: {metadata['title']}")
+    extracted.append(f"Description: {metadata['description']}")
+    extracted.append(f"CanonicalUrl: {metadata['canonical_url']}")
+    extracted.append(f"Language: {metadata['language']}")
+    extracted.append("") # Empty line after metadata
+
+    # Second pass - process headers
     for line in lines:
-        # Check for metadata block
-        if line.strip() == '---':
-            in_metadata = not in_metadata
-            extracted.append(line)
-            continue
-
-        # Include metadata
-        if in_metadata:
-            extracted.append(line)
-            continue
-
-        # Include headers (lines starting with #)
         if line.strip().startswith('#'):
-            extracted.append(line)
+            # Count the number of # symbols
+            level = len(line) - len(line.lstrip('#'))
+            text = line.lstrip('#').strip()
+
+            # Format header according to level
+            if level == 1:
+                extracted.append(f"H1: {text}")
+            elif level == 2:
+                extracted.append(f"H2: {text}")
+            elif level == 3:
+                extracted.append(f"H3: {text}")
+            elif level == 4:
+                extracted.append(f"H4: {text}")
+            elif level == 5:
+                extracted.append(f"H5: {text}")
+            elif level == 6:
+                extracted.append(f"H6: {text}")
 
     return '\n'.join(extracted)
 
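To make the behavioural change concrete, here is a short sketch of what the rewritten helper returns in 1.7. The sample front matter and headings are invented for illustration; the expected output follows directly from the code in the hunk above, assuming version 1.7 is installed:

from spiderforce4ai import extract_metadata_headers

# Hypothetical markdown, shaped like front-matter plus headings.
sample = """---
title: Example Page
description: A sample page for testing
canonical_url: https://example.com/page
language: en
---

# Main Heading

## A Section
"""

print(extract_metadata_headers(sample))
# Title: Example Page
# Description: A sample page for testing
# CanonicalUrl: https://example.com/page
# Language: en
#
# H1: Main Heading
# H2: A Section

In 1.6 the same call would have returned the raw --- front-matter block and the #-prefixed heading lines verbatim; 1.7 flattens both into the "Key: value" and "Hn: text" lines shown above.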