web2textpy-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,19 @@
+ {
+   "permissions": {
+     "allow": [
+       "Bash(uv pip:*)",
+       "Bash(python3:*)",
+       "Bash(python:*)",
+       "Bash(/tmp/l3s_final2.py:*)",
+       "Bash(/tmp/l3s_summary.py:*)",
+       "Bash(ls -la /Users/williambrach/Developer/web2textpy/*.py)",
+       "Bash(grep -E \"^libraryDependencies|^import \" /Users/williambrach/Developer/web2textpy/web2text/build.sbt /Users/williambrach/Developer/web2textpy/web2text/src/main/scala/ch/ethz/dalab/web2text/*.scala)",
+       "Bash(echo \"exit: $?\")",
+       "Bash(uv run:*)",
+       "Bash(grep -r \"requests\\\\|urllib\\\\|http\\\\|socket\\\\|fetch\\\\|download\" /Users/williambrach/Developer/web2textpy/*.py)",
+       "Bash(ls:*)",
+       "WebSearch",
+       "WebFetch(domain:discuss.huggingface.co)"
+     ]
+   }
+ }
@@ -0,0 +1,217 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+ web2text/
+ scripts/
+ # C extensions
+ *.so
+ .env
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+ .DS_Store
+ # Sphinx documentation
+ docs/_build/
+ data/
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ # Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ # poetry.lock
+ # poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ # pdm.lock
+ # pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ # pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # Redis
+ *.rdb
+ *.aof
+ *.pid
+
+ # RabbitMQ
+ mnesia/
+ rabbitmq/
+ rabbitmq-data/
+
+ # ActiveMQ
+ activemq-data/
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ # .idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
+
+ # Streamlit
+ .streamlit/secrets.toml
@@ -0,0 +1 @@
+ 3.12
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 William Brach
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -0,0 +1,118 @@
+ Metadata-Version: 2.4
+ Name: web2textpy
+ Version: 0.1.0
+ Summary: Python reimplementation of the Web2Text pipeline for labeling HTML DOM nodes as content or boilerplate
+ Project-URL: Homepage, https://github.com/williambrach/web2textpy
+ Project-URL: Repository, https://github.com/williambrach/web2textpy
+ Author: William Brach
+ License-Expression: MIT
+ License-File: LICENSE
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Text Processing :: Markup :: HTML
+ Requires-Python: >=3.10
+ Requires-Dist: lxml>=6.0.2
+ Provides-Extra: cli
+ Requires-Dist: datasets>=4.8.4; extra == 'cli'
+ Requires-Dist: rouge-score>=0.1.2; extra == 'cli'
+ Requires-Dist: sacrebleu>=2.6.0; extra == 'cli'
+ Provides-Extra: eval
+ Requires-Dist: rouge-score>=0.1.2; extra == 'eval'
+ Requires-Dist: sacrebleu>=2.6.0; extra == 'eval'
+ Description-Content-Type: text/markdown
+
+ # web2textpy
+
+ Python reimplementation of the [Web2Text](https://github.com/dalab/web2text) pipeline for labeling HTML DOM nodes as **content** or **boilerplate** using paired `(raw_html, clean_text)` data.
+
+ ## Installation
+
+ ```bash
+ uv add web2textpy
+ ```
+
+ ## Quick Start
+
+ ```python
+ from datasets import load_dataset
+ from web2text import run_pipeline
+
+ ds = load_dataset("williambrach/html-boilerplate-labeled", split="test")
+ row = ds[0]
+
+ tree, extracted_text, metrics = run_pipeline(row["html"], row["text"])
+
+ print(extracted_text[:200])
+ print(metrics)
+ ```
+
+ ## Step-by-Step API
+
+ Each stage of the pipeline is exposed as a standalone function:
+
+ ```python
+ from web2text import build_cdom, extract_leaves, align, label_nodes, extract_text, evaluate
+
+ # 1. Parse HTML into a collapsed DOM tree
+ tree = build_cdom(html_string)
+
+ # 2. Extract ordered text-bearing leaf nodes
+ leaves = extract_leaves(tree) # [(element, "normalized text"), ...]
+
+ # 3. Align leaf texts against ground-truth clean text
+ scores = align(leaves, clean_text) # {leaf_id: 0.0-1.0 match score}
+
+ # 4. Label each node as "content" or "boilerplate"
+ tree = label_nodes(tree, scores, threshold=0.667)
+
+ # 5. Extract text from content-labeled nodes
+ result = extract_text(tree)
+
+ # 6. Evaluate against ground truth
+ metrics = evaluate(result, clean_text)
+ # => {'token_f1': 0.99, 'precision': 0.99, 'recall': 0.99, 'rouge1_f': 0.99, 'bleu': 98.5, 'chrf': 98.8}
+ ```
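+
+ The `metrics` keys mirror standard text-extraction measures. As a point of reference, a token-overlap F1 can be computed as in the sketch below; this is a minimal illustration assuming plain whitespace tokenization, not necessarily the tokenization or exact definition `evaluate` uses:
+
+ ```python
+ from collections import Counter
+
+ def token_overlap_f1(pred: str, ref: str) -> dict[str, float]:
+     """Precision/recall/F1 over multiset token overlap (whitespace tokens)."""
+     pred_toks, ref_toks = pred.split(), ref.split()
+     # Multiset intersection counts each shared token at most min(count) times.
+     overlap = sum((Counter(pred_toks) & Counter(ref_toks)).values())
+     p = overlap / len(pred_toks) if pred_toks else 0.0
+     r = overlap / len(ref_toks) if ref_toks else 0.0
+     f1 = 2 * p * r / (p + r) if (p + r) else 0.0
+     return {"precision": p, "recall": r, "token_f1": f1}
+ ```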
+
+ ## How the Matching Algorithm Works
+
+ Given raw HTML and its known clean text, the algorithm determines which DOM nodes are content versus boilerplate in six steps:
+
+ 1. **Simplify the DOM** — strip non-content tags (`<script>`, `<style>`, etc.) and collapse single-child chains into a Collapsed DOM (CDOM) representation
+ 2. **Collect leaf text** — walk the CDOM, concatenate text from every leaf node into one source string with tracked character offsets
+ 3. **Find anchors** — identify 10-character substrings that appear exactly once in both the source and clean text, splitting the problem into independent segments (see the sketch after the diagram below)
+ 4. **DP alignment** — for each segment between anchors, run character-level dynamic programming with affine gap penalties to map source characters to clean-text characters
+ 5. **Score leaves** — map alignment results back to leaf boundaries via stored offsets, giving each leaf a score: `matched_chars / total_chars`
+ 6. **Label nodes** — leaves scoring above `0.667` are labeled `"content"`, the rest `"boilerplate"`, with labels propagating upward to parents
+
+ ![Alignment pipeline: extract leaf texts → anchor matching → DP alignment → per-leaf scores](assets/image1.png)
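+
+ To make the anchoring step concrete, here is a minimal, self-contained sketch of step 3. The function name `find_anchors` and the greedy crossing filter are illustrative assumptions, not the package's actual implementation:
+
+ ```python
+ from collections import Counter
+
+ def find_anchors(source: str, clean: str, k: int = 10) -> list[tuple[int, int]]:
+     """Return (source_offset, clean_offset) pairs for k-char substrings
+     that occur exactly once in both strings."""
+     src_counts = Counter(source[i:i + k] for i in range(len(source) - k + 1))
+     cln_counts = Counter(clean[i:i + k] for i in range(len(clean) - k + 1))
+     # Keep k-grams that are unique in *both* strings.
+     unique = [g for g, c in src_counts.items() if c == 1 and cln_counts.get(g) == 1]
+     pairs = sorted((source.index(g), clean.index(g)) for g in unique)
+     # Greedily drop anchors whose clean-text offsets would cross an earlier
+     # anchor, so the surviving pairs split both strings into ordered,
+     # independent segments for the DP in step 4.
+     anchors, last = [], -1
+     for s_off, c_off in pairs:
+         if c_off > last:
+             anchors.append((s_off, c_off))
+             last = c_off
+     return anchors
+ ```
+
+ Each surviving anchor pair bounds a segment; step 4 then only has to align the much shorter text between consecutive anchors.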
+
+
+ ## Dataset
+
+ Dataset: [williambrach/html-boilerplate-labeled](https://huggingface.co/datasets/williambrach/html-boilerplate-labeled) — 3,985 pages drawn from CleanEval, Dragnet, CETD, Readability, and other corpora.
+
+ | Source | Train (ROUGE-1 F) | Test (ROUGE-1 F) |
+ |--------------------|-------------------|------------------|
+ | readability | 0.993 (92) | 0.997 (23) |
+ | scrapinghub | 0.991 (145) | 0.996 (36) |
+ | cetd | 0.993 (560) | 0.987 (140) |
+ | google-trends-2017 | 0.986 (144) | 0.995 (36) |
+ | cleanportaleval | 0.985 (57) | 0.971 (14) |
+ | cleaneval | 0.985 (590) | 0.991 (148) |
+ | dragnet | 0.983 (1,103) | 0.983 (276) |
+ | l3s-gn1 | 0.920 (497) | 0.927 (124) |
+ | **Overall** | **0.976** (3,188) | **0.978** (797) |
+
+ > Sample counts in parentheses.
+
+ ## Original Work
+
+ - **Paper**: Vogels et al., "Web2Text: Deep Structured Boilerplate Removal" (ECIR 2018) — [arxiv.org/abs/1801.02607](https://arxiv.org/abs/1801.02607)
+ - **Original implementation** (Scala): [github.com/dalab/web2text](https://github.com/dalab/web2text)
@@ -0,0 +1,88 @@
+ # web2textpy
+
+ Python reimplementation of the [Web2Text](https://github.com/dalab/web2text) pipeline for labeling HTML DOM nodes as **content** or **boilerplate** using paired `(raw_html, clean_text)` data.
+
+ ## Installation
+
+ ```bash
+ uv add web2textpy
+ ```
+
+ ## Quick Start
+
+ ```python
+ from datasets import load_dataset
+ from web2text import run_pipeline
+
+ ds = load_dataset("williambrach/html-boilerplate-labeled", split="test")
+ row = ds[0]
+
+ tree, extracted_text, metrics = run_pipeline(row["html"], row["text"])
+
+ print(extracted_text[:200])
+ print(metrics)
+ ```
+
+ ## Step-by-Step API
+
+ Each stage of the pipeline is exposed as a standalone function:
+
+ ```python
+ from web2text import build_cdom, extract_leaves, align, label_nodes, extract_text, evaluate
+
+ # 1. Parse HTML into a collapsed DOM tree
+ tree = build_cdom(html_string)
+
+ # 2. Extract ordered text-bearing leaf nodes
+ leaves = extract_leaves(tree) # [(element, "normalized text"), ...]
+
+ # 3. Align leaf texts against ground-truth clean text
+ scores = align(leaves, clean_text) # {leaf_id: 0.0-1.0 match score}
+
+ # 4. Label each node as "content" or "boilerplate"
+ tree = label_nodes(tree, scores, threshold=0.667)
+
+ # 5. Extract text from content-labeled nodes
+ result = extract_text(tree)
+
+ # 6. Evaluate against ground truth
+ metrics = evaluate(result, clean_text)
+ # => {'token_f1': 0.99, 'precision': 0.99, 'recall': 0.99, 'rouge1_f': 0.99, 'bleu': 98.5, 'chrf': 98.8}
+ ```
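+
+ The `metrics` keys mirror standard text-extraction measures. As a point of reference, a token-overlap F1 can be computed as in the sketch below; this is a minimal illustration assuming plain whitespace tokenization, not necessarily the tokenization or exact definition `evaluate` uses:
+
+ ```python
+ from collections import Counter
+
+ def token_overlap_f1(pred: str, ref: str) -> dict[str, float]:
+     """Precision/recall/F1 over multiset token overlap (whitespace tokens)."""
+     pred_toks, ref_toks = pred.split(), ref.split()
+     # Multiset intersection counts each shared token at most min(count) times.
+     overlap = sum((Counter(pred_toks) & Counter(ref_toks)).values())
+     p = overlap / len(pred_toks) if pred_toks else 0.0
+     r = overlap / len(ref_toks) if ref_toks else 0.0
+     f1 = 2 * p * r / (p + r) if (p + r) else 0.0
+     return {"precision": p, "recall": r, "token_f1": f1}
+ ```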
+
+ ## How the Matching Algorithm Works
+
+ Given raw HTML and its known clean text, the algorithm determines which DOM nodes are content versus boilerplate in six steps:
+
+ 1. **Simplify the DOM** — strip non-content tags (`<script>`, `<style>`, etc.) and collapse single-child chains into a Collapsed DOM (CDOM) representation
+ 2. **Collect leaf text** — walk the CDOM, concatenate text from every leaf node into one source string with tracked character offsets
+ 3. **Find anchors** — identify 10-character substrings that appear exactly once in both the source and clean text, splitting the problem into independent segments (see the sketch after the diagram below)
+ 4. **DP alignment** — for each segment between anchors, run character-level dynamic programming with affine gap penalties to map source characters to clean-text characters
+ 5. **Score leaves** — map alignment results back to leaf boundaries via stored offsets, giving each leaf a score: `matched_chars / total_chars`
+ 6. **Label nodes** — leaves scoring above `0.667` are labeled `"content"`, the rest `"boilerplate"`, with labels propagating upward to parents
+
+ ![Alignment pipeline: extract leaf texts → anchor matching → DP alignment → per-leaf scores](assets/image1.png)
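+
+ To make the anchoring step concrete, here is a minimal, self-contained sketch of step 3. The function name `find_anchors` and the greedy crossing filter are illustrative assumptions, not the package's actual implementation:
+
+ ```python
+ from collections import Counter
+
+ def find_anchors(source: str, clean: str, k: int = 10) -> list[tuple[int, int]]:
+     """Return (source_offset, clean_offset) pairs for k-char substrings
+     that occur exactly once in both strings."""
+     src_counts = Counter(source[i:i + k] for i in range(len(source) - k + 1))
+     cln_counts = Counter(clean[i:i + k] for i in range(len(clean) - k + 1))
+     # Keep k-grams that are unique in *both* strings.
+     unique = [g for g, c in src_counts.items() if c == 1 and cln_counts.get(g) == 1]
+     pairs = sorted((source.index(g), clean.index(g)) for g in unique)
+     # Greedily drop anchors whose clean-text offsets would cross an earlier
+     # anchor, so the surviving pairs split both strings into ordered,
+     # independent segments for the DP in step 4.
+     anchors, last = [], -1
+     for s_off, c_off in pairs:
+         if c_off > last:
+             anchors.append((s_off, c_off))
+             last = c_off
+     return anchors
+ ```
+
+ Each surviving anchor pair bounds a segment; step 4 then only has to align the much shorter text between consecutive anchors.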
+
+
+ ## Dataset
+
+ Dataset: [williambrach/html-boilerplate-labeled](https://huggingface.co/datasets/williambrach/html-boilerplate-labeled) — 3,985 pages drawn from CleanEval, Dragnet, CETD, Readability, and other corpora.
+
+ | Source | Train (ROUGE-1 F) | Test (ROUGE-1 F) |
+ |--------------------|-------------------|------------------|
+ | readability | 0.993 (92) | 0.997 (23) |
+ | scrapinghub | 0.991 (145) | 0.996 (36) |
+ | cetd | 0.993 (560) | 0.987 (140) |
+ | google-trends-2017 | 0.986 (144) | 0.995 (36) |
+ | cleanportaleval | 0.985 (57) | 0.971 (14) |
+ | cleaneval | 0.985 (590) | 0.991 (148) |
+ | dragnet | 0.983 (1,103) | 0.983 (276) |
+ | l3s-gn1 | 0.920 (497) | 0.927 (124) |
+ | **Overall** | **0.976** (3,188) | **0.978** (797) |
+
+ > Sample counts in parentheses.
+
+ ## Original Work
+
+ - **Paper**: Vogels et al., "Web2Text: Deep Structured Boilerplate Removal" (ECIR 2018) — [arxiv.org/abs/1801.02607](https://arxiv.org/abs/1801.02607)
+ - **Original implementation** (Scala): [github.com/dalab/web2text](https://github.com/dalab/web2text)
@@ -0,0 +1,50 @@
+ [project]
+ name = "web2textpy"
+ version = "0.1.0"
+ description = "Python reimplementation of the Web2Text pipeline for labeling HTML DOM nodes as content or boilerplate"
+ readme = "README.md"
+ requires-python = ">=3.10"
+ license = "MIT"
+ authors = [
+     { name = "William Brach" },
+ ]
+ classifiers = [
+     "Development Status :: 3 - Alpha",
+     "Intended Audience :: Developers",
+     "Intended Audience :: Science/Research",
+     "License :: OSI Approved :: MIT License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+     "Programming Language :: Python :: 3.13",
+     "Topic :: Text Processing :: Markup :: HTML",
+ ]
+ dependencies = [
+     "lxml>=6.0.2",
+ ]
+
+ [project.optional-dependencies]
+ eval = [
+     "rouge-score>=0.1.2",
+     "sacrebleu>=2.6.0",
+ ]
+ cli = [
+     "datasets>=4.8.4",
+     "rouge-score>=0.1.2",
+     "sacrebleu>=2.6.0",
+ ]
+
+ [project.urls]
+ Homepage = "https://github.com/williambrach/web2textpy"
+ Repository = "https://github.com/williambrach/web2textpy"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["web2text.py"]
+
+ [tool.hatch.build.targets.sdist]
+ exclude = ["assets/", "evaluate.py", "push_to_hf.py"]