@wentorai/research-plugins 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +204 -0
- package/curated/analysis/README.md +64 -0
- package/curated/domains/README.md +104 -0
- package/curated/literature/README.md +53 -0
- package/curated/research/README.md +62 -0
- package/curated/tools/README.md +87 -0
- package/curated/writing/README.md +61 -0
- package/index.ts +39 -0
- package/mcp-configs/academic-db/ChatSpatial.json +17 -0
- package/mcp-configs/academic-db/academia-mcp.json +17 -0
- package/mcp-configs/academic-db/academic-paper-explorer.json +17 -0
- package/mcp-configs/academic-db/academic-search-mcp-server.json +17 -0
- package/mcp-configs/academic-db/agentinterviews-mcp.json +17 -0
- package/mcp-configs/academic-db/all-in-mcp.json +17 -0
- package/mcp-configs/academic-db/apple-health-mcp.json +17 -0
- package/mcp-configs/academic-db/arxiv-latex-mcp.json +17 -0
- package/mcp-configs/academic-db/arxiv-mcp-server.json +17 -0
- package/mcp-configs/academic-db/bgpt-mcp.json +17 -0
- package/mcp-configs/academic-db/biomcp.json +17 -0
- package/mcp-configs/academic-db/biothings-mcp.json +17 -0
- package/mcp-configs/academic-db/catalysishub-mcp-server.json +17 -0
- package/mcp-configs/academic-db/clinicaltrialsgov-mcp-server.json +17 -0
- package/mcp-configs/academic-db/deep-research-mcp.json +17 -0
- package/mcp-configs/academic-db/dicom-mcp.json +17 -0
- package/mcp-configs/academic-db/enrichr-mcp-server.json +17 -0
- package/mcp-configs/academic-db/fec-mcp-server.json +17 -0
- package/mcp-configs/academic-db/fhir-mcp-server-themomentum.json +17 -0
- package/mcp-configs/academic-db/fhir-mcp.json +19 -0
- package/mcp-configs/academic-db/gget-mcp.json +17 -0
- package/mcp-configs/academic-db/google-researcher-mcp.json +17 -0
- package/mcp-configs/academic-db/idea-reality-mcp.json +17 -0
- package/mcp-configs/academic-db/legiscan-mcp.json +19 -0
- package/mcp-configs/academic-db/lex.json +17 -0
- package/mcp-configs/ai-platform/Adaptive-Graph-of-Thoughts-MCP-server.json +17 -0
- package/mcp-configs/ai-platform/ai-counsel.json +17 -0
- package/mcp-configs/ai-platform/atlas-mcp-server.json +17 -0
- package/mcp-configs/ai-platform/counsel-mcp.json +17 -0
- package/mcp-configs/ai-platform/cross-llm-mcp.json +17 -0
- package/mcp-configs/ai-platform/gptr-mcp.json +17 -0
- package/mcp-configs/browser/decipher-research-agent.json +17 -0
- package/mcp-configs/browser/deep-research.json +17 -0
- package/mcp-configs/browser/everything-claude-code.json +17 -0
- package/mcp-configs/browser/gpt-researcher.json +17 -0
- package/mcp-configs/browser/heurist-agent-framework.json +17 -0
- package/mcp-configs/data-platform/4everland-hosting-mcp.json +17 -0
- package/mcp-configs/data-platform/context-keeper.json +17 -0
- package/mcp-configs/data-platform/context7.json +19 -0
- package/mcp-configs/data-platform/contextstream-mcp.json +17 -0
- package/mcp-configs/data-platform/email-mcp.json +17 -0
- package/mcp-configs/note-knowledge/ApeRAG.json +17 -0
- package/mcp-configs/note-knowledge/In-Memoria.json +17 -0
- package/mcp-configs/note-knowledge/agent-memory.json +17 -0
- package/mcp-configs/note-knowledge/aimemo.json +17 -0
- package/mcp-configs/note-knowledge/biel-mcp.json +19 -0
- package/mcp-configs/note-knowledge/cognee.json +17 -0
- package/mcp-configs/note-knowledge/context-awesome.json +17 -0
- package/mcp-configs/note-knowledge/context-mcp.json +17 -0
- package/mcp-configs/note-knowledge/conversation-handoff-mcp.json +17 -0
- package/mcp-configs/note-knowledge/cortex.json +17 -0
- package/mcp-configs/note-knowledge/devrag.json +17 -0
- package/mcp-configs/note-knowledge/easy-obsidian-mcp.json +17 -0
- package/mcp-configs/note-knowledge/engram.json +17 -0
- package/mcp-configs/note-knowledge/gnosis-mcp.json +17 -0
- package/mcp-configs/note-knowledge/graphlit-mcp-server.json +19 -0
- package/mcp-configs/reference-mgr/arxiv-cli.json +17 -0
- package/mcp-configs/reference-mgr/arxiv-search-mcp.json +17 -0
- package/mcp-configs/reference-mgr/chiken.json +17 -0
- package/mcp-configs/reference-mgr/claude-scholar.json +17 -0
- package/mcp-configs/reference-mgr/devonthink-mcp.json +17 -0
- package/mcp-configs/registry.json +447 -0
- package/openclaw.plugin.json +21 -0
- package/package.json +61 -0
- package/skills/analysis/dataviz/color-accessibility-guide/SKILL.md +230 -0
- package/skills/analysis/dataviz/geospatial-viz-guide/SKILL.md +218 -0
- package/skills/analysis/dataviz/interactive-viz-guide/SKILL.md +287 -0
- package/skills/analysis/dataviz/network-visualization-guide/SKILL.md +195 -0
- package/skills/analysis/dataviz/publication-figures-guide/SKILL.md +238 -0
- package/skills/analysis/dataviz/python-dataviz-guide/SKILL.md +195 -0
- package/skills/analysis/econometrics/causal-inference-guide/SKILL.md +197 -0
- package/skills/analysis/econometrics/iv-regression-guide/SKILL.md +198 -0
- package/skills/analysis/econometrics/panel-data-guide/SKILL.md +274 -0
- package/skills/analysis/econometrics/robustness-checks/SKILL.md +250 -0
- package/skills/analysis/econometrics/stata-regression/SKILL.md +117 -0
- package/skills/analysis/econometrics/time-series-guide/SKILL.md +235 -0
- package/skills/analysis/statistics/bayesian-statistics-guide/SKILL.md +221 -0
- package/skills/analysis/statistics/hypothesis-testing-guide/SKILL.md +210 -0
- package/skills/analysis/statistics/meta-analysis-guide/SKILL.md +206 -0
- package/skills/analysis/statistics/nonparametric-tests-guide/SKILL.md +221 -0
- package/skills/analysis/statistics/power-analysis-guide/SKILL.md +240 -0
- package/skills/analysis/statistics/sem-guide/SKILL.md +231 -0
- package/skills/analysis/statistics/survival-analysis-guide/SKILL.md +195 -0
- package/skills/analysis/wrangling/missing-data-handling/SKILL.md +224 -0
- package/skills/analysis/wrangling/pandas-data-wrangling/SKILL.md +242 -0
- package/skills/analysis/wrangling/questionnaire-design-guide/SKILL.md +234 -0
- package/skills/analysis/wrangling/text-mining-guide/SKILL.md +225 -0
- package/skills/domains/ai-ml/computer-vision-guide/SKILL.md +213 -0
- package/skills/domains/ai-ml/deep-learning-papers-guide/SKILL.md +200 -0
- package/skills/domains/ai-ml/llm-evaluation-guide/SKILL.md +194 -0
- package/skills/domains/ai-ml/prompt-engineering-research/SKILL.md +233 -0
- package/skills/domains/ai-ml/reinforcement-learning-guide/SKILL.md +254 -0
- package/skills/domains/ai-ml/transformer-architecture-guide/SKILL.md +233 -0
- package/skills/domains/biomedical/clinical-research-guide/SKILL.md +232 -0
- package/skills/domains/biomedical/clinicaltrials-api/SKILL.md +177 -0
- package/skills/domains/biomedical/epidemiology-guide/SKILL.md +200 -0
- package/skills/domains/biomedical/genomics-analysis-guide/SKILL.md +270 -0
- package/skills/domains/business/market-analysis-guide/SKILL.md +112 -0
- package/skills/domains/business/strategic-management-guide/SKILL.md +154 -0
- package/skills/domains/chemistry/computational-chemistry-guide/SKILL.md +266 -0
- package/skills/domains/chemistry/retrosynthesis-guide/SKILL.md +215 -0
- package/skills/domains/cs/algorithms-complexity-guide/SKILL.md +194 -0
- package/skills/domains/cs/dblp-api/SKILL.md +129 -0
- package/skills/domains/cs/software-engineering-research/SKILL.md +218 -0
- package/skills/domains/ecology/biodiversity-data-guide/SKILL.md +296 -0
- package/skills/domains/ecology/conservation-biology-guide/SKILL.md +198 -0
- package/skills/domains/ecology/gbif-api/SKILL.md +158 -0
- package/skills/domains/ecology/inaturalist-api/SKILL.md +173 -0
- package/skills/domains/economics/behavioral-economics-guide/SKILL.md +239 -0
- package/skills/domains/economics/development-economics-guide/SKILL.md +181 -0
- package/skills/domains/economics/fred-api/SKILL.md +189 -0
- package/skills/domains/education/curriculum-design-guide/SKILL.md +144 -0
- package/skills/domains/education/learning-science-guide/SKILL.md +150 -0
- package/skills/domains/finance/financial-data-analysis/SKILL.md +152 -0
- package/skills/domains/finance/quantitative-finance-guide/SKILL.md +151 -0
- package/skills/domains/geoscience/climate-science-guide/SKILL.md +158 -0
- package/skills/domains/geoscience/gis-remote-sensing-guide/SKILL.md +129 -0
- package/skills/domains/humanities/digital-humanities-guide/SKILL.md +181 -0
- package/skills/domains/humanities/philosophy-research-guide/SKILL.md +148 -0
- package/skills/domains/law/courtlistener-api/SKILL.md +213 -0
- package/skills/domains/law/legal-research-guide/SKILL.md +250 -0
- package/skills/domains/math/linear-algebra-applications/SKILL.md +227 -0
- package/skills/domains/math/numerical-methods-guide/SKILL.md +236 -0
- package/skills/domains/math/oeis-api/SKILL.md +158 -0
- package/skills/domains/pharma/clinical-pharmacology-guide/SKILL.md +165 -0
- package/skills/domains/pharma/drug-development-guide/SKILL.md +177 -0
- package/skills/domains/physics/computational-physics-guide/SKILL.md +300 -0
- package/skills/domains/physics/nasa-ads-api/SKILL.md +150 -0
- package/skills/domains/physics/quantum-computing-guide/SKILL.md +234 -0
- package/skills/domains/social-science/social-research-methods/SKILL.md +194 -0
- package/skills/domains/social-science/survey-research-guide/SKILL.md +182 -0
- package/skills/literature/discovery/citation-alert-guide/SKILL.md +154 -0
- package/skills/literature/discovery/conference-proceedings-guide/SKILL.md +142 -0
- package/skills/literature/discovery/literature-mapping-guide/SKILL.md +175 -0
- package/skills/literature/discovery/paper-tracking-guide/SKILL.md +211 -0
- package/skills/literature/discovery/rss-paper-feeds/SKILL.md +214 -0
- package/skills/literature/discovery/semantic-scholar-recs-guide/SKILL.md +164 -0
- package/skills/literature/fulltext/doaj-api/SKILL.md +120 -0
- package/skills/literature/fulltext/interlibrary-loan-guide/SKILL.md +163 -0
- package/skills/literature/fulltext/open-access-guide/SKILL.md +183 -0
- package/skills/literature/fulltext/pmc-oai-api/SKILL.md +184 -0
- package/skills/literature/fulltext/preprint-servers-guide/SKILL.md +128 -0
- package/skills/literature/fulltext/repository-harvesting-guide/SKILL.md +207 -0
- package/skills/literature/fulltext/unpaywall-api/SKILL.md +113 -0
- package/skills/literature/metadata/altmetrics-guide/SKILL.md +132 -0
- package/skills/literature/metadata/citation-network-guide/SKILL.md +236 -0
- package/skills/literature/metadata/crossref-api/SKILL.md +133 -0
- package/skills/literature/metadata/datacite-api/SKILL.md +126 -0
- package/skills/literature/metadata/doi-resolution-guide/SKILL.md +168 -0
- package/skills/literature/metadata/h-index-guide/SKILL.md +183 -0
- package/skills/literature/metadata/journal-metrics-guide/SKILL.md +188 -0
- package/skills/literature/metadata/opencitations-api/SKILL.md +128 -0
- package/skills/literature/metadata/orcid-api/SKILL.md +136 -0
- package/skills/literature/metadata/orcid-integration-guide/SKILL.md +178 -0
- package/skills/literature/search/arxiv-api/SKILL.md +95 -0
- package/skills/literature/search/biorxiv-api/SKILL.md +123 -0
- package/skills/literature/search/boolean-search-guide/SKILL.md +199 -0
- package/skills/literature/search/citation-chaining-guide/SKILL.md +148 -0
- package/skills/literature/search/database-comparison-guide/SKILL.md +100 -0
- package/skills/literature/search/europe-pmc-api/SKILL.md +120 -0
- package/skills/literature/search/google-scholar-guide/SKILL.md +182 -0
- package/skills/literature/search/mesh-terms-guide/SKILL.md +164 -0
- package/skills/literature/search/openalex-api/SKILL.md +134 -0
- package/skills/literature/search/pubmed-api/SKILL.md +130 -0
- package/skills/literature/search/scientify-literature-survey/SKILL.md +203 -0
- package/skills/literature/search/semantic-scholar-api/SKILL.md +134 -0
- package/skills/literature/search/systematic-search-strategy/SKILL.md +214 -0
- package/skills/research/automation/ai-scientist-guide/SKILL.md +228 -0
- package/skills/research/automation/data-collection-automation/SKILL.md +248 -0
- package/skills/research/automation/research-workflow-automation/SKILL.md +266 -0
- package/skills/research/deep-research/meta-synthesis-guide/SKILL.md +174 -0
- package/skills/research/deep-research/research-cog/SKILL.md +153 -0
- package/skills/research/deep-research/scoping-review-guide/SKILL.md +217 -0
- package/skills/research/deep-research/systematic-review-guide/SKILL.md +250 -0
- package/skills/research/funding/figshare-api/SKILL.md +163 -0
- package/skills/research/funding/grant-writing-guide/SKILL.md +233 -0
- package/skills/research/funding/nsf-grant-guide/SKILL.md +206 -0
- package/skills/research/funding/open-science-guide/SKILL.md +255 -0
- package/skills/research/funding/zenodo-api/SKILL.md +174 -0
- package/skills/research/methodology/action-research-guide/SKILL.md +201 -0
- package/skills/research/methodology/experimental-design-guide/SKILL.md +236 -0
- package/skills/research/methodology/grad-school-guide/SKILL.md +182 -0
- package/skills/research/methodology/grounded-theory-guide/SKILL.md +171 -0
- package/skills/research/methodology/mixed-methods-guide/SKILL.md +208 -0
- package/skills/research/methodology/qualitative-research-guide/SKILL.md +234 -0
- package/skills/research/methodology/scientify-idea-generation/SKILL.md +222 -0
- package/skills/research/paper-review/paper-reading-assistant/SKILL.md +266 -0
- package/skills/research/paper-review/peer-review-guide/SKILL.md +227 -0
- package/skills/research/paper-review/rebuttal-writing-guide/SKILL.md +185 -0
- package/skills/research/paper-review/scientify-write-review-paper/SKILL.md +209 -0
- package/skills/tools/code-exec/jupyter-notebook-guide/SKILL.md +178 -0
- package/skills/tools/code-exec/python-reproducibility-guide/SKILL.md +341 -0
- package/skills/tools/code-exec/r-reproducibility-guide/SKILL.md +236 -0
- package/skills/tools/code-exec/sandbox-execution-guide/SKILL.md +221 -0
- package/skills/tools/diagram/mermaid-diagram-guide/SKILL.md +269 -0
- package/skills/tools/diagram/plantuml-guide/SKILL.md +397 -0
- package/skills/tools/diagram/scientific-illustration-guide/SKILL.md +225 -0
- package/skills/tools/document/anystyle-api/SKILL.md +199 -0
- package/skills/tools/document/grobid-pdf-parsing/SKILL.md +294 -0
- package/skills/tools/document/markdown-academic-guide/SKILL.md +217 -0
- package/skills/tools/document/pdf-extraction-guide/SKILL.md +321 -0
- package/skills/tools/knowledge-graph/knowledge-graph-construction/SKILL.md +306 -0
- package/skills/tools/knowledge-graph/ontology-design-guide/SKILL.md +214 -0
- package/skills/tools/knowledge-graph/rag-methodology-guide/SKILL.md +325 -0
- package/skills/tools/ocr-translate/formula-recognition-guide/SKILL.md +367 -0
- package/skills/tools/ocr-translate/handwriting-recognition-guide/SKILL.md +211 -0
- package/skills/tools/ocr-translate/latex-ocr-guide/SKILL.md +204 -0
- package/skills/tools/ocr-translate/multilingual-research-guide/SKILL.md +234 -0
- package/skills/tools/scraping/academic-web-scraping/SKILL.md +326 -0
- package/skills/tools/scraping/api-data-collection-guide/SKILL.md +301 -0
- package/skills/tools/scraping/web-scraping-ethics-guide/SKILL.md +250 -0
- package/skills/writing/citation/bibtex-management-guide/SKILL.md +246 -0
- package/skills/writing/citation/citation-style-guide/SKILL.md +248 -0
- package/skills/writing/citation/reference-manager-comparison/SKILL.md +208 -0
- package/skills/writing/citation/zotero-api/SKILL.md +188 -0
- package/skills/writing/composition/abstract-writing-guide/SKILL.md +188 -0
- package/skills/writing/composition/discussion-writing-guide/SKILL.md +194 -0
- package/skills/writing/composition/introduction-writing-guide/SKILL.md +194 -0
- package/skills/writing/composition/literature-review-writing/SKILL.md +196 -0
- package/skills/writing/composition/methods-section-guide/SKILL.md +185 -0
- package/skills/writing/composition/response-to-reviewers/SKILL.md +215 -0
- package/skills/writing/composition/scientific-writing-guide/SKILL.md +152 -0
- package/skills/writing/latex/bibliography-management-guide/SKILL.md +206 -0
- package/skills/writing/latex/latex-drawing-guide/SKILL.md +234 -0
- package/skills/writing/latex/latex-ecosystem-guide/SKILL.md +240 -0
- package/skills/writing/latex/math-typesetting-guide/SKILL.md +231 -0
- package/skills/writing/latex/overleaf-collaboration-guide/SKILL.md +211 -0
- package/skills/writing/latex/tikz-diagrams-guide/SKILL.md +211 -0
- package/skills/writing/polish/academic-translation-guide/SKILL.md +175 -0
- package/skills/writing/polish/academic-writing-refiner/SKILL.md +143 -0
- package/skills/writing/polish/ai-writing-humanizer/SKILL.md +178 -0
- package/skills/writing/polish/grammar-checker-guide/SKILL.md +184 -0
- package/skills/writing/polish/plagiarism-detection-guide/SKILL.md +167 -0
- package/skills/writing/templates/beamer-presentation-guide/SKILL.md +263 -0
- package/skills/writing/templates/conference-paper-template/SKILL.md +219 -0
- package/skills/writing/templates/thesis-template-guide/SKILL.md +200 -0
- package/skills/writing/templates/thesis-writing-guide/SKILL.md +220 -0
- package/src/tools/arxiv.ts +131 -0
- package/src/tools/crossref.ts +112 -0
- package/src/tools/openalex.ts +174 -0
- package/src/tools/pubmed.ts +166 -0
- package/src/tools/semantic-scholar.ts +108 -0
- package/src/tools/unpaywall.ts +58 -0
|
@@ -0,0 +1,211 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: handwriting-recognition-guide
|
|
3
|
+
description: "Apply handwriting OCR to digitize historical and archival documents"
|
|
4
|
+
metadata:
|
|
5
|
+
openclaw:
|
|
6
|
+
emoji: "fountain_pen"
|
|
7
|
+
category: "tools"
|
|
8
|
+
subcategory: "ocr-translate"
|
|
9
|
+
keywords: ["handwriting recognition", "HTR", "OCR", "historical documents", "digitization", "manuscript transcription"]
|
|
10
|
+
source: "wentor-research-plugins"
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
# Handwriting Recognition Guide
|
|
14
|
+
|
|
15
|
+
A skill for applying handwriting text recognition (HTR) to digitize historical documents, archival manuscripts, and handwritten research notes. Covers HTR platforms, image preprocessing, model training, post-correction, and integration into digital humanities research workflows.
|
|
16
|
+
|
|
17
|
+
## Handwriting Recognition vs. Printed OCR
|
|
18
|
+
|
|
19
|
+
### Key Differences
|
|
20
|
+
|
|
21
|
+
```
|
|
22
|
+
Printed Text OCR:
|
|
23
|
+
- Characters are standardized and uniform
|
|
24
|
+
- Well-solved problem (>99% accuracy on clean scans)
|
|
25
|
+
- Tools: Tesseract, ABBYY FineReader, Adobe Acrobat
|
|
26
|
+
|
|
27
|
+
Handwriting Text Recognition (HTR):
|
|
28
|
+
- Characters vary by writer, mood, pen, era
|
|
29
|
+
- Much harder -- typically 85-95% character accuracy
|
|
30
|
+
- Requires training on specific handwriting styles
|
|
31
|
+
- Tools: Transkribus, Kraken, HTR-Flor, Google Cloud Vision
|
|
32
|
+
|
|
33
|
+
Challenges specific to historical documents:
|
|
34
|
+
- Faded ink, bleed-through, stains, tears
|
|
35
|
+
- Archaic letterforms and abbreviations
|
|
36
|
+
- Multiple hands in one document
|
|
37
|
+
- Non-standard orthography
|
|
38
|
+
- Mixed languages and scripts
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
## HTR Platforms
|
|
42
|
+
|
|
43
|
+
### Transkribus (State of the Art for Historical Documents)
|
|
44
|
+
|
|
45
|
+
```
|
|
46
|
+
Transkribus is the leading platform for historical HTR.
|
|
47
|
+
|
|
48
|
+
Workflow:
|
|
49
|
+
1. Upload document images
|
|
50
|
+
2. Automatic layout analysis (detect text regions and baselines)
|
|
51
|
+
3. Manual correction of layout (if needed)
|
|
52
|
+
4. Apply a pre-trained HTR model (or train your own)
|
|
53
|
+
5. Review and correct transcription
|
|
54
|
+
6. Export as TEXT, PAGE XML, TEI, DOCX, or PDF
|
|
55
|
+
|
|
56
|
+
Pre-trained models:
|
|
57
|
+
- Noscemus GM (general model for Latin scripts)
|
|
58
|
+
- English Writing M1 (18th-19th century English)
|
|
59
|
+
- German Kurrent models
|
|
60
|
+
- Dutch, French, Italian, Spanish models available
|
|
61
|
+
|
|
62
|
+
Training a custom model:
|
|
63
|
+
- Requires ~15,000-25,000 words of ground truth (manually transcribed)
|
|
64
|
+
- Can start with a pre-trained base model and fine-tune
|
|
65
|
+
- Training takes 1-8 hours depending on dataset size
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### Other Tools
|
|
69
|
+
|
|
70
|
+
| Tool | Type | Strengths |
|
|
71
|
+
|------|------|----------|
|
|
72
|
+
| Transkribus | Cloud platform | Best for historical documents, active community |
|
|
73
|
+
| Kraken | Open source (Python) | Flexible, scriptable, custom training |
|
|
74
|
+
| eScriptorium | Open source (web) | Based on Kraken, collaborative interface |
|
|
75
|
+
| Google Cloud Vision | API | Good for modern handwriting, many languages |
|
|
76
|
+
| Azure AI Vision | API | Competitive with Google for modern text |
|
|
77
|
+
| HTR-Flor | Open source | Research-focused, PyTorch-based |
|
|
78
|
+
|
|
79
|
+
## Image Preprocessing
|
|
80
|
+
|
|
81
|
+
### Preparing Scans for HTR
|
|
82
|
+
|
|
83
|
+
```python
|
|
84
|
+
from PIL import Image, ImageFilter, ImageEnhance
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
def preprocess_document_image(image_path: str,
                              output_path: str,
                              contrast: float = 1.5,
                              threshold: int = 128) -> dict:
    """
    Preprocess a document scan for optimal HTR performance.

    Pipeline: grayscale -> contrast boost -> median denoise -> binarize.

    Args:
        image_path: Path to the input scan
        output_path: Path to save the preprocessed image
        contrast: Contrast enhancement factor (1.0 leaves the image unchanged)
        threshold: Binarization cutoff (0-255); pixels above become white

    Returns:
        Summary dict listing the steps applied plus further optional steps
        that may be needed for difficult originals.
    """
    img = Image.open(image_path)

    # Convert to grayscale
    img = img.convert("L")

    # Enhance contrast
    img = ImageEnhance.Contrast(img).enhance(contrast)

    # Remove speckle noise with a small median filter
    img = img.filter(ImageFilter.MedianFilter(size=3))

    # Binarize (convert to black and white)
    img = img.point(lambda x: 255 if x > threshold else 0, "1")

    img.save(output_path)

    return {
        "original": image_path,
        "processed": output_path,
        "steps_applied": [
            "Grayscale conversion",
            f"Contrast enhancement ({contrast}x)",
            "Median filter (noise removal)",
            f"Binarization (threshold={threshold})",
        ],
        "additional_steps_if_needed": [
            "Deskewing (correct rotation)",
            "Dewarping (correct page curvature)",
            "Bleed-through removal",
            "Background normalization",
        ],
    }
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
### Scanning Best Practices
|
|
133
|
+
|
|
134
|
+
```
|
|
135
|
+
Resolution: 300-400 DPI for most documents
|
|
136
|
+
600 DPI for fine handwriting or damaged originals
|
|
137
|
+
Color: Grayscale usually sufficient; color for illuminated MSS
|
|
138
|
+
Format: TIFF (lossless) for archival; PNG for working copies
|
|
139
|
+
Lighting: Even, diffused light; avoid shadows and glare
|
|
140
|
+
Flatness: Use a book cradle or V-shaped scanner for bound volumes
|
|
141
|
+
Calibration: Include a color/grayscale chart for batch consistency
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
## Post-OCR Correction
|
|
145
|
+
|
|
146
|
+
### Semi-Automated Correction Workflow
|
|
147
|
+
|
|
148
|
+
```python
|
|
149
|
+
def post_correction_workflow(raw_transcription: str,
                             dictionary: set,
                             confidence_threshold: float = 0.8) -> dict:
    """
    Post-correction strategy for HTR output.

    Every token is checked against a period-appropriate dictionary;
    unknown tokens are kept in the text but flagged for manual review.

    Args:
        raw_transcription: Raw OCR/HTR text output
        dictionary: Set of valid words for the document's language/period
        confidence_threshold: Below this, flag for manual review
    """
    tokens = raw_transcription.split()
    corrected: list = []
    flagged: list = []

    for token in tokens:
        bare = token.strip(".,;:!?()[]")
        if bare.lower() not in dictionary:
            # Unknown word: record its position but keep it in the text.
            flagged.append({
                "word": token,
                "position": len(corrected),
                "suggestion": "Manual review needed",
            })
        corrected.append(token)

    word_count = len(tokens)
    return {
        "total_words": word_count,
        "flagged_words": len(flagged),
        "estimated_accuracy": 1 - len(flagged) / max(word_count, 1),
        "flagged": flagged[:20],
        "correction_strategies": [
            "Dictionary-based spell checking (period-appropriate dictionary)",
            "N-gram language model for context-aware correction",
            "Crowdsourcing (Zooniverse, FromThePage)",
            "Double-keying (two independent transcribers, compare)",
            "AI-assisted correction with human verification",
        ],
    }
|
|
189
|
+
```
|
|
190
|
+
|
|
191
|
+
## Integration with Research Workflows
|
|
192
|
+
|
|
193
|
+
### From Transcription to Analysis
|
|
194
|
+
|
|
195
|
+
```
|
|
196
|
+
1. Transcribe documents using HTR
|
|
197
|
+
2. Correct and validate transcriptions
|
|
198
|
+
3. Encode in TEI-XML for digital editions
|
|
199
|
+
4. Apply NLP for named entity recognition, topic modeling
|
|
200
|
+
5. Link entities to knowledge bases (Wikidata, VIAF)
|
|
201
|
+
6. Publish as a searchable digital archive
|
|
202
|
+
|
|
203
|
+
Tools for TEI encoding:
|
|
204
|
+
- oXygen XML Editor (standard for digital humanities)
|
|
205
|
+
- TEI Publisher (web-based publishing platform)
|
|
206
|
+
- FromThePage (collaborative transcription with TEI export)
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
## Evaluating HTR Accuracy
|
|
210
|
+
|
|
211
|
+
Report Character Error Rate (CER) and Word Error Rate (WER) on a held-out test set. CER below 5% is generally considered production-quality for historical documents. Always compare against a manually created ground truth. Report accuracy separately for different document types, hands, or time periods if your corpus is heterogeneous.
|
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: latex-ocr-guide
|
|
3
|
+
description: "Extract and convert mathematical formulas from images and PDFs to LaTeX code"
|
|
4
|
+
metadata:
|
|
5
|
+
openclaw:
|
|
6
|
+
emoji: "mag"
|
|
7
|
+
category: "tools"
|
|
8
|
+
subcategory: "ocr-translate"
|
|
9
|
+
keywords: ["math OCR", "formula recognition", "LaTeX OCR", "document OCR", "equation extraction"]
|
|
10
|
+
source: "wentor"
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
# LaTeX OCR Guide
|
|
14
|
+
|
|
15
|
+
A skill for extracting mathematical formulas from images, PDFs, and handwritten notes and converting them to LaTeX code. Covers tool selection, batch processing workflows, and quality verification techniques.
|
|
16
|
+
|
|
17
|
+
## Tool Landscape
|
|
18
|
+
|
|
19
|
+
### Available Math OCR Tools
|
|
20
|
+
|
|
21
|
+
| Tool | Type | Accuracy | Best For | License |
|
|
22
|
+
|------|------|----------|----------|---------|
|
|
23
|
+
| Mathpix | Cloud API | Very high | All math, diagrams | Commercial ($) |
|
|
24
|
+
| LaTeX-OCR (Lukas Blecher) | Local model | High | Printed formulas | MIT |
|
|
25
|
+
| Pix2Tex | Local model | High | Single equations | MIT |
|
|
26
|
+
| Nougat (Meta) | Local model | High | Full papers with math | MIT |
|
|
27
|
+
| InftyReader | Desktop | High | Printed math, Japanese | Commercial |
|
|
28
|
+
| img2latex | Local model | Moderate | Simple equations | MIT |
|
|
29
|
+
|
|
30
|
+
### Quick Start with LaTeX-OCR
|
|
31
|
+
|
|
32
|
+
```bash
|
|
33
|
+
# Install the open-source LaTeX-OCR package
|
|
34
|
+
pip install "pix2tex[gui]"
|
|
35
|
+
|
|
36
|
+
# Or install from GitHub for latest version
|
|
37
|
+
pip install git+https://github.com/lukas-blecher/LaTeX-OCR.git
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
```python
|
|
41
|
+
from pix2tex.cli import LatexOCR
|
|
42
|
+
from PIL import Image
|
|
43
|
+
|
|
44
|
+
def recognize_formula(image_path: str) -> str:
    """
    Convert a formula image to LaTeX code.

    The pix2tex model is loaded lazily on first use and cached on the
    function object, so repeated calls do not reload the weights.

    Args:
        image_path: Path to image containing a mathematical formula
    Returns:
        LaTeX string representation of the formula
    """
    # Loading LatexOCR weights is expensive; do it once and reuse.
    model = getattr(recognize_formula, "_model", None)
    if model is None:
        model = LatexOCR()
        recognize_formula._model = model

    img = Image.open(image_path)
    return model(img)
|
|
57
|
+
|
|
58
|
+
# Single image
|
|
59
|
+
result = recognize_formula('formula.png')
|
|
60
|
+
print(result)
|
|
61
|
+
# Output: E = mc^{2}
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## Batch Processing Workflow
|
|
65
|
+
|
|
66
|
+
### Processing Multiple Formulas from a PDF
|
|
67
|
+
|
|
68
|
+
```python
|
|
69
|
+
import fitz # PyMuPDF
|
|
70
|
+
from PIL import Image
|
|
71
|
+
import io
|
|
72
|
+
|
|
73
|
+
def extract_formulas_from_pdf(pdf_path: str, output_dir: str,
                              min_height: int = 30) -> list[dict]:
    """
    Extract formula regions from a PDF and convert to LaTeX.

    Embedded images at least ``min_height`` pixels tall are treated as
    candidate formula regions, saved to ``output_dir`` for later visual
    inspection, and passed through the pix2tex model.

    Args:
        pdf_path: Path to the PDF file
        output_dir: Directory to save extracted formula images
        min_height: Minimum height (px) to consider as formula region

    Returns:
        One dict per candidate image with page number, image index and
        either the recognized LaTeX (plus a rough confidence tag) or the
        error message if recognition failed.
    """
    import os

    os.makedirs(output_dir, exist_ok=True)
    doc = fitz.open(pdf_path)
    model = LatexOCR()
    results = []

    for page_num in range(len(doc)):
        page = doc[page_num]
        # Enumerate images embedded in the page's resources
        for img_idx, img_info in enumerate(page.get_images(full=True)):
            xref = img_info[0]
            pix = fitz.Pixmap(doc, xref)

            if pix.height < min_height:
                continue  # too small to be a displayed formula

            img_data = pix.tobytes("png")
            img = Image.open(io.BytesIO(img_data))

            # Keep a copy on disk so flagged results can be checked by eye.
            img.save(os.path.join(
                output_dir, f"p{page_num + 1}_img{img_idx}.png"))

            try:
                latex = model(img)
                results.append({
                    'page': page_num + 1,
                    'image_index': img_idx,
                    'latex': latex,
                    # Very short output usually means a misfire.
                    'confidence': 'high' if len(latex) > 3 else 'low'
                })
            except Exception as e:
                results.append({
                    'page': page_num + 1,
                    'image_index': img_idx,
                    'latex': None,
                    'error': str(e)
                })

    return results
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
### Processing Handwritten Notes
|
|
120
|
+
|
|
121
|
+
For handwritten mathematics, preprocessing improves accuracy significantly:
|
|
122
|
+
|
|
123
|
+
```python
|
|
124
|
+
import cv2
|
|
125
|
+
import numpy as np
|
|
126
|
+
|
|
127
|
+
def preprocess_handwritten(image_path: str) -> Image.Image:
    """
    Preprocess a handwritten formula image for better OCR accuracy.

    Steps: denoise -> adaptive threshold -> dilate -> crop to content.
    """
    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # 1. Denoise
    img = cv2.fastNlMeansDenoising(img, h=10)

    # 2. Adaptive thresholding for varying illumination
    img = cv2.adaptiveThreshold(
        img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY, 15, 8
    )

    # 3. Dilation to connect broken strokes
    kernel = np.ones((2, 2), np.uint8)
    img = cv2.dilate(img, kernel, iterations=1)

    # 4. Crop to content with padding. Ink is black (0) after
    #    THRESH_BINARY, so invert before locating non-zero pixels.
    coords = cv2.findNonZero(255 - img)
    if coords is not None:  # guard: a fully blank page has no ink at all
        x, y, w, h = cv2.boundingRect(coords)
        pad = 20
        img = img[max(0, y - pad):y + h + pad, max(0, x - pad):x + w + pad]

    return Image.fromarray(img)
|
|
153
|
+
```
|
|
154
|
+
|
|
155
|
+
## Using Mathpix API
|
|
156
|
+
|
|
157
|
+
For production-quality results, the Mathpix API provides the highest accuracy:
|
|
158
|
+
|
|
159
|
+
```python
|
|
160
|
+
import requests
|
|
161
|
+
import base64
|
|
162
|
+
|
|
163
|
+
def mathpix_ocr(image_path: str, app_id: str, app_key: str) -> dict:
    """
    Use the Mathpix API for high-accuracy math OCR.

    Args:
        image_path: Path to the formula image (PNG assumed by the data URI).
        app_id: Mathpix application ID.
        app_key: Mathpix application key.

    Returns:
        Parsed JSON response from the Mathpix /v3/text endpoint.

    Raises:
        requests.HTTPError: If the API responds with an error status.
        requests.Timeout: If the request exceeds the timeout.
    """
    with open(image_path, 'rb') as f:
        image_data = base64.b64encode(f.read()).decode()

    response = requests.post(
        'https://api.mathpix.com/v3/text',
        headers={
            'app_id': app_id,
            'app_key': app_key,
            'Content-type': 'application/json'
        },
        json={
            'src': f'data:image/png;base64,{image_data}',
            'formats': ['latex_styled', 'text'],
            'data_options': {'include_asciimath': True}
        },
        timeout=30,  # never hang indefinitely on a network call
    )
    # Surface HTTP-level failures (auth, rate limit) as exceptions rather
    # than returning an opaque error body to the caller.
    response.raise_for_status()
    return response.json()
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
## Verification and Correction
|
|
187
|
+
|
|
188
|
+
Always verify OCR output by rendering the LaTeX:
|
|
189
|
+
|
|
190
|
+
```python
|
|
191
|
+
import matplotlib.pyplot as plt
|
|
192
|
+
|
|
193
|
+
def verify_latex(latex_string: str, output_path: str = 'verify.png'):
    """Render a LaTeX formula to an image file so it can be checked by eye."""
    figure, axes = plt.subplots(figsize=(8, 2))
    axes.axis('off')
    # Wrap the formula in $...$ so matplotlib's mathtext engine renders it.
    axes.text(
        0.5, 0.5, f'${latex_string}$',
        fontsize=20, ha='center', va='center', transform=axes.transAxes,
    )
    figure.savefig(output_path, dpi=150, bbox_inches='tight')
    plt.close()
    print(f"Verification image saved to {output_path}")
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
Common OCR errors to watch for: confusing `l` with `1`, `O` with `0`, missing superscripts/subscripts, incorrect fraction nesting, and misrecognized Greek letters. Always proofread critical equations before submission.
|
|
@@ -0,0 +1,234 @@
|
|
|
1
|
+
---
|
|
2
|
+
name: multilingual-research-guide
|
|
3
|
+
description: "Strategies for translating academic papers while preserving technical accuracy"
|
|
4
|
+
metadata:
|
|
5
|
+
openclaw:
|
|
6
|
+
emoji: "globe_with_meridians"
|
|
7
|
+
category: "tools"
|
|
8
|
+
subcategory: "ocr-translate"
|
|
9
|
+
keywords: ["translation strategies", "document OCR", "math OCR", "academic writing", "multilingual research"]
|
|
10
|
+
source: "wentor"
|
|
11
|
+
---
|
|
12
|
+
|
|
13
|
+
# Academic Translation Guide
|
|
14
|
+
|
|
15
|
+
A skill for translating academic papers, theses, and research documents between languages while preserving technical precision, citation integrity, and discipline-specific terminology. Covers workflow design, terminology management, and quality assurance.
|
|
16
|
+
|
|
17
|
+
## Translation Workflow
|
|
18
|
+
|
|
19
|
+
### End-to-End Pipeline
|
|
20
|
+
|
|
21
|
+
```
|
|
22
|
+
Source Document
|
|
23
|
+
|
|
|
24
|
+
v
|
|
25
|
+
1. Document Preparation
|
|
26
|
+
- Extract text (OCR if scanned)
|
|
27
|
+
- Identify formulas, figures, tables (do NOT translate these)
|
|
28
|
+
- Build terminology glossary
|
|
29
|
+
|
|
|
30
|
+
v
|
|
31
|
+
2. Segmentation
|
|
32
|
+
- Split into translatable units (sentences/paragraphs)
|
|
33
|
+
- Tag non-translatable elements: equations, citations, proper nouns
|
|
34
|
+
|
|
|
35
|
+
v
|
|
36
|
+
3. Translation
|
|
37
|
+
- Apply machine translation (first pass)
|
|
38
|
+
- Human post-editing (second pass)
|
|
39
|
+
- Terminology consistency check (third pass)
|
|
40
|
+
|
|
|
41
|
+
v
|
|
42
|
+
4. Quality Assurance
|
|
43
|
+
- Back-translation verification (sample)
|
|
44
|
+
- Domain expert review
|
|
45
|
+
- Formatting and citation check
|
|
46
|
+
|
|
|
47
|
+
v
|
|
48
|
+
Target Document
|
|
49
|
+
```
|
|
50
|
+
|
|
51
|
+
## Terminology Management
|
|
52
|
+
|
|
53
|
+
### Building a Domain Glossary
|
|
54
|
+
|
|
55
|
+
```python
|
|
56
|
+
import json
|
|
57
|
+
|
|
58
|
+
def build_terminology_glossary(source_text: str, domain: str,
                               source_lang: str = 'zh',
                               target_lang: str = 'en') -> list[dict]:
    """
    Collect the domain glossary terms that actually occur in the source text.

    Args:
        source_text: Raw text of the source document
        domain: Research domain (e.g., 'machine_learning', 'biochemistry')
        source_lang: Source language code
        target_lang: Target language code
    Returns:
        List of terminology entries (one per term found in the text)
    """
    # Built-in glossaries, keyed by domain and then by language pair.
    glossaries = {
        'machine_learning': {
            'zh_en': {
                '过拟合': 'overfitting',
                '欠拟合': 'underfitting',
                '梯度下降': 'gradient descent',
                '损失函数': 'loss function',
                '卷积神经网络': 'convolutional neural network',
                '注意力机制': 'attention mechanism',
                '预训练模型': 'pre-trained model',
                '微调': 'fine-tuning',
                '批归一化': 'batch normalization',
                '学习率': 'learning rate'
            }
        },
        'biochemistry': {
            'zh_en': {
                '蛋白质折叠': 'protein folding',
                '酶动力学': 'enzyme kinetics',
                '基因表达': 'gene expression',
                '转录因子': 'transcription factor',
                '信号通路': 'signaling pathway',
                '代谢组学': 'metabolomics'
            }
        }
    }

    lang_pair = f'{source_lang}_{target_lang}'
    term_map = glossaries.get(domain, {}).get(lang_pair, {})

    # Keep only the terms that actually appear in the document.
    return [
        {
            'source': src_term,
            'target': tgt_term,
            'domain': domain,
            'verified': True,
            'notes': ''
        }
        for src_term, tgt_term in term_map.items()
        if src_term in source_text
    ]
|
|
113
|
+
```
|
|
114
|
+
|
|
115
|
+
### Terminology Consistency Enforcement
|
|
116
|
+
|
|
117
|
+
```python
|
|
118
|
+
def enforce_terminology(translated_text: str,
                        glossary: list[dict]) -> tuple[str, list[str]]:
    """
    Check and enforce terminology consistency in translated text.

    For every glossary entry, any known incorrect variant found in the text
    (case-insensitively) is reported and replaced with the canonical term.

    Args:
        translated_text: Machine- or human-translated target-language text.
        glossary: Entries with a 'target' term and an optional
            'incorrect_variants' list of strings to flag and replace.

    Returns:
        Tuple of (corrected_text, list of warnings)
    """
    # Hoisted out of the loop: the original re-imported `re` on every
    # variant of every entry.
    import re

    warnings = []
    corrected = translated_text

    for entry in glossary:
        target_term = entry['target']
        # Check for common mistranslations or inconsistent usage
        for variant in entry.get('incorrect_variants', []):
            if variant.lower() in corrected.lower():
                warnings.append(
                    f"Found '{variant}' -- should be '{target_term}'"
                )
                # Case-insensitive literal replacement of the bad variant.
                corrected = re.sub(
                    re.escape(variant), target_term, corrected,
                    flags=re.IGNORECASE
                )

    return corrected, warnings
|
|
146
|
+
```
|
|
147
|
+
|
|
148
|
+
## Machine Translation Integration
|
|
149
|
+
|
|
150
|
+
### Using DeepL API for Academic Text
|
|
151
|
+
|
|
152
|
+
```python
|
|
153
|
+
import deepl
|
|
154
|
+
|
|
155
|
+
def translate_academic_text(text: str, source_lang: str, target_lang: str,
                            auth_key: str, glossary_id: str | None = None) -> str:
    """
    Translate academic text using DeepL with an optional glossary.

    Args:
        text: Source text; XML/HTML tags are preserved by the engine.
        source_lang: Source language code (e.g. 'en', 'zh').
        target_lang: Target language code.
        auth_key: DeepL API authentication key.
        glossary_id: Optional ID of a DeepL glossary to apply, or None.

    Returns:
        The translated text.
    """
    translator = deepl.Translator(auth_key)

    result = translator.translate_text(
        text,
        source_lang=source_lang.upper(),
        target_lang=target_lang.upper(),
        formality="more",  # academic style
        glossary=glossary_id,
        preserve_formatting=True,
        tag_handling="xml"  # preserve XML/HTML tags (e.g. protected placeholders)
    )
    return result.text
|
|
172
|
+
```
|
|
173
|
+
|
|
174
|
+
### Protecting Non-Translatable Elements
|
|
175
|
+
|
|
176
|
+
Before sending text to any translation engine, protect elements that should not be translated:
|
|
177
|
+
|
|
178
|
+
```python
|
|
179
|
+
import re
|
|
180
|
+
|
|
181
|
+
def protect_elements(text: str) -> tuple[str, dict]:
    """
    Replace non-translatable elements with placeholders.
    Returns protected text and a mapping to restore later.

    Math (inline $...$, display $$...$$, equation environments), citations
    (\\cite{...} or author-year like "(Smith et al., 2020)"), and URLs are
    each swapped for a unique __LABEL_N__ placeholder.
    """
    placeholders = {}
    counter = 0

    def _stash(label: str, match: re.Match) -> str:
        # Record the matched fragment and hand back its unique placeholder.
        # Using a re.sub callback replaces each match in place, avoiding the
        # original's fragile pattern of mutating `text` while iterating
        # matches computed on the pre-mutation string.
        nonlocal counter
        key = f'__{label}_{counter}__'
        placeholders[key] = match.group()
        counter += 1
        return key

    # Protect LaTeX equations
    for pattern in [r'\$\$.*?\$\$', r'\$.*?\$', r'\\begin\{equation\}.*?\\end\{equation\}']:
        text = re.sub(pattern, lambda m: _stash('MATH', m), text, flags=re.DOTALL)

    # Protect citations
    text = re.sub(r'\\cite\{[^}]+\}|\([A-Z][a-z]+(?:\s+et\s+al\.)?,\s*\d{4}\)',
                  lambda m: _stash('CITE', m), text)

    # Protect URLs
    text = re.sub(r'https?://\S+', lambda m: _stash('URL', m), text)

    return text, placeholders
|
|
212
|
+
|
|
213
|
+
def restore_elements(text: str, placeholders: dict) -> str:
    """Restore protected elements from placeholders."""
    restored = text
    # Swap every placeholder back for the fragment it stood in for.
    for marker, fragment in placeholders.items():
        restored = restored.replace(marker, fragment)
    return restored
|
|
218
|
+
```
|
|
219
|
+
|
|
220
|
+
## Quality Assurance
|
|
221
|
+
|
|
222
|
+
### Back-Translation Verification
|
|
223
|
+
|
|
224
|
+
For critical documents, perform back-translation on a random 10-20% sample of paragraphs. Compare the back-translated text with the original to identify semantic drift. Flag any paragraph where back-translation diverges significantly from the source.
|
|
225
|
+
|
|
226
|
+
### Checklist Before Submission
|
|
227
|
+
|
|
228
|
+
1. All technical terms match the domain glossary
|
|
229
|
+
2. All equations, formulas, and figures are unchanged
|
|
230
|
+
3. All citations and references are intact and correctly formatted
|
|
231
|
+
4. Author names and institutional affiliations are not translated
|
|
232
|
+
5. Abbreviations are defined on first use in the target language
|
|
233
|
+
6. The abstract has been reviewed by a domain expert in the target language
|
|
234
|
+
7. Journal-specific terminology preferences have been applied
|