fast-sentence-segment 0.1.9__py3-none-any.whl → 1.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_sentence_segment/__init__.py +18 -18
- fast_sentence_segment/bp/__init__.py +1 -1
- fast_sentence_segment/bp/segmenter.py +65 -68
- fast_sentence_segment/cli.py +56 -0
- fast_sentence_segment/core/__init__.py +4 -0
- fast_sentence_segment/core/base_object.py +18 -0
- fast_sentence_segment/core/stopwatch.py +38 -0
- fast_sentence_segment/dmo/__init__.py +10 -6
- fast_sentence_segment/dmo/abbreviation_merger.py +146 -0
- fast_sentence_segment/dmo/abbreviation_splitter.py +95 -0
- fast_sentence_segment/dmo/abbreviations.py +96 -0
- fast_sentence_segment/dmo/bullet_point_cleaner.py +55 -55
- fast_sentence_segment/dmo/ellipsis_normalizer.py +45 -0
- fast_sentence_segment/dmo/newlines_to_periods.py +57 -57
- fast_sentence_segment/dmo/numbered_list_normalizer.py +47 -53
- fast_sentence_segment/dmo/post_process_sentences.py +48 -48
- fast_sentence_segment/dmo/question_exclamation_splitter.py +59 -0
- fast_sentence_segment/dmo/spacy_doc_segmenter.py +101 -101
- fast_sentence_segment/dmo/title_name_merger.py +152 -0
- fast_sentence_segment/svc/__init__.py +2 -2
- fast_sentence_segment/svc/perform_paragraph_segmentation.py +50 -50
- fast_sentence_segment/svc/perform_sentence_segmentation.py +165 -129
- fast_sentence_segment-1.2.0.dist-info/METADATA +189 -0
- fast_sentence_segment-1.2.0.dist-info/RECORD +27 -0
- {fast_sentence_segment-0.1.9.dist-info → fast_sentence_segment-1.2.0.dist-info}/WHEEL +1 -1
- fast_sentence_segment-1.2.0.dist-info/entry_points.txt +3 -0
- fast_sentence_segment-1.2.0.dist-info/licenses/LICENSE +21 -0
- fast_sentence_segment/dmo/delimiters_to_periods.py +0 -37
- fast_sentence_segment-0.1.9.dist-info/METADATA +0 -54
- fast_sentence_segment-0.1.9.dist-info/RECORD +0 -16
|
@@ -1,18 +1,18 @@
|
|
|
1
|
-
from .bp import *
|
|
2
|
-
from .svc import *
|
|
3
|
-
from .dmo import *
|
|
4
|
-
|
|
5
|
-
from .bp.segmenter import Segmenter
|
|
6
|
-
|
|
7
|
-
segment = Segmenter().input_text
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
def segment_text(input_text: str, flatten: bool = False) -> list:
|
|
11
|
-
results = segment(input_text)
|
|
12
|
-
|
|
13
|
-
if flatten:
|
|
14
|
-
flat = []
|
|
15
|
-
[[flat.append(y) for y in x] for x in results]
|
|
16
|
-
return flat
|
|
17
|
-
|
|
18
|
-
return results
|
|
1
|
+
from .bp import *
|
|
2
|
+
from .svc import *
|
|
3
|
+
from .dmo import *
|
|
4
|
+
|
|
5
|
+
from .bp.segmenter import Segmenter
|
|
6
|
+
|
|
7
|
+
segment = Segmenter().input_text
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def segment_text(input_text: str, flatten: bool = False) -> list:
    """Segment input text into paragraphs of sentences.

    Args:
        input_text (str): an input string of any length
        flatten (bool): when True, collapse the paragraph structure and
            return a single flat list of sentences

    Returns:
        list: a list of lists (one inner list of sentences per paragraph),
        or a single flat list of sentences when flatten=True
    """
    results = segment(input_text)

    if flatten:
        # flatten with a plain comprehension instead of the prior
        # list-comprehension-for-side-effects idiom
        # ([[flat.append(y) for y in x] for x in results])
        return [sentence for paragraph in results for sentence in paragraph]

    return results
|
|
@@ -1 +1 @@
|
|
|
1
|
-
from .segmenter import Segmenter
|
|
1
|
+
from .segmenter import Segmenter
|
|
@@ -1,68 +1,65 @@
|
|
|
1
|
-
#!/usr/bin/env python
|
|
2
|
-
# -*- coding: UTF-8 -*-
|
|
3
|
-
""" Orchestrate Sentence Segmentation """
|
|
4
|
-
|
|
5
|
-
|
|
6
|
-
from functools import lru_cache
|
|
7
|
-
|
|
8
|
-
from
|
|
9
|
-
from
|
|
10
|
-
from
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
f"\tTotal Time: {str(sw)}"]))
|
|
67
|
-
|
|
68
|
-
return paragraphs
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: UTF-8 -*-
|
|
3
|
+
""" Orchestrate Sentence Segmentation """
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
from functools import lru_cache
|
|
7
|
+
|
|
8
|
+
from fast_sentence_segment.core import BaseObject, Stopwatch
|
|
9
|
+
from fast_sentence_segment.svc import PerformParagraphSegmentation
|
|
10
|
+
from fast_sentence_segment.svc import PerformSentenceSegmentation
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Segmenter(BaseObject):
    """ Orchestrate Sentence Segmentation """

    def __init__(self):
        """ Change Log

        Created:
            30-Sept-2021
        """
        BaseObject.__init__(self, __name__)
        # bind the two pipeline stages once; each exposes a process() callable
        self._segment_paragraphs = PerformParagraphSegmentation().process
        self._segment_sentences = PerformSentenceSegmentation().process

    def _input_text(self,
                    input_text: str) -> list:
        # Split the text into paragraphs, then split each paragraph
        # into its sentences; one inner list per paragraph.
        paragraphs = []

        for paragraph in self._segment_paragraphs(input_text):
            paragraphs.append(self._segment_sentences(paragraph))

        return paragraphs

    # NOTE(review): lru_cache on an instance method keys the cache on `self`
    # and keeps every instance alive for the cache's lifetime (ruff B019).
    # Harmless for the module-level singleton usage seen in __init__.py,
    # but worth confirming if Segmenter instances are created freely.
    @lru_cache(maxsize=1024, typed=True)
    def input_text(self,
                   input_text: str) -> list:
        """Segment Input Text into Paragraphs and Sentences

        Args:
            input_text (str): An input string of any length or type

        Raises:
            ValueError: input must be a string

        Returns:
            list: returns a list of lists.
                Each outer list is a paragraph.
                Each inner list contains 1..* sentences
        """

        # NOTE(review): this type check only fires when debug logging is
        # enabled, so the ValueError documented above is NOT raised in a
        # non-debug configuration — confirm this gating is intentional.
        if self.isEnabledForDebug and not isinstance(input_text, str):
            raise ValueError(f"Expected str, got {type(input_text)}")

        sw = Stopwatch()

        paragraphs = self._input_text(input_text)

        if self.isEnabledForInfo:
            self.logger.info('\n'.join([
                "Segmentation of Input Text Complete",
                f"\tTotal Paragraphs: {len(paragraphs)}",
                f"\tTotal Time: {str(sw)}"]))

        return paragraphs
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
# -*- coding: UTF-8 -*-
|
|
2
|
+
"""CLI for fast-sentence-segment."""
|
|
3
|
+
|
|
4
|
+
import argparse
|
|
5
|
+
import logging
|
|
6
|
+
import sys
|
|
7
|
+
|
|
8
|
+
from fast_sentence_segment import segment_text
|
|
9
|
+
|
|
10
|
+
logging.disable(logging.CRITICAL)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def main():
    """Entry point for the ``segment`` console script.

    Resolves input from (in priority order) the --file flag, the
    positional argument, or piped stdin, then prints one sentence
    per output line.
    """
    parser = argparse.ArgumentParser(
        prog="segment",
        description="Segment text into sentences",
    )
    parser.add_argument(
        "text",
        nargs="?",
        help="Text to segment (or use stdin)",
    )
    parser.add_argument(
        "-f", "--file",
        help="Read text from file",
    )
    parser.add_argument(
        "-n", "--numbered",
        action="store_true",
        help="Number output lines",
    )
    args = parser.parse_args()

    # Resolve the input source: file flag wins, then the positional
    # argument, then piped stdin; otherwise show usage and exit non-zero.
    if args.file:
        with open(args.file, "r", encoding="utf-8") as handle:
            raw = handle.read()
    elif args.text:
        raw = args.text
    elif not sys.stdin.isatty():
        raw = sys.stdin.read()
    else:
        parser.print_help()
        sys.exit(1)

    # Flatten the paragraph structure into a single sentence stream
    # and print each sentence, optionally numbered from 1.
    for i, sentence in enumerate(segment_text(raw.strip(), flatten=True), 1):
        print(f"{i}. {sentence}" if args.numbered else sentence)


if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""Base object providing logging functionality."""
|
|
3
|
+
|
|
4
|
+
import logging
|
|
5
|
+
|
|
6
|
+
logging.basicConfig(
|
|
7
|
+
format='%(asctime)s : %(levelname)s : %(filename)s : %(funcName)s() : %(lineno)d : %(message)s',
|
|
8
|
+
level=logging.DEBUG)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class BaseObject:
    """Base class providing logging capabilities."""

    def __init__(self, component_name: str):
        """Attach a named logger and cache its level-enabled flags.

        Args:
            component_name (str): name used to look up the logger,
                typically the caller's __name__
        """
        log = logging.getLogger(component_name)
        self.logger = log
        # cache the level checks once so subclasses can cheaply guard
        # expensive log-message construction
        self.isEnabledForDebug = log.isEnabledFor(logging.DEBUG)
        self.isEnabledForInfo = log.isEnabledFor(logging.INFO)
        self.isEnabledForWarning = log.isEnabledFor(logging.WARNING)
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""Simple stopwatch for timing operations."""
|
|
3
|
+
|
|
4
|
+
import time
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class Stopwatch:
    """A simple stopwatch for measuring elapsed time."""

    def __init__(self):
        # starts immediately; _end stays None while the watch is running
        self._start = time.perf_counter()
        self._end = None

    @property
    def duration(self) -> float:
        """Elapsed seconds: start→stop if stopped, start→now if running."""
        # compare against None explicitly: the original truthiness test
        # (`if self._end`) would misreport a stopped watch as still
        # running if perf_counter() ever yielded exactly 0.0
        if self._end is not None:
            return self._end - self._start
        return time.perf_counter() - self._start

    @property
    def running(self) -> bool:
        """True while the stopwatch has not been stopped."""
        return self._end is None

    def restart(self):
        """Reset the start time and resume running.

        Returns:
            Stopwatch: self, for call chaining
        """
        self._start = time.perf_counter()
        self._end = None
        return self

    def stop(self):
        """Freeze the elapsed time; a no-op if already stopped.

        Returns:
            Stopwatch: self, for call chaining
        """
        if self.running:
            self._end = time.perf_counter()
        return self

    def __str__(self):
        """Human-readable duration in μs, ms, or seconds as appropriate."""
        ms = self.duration * 1000
        if ms >= 1000:
            return f'{ms / 1000:.2f}s'
        if ms >= 1:
            return f'{ms:.2f}ms'
        return f'{ms * 1000:.2f}μs'
|
|
@@ -1,6 +1,10 @@
|
|
|
1
|
-
from .
|
|
2
|
-
from .
|
|
3
|
-
from .
|
|
4
|
-
from .
|
|
5
|
-
from .
|
|
6
|
-
from .
|
|
1
|
+
from .abbreviation_merger import AbbreviationMerger
|
|
2
|
+
from .abbreviation_splitter import AbbreviationSplitter
|
|
3
|
+
from .title_name_merger import TitleNameMerger
|
|
4
|
+
from .bullet_point_cleaner import BulletPointCleaner
|
|
5
|
+
from .ellipsis_normalizer import EllipsisNormalizer
|
|
6
|
+
from .newlines_to_periods import NewlinesToPeriods
|
|
7
|
+
from .post_process_sentences import PostProcessStructure
|
|
8
|
+
from .question_exclamation_splitter import QuestionExclamationSplitter
|
|
9
|
+
from .spacy_doc_segmenter import SpacyDocSegmenter
|
|
10
|
+
from .numbered_list_normalizer import NumberedListNormalizer
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: UTF-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Merge sentences that spaCy incorrectly split at abbreviation boundaries.
|
|
5
|
+
|
|
6
|
+
When spaCy incorrectly splits after an abbreviation (e.g., "ext. 5" becomes
|
|
7
|
+
["ext.", "5. Ask for help."]), this component merges them back together
|
|
8
|
+
using specific known patterns.
|
|
9
|
+
|
|
10
|
+
Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import re
|
|
14
|
+
from typing import List, Optional, Tuple
|
|
15
|
+
|
|
16
|
+
from fast_sentence_segment.core import BaseObject
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
# Patterns where spaCy incorrectly splits after an abbreviation.
|
|
20
|
+
# Format: (ending_pattern, extract_pattern)
|
|
21
|
+
# - ending_pattern: regex to match end of current sentence
|
|
22
|
+
# - extract_pattern: regex to extract the portion to merge from next sentence
|
|
23
|
+
#
|
|
24
|
+
# The extract_pattern MUST have a capture group for the portion to merge.
|
|
25
|
+
# Whatever is NOT captured remains as a separate sentence.
|
|
26
|
+
|
|
27
|
+
MERGE_PATTERNS: List[Tuple[str, str]] = [

    # Each entry: (ending regex matched against the END of the current
    # sentence, extract regex anchored at the START of the next sentence;
    # group 1 is the text pulled back into the current sentence).

    # ext. 5, Ext. 123, EXT. 42
    (r"(?i)\bext\.$", r"^(\d+\.?)\s*"),

    # no. 5, No. 42, NO. 100
    # NOTE(review): also matches the English word "no" at sentence end
    # ("I said no." + "5 ...") — confirm this risk is acceptable.
    (r"(?i)\bno\.$", r"^(\d+\.?)\s*"),

    # vol. 3, Vol. 42, VOL. 1
    (r"(?i)\bvol\.$", r"^(\d+\.?)\s*"),

    # pt. 2, Pt. 1, PT. 3
    (r"(?i)\bpt\.$", r"^(\d+\.?)\s*"),

    # ch. 5, Ch. 10, CH. 3
    (r"(?i)\bch\.$", r"^(\d+\.?)\s*"),

    # sec. 3, Sec. 14, SEC. 2 (allows dotted sub-sections, e.g. 3.1)
    (r"(?i)\bsec\.$", r"^(\d+(?:\.\d+)?\.?)\s*"),

    # fig. 1, Fig. 3.2, FIG. 10
    (r"(?i)\bfig\.$", r"^(\d+(?:\.\d+)?\.?)\s*"),

    # p. 42, P. 100
    (r"(?i)\bp\.$", r"^(\d+\.?)\s*"),

    # pp. 42-50, PP. 100-110 (allows page ranges)
    (r"(?i)\bpp\.$", r"^(\d+(?:-\d+)?\.?)\s*"),

    # art. 5, Art. 12, ART. 1
    (r"(?i)\bart\.$", r"^(\d+\.?)\s*"),

]
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class AbbreviationMerger(BaseObject):
    """Merge sentences incorrectly split at abbreviation boundaries."""

    def __init__(self):
        """
        Created:
            27-Dec-2024
            craigtrim@gmail.com
        Reference:
            https://github.com/craigtrim/fast-sentence-segment/issues/3
        """
        BaseObject.__init__(self, __name__)
        # Compile once at construction; the patterns are applied to every
        # adjacent sentence pair in process()
        self._patterns = [
            (re.compile(ending), re.compile(extract))
            for ending, extract in MERGE_PATTERNS
        ]

    def _try_merge(self, current: str, next_sent: str) -> Optional[Tuple[str, str]]:
        """Try to merge two sentences based on known patterns.

        Args:
            current: Current sentence
            next_sent: Next sentence

        Returns:
            Tuple of (merged_sentence, remainder) if a merge applies,
            else None. remainder may be "" when the next sentence was
            fully consumed by the merge.
        """
        current = current.strip()
        next_sent = next_sent.strip()

        for ending_pattern, extract_pattern in self._patterns:
            if ending_pattern.search(current):
                match = extract_pattern.match(next_sent)
                if match:
                    # pull the captured portion back onto the current
                    # sentence; whatever follows stays a separate sentence
                    extracted = match.group(1)
                    remainder = next_sent[match.end():].strip()
                    return (current + " " + extracted, remainder)

        return None

    def process(self, sentences: List[str]) -> List[str]:
        """Process a list of sentences, merging incorrectly split ones.

        Args:
            sentences: List of sentences from spaCy

        Returns:
            List of sentences with incorrect splits merged
        """
        if not sentences:
            return sentences

        # Work on a local copy so the caller's list is never mutated.
        # (The previous implementation rebuilt the entire list with a
        # redundant insert PLUS an overwrite on every merge, leaving a
        # dead duplicate element behind and paying an O(n) copy each
        # time; a single in-place assignment is equivalent.)
        work = list(sentences)
        result: List[str] = []
        i = 0

        while i < len(work):
            current = work[i]

            # Check whether the current sentence should absorb the start
            # of the next one
            if i + 1 < len(work):
                merge_result = self._try_merge(current, work[i + 1])

                if merge_result:
                    merged, remainder = merge_result
                    result.append(merged)

                    if remainder:
                        # The unconsumed tail replaces the next sentence
                        # and is re-examined on the following iteration
                        # (it may itself need merging with its successor)
                        work[i + 1] = remainder
                        i += 1
                    else:
                        # Next sentence fully consumed; skip past it
                        i += 2
                    continue

            result.append(current)
            i += 1

        return result
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: UTF-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Split sentences at abbreviation boundaries.
|
|
5
|
+
|
|
6
|
+
When spaCy fails to detect a sentence boundary after an abbreviation
|
|
7
|
+
(e.g., "I woke at 6 a.m. It was dark."), this component splits the
|
|
8
|
+
sentence by detecting the pattern: abbreviation + space + Capital letter.
|
|
9
|
+
|
|
10
|
+
Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
import re
|
|
14
|
+
from typing import List
|
|
15
|
+
|
|
16
|
+
from fast_sentence_segment.core import BaseObject
|
|
17
|
+
from fast_sentence_segment.dmo.abbreviations import SENTENCE_ENDING_ABBREVIATIONS
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class AbbreviationSplitter(BaseObject):
    """Split sentences at abbreviation boundaries."""

    def __init__(self):
        """
        Created:
            27-Dec-2024
            craigtrim@gmail.com
        Reference:
            https://github.com/craigtrim/fast-sentence-segment/issues/3
        """
        BaseObject.__init__(self, __name__)
        self._pattern = self._build_pattern()

    def _build_pattern(self) -> re.Pattern:
        """Build the boundary-detection regex.

        The compiled pattern matches a known sentence-ending abbreviation
        (regex-escaped), then one or more spaces, then a capital letter —
        the signature of a sentence break spaCy missed. Title
        abbreviations (Dr., Mr., ...) are deliberately absent from the
        alternation because they usually precede names, not sentences.

        Returns:
            Compiled regex pattern
        """
        alternation = "|".join(
            re.escape(abbrev) for abbrev in SENTENCE_ENDING_ABBREVIATIONS)
        return re.compile(rf"({alternation})\s+([A-Z])")

    def _split_sentence(self, sentence: str) -> List[str]:
        """Split a single sentence at every abbreviation boundary found.

        Args:
            sentence: A sentence that may contain abbreviation boundaries

        Returns:
            List of one or more sentences
        """
        pieces: List[str] = []
        rest = sentence

        # Peel off one sentence per boundary; the boundary sits at the
        # end of the matched abbreviation (group 1), so the capital
        # letter stays with the following sentence.
        match = self._pattern.search(rest)
        while match:
            head = rest[:match.end(1)].strip()
            if head:
                pieces.append(head)
            rest = rest[match.end(1):].strip()
            match = self._pattern.search(rest)

        tail = rest.strip()
        if tail:
            pieces.append(tail)

        return pieces if pieces else [sentence]

    def process(self, sentences: List[str]) -> List[str]:
        """Process a list of sentences, splitting at abbreviation boundaries.

        Args:
            sentences: List of sentences from spaCy

        Returns:
            List of sentences with abbreviation boundaries properly split
        """
        out: List[str] = []
        for sentence in sentences:
            out.extend(self._split_sentence(sentence))
        return out
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# -*- coding: UTF-8 -*-
|
|
3
|
+
"""
|
|
4
|
+
Known abbreviations that end with periods.
|
|
5
|
+
|
|
6
|
+
Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
# Abbreviations that can END a sentence and be followed by a new sentence.
|
|
10
|
+
# When these are followed by a capital letter, it likely indicates a sentence break.
|
|
11
|
+
from typing import List
|
|
12
|
+
|
|
13
|
+
SENTENCE_ENDING_ABBREVIATIONS: List[str] = [
    # NOTE(review): consumers appear to match these verbatim (re.escape,
    # no IGNORECASE), which is presumably why upper/lower-case variants
    # are listed explicitly — confirm before deduplicating.

    # Time
    "a.m.",
    "p.m.",
    "A.M.",
    "P.M.",

    # Common sentence-enders
    "etc.",
    "ext.",

    # Academic degrees (when at end of sentence)
    "Ph.D.",
    "M.D.",
    "B.A.",
    "B.S.",
    "M.A.",
    "M.S.",
    "Ed.D.",
    "J.D.",
    "D.D.S.",
    "R.N.",

    # Business (when at end of sentence)
    "Inc.",
    "Corp.",
    "Ltd.",
    "Co.",
    "Bros.",

    # Countries/Regions (when at end of sentence)
    "U.S.",
    "U.S.A.",
    "U.K.",
    "U.N.",
    "E.U.",
    "D.C.",
]

# Abbreviations that are NEVER sentence-enders because they're
# typically followed by a name or noun (e.g., "Dr. Smith", "Mt. Everest").
# Do NOT split after these even when followed by a capital letter.
TITLE_ABBREVIATIONS: List[str] = [
    # Personal titles
    "Dr.",
    "Mr.",
    "Mrs.",
    "Ms.",
    "Prof.",
    "Sr.",
    "Jr.",
    "Rev.",
    "Gen.",
    "Col.",
    "Capt.",
    "Lt.",
    "Sgt.",
    "Rep.",
    "Sen.",
    "Gov.",
    "Pres.",
    "Hon.",

    # Geographic prefixes
    "St.",
    "Mt.",
    "Ft.",

    # Other prefixes (references that precede a number, e.g. "Fig. 3")
    "Fig.",
    "fig.",
    "Sec.",
    "sec.",
    "Ch.",
    "ch.",
    "Art.",
    "art.",
    "Vol.",
    "vol.",
    "No.",
    "no.",
    "Pt.",
    "pt.",
]
|