fast-sentence-segment 1.1.8-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_sentence_segment/cli.py +56 -0
- fast_sentence_segment/dmo/__init__.py +5 -1
- fast_sentence_segment/dmo/abbreviation_merger.py +146 -0
- fast_sentence_segment/dmo/abbreviation_splitter.py +95 -0
- fast_sentence_segment/dmo/abbreviations.py +96 -0
- fast_sentence_segment/dmo/ellipsis_normalizer.py +45 -0
- fast_sentence_segment/dmo/numbered_list_normalizer.py +19 -25
- fast_sentence_segment/dmo/question_exclamation_splitter.py +59 -0
- fast_sentence_segment/dmo/title_name_merger.py +152 -0
- fast_sentence_segment/svc/perform_sentence_segmentation.py +53 -17
- {fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/METADATA +49 -6
- fast_sentence_segment-1.2.0.dist-info/RECORD +27 -0
- fast_sentence_segment-1.2.0.dist-info/entry_points.txt +3 -0
- fast_sentence_segment/dmo/delimiters_to_periods.py +0 -37
- fast_sentence_segment-1.1.8.dist-info/RECORD +0 -20
- {fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/WHEEL +0 -0
- {fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/licenses/LICENSE +0 -0
fast_sentence_segment/cli.py ADDED
@@ -0,0 +1,56 @@
+# -*- coding: UTF-8 -*-
+"""CLI for fast-sentence-segment."""
+
+import argparse
+import logging
+import sys
+
+from fast_sentence_segment import segment_text
+
+logging.disable(logging.CRITICAL)
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="segment",
+        description="Segment text into sentences",
+    )
+    parser.add_argument(
+        "text",
+        nargs="?",
+        help="Text to segment (or use stdin)",
+    )
+    parser.add_argument(
+        "-f", "--file",
+        help="Read text from file",
+    )
+    parser.add_argument(
+        "-n", "--numbered",
+        action="store_true",
+        help="Number output lines",
+    )
+    args = parser.parse_args()
+
+    # Get input text
+    if args.file:
+        with open(args.file, "r", encoding="utf-8") as f:
+            text = f.read()
+    elif args.text:
+        text = args.text
+    elif not sys.stdin.isatty():
+        text = sys.stdin.read()
+    else:
+        parser.print_help()
+        sys.exit(1)
+
+    # Segment and output
+    sentences = segment_text(text.strip(), flatten=True)
+    for i, sentence in enumerate(sentences, 1):
+        if args.numbered:
+            print(f"{i}. {sentence}")
+        else:
+            print(sentence)
+
+
+if __name__ == "__main__":
+    main()
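The new `segment` entry point can also be exercised from Python by patching `sys.argv`. A minimal illustrative sketch (not part of the diff), assuming the 1.2.0 wheel and the `en_core_web_sm` model are installed:

```python
import sys

from fast_sentence_segment.cli import main

# Simulate: segment -n "First sentence. Second sentence."
sys.argv = ["segment", "-n", "First sentence. Second sentence."]
main()
# Expected output (illustrative):
# 1. First sentence.
# 2. Second sentence.
```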
fast_sentence_segment/dmo/__init__.py CHANGED
@@ -1,6 +1,10 @@
+from .abbreviation_merger import AbbreviationMerger
+from .abbreviation_splitter import AbbreviationSplitter
+from .title_name_merger import TitleNameMerger
 from .bullet_point_cleaner import BulletPointCleaner
-from .delimiters_to_periods import DelimitersToPeriods
+from .ellipsis_normalizer import EllipsisNormalizer
 from .newlines_to_periods import NewlinesToPeriods
 from .post_process_sentences import PostProcessStructure
+from .question_exclamation_splitter import QuestionExclamationSplitter
 from .spacy_doc_segmenter import SpacyDocSegmenter
 from .numbered_list_normalizer import NumberedListNormalizer
fast_sentence_segment/dmo/abbreviation_merger.py ADDED
@@ -0,0 +1,146 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Merge sentences that spaCy incorrectly split at abbreviation boundaries.
+
+When spaCy incorrectly splits after an abbreviation (e.g., "ext. 5" becomes
+["ext.", "5. Ask for help."]), this component merges them back together
+using specific known patterns.
+
+Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
+"""
+
+import re
+from typing import List, Optional, Tuple
+
+from fast_sentence_segment.core import BaseObject
+
+
+# Patterns where spaCy incorrectly splits after an abbreviation.
+# Format: (ending_pattern, extract_pattern)
+# - ending_pattern: regex to match end of current sentence
+# - extract_pattern: regex to extract the portion to merge from next sentence
+#
+# The extract_pattern MUST have a capture group for the portion to merge.
+# Whatever is NOT captured remains as a separate sentence.
+
+MERGE_PATTERNS: List[Tuple[str, str]] = [
+
+    # ext. 5, Ext. 123, EXT. 42
+    (r"(?i)\bext\.$", r"^(\d+\.?)\s*"),
+
+    # no. 5, No. 42, NO. 100
+    (r"(?i)\bno\.$", r"^(\d+\.?)\s*"),
+
+    # vol. 3, Vol. 42, VOL. 1
+    (r"(?i)\bvol\.$", r"^(\d+\.?)\s*"),
+
+    # pt. 2, Pt. 1, PT. 3
+    (r"(?i)\bpt\.$", r"^(\d+\.?)\s*"),
+
+    # ch. 5, Ch. 10, CH. 3
+    (r"(?i)\bch\.$", r"^(\d+\.?)\s*"),
+
+    # sec. 3, Sec. 14, SEC. 2
+    (r"(?i)\bsec\.$", r"^(\d+(?:\.\d+)?\.?)\s*"),
+
+    # fig. 1, Fig. 3.2, FIG. 10
+    (r"(?i)\bfig\.$", r"^(\d+(?:\.\d+)?\.?)\s*"),
+
+    # p. 42, P. 100
+    (r"(?i)\bp\.$", r"^(\d+\.?)\s*"),
+
+    # pp. 42-50, PP. 100-110
+    (r"(?i)\bpp\.$", r"^(\d+(?:-\d+)?\.?)\s*"),
+
+    # art. 5, Art. 12, ART. 1
+    (r"(?i)\bart\.$", r"^(\d+\.?)\s*"),
+
+]
+
+
+class AbbreviationMerger(BaseObject):
+    """Merge sentences incorrectly split at abbreviation boundaries."""
+
+    def __init__(self):
+        """
+        Created:
+            27-Dec-2024
+            craigtrim@gmail.com
+        Reference:
+            https://github.com/craigtrim/fast-sentence-segment/issues/3
+        """
+        BaseObject.__init__(self, __name__)
+        # Compile patterns for efficiency
+        self._patterns = [
+            (re.compile(ending), re.compile(extract))
+            for ending, extract in MERGE_PATTERNS
+        ]
+
+    def _try_merge(self, current: str, next_sent: str) -> Optional[Tuple[str, str]]:
+        """Try to merge two sentences based on known patterns.
+
+        Args:
+            current: Current sentence
+            next_sent: Next sentence
+
+        Returns:
+            Tuple of (merged_sentence, remainder) if merge needed, else None
+        """
+        current = current.strip()
+        next_sent = next_sent.strip()
+
+        for ending_pattern, extract_pattern in self._patterns:
+            if ending_pattern.search(current):
+                match = extract_pattern.match(next_sent)
+                if match:
+                    # Extract the portion to merge
+                    extracted = match.group(1)
+                    # Get the remainder (everything after the match)
+                    remainder = next_sent[match.end():].strip()
+                    # Build merged sentence
+                    merged = current + " " + extracted
+                    return (merged, remainder)
+
+        return None
+
+    def process(self, sentences: List[str]) -> List[str]:
+        """Process a list of sentences, merging incorrectly split ones.
+
+        Args:
+            sentences: List of sentences from spaCy
+
+        Returns:
+            List of sentences with incorrect splits merged
+        """
+        if not sentences:
+            return sentences
+
+        result = []
+        i = 0
+
+        while i < len(sentences):
+            current = sentences[i]
+
+            # Check if we should merge with next sentence
+            if i + 1 < len(sentences):
+                next_sent = sentences[i + 1]
+                merge_result = self._try_merge(current, next_sent)
+
+                if merge_result:
+                    merged, remainder = merge_result
+                    result.append(merged)
+
+                    # If there's a remainder, it becomes a new sentence to process
+                    if remainder:
+                        # Insert remainder back for processing
+                        sentences = sentences[:i+2] + [remainder] + sentences[i+2:]
+                        sentences[i+1] = remainder
+
+                    i += 2
+                    continue
+
+            result.append(current)
+            i += 1

+        return result
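To illustrate the merge behavior documented in the module docstring, a usage sketch (not part of the diff; assumes the wheel is installed):

```python
from fast_sentence_segment.dmo import AbbreviationMerger

merger = AbbreviationMerger()

# spaCy output that was split after "ext." is stitched back together,
# and the remainder becomes its own sentence:
print(merger.process(["Call me at ext.", "5. Ask for help."]))
# Expected (illustrative): ['Call me at ext. 5.', 'Ask for help.']
```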
fast_sentence_segment/dmo/abbreviation_splitter.py ADDED
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Split sentences at abbreviation boundaries.
+
+When spaCy fails to detect a sentence boundary after an abbreviation
+(e.g., "I woke at 6 a.m. It was dark."), this component splits the
+sentence by detecting the pattern: abbreviation + space + Capital letter.
+
+Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
+"""
+
+import re
+from typing import List
+
+from fast_sentence_segment.core import BaseObject
+from fast_sentence_segment.dmo.abbreviations import SENTENCE_ENDING_ABBREVIATIONS
+
+
+class AbbreviationSplitter(BaseObject):
+    """Split sentences at abbreviation boundaries."""
+
+    def __init__(self):
+        """
+        Created:
+            27-Dec-2024
+            craigtrim@gmail.com
+        Reference:
+            https://github.com/craigtrim/fast-sentence-segment/issues/3
+        """
+        BaseObject.__init__(self, __name__)
+        self._pattern = self._build_pattern()
+
+    def _build_pattern(self) -> re.Pattern:
+        """Build regex pattern to match abbreviation + capital letter.
+
+        Pattern matches:
+        - A known sentence-ending abbreviation (escaped for regex)
+        - Followed by one or more spaces
+        - Followed by a capital letter (start of new sentence)
+
+        Note: Title abbreviations (Dr., Mr., etc.) are excluded because
+        they are typically followed by names, not new sentences.
+
+        Returns:
+            Compiled regex pattern
+        """
+        escaped_abbrevs = [re.escape(abbr) for abbr in SENTENCE_ENDING_ABBREVIATIONS]
+        abbrev_pattern = "|".join(escaped_abbrevs)
+        pattern = rf"({abbrev_pattern})\s+([A-Z])"
+        return re.compile(pattern)
+
+    def _split_sentence(self, sentence: str) -> List[str]:
+        """Split a single sentence at abbreviation boundaries.
+
+        Args:
+            sentence: A sentence that may contain abbreviation boundaries
+
+        Returns:
+            List of one or more sentences
+        """
+        results = []
+        remaining = sentence
+
+        while True:
+            match = self._pattern.search(remaining)
+            if not match:
+                if remaining.strip():
+                    results.append(remaining.strip())
+                break
+
+            split_pos = match.end(1)
+
+            before = remaining[:split_pos].strip()
+            if before:
+                results.append(before)
+
+            remaining = remaining[split_pos:].strip()
+
+        return results if results else [sentence]
+
+    def process(self, sentences: List[str]) -> List[str]:
+        """Process a list of sentences, splitting at abbreviation boundaries.
+
+        Args:
+            sentences: List of sentences from spaCy
+
+        Returns:
+            List of sentences with abbreviation boundaries properly split
+        """
+        result = []
+        for sentence in sentences:
+            split_sentences = self._split_sentence(sentence)
+            result.extend(split_sentences)
+        return result
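A usage sketch for the splitter (not part of the diff; assumes the wheel is installed):

```python
from fast_sentence_segment.dmo import AbbreviationSplitter

splitter = AbbreviationSplitter()

# "a.m." followed by a space and a capital letter is treated as a boundary:
print(splitter.process(["I woke at 6 a.m. It was dark."]))
# Expected (illustrative): ['I woke at 6 a.m.', 'It was dark.']
```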
fast_sentence_segment/dmo/abbreviations.py ADDED
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Known abbreviations that end with periods.
+
+Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
+"""
+
+# Abbreviations that can END a sentence and be followed by a new sentence.
+# When these are followed by a capital letter, it likely indicates a sentence break.
+from typing import List
+
+SENTENCE_ENDING_ABBREVIATIONS: List[str] = [
+    # Time
+    "a.m.",
+    "p.m.",
+    "A.M.",
+    "P.M.",
+
+    # Common sentence-enders
+    "etc.",
+    "ext.",
+
+    # Academic degrees (when at end of sentence)
+    "Ph.D.",
+    "M.D.",
+    "B.A.",
+    "B.S.",
+    "M.A.",
+    "M.S.",
+    "Ed.D.",
+    "J.D.",
+    "D.D.S.",
+    "R.N.",
+
+    # Business (when at end of sentence)
+    "Inc.",
+    "Corp.",
+    "Ltd.",
+    "Co.",
+    "Bros.",
+
+    # Countries/Regions (when at end of sentence)
+    "U.S.",
+    "U.S.A.",
+    "U.K.",
+    "U.N.",
+    "E.U.",
+    "D.C.",
+]
+
+# Abbreviations that are NEVER sentence-enders because they're
+# typically followed by a name or noun (e.g., "Dr. Smith", "Mt. Everest").
+# Do NOT split after these even when followed by a capital letter.
+TITLE_ABBREVIATIONS: List[str] = [
+    # Personal titles
+    "Dr.",
+    "Mr.",
+    "Mrs.",
+    "Ms.",
+    "Prof.",
+    "Sr.",
+    "Jr.",
+    "Rev.",
+    "Gen.",
+    "Col.",
+    "Capt.",
+    "Lt.",
+    "Sgt.",
+    "Rep.",
+    "Sen.",
+    "Gov.",
+    "Pres.",
+    "Hon.",
+
+    # Geographic prefixes
+    "St.",
+    "Mt.",
+    "Ft.",
+
+    # Other prefixes
+    "Fig.",
+    "fig.",
+    "Sec.",
+    "sec.",
+    "Ch.",
+    "ch.",
+    "Art.",
+    "art.",
+    "Vol.",
+    "vol.",
+    "No.",
+    "no.",
+    "Pt.",
+    "pt.",
+]
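The two lists divide the abbreviation space: SENTENCE_ENDING_ABBREVIATIONS feed the splitter's boundary pattern, while TITLE_ABBREVIATIONS are deliberately left out of it. An illustrative sketch (not part of the diff):

```python
from fast_sentence_segment.dmo import AbbreviationSplitter

splitter = AbbreviationSplitter()

# "etc." is in SENTENCE_ENDING_ABBREVIATIONS, so a following capital splits:
print(splitter.process(["They sell apples, pears, etc. Then they close."]))
# Expected (illustrative): ['They sell apples, pears, etc.', 'Then they close.']

# "Dr." appears only in TITLE_ABBREVIATIONS, so no split happens here:
print(splitter.process(["Dr. Smith arrived."]))
# Expected (illustrative): ['Dr. Smith arrived.']
```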
fast_sentence_segment/dmo/ellipsis_normalizer.py ADDED
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+""" Normalize Ellipses to prevent them being stripped by cleanup routines """
+
+
+import re
+
+from fast_sentence_segment.core import BaseObject
+
+
+PLACEHOLDER = "〈ELLIPSIS〉"
+
+# Pattern: ... followed by space and capital letter
+BOUNDARY_PATTERN = re.compile(r'\.\.\.(\s+)([A-Z])')
+
+
+class EllipsisNormalizer(BaseObject):
+    """ Normalize Ellipses to prevent them being stripped by cleanup routines """
+
+    def __init__(self):
+        """
+        Created:
+            27-Dec-2024
+            craigtrim@gmail.com
+            * preserve ellipses through the pipeline
+              https://github.com/craigtrim/fast-sentence-segment/issues/3
+        """
+        BaseObject.__init__(self, __name__)
+
+    def process(self,
+                input_text: str,
+                denormalize: bool = False) -> str:
+
+        if not denormalize:
+            # "... [Capital]" → "〈ELLIPSIS〉. [Capital]" (adds period for spaCy to split)
+            input_text = BOUNDARY_PATTERN.sub(PLACEHOLDER + r'.\1\2', input_text)
+            # Remaining ellipses (mid-sentence): "..." → "〈ELLIPSIS〉"
+            input_text = input_text.replace("...", PLACEHOLDER)
+        else:
+            # "〈ELLIPSIS〉." → "..." (remove extra period added for boundary)
+            input_text = input_text.replace(PLACEHOLDER + ".", "...")
+            # Remaining placeholders: "〈ELLIPSIS〉" → "..."
+            input_text = input_text.replace(PLACEHOLDER, "...")
+
+        return input_text
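A round-trip sketch showing how the placeholder survives the pipeline (not part of the diff; assumes the wheel is installed):

```python
from fast_sentence_segment.dmo import EllipsisNormalizer

normalizer = EllipsisNormalizer()

normalized = normalizer.process("Wait... What happened?")
print(normalized)
# Expected (illustrative): 'Wait〈ELLIPSIS〉. What happened?'

# denormalize=True drops the helper period and restores the ellipsis:
print(normalizer.process(normalized, denormalize=True))
# Expected (illustrative): 'Wait... What happened?'
```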
fast_sentence_segment/dmo/numbered_list_normalizer.py CHANGED
@@ -3,24 +3,21 @@
 """ Normalize Numbered Lists to prevent False Positive Segmentation """
 
 
+import re
+
 from fast_sentence_segment.core import BaseObject
 
 
 class NumberedListNormalizer(BaseObject):
     """ Normalize Numbered Lists to prevent False Positive Segmentation """
 
-    __d_candidate_list_elements = {
-        "1. ": "1_ ",
-        "2. ": "2_ ",
-        "3. ": "3_ ",
-        "4. ": "4_ ",
-        "5. ": "5_ ",
-        "6. ": "6_ ",
-        "7. ": "7_ ",
-        "8. ": "8_ ",
-        "9. ": "9_ ",
-        "10. ": "10_ ",
-    }
+    # Pattern 1: start of string OR newline, followed by number, period, space
+    __normalize_line_start = re.compile(r'(^|\n\s*)(\d{1,2})\. ')
+    __denormalize_line_start = re.compile(r'(^|\n\s*)(\d{1,2})_ ')
+
+    # Pattern 2: inline numbered list ". N. " (period + space + number + period + space)
+    __normalize_inline = re.compile(r'(\. )(\d{1,2})\. ')
+    __denormalize_inline = re.compile(r'(\. )(\d{1,2})_ ')
 
     def __init__(self):
         """
@@ -28,6 +25,11 @@ class NumberedListNormalizer(BaseObject):
             19-Oct-2022
             craigtrim@gmail.com
             * https://github.com/craigtrim/fast-sentence-segment/issues/1
+        Updated:
+            27-Dec-2024
+            craigtrim@gmail.com
+            * fix to only match at line starts, not mid-sentence
+              https://github.com/craigtrim/fast-sentence-segment/issues/3
         """
         BaseObject.__init__(self, __name__)
 
@@ -36,18 +38,10 @@ class NumberedListNormalizer(BaseObject):
                 denormalize: bool = False) -> str:
 
         if not denormalize:
-
-
-
-
-
-        else:  # reverse the process
-            d_rev = {self.__d_candidate_list_elements[k]: k
-                     for k in self.__d_candidate_list_elements}
-
-            for candidate in d_rev:
-                if candidate in input_text:
-                    input_text = input_text.replace(
-                        candidate, d_rev[candidate])
+            input_text = self.__normalize_line_start.sub(r'\1\2_ ', input_text)
+            input_text = self.__normalize_inline.sub(r'\1\2_ ', input_text)
+        else:
+            input_text = self.__denormalize_line_start.sub(r'\1\2. ', input_text)
+            input_text = self.__denormalize_inline.sub(r'\1\2. ', input_text)
 
         return input_text
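The rewrite replaces the hard-coded "1. " through "10. " lookup table with regexes anchored to a line start or an inline ". N. " context, so numbers in other mid-sentence positions are no longer rewritten. A round-trip sketch (not part of the diff):

```python
from fast_sentence_segment.dmo import NumberedListNormalizer

normalizer = NumberedListNormalizer()

normalized = normalizer.process("1. First item\n2. Second item")
print(normalized)
# Expected (illustrative): '1_ First item\n2_ Second item'

print(normalizer.process(normalized, denormalize=True))
# Expected (illustrative): '1. First item\n2. Second item'
```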
fast_sentence_segment/dmo/question_exclamation_splitter.py ADDED
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+""" Split sentences at ? and ! followed by capital letter """
+
+
+import re
+from typing import List
+
+from fast_sentence_segment.core import BaseObject
+
+
+# Pattern: ? or ! followed by space and capital letter
+BOUNDARY_PATTERN = re.compile(r'([?!])(\s+)([A-Z])')
+
+
+class QuestionExclamationSplitter(BaseObject):
+    """ Split sentences at ? and ! followed by capital letter """
+
+    def __init__(self):
+        """
+        Created:
+            27-Dec-2024
+            craigtrim@gmail.com
+            * spaCy doesn't always split on ? and ! boundaries
+              https://github.com/craigtrim/fast-sentence-segment/issues/3
+        """
+        BaseObject.__init__(self, __name__)
+
+    def process(self, sentences: List[str]) -> List[str]:
+        """Split sentences that contain ? or ! followed by capital letter.
+
+        Args:
+            sentences: List of sentences from earlier processing
+
+        Returns:
+            List of sentences with ? and ! boundaries split
+        """
+        result = []
+        for sent in sentences:
+            # Split on pattern, keeping the punctuation with the first part
+            parts = BOUNDARY_PATTERN.split(sent)
+            if len(parts) == 1:
+                result.append(sent)
+            else:
+                # Reassemble: parts = [before, punct, space, capital, after, ...]
+                i = 0
+                while i < len(parts):
+                    if i + 3 < len(parts):
+                        # before + punct
+                        result.append(parts[i] + parts[i + 1])
+                        # capital + rest will be handled in next iteration
+                        parts[i + 4] = parts[i + 3] + parts[i + 4] if i + 4 < len(parts) else parts[i + 3]
+                        i += 4
+                    else:
+                        if parts[i].strip():
+                            result.append(parts[i])
+                        i += 1
+
+        return [s.strip() for s in result if s.strip()]
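Because `re.split` with three capture groups returns `[before, punct, space, capital, after, ...]`, the reassembly loop walks the parts in strides of four, gluing each punctuation mark onto the text before it and each capital onto the text after it. A usage sketch (not part of the diff):

```python
from fast_sentence_segment.dmo import QuestionExclamationSplitter

splitter = QuestionExclamationSplitter()

print(splitter.process(["Is it done? Yes it is! Great."]))
# Expected (illustrative): ['Is it done?', 'Yes it is!', 'Great.']
```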
fast_sentence_segment/dmo/title_name_merger.py ADDED
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+"""
+Merge sentences incorrectly split when a title abbreviation is followed by a
+single-word name ending in punctuation.
+
+When the question/exclamation splitter splits "Dr. Who?" into ["Dr.", "Who?"],
+this component merges them back together because a title + single capitalized
+word is almost certainly a name, not two sentences.
+
+Examples that should be merged:
+    ["Dr.", "Who?"] -> ["Dr. Who?"]
+    ["Mr.", "T!"] -> ["Mr. T!"]
+    ["Do you like Dr.", "Who?"] -> ["Do you like Dr. Who?"]
+
+Examples that should NOT be merged:
+    ["Dr.", "Where did he go?"] -> stays split (multi-word sentence)
+    ["Dr.", "who can help."] -> stays split (lowercase = not a name)
+
+Reference: https://github.com/craigtrim/fast-sentence-segment/issues/3
+"""
+
+import re
+from typing import List, Optional, Tuple
+
+from fast_sentence_segment.core import BaseObject
+from fast_sentence_segment.dmo.abbreviations import TITLE_ABBREVIATIONS
+
+
+# Subset of titles that commonly precede names (not geographic like Mt., St.)
+PERSONAL_TITLES: List[str] = [
+    "Dr.",
+    "Mr.",
+    "Mrs.",
+    "Ms.",
+    "Prof.",
+    "Sr.",
+    "Jr.",
+    "Rev.",
+    "Gen.",
+    "Col.",
+    "Capt.",
+    "Lt.",
+    "Sgt.",
+    "Rep.",
+    "Sen.",
+    "Gov.",
+    "Pres.",
+    "Hon.",
+]
+
+
+class TitleNameMerger(BaseObject):
+    """Merge sentences incorrectly split at title + single-word name boundaries."""
+
+    def __init__(self):
+        """
+        Created:
+            28-Dec-2024
+            craigtrim@gmail.com
+        Reference:
+            https://github.com/craigtrim/fast-sentence-segment/issues/3
+        """
+        BaseObject.__init__(self, __name__)
+
+        # Build pattern to match sentences ending with a title abbreviation
+        # Escape dots in abbreviations for regex
+        escaped_titles = [re.escape(t) for t in PERSONAL_TITLES]
+        titles_pattern = "|".join(escaped_titles)
+        self._ending_with_title = re.compile(rf"({titles_pattern})$", re.IGNORECASE)
+
+        # Pattern to match a single capitalized word followed by sentence-ending punctuation
+        # at the START of a sentence (may have more content after)
+        # Matches: "Who?", "Who? More text", "T!", "T! More", "Who?." (with trailing period), etc.
+        # Captures the word+punctuation part for extraction
+        # Note: The spaCy segmenter may add a trailing period to sentences ending in ?/!
+        self._single_word_with_punct = re.compile(r"^([A-Z][a-zA-Z\-]*[?!]+\.?)\s*(.*)$")
+
+    def _try_merge(self, current: str, next_sent: str) -> Optional[Tuple[str, str]]:
+        """Try to merge two sentences if they match the title + single-word name pattern.
+
+        Args:
+            current: Current sentence (may end with title abbreviation)
+            next_sent: Next sentence (may start with single-word name with punctuation)
+
+        Returns:
+            Tuple of (merged_sentence, remainder) if merge needed, else None
+        """
+        current = current.strip()
+        next_sent = next_sent.strip()
+
+        # Current sentence must end with a title abbreviation
+        if not self._ending_with_title.search(current):
+            return None
+
+        # Next sentence must start with a single capitalized word with ?/! punctuation
+        match = self._single_word_with_punct.match(next_sent)
+        if not match:
+            return None
+
+        # Extract the name part and any remainder
+        name_part = match.group(1)
+        remainder = match.group(2).strip() if match.group(2) else ""
+
+        # Clean up trailing period from name if present (added by spaCy)
+        if name_part.endswith('?.') or name_part.endswith('!.'):
+            name_part = name_part[:-1]
+
+        merged = current + " " + name_part
+        return (merged, remainder)
+
+    def process(self, sentences: List[str]) -> List[str]:
+        """Process a list of sentences, merging title + single-word name splits.
+
+        Args:
+            sentences: List of sentences
+
+        Returns:
+            List of sentences with title+name splits merged
+        """
+        if not sentences or len(sentences) < 2:
+            return sentences
+
+        # Work with a mutable copy
+        sentences = list(sentences)
+        result = []
+        i = 0
+
+        while i < len(sentences):
+            current = sentences[i]
+
+            # Check if we should merge with next sentence
+            if i + 1 < len(sentences):
+                next_sent = sentences[i + 1]
+                merge_result = self._try_merge(current, next_sent)
+
+                if merge_result:
+                    merged, remainder = merge_result
+                    result.append(merged)
+
+                    # If there's a remainder, replace next_sent with it for further processing
+                    if remainder:
+                        sentences[i + 1] = remainder
+                        i += 1  # Move to process the remainder (now at i+1, will be i after increment)
+                    else:
+                        i += 2  # Skip both merged sentences
+                    continue
+
+            result.append(current)
+            i += 1
+
+        return result
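A usage sketch covering both the merge and the no-merge cases from the docstring (not part of the diff):

```python
from fast_sentence_segment.dmo import TitleNameMerger

merger = TitleNameMerger()

# Title + single capitalized word with ?/! is treated as a name:
print(merger.process(["Do you like Dr.", "Who?"]))
# Expected (illustrative): ['Do you like Dr. Who?']

# A multi-word follow-on sentence is left alone:
print(merger.process(["Dr.", "Where did he go?"]))
# Expected (illustrative): ['Dr.', 'Where did he go?']
```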
fast_sentence_segment/svc/perform_sentence_segmentation.py CHANGED
@@ -7,10 +7,14 @@ import spacy
 
 from fast_sentence_segment.core import BaseObject
 
+from fast_sentence_segment.dmo import AbbreviationMerger
+from fast_sentence_segment.dmo import AbbreviationSplitter
+from fast_sentence_segment.dmo import TitleNameMerger
+from fast_sentence_segment.dmo import EllipsisNormalizer
 from fast_sentence_segment.dmo import NewlinesToPeriods
-from fast_sentence_segment.dmo import DelimitersToPeriods
 from fast_sentence_segment.dmo import BulletPointCleaner
 from fast_sentence_segment.dmo import NumberedListNormalizer
+from fast_sentence_segment.dmo import QuestionExclamationSplitter
 from fast_sentence_segment.dmo import SpacyDocSegmenter
 from fast_sentence_segment.dmo import PostProcessStructure
 
@@ -31,18 +35,38 @@ class PerformSentenceSegmentation(BaseObject):
             craigtrim@gmail.com
             * add numbered-list normalization
               https://github.com/craigtrim/fast-sentence-segment/issues/1
+        Updated:
+            27-Dec-2024
+            craigtrim@gmail.com
+            * add abbreviation-aware sentence splitting
+              https://github.com/craigtrim/fast-sentence-segment/issues/3
         """
         BaseObject.__init__(self, __name__)
         if not self.__nlp:
             self.__nlp = spacy.load("en_core_web_sm")
 
-        self._delimiters_to_periods = DelimitersToPeriods.process
         self._newlines_to_periods = NewlinesToPeriods.process
         self._normalize_numbered_lists = NumberedListNormalizer().process
+        self._normalize_ellipses = EllipsisNormalizer().process
         self._clean_bullet_points = BulletPointCleaner.process
        self._spacy_segmenter = SpacyDocSegmenter(self.__nlp).process
+        self._abbreviation_merger = AbbreviationMerger().process
+        self._abbreviation_splitter = AbbreviationSplitter().process
+        self._question_exclamation_splitter = QuestionExclamationSplitter().process
+        self._title_name_merger = TitleNameMerger().process
         self._post_process = PostProcessStructure().process
 
+    def _denormalize(self, text: str) -> str:
+        """ Restore normalized placeholders to original form """
+        text = self._normalize_numbered_lists(text, denormalize=True)
+        text = self._normalize_ellipses(text, denormalize=True)
+        return text
+
+    @staticmethod
+    def _has_sentence_punct(text: str) -> bool:
+        """ Check if text has sentence-ending punctuation """
+        return "." in text or "?" in text or "!" in text
+
     @staticmethod
     def _clean_punctuation(input_text: str) -> str:
         """ Purpose:
@@ -67,33 +91,41 @@ class PerformSentenceSegmentation(BaseObject):
     def _process(self,
                  input_text: str) -> list:
 
-
-
-            input_text=input_text)
-
-        input_text = self._delimiters_to_periods(
-            delimiter=';',
-            input_text=input_text)
+        # Normalize tabs to spaces
+        input_text = input_text.replace('\t', ' ')
 
         input_text = self._normalize_numbered_lists(input_text)
+        input_text = self._normalize_ellipses(input_text)
 
         input_text = self._newlines_to_periods(input_text)
 
         input_text = self._clean_spacing(input_text)
-        if
-            return [input_text]
+        if not self._has_sentence_punct(input_text):
+            return [self._denormalize(input_text)]
 
         input_text = self._clean_bullet_points(input_text)
-        if
-            return [input_text]
+        if not self._has_sentence_punct(input_text):
+            return [self._denormalize(input_text)]
 
         input_text = self._clean_punctuation(input_text)
-        if
-            return [input_text]
+        if not self._has_sentence_punct(input_text):
+            return [self._denormalize(input_text)]
 
         sentences = self._spacy_segmenter(input_text)
-        if
-            return [input_text]
+        if not self._has_sentence_punct(input_text):
+            return [self._denormalize(input_text)]
+
+        # Merge sentences incorrectly split at abbreviations (issue #3)
+        sentences = self._abbreviation_merger(sentences)
+
+        # Merge title + single-word name splits (e.g., "Dr." + "Who?" -> "Dr. Who?")
+        sentences = self._title_name_merger(sentences)
+
+        # Split sentences at abbreviation boundaries (issue #3)
+        sentences = self._abbreviation_splitter(sentences)
+
+        # Split sentences at ? and ! boundaries (issue #3)
+        sentences = self._question_exclamation_splitter(sentences)
 
         sentences = self._post_process(sentences)
 
@@ -101,6 +133,10 @@ class PerformSentenceSegmentation(BaseObject):
             self._normalize_numbered_lists(x, denormalize=True)
             for x in sentences
         ]
+        sentences = [
+            self._normalize_ellipses(x, denormalize=True)
+            for x in sentences
+        ]
 
         return sentences
 
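In the updated `_process`, the merge passes run before the split passes, and every early return now goes through `_denormalize` so placeholders never leak to callers. An end-to-end sketch (not part of the diff; exact splits depend on the installed spaCy model):

```python
from fast_sentence_segment import segment_text

text = "I woke at 6 a.m. It was dark. Did you see Dr. Who? The show..."
print(segment_text(text, flatten=True))
# Illustrative expectation: the "a.m." boundary is split, "Dr. Who?" stays
# together, and the trailing ellipsis survives denormalization.
```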
{fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-sentence-segment
-Version: 1.1.8
+Version: 1.2.0
 Summary: Fast and Efficient Sentence Segmentation
 License: MIT
 License-File: LICENSE
@@ -30,17 +30,25 @@ Description-Content-Type: text/markdown
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://opensource.org/licenses/MIT)
-[](https://spacy.io/)
 
-Fast and efficient sentence segmentation using spaCy. Handles complex edge cases like abbreviations (Dr., Mr., etc.), quoted text, and multi-paragraph documents.
+Fast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.
+
+## Why This Library?
+
+1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.
+2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy's shortcomings.
 
 ## Features
 
 - **Paragraph-aware segmentation**: Returns sentences grouped by paragraph
-- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc." without false splits
+- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits
+- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries
+- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters
 - **Cached processing**: LRU cache for repeated text processing
 - **Flexible output**: Nested lists (by paragraph) or flattened list of sentences
 - **Bullet point & numbered list normalization**: Cleans common list formats
+- **CLI tool**: Command-line interface for quick segmentation
 
 ## Installation
 
@@ -106,6 +114,24 @@ segmenter = Segmenter()
 results = segmenter.input_text("Your text here.")
 ```
 
+### Command Line Interface
+
+Segment text directly from the terminal:
+
+```bash
+# Direct text input
+segment "Hello world. How are you? I am fine."
+
+# Numbered output
+segment -n "First sentence. Second sentence."
+
+# From stdin
+echo "Some text here. Another sentence." | segment
+
+# From file
+segment -f document.txt
+```
+
 ## API Reference
 
 | Function | Parameters | Returns | Description |
@@ -113,6 +139,14 @@ results = segmenter.input_text("Your text here.")
 | `segment_text()` | `input_text: str`, `flatten: bool = False` | `list` | Main entry point for segmentation |
 | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |
 
+### CLI Options
+
+| Option | Description |
+|--------|-------------|
+| `text` | Text to segment (positional argument) |
+| `-f, --file` | Read text from file |
+| `-n, --numbered` | Number output lines |
+
 ## Why Nested Lists?
 
 The segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph's sentences. This is useful for:
@@ -125,10 +159,19 @@ Use `flatten=True` when you only need sentences without paragraph context.
 
 ## Requirements
 
-- Python 3.
-- spaCy 3.
+- Python 3.9+
+- spaCy 3.8+
 - en_core_web_sm spaCy model
 
+## How It Works
+
+This library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy's default behavior is incorrect:
+
+1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders
+2. **spaCy segmentation**: Use spaCy's sentence boundary detection
+3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns
+4. **Denormalization**: Restore placeholders to original text
+
 ## License
 
 MIT License - see [LICENSE](LICENSE) for details.
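Not part of the diff: to make the nested-versus-flat output distinction concrete, an illustrative call (shapes shown in comments; exact splits depend on the spaCy model):

```python
from fast_sentence_segment import segment_text

text = "First paragraph. Two sentences here.\n\nSecond paragraph!"
print(segment_text(text))
# Illustrative shape: [['First paragraph.', 'Two sentences here.'], ['Second paragraph!']]
print(segment_text(text, flatten=True))
# Illustrative shape: ['First paragraph.', 'Two sentences here.', 'Second paragraph!']
```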
fast_sentence_segment-1.2.0.dist-info/RECORD ADDED
@@ -0,0 +1,27 @@
+fast_sentence_segment/__init__.py,sha256=HTONyC0JLVWTAHyvJO6rMINmxymbfMpARtGeRw5iIsQ,359
+fast_sentence_segment/bp/__init__.py,sha256=j2-WfQ9WwVuXeGSjvV6XLVwEdvau8sdAQe4Pa4DrYi8,33
+fast_sentence_segment/bp/segmenter.py,sha256=UW6DguPgA56h-pPYRsfJhjIzBe40j6NdjkwYxamASyA,1928
+fast_sentence_segment/cli.py,sha256=X2dMLkfc2dtheig62wKC75AohfW0Y9oTU0ORhGUFkbQ,1250
+fast_sentence_segment/core/__init__.py,sha256=uoBersYyVStJ5a8zJpQz1GDGaloEdAv2jGHw1292hRM,108
+fast_sentence_segment/core/base_object.py,sha256=AYr7yzusIwawjbKdvcv4yTEnhmx6M583kDZzhzPOmq4,635
+fast_sentence_segment/core/stopwatch.py,sha256=hE6hMz2q6rduaKi58KZmiAL-lRtyh_wWCANhl4KLkRQ,879
+fast_sentence_segment/dmo/__init__.py,sha256=E70tkpdHu86KP2dwBX5Dy5D7eNiU6fzucrfDJOY1ui4,551
+fast_sentence_segment/dmo/abbreviation_merger.py,sha256=tCXM6yCfMryJvMIVWIxP_EocoibZi8vohFzJ5tvMYr0,4432
+fast_sentence_segment/dmo/abbreviation_splitter.py,sha256=03mSyJcLooNyIjXx6mPlrnjmKgZW-uhUIqG4U-MbIGw,2981
+fast_sentence_segment/dmo/abbreviations.py,sha256=7mpEoOnw5MH8URYmmpxaYs3Wc2eqy4pC0hAnYfYSdck,1639
+fast_sentence_segment/dmo/bullet_point_cleaner.py,sha256=WOZQRWXiiyRi8rOuEIw36EmkaXmATHL9_Dxb2rderw4,1606
+fast_sentence_segment/dmo/ellipsis_normalizer.py,sha256=lHs9dLFfKJe-2vFNe17Hik90g3_kXX347OzGP_IOT08,1521
+fast_sentence_segment/dmo/newlines_to_periods.py,sha256=PUrXreqZWiITINfoJL5xRRlXJH6noH0cdXtW1EqAh8I,1517
+fast_sentence_segment/dmo/numbered_list_normalizer.py,sha256=q0sOCW8Jkn2vTXlUcVhmDvYES3yvJx1oUVl_8y7eL4E,1672
+fast_sentence_segment/dmo/post_process_sentences.py,sha256=5jxG3TmFjxIExMPLhnCB5JT1lXQvFU9r4qQGoATGrWk,916
+fast_sentence_segment/dmo/question_exclamation_splitter.py,sha256=cRsWRu8zb6wOWG-BjMahHfz4YGutKiV9lW7dE-q3tgc,2006
+fast_sentence_segment/dmo/spacy_doc_segmenter.py,sha256=0icAkSQwAUQo3VYqQ2PUjW6-MOU5RNCGPX3-fB5YfCc,2554
+fast_sentence_segment/dmo/title_name_merger.py,sha256=zbG04_VjwM8TtT8LhavvmZqIZL_2xgT2OTxWkK_Zt1s,5133
+fast_sentence_segment/svc/__init__.py,sha256=9B12mXxBnlalH4OAm1AMLwUMa-RLi2ilv7qhqv26q7g,144
+fast_sentence_segment/svc/perform_paragraph_segmentation.py,sha256=zLKw9rSzb0NNfx4MyEeoGrHwhxTtH5oDrYcAL2LMVHY,1378
+fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=dqGxFsJoP6ox_MJwtB85R9avEbBAR4x9YKaRaQ5fAXo,5723
+fast_sentence_segment-1.2.0.dist-info/METADATA,sha256=05V3aFKHCD9JaYN8va_vIuMtaoAbGmKgFAOUDJWfM80,6405
+fast_sentence_segment-1.2.0.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+fast_sentence_segment-1.2.0.dist-info/entry_points.txt,sha256=mDiRuKOZlOeqmtH1eZwqGEGM6KUh0RTzwyETGMpxSDI,58
+fast_sentence_segment-1.2.0.dist-info/licenses/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
+fast_sentence_segment-1.2.0.dist-info/RECORD,,
fast_sentence_segment/dmo/delimiters_to_periods.py DELETED
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: UTF-8 -*-
-""" Convert Delimiters into Periods """
-
-
-from fast_sentence_segment.core import BaseObject
-
-
-class DelimitersToPeriods(BaseObject):
-    """ Convert Delimiters into Periods """
-
-    def __init__(self):
-        """
-        Created:
-            30-Sept-2021
-        """
-        BaseObject.__init__(self, __name__)
-
-    @staticmethod
-    def process(input_text: str,
-                delimiter: str):
-        """
-        Purpose:
-            Take a CSV list and transform to sentences
-        :param input_text:
-        :return:
-        """
-        total_len = len(input_text)
-        total_delims = input_text.count(delimiter)
-
-        if total_delims == 0:
-            return input_text
-
-        if total_delims / total_len > 0.04:
-            return input_text.replace(delimiter, '.')
-
-        return input_text
fast_sentence_segment-1.1.8.dist-info/RECORD DELETED
@@ -1,20 +0,0 @@
-fast_sentence_segment/__init__.py,sha256=HTONyC0JLVWTAHyvJO6rMINmxymbfMpARtGeRw5iIsQ,359
-fast_sentence_segment/bp/__init__.py,sha256=j2-WfQ9WwVuXeGSjvV6XLVwEdvau8sdAQe4Pa4DrYi8,33
-fast_sentence_segment/bp/segmenter.py,sha256=UW6DguPgA56h-pPYRsfJhjIzBe40j6NdjkwYxamASyA,1928
-fast_sentence_segment/core/__init__.py,sha256=uoBersYyVStJ5a8zJpQz1GDGaloEdAv2jGHw1292hRM,108
-fast_sentence_segment/core/base_object.py,sha256=AYr7yzusIwawjbKdvcv4yTEnhmx6M583kDZzhzPOmq4,635
-fast_sentence_segment/core/stopwatch.py,sha256=hE6hMz2q6rduaKi58KZmiAL-lRtyh_wWCANhl4KLkRQ,879
-fast_sentence_segment/dmo/__init__.py,sha256=Tz1ICwhQLimQ3KoZM4f5HQFFgYamnMAn4opcyIB7Chk,328
-fast_sentence_segment/dmo/bullet_point_cleaner.py,sha256=WOZQRWXiiyRi8rOuEIw36EmkaXmATHL9_Dxb2rderw4,1606
-fast_sentence_segment/dmo/delimiters_to_periods.py,sha256=cshN3TU2YzCHWVGGDnd_FOQJluyRExpYPrpZ_BQQeko,857
-fast_sentence_segment/dmo/newlines_to_periods.py,sha256=PUrXreqZWiITINfoJL5xRRlXJH6noH0cdXtW1EqAh8I,1517
-fast_sentence_segment/dmo/numbered_list_normalizer.py,sha256=q0cLiTUt6ITX_cCwCRrIR7A3o92WOKVWpFWs8P5Gu5M,1549
-fast_sentence_segment/dmo/post_process_sentences.py,sha256=5jxG3TmFjxIExMPLhnCB5JT1lXQvFU9r4qQGoATGrWk,916
-fast_sentence_segment/dmo/spacy_doc_segmenter.py,sha256=0icAkSQwAUQo3VYqQ2PUjW6-MOU5RNCGPX3-fB5YfCc,2554
-fast_sentence_segment/svc/__init__.py,sha256=9B12mXxBnlalH4OAm1AMLwUMa-RLi2ilv7qhqv26q7g,144
-fast_sentence_segment/svc/perform_paragraph_segmentation.py,sha256=zLKw9rSzb0NNfx4MyEeoGrHwhxTtH5oDrYcAL2LMVHY,1378
-fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=ZT231A5CJ5v9tGpYbny_eq8SzgEdEwVTCVu9OV_UO9g,3839
-fast_sentence_segment-1.1.8.dist-info/METADATA,sha256=mCHUoRPcbOcSlffv6-da66W2KZU6P5OUBQFnGFgJhQU,4817
-fast_sentence_segment-1.1.8.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
-fast_sentence_segment-1.1.8.dist-info/licenses/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
-fast_sentence_segment-1.1.8.dist-info/RECORD,,
{fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/WHEEL RENAMED
File without changes
{fast_sentence_segment-1.1.8.dist-info → fast_sentence_segment-1.2.0.dist-info}/licenses/LICENSE RENAMED
File without changes