fast-sentence-segment 1.2.1__tar.gz → 1.3.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/PKG-INFO +27 -33
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/README.md +17 -27
- fast_sentence_segment-1.3.0/fast_sentence_segment/__init__.py +51 -0
- fast_sentence_segment-1.3.0/fast_sentence_segment/cli.py +144 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/__init__.py +4 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviations.py +55 -2
- fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/group_quoted_sentences.py +141 -0
- fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/normalize_quotes.py +80 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/spacy_doc_segmenter.py +24 -11
- fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/strip_trailing_period_after_quote.py +70 -0
- fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py +34 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/perform_sentence_segmentation.py +5 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/pyproject.toml +15 -2
- fast_sentence_segment-1.3.0/setup.py +39 -0
- fast_sentence_segment-1.2.1/fast_sentence_segment/__init__.py +0 -18
- fast_sentence_segment-1.2.1/fast_sentence_segment/cli.py +0 -56
- fast_sentence_segment-1.2.1/setup.py +0 -38
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/LICENSE +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/bp/__init__.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/bp/segmenter.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/__init__.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/base_object.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/stopwatch.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviation_merger.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviation_splitter.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/bullet_point_cleaner.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/ellipsis_normalizer.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/newlines_to_periods.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/numbered_list_normalizer.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/post_process_sentences.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/question_exclamation_splitter.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/title_name_merger.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/__init__.py +0 -0
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/perform_paragraph_segmentation.py +0 -0
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/PKG-INFO
RENAMED
@@ -1,25 +1,29 @@
-Metadata-Version: 2.
+Metadata-Version: 2.1
 Name: fast-sentence-segment
-Version: 1.2.1
+Version: 1.3.0
 Summary: Fast and Efficient Sentence Segmentation
+Home-page: https://github.com/craigtrim/fast-sentence-segment
 License: MIT
-License-File: LICENSE
 Keywords: nlp,text,preprocess,segment
 Author: Craig Trim
 Author-email: craigtrim@gmail.com
 Maintainer: Craig Trim
 Maintainer-email: craigtrim@gmail.com
 Requires-Python: >=3.9,<4.0
-Classifier: Development Status ::
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
 Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Classifier:
-Classifier: Programming Language :: Python :: 3.14
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Linguistic
+Classifier: Typing :: Typed
 Requires-Dist: spacy (>=3.8.0,<4.0.0)
 Project-URL: Bug Tracker, https://github.com/craigtrim/fast-sentence-segment/issues
 Project-URL: Repository, https://github.com/craigtrim/fast-sentence-segment
@@ -31,6 +35,8 @@ Description-Content-Type: text/markdown
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://opensource.org/licenses/MIT)
 [](https://spacy.io/)
+[](https://pepy.tech/project/fast-sentence-segment)
+[](https://pepy.tech/project/fast-sentence-segment)
 
 Fast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.
 
@@ -142,48 +148,36 @@ results = segmenter.input_text("Your text here.")
 
 ### Command Line Interface
 
-Segment text directly from the terminal:
-
 ```bash
-#
-
-```
+# Inline text
+segment "Gandalf paused... You shall not pass! The Balrog roared."
 
-
-Have you seen Dr. Who?
-It's brilliant!
-```
+# Pipe from stdin
+echo "Have you seen Dr. Who? It's brilliant!" | segment
 
-```bash
 # Numbered output
-segment -n
-```
+segment -n -f silmarillion.txt
 
-
-
-2. You shall not pass!
-3. The Balrog roared.
-```
+# File-to-file (one sentence per line)
+segment-file --input-file book.txt --output-file sentences.txt
 
-
-
-segment -f silmarillion.txt
+# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
+segment-file --input-file book.txt --output-file sentences.txt --unwrap
 ```
 
 ## API Reference
 
 | Function | Parameters | Returns | Description |
 |----------|------------|---------|-------------|
-| `segment_text()` | `input_text: str`, `flatten: bool = False` | `list` | Main entry point for segmentation |
+| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
 | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |
 
-### CLI
+### CLI Commands
 
-
-
-| `text` |
-
-| `-n, --numbered` | Number output lines |
+| Command | Description |
+|---------|-------------|
+| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
+| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
 
 ## Why Nested Lists?
 
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/README.md
RENAMED
@@ -4,6 +4,8 @@
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://opensource.org/licenses/MIT)
 [](https://spacy.io/)
+[](https://pepy.tech/project/fast-sentence-segment)
+[](https://pepy.tech/project/fast-sentence-segment)
 
 Fast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.
 
@@ -115,48 +117,36 @@ results = segmenter.input_text("Your text here.")
 
 ### Command Line Interface
 
-Segment text directly from the terminal:
-
 ```bash
-#
-
-```
+# Inline text
+segment "Gandalf paused... You shall not pass! The Balrog roared."
 
-
-Have you seen Dr. Who?
-It's brilliant!
-```
+# Pipe from stdin
+echo "Have you seen Dr. Who? It's brilliant!" | segment
 
-```bash
 # Numbered output
-segment -n
-```
+segment -n -f silmarillion.txt
 
-
-
-2. You shall not pass!
-3. The Balrog roared.
-```
+# File-to-file (one sentence per line)
+segment-file --input-file book.txt --output-file sentences.txt
 
-
-
-segment -f silmarillion.txt
+# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
+segment-file --input-file book.txt --output-file sentences.txt --unwrap
 ```
 
 ## API Reference
 
 | Function | Parameters | Returns | Description |
 |----------|------------|---------|-------------|
-| `segment_text()` | `input_text: str`, `flatten: bool = False` | `list` | Main entry point for segmentation |
+| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
 | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |
 
-### CLI
+### CLI Commands
 
-
-
-| `text` |
-
-| `-n, --numbered` | Number output lines |
+| Command | Description |
+|---------|-------------|
+| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
+| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
 
 ## Why Nested Lists?
 
fast_sentence_segment-1.3.0/fast_sentence_segment/__init__.py
ADDED
@@ -0,0 +1,51 @@
+from .bp import *
+from .svc import *
+from .dmo import *
+
+from .bp.segmenter import Segmenter
+from .dmo.unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
+from .dmo.normalize_quotes import normalize_quotes
+
+segment = Segmenter().input_text
+
+
+def segment_text(
+    input_text: str,
+    flatten: bool = False,
+    unwrap: bool = False,
+    normalize: bool = True,
+) -> list:
+    """Segment text into sentences.
+
+    Args:
+        input_text: The text to segment.
+        flatten: If True, return a flat list of sentences instead of
+            nested paragraphs.
+        unwrap: If True, unwrap hard-wrapped lines (e.g., Project
+            Gutenberg e-texts) before segmenting.
+        normalize: If True (default), normalize unicode quote variants
+            to ASCII equivalents before segmenting. Ensures consistent
+            quote characters for downstream processing.
+
+    Returns:
+        List of sentences (if flatten=True) or list of paragraph
+        groups, each containing a list of sentences.
+
+    Related GitHub Issue:
+        #6 - Review findings from Issue #5
+        https://github.com/craigtrim/fast-sentence-segment/issues/6
+    """
+    if unwrap:
+        input_text = unwrap_hard_wrapped_text(input_text)
+
+    if normalize:
+        input_text = normalize_quotes(input_text)
+
+    results = segment(input_text)
+
+    if flatten:
+        flat = []
+        [[flat.append(y) for y in x] for x in results]
+        return flat
+
+    return results
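A minimal usage sketch of the new `segment_text` options added above. The `en_core_web_sm` spaCy model is assumed to be installed (per the README), and the sentence boundaries noted in the comments are illustrative rather than guaranteed output:

```python
# Illustrative sketch: exercises the unwrap/normalize options introduced
# in 1.3.0. Assumes `pip install fast-sentence-segment` and
# `python -m spacy download en_core_web_sm` have been run.
from fast_sentence_segment import segment_text

hard_wrapped = (
    '\u201cIt was a dark and stormy\n'
    'night,\u201d the narrator began.\n'
    '\n'
    'Nobody listened.\n'
)

# unwrap=True joins the hard-wrapped lines inside each paragraph before
# segmenting; normalize=True (the default) maps curly quotes to ASCII.
sentences = segment_text(hard_wrapped, flatten=True, unwrap=True)
for sentence in sentences:
    print(sentence)
```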
fast_sentence_segment-1.3.0/fast_sentence_segment/cli.py
ADDED
@@ -0,0 +1,144 @@
+# -*- coding: UTF-8 -*-
+"""CLI for fast-sentence-segment."""
+
+import argparse
+import logging
+import os
+import sys
+import time
+
+from fast_sentence_segment import segment_text
+from fast_sentence_segment.dmo.group_quoted_sentences import format_grouped_sentences
+
+logging.disable(logging.CRITICAL)
+
+# ANSI color codes
+BOLD = "\033[1m"
+DIM = "\033[2m"
+CYAN = "\033[36m"
+GREEN = "\033[32m"
+YELLOW = "\033[33m"
+RESET = "\033[0m"
+
+
+def _header(title: str):
+    print(f"\n{BOLD}{CYAN}{title}{RESET}")
+    print(f"{DIM}{'─' * 40}{RESET}")
+
+
+def _param(label: str, value: str):
+    print(f"  {DIM}{label}:{RESET} {value}")
+
+
+def _done(msg: str):
+    print(f"\n  {GREEN}✓{RESET} {msg}")
+
+
+def _file_size(path: str) -> str:
+    size = os.path.getsize(path)
+    if size < 1024:
+        return f"{size} B"
+    elif size < 1024 * 1024:
+        return f"{size / 1024:.1f} KB"
+    return f"{size / (1024 * 1024):.1f} MB"
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="segment",
+        description="Segment text into sentences",
+    )
+    parser.add_argument(
+        "text",
+        nargs="?",
+        help="Text to segment (or use stdin)",
+    )
+    parser.add_argument(
+        "-f", "--file",
+        help="Read text from file",
+    )
+    parser.add_argument(
+        "-n", "--numbered",
+        action="store_true",
+        help="Number output lines",
+    )
+    args = parser.parse_args()
+
+    # Get input text
+    if args.file:
+        with open(args.file, "r", encoding="utf-8") as f:
+            text = f.read()
+    elif args.text:
+        text = args.text
+    elif not sys.stdin.isatty():
+        text = sys.stdin.read()
+    else:
+        parser.print_help()
+        sys.exit(1)
+
+    # Segment and output
+    sentences = segment_text(text.strip(), flatten=True)
+    for i, sentence in enumerate(sentences, 1):
+        if args.numbered:
+            print(f"{i}. {sentence}")
+        else:
+            print(sentence)
+
+
+def file_main():
+    parser = argparse.ArgumentParser(
+        prog="segment-file",
+        description="Segment a text file into sentences and write to an output file",
+    )
+    parser.add_argument(
+        "--input-file", required=True,
+        help="Path to input text file",
+    )
+    parser.add_argument(
+        "--output-file", required=True,
+        help="Path to output file",
+    )
+    parser.add_argument(
+        "--unwrap", action="store_true",
+        help="Unwrap hard-wrapped lines (e.g., Project Gutenberg e-texts)",
+    )
+    parser.add_argument(
+        "--no-normalize-quotes", action="store_true",
+        help="Disable unicode quote normalization to ASCII equivalents",
+    )
+    args = parser.parse_args()
+
+    _header("segment-file")
+    _param("Input", args.input_file)
+    _param("Output", args.output_file)
+    _param("Size", _file_size(args.input_file))
+    if args.unwrap:
+        _param("Unwrap", "enabled")
+
+    print(f"\n  {YELLOW}Segmenting...{RESET}", end="", flush=True)
+
+    with open(args.input_file, "r", encoding="utf-8") as f:
+        text = f.read()
+
+    start = time.perf_counter()
+    normalize = not args.no_normalize_quotes
+    sentences = segment_text(
+        text.strip(), flatten=True, unwrap=args.unwrap, normalize=normalize,
+    )
+    elapsed = time.perf_counter() - start
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        if args.unwrap:
+            f.write(format_grouped_sentences(sentences) + "\n")
+        else:
+            for sentence in sentences:
+                f.write(sentence + "\n")
+
+    print(f"\r  {' ' * 20}\r", end="")
+    _done(f"{len(sentences):,} sentences in {elapsed:.2f}s")
+    _done(f"Written to {args.output_file}")
+    print()
+
+
+if __name__ == "__main__":
+    main()
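For contexts where the `segment-file` console script is not on PATH, the same behavior can be approximated programmatically; this sketch mirrors the core logic of `file_main()` above, omitting its timing and progress output (file names are placeholders):

```python
# Rough programmatic equivalent of:
#   segment-file --input-file book.txt --output-file sentences.txt --unwrap
# Sketch only; paths are placeholders.
from fast_sentence_segment import segment_text
from fast_sentence_segment.dmo.group_quoted_sentences import format_grouped_sentences

with open("book.txt", "r", encoding="utf-8") as f:
    text = f.read()

sentences = segment_text(text.strip(), flatten=True, unwrap=True)

with open("sentences.txt", "w", encoding="utf-8") as f:
    # In unwrap mode the CLI groups open-quote spans; mirror that here.
    f.write(format_grouped_sentences(sentences) + "\n")
```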
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/__init__.py
RENAMED
@@ -8,3 +8,7 @@ from .post_process_sentences import PostProcessStructure
 from .question_exclamation_splitter import QuestionExclamationSplitter
 from .spacy_doc_segmenter import SpacyDocSegmenter
 from .numbered_list_normalizer import NumberedListNormalizer
+from .unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
+from .normalize_quotes import normalize_quotes
+from .group_quoted_sentences import group_quoted_sentences, format_grouped_sentences
+from .strip_trailing_period_after_quote import StripTrailingPeriodAfterQuote
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviations.py
RENAMED
@@ -20,6 +20,8 @@ SENTENCE_ENDING_ABBREVIATIONS: List[str] = [
     # Common sentence-enders
     "etc.",
     "ext.",
+    "approx.",
+    "dept.",
 
     # Academic degrees (when at end of sentence)
     "Ph.D.",
@@ -32,6 +34,9 @@ SENTENCE_ENDING_ABBREVIATIONS: List[str] = [
     "J.D.",
     "D.D.S.",
     "R.N.",
+    "M.B.A.",
+    "LL.B.",
+    "LL.M.",
 
     # Business (when at end of sentence)
     "Inc.",
@@ -39,6 +44,14 @@ SENTENCE_ENDING_ABBREVIATIONS: List[str] = [
     "Ltd.",
     "Co.",
     "Bros.",
+    "LLC.",
+    "LLP.",
+
+    # Academic/legal citations (can end sentences)
+    "ibid.",
+    "Ibid.",
+    "cf.",
+    "Cf.",
 
     # Countries/Regions (when at end of sentence)
     "U.S.",
@@ -62,23 +75,59 @@ TITLE_ABBREVIATIONS: List[str] = [
     "Sr.",
     "Jr.",
     "Rev.",
+    "Hon.",
+    "Esq.",
+
+    # French/formal titles (common in translated literature)
+    "Mme.",
+    "Mlle.",
+    "Messrs.",
+
+    # Military ranks
     "Gen.",
     "Col.",
     "Capt.",
     "Lt.",
     "Sgt.",
+    "Maj.",
+    "Cpl.",
+    "Pvt.",
+    "Adm.",
+    "Cmdr.",
+
+    # Political titles
     "Rep.",
     "Sen.",
     "Gov.",
     "Pres.",
-
+
+    # Ecclesiastical titles
+    "Fr.",
+    "Msgr.",
 
     # Geographic prefixes
     "St.",
     "Mt.",
     "Ft.",
+    "Ave.",
+    "Blvd.",
+    "Rd.",
 
-    #
+    # Latin terms (never end sentences -- always introduce clauses)
+    # Include common inconsistent forms: with/without internal periods,
+    # and with trailing comma (the most common real-world form)
+    "i.e.",
+    "i.e.,",
+    "ie.",
+    "ie.,",
+    "e.g.",
+    "e.g.,",
+    "eg.",
+    "eg.,",
+    "viz.",
+    "viz.,",
+
+    # Reference/numbering prefixes
     "Fig.",
     "fig.",
     "Sec.",
@@ -93,4 +142,8 @@ TITLE_ABBREVIATIONS: List[str] = [
     "no.",
     "Pt.",
     "pt.",
+
+    # Legal / adversarial
+    "vs.",
+    "Vs.",
 ]
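A quick check of the expanded abbreviation lists. This is illustrative only: the actual output depends on the installed spaCy model, and the expected results shown in the comments are not guaranteed.

```python
# Sketch: the expanded TITLE_ABBREVIATIONS / SENTENCE_ENDING_ABBREVIATIONS
# entries are meant to prevent false splits on tokens such as "Maj." and
# "i.e.,". Expected outputs in the comments are illustrative.
from fast_sentence_segment import segment_text

print(segment_text("Maj. Carter briefed the team. They left at dawn.", flatten=True))
# expected: ['Maj. Carter briefed the team.', 'They left at dawn.']

print(segment_text("The ring must be destroyed, i.e., cast into the fire. Frodo agreed.", flatten=True))
# expected: two sentences, with no split after "i.e.,"
```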
fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/group_quoted_sentences.py
ADDED
@@ -0,0 +1,141 @@
+# -*- coding: UTF-8 -*-
+"""Group sentences that belong to the same open-quote span.
+
+When outputting segmented text with blank-line separators, sentences
+that open with a double quote but do not close it should be grouped
+with subsequent sentences (no blank line between them) until the
+closing quote is found.
+
+Related GitHub Issues:
+    #5 - Normalize quotes and group open-quote sentences in unwrap mode
+    https://github.com/craigtrim/fast-sentence-segment/issues/5
+
+    #6 - Review findings from Issue #5
+    https://github.com/craigtrim/fast-sentence-segment/issues/6
+"""
+
+from typing import List
+
+# Maximum number of sentences that can be grouped under a single
+# open-quote span before the quote state is forcibly reset. This
+# bounds the damage from a stray quote character (e.g., OCR artifact)
+# which would otherwise corrupt grouping for all subsequent sentences.
+#
+# A typical quoted passage in literature rarely exceeds 20 sentences.
+# This limit is deliberately generous to avoid false resets on
+# legitimately long quoted passages while still preventing runaway
+# grouping on malformed input.
+MAX_QUOTE_GROUP_SIZE = 20
+
+
+def group_quoted_sentences(sentences: List[str]) -> List[List[str]]:
+    """Group sentences into blocks based on open/close quote tracking.
+
+    Sentences within an unclosed double-quote span are grouped together
+    into the same block. Sentences outside of a quote span each form
+    their own block.
+
+    When rendered, each block is joined by newlines, and blocks are
+    separated by blank lines (double newlines).
+
+    The algorithm tracks the quote state by counting ASCII double quote
+    characters in each sentence. An odd count toggles the open/close
+    state. When a quote is open, subsequent sentences are appended to
+    the current group rather than starting a new one.
+
+    A safety limit (MAX_QUOTE_GROUP_SIZE) prevents a stray or malformed
+    quote from swallowing all remaining sentences into one group. When
+    the limit is reached, the current group is flushed and the quote
+    state is reset. This bounds corruption from OCR artifacts or
+    encoding errors to a bounded window rather than the entire document.
+
+    Args:
+        sentences: Flat list of segmented sentences.
+
+    Returns:
+        List of sentence groups. Each group is a list of sentences
+        that should be rendered together without blank-line separators.
+
+    Example:
+        >>> groups = group_quoted_sentences([
+        ...     '"The probability lies in that direction.',
+        ...     'And if we take this as a working hypothesis."',
+        ...     'He paused.',
+        ... ])
+        >>> groups
+        [['"The probability lies in that direction.',
+          'And if we take this as a working hypothesis."'],
+         ['He paused.']]
+
+    Related GitHub Issues:
+        #5 - Normalize quotes and group open-quote sentences in unwrap mode
+        https://github.com/craigtrim/fast-sentence-segment/issues/5
+
+        #6 - Review findings from Issue #5
+        https://github.com/craigtrim/fast-sentence-segment/issues/6
+    """
+    if not sentences:
+        return []
+
+    groups: List[List[str]] = []
+    current_group: List[str] = []
+    quote_open = False
+
+    for sentence in sentences:
+        quote_count = sentence.count('"')
+
+        if not quote_open:
+            # Starting a new group
+            if current_group:
+                groups.append(current_group)
+            current_group = [sentence]
+        else:
+            # Inside an open quote span -- append to current group
+            current_group.append(sentence)
+
+        # Toggle quote state on odd quote count
+        if quote_count % 2 == 1:
+            quote_open = not quote_open
+
+        # Safety: if a group grows beyond the limit, the quote is
+        # likely corrupted (stray quote character). Flush the group
+        # and reset state to prevent runaway grouping.
+        if quote_open and len(current_group) >= MAX_QUOTE_GROUP_SIZE:
+            groups.append(current_group)
+            current_group = []
+            quote_open = False
+
+    # Flush the final group
+    if current_group:
+        groups.append(current_group)
+
+    return groups
+
+
+def format_grouped_sentences(sentences: List[str]) -> str:
+    """Format sentences with quote-aware blank-line separation.
+
+    Sentences within the same quoted span are separated by single
+    newlines. Sentence groups are separated by blank lines (double
+    newlines).
+
+    Args:
+        sentences: Flat list of segmented sentences.
+
+    Returns:
+        Formatted string with appropriate line separation.
+
+    Example:
+        >>> text = format_grouped_sentences([
+        ...     '"The probability lies in that direction.',
+        ...     'And if we take this as a working hypothesis."',
+        ...     'He paused.',
+        ... ])
+        >>> print(text)
+        "The probability lies in that direction.
+        And if we take this as a working hypothesis."
+        <BLANKLINE>
+        He paused.
+    """
+    groups = group_quoted_sentences(sentences)
+    return '\n\n'.join('\n'.join(group) for group in groups)
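A sketch of combining the grouping helper with flattened segmentation output; this is what `segment-file --unwrap` does internally, and the quoted text here is illustrative:

```python
# Sketch: feed flattened segmentation output through the quote-aware
# grouping, then render blocks separated by blank lines.
from fast_sentence_segment import segment_text
from fast_sentence_segment.dmo import group_quoted_sentences, format_grouped_sentences

sentences = segment_text(
    '"The probability lies in that direction. '
    'And if we take this as a working hypothesis." He paused.',
    flatten=True,
)

groups = group_quoted_sentences(sentences)   # list of sentence blocks
print(format_grouped_sentences(sentences))   # blocks separated by blank lines
```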
fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/normalize_quotes.py
ADDED
@@ -0,0 +1,80 @@
+# -*- coding: UTF-8 -*-
+"""Normalize unicode quote variants to ASCII equivalents.
+
+E-texts use a variety of quote characters (curly/smart quotes, unicode
+variants, primes, guillemets). This module normalizes all quote variants
+to their standard ASCII equivalents: double quote (") and single
+quote/apostrophe (').
+
+Related GitHub Issues:
+    #5 - Normalize quotes and group open-quote sentences in unwrap mode
+    https://github.com/craigtrim/fast-sentence-segment/issues/5
+
+    #6 - Review findings from Issue #5
+    https://github.com/craigtrim/fast-sentence-segment/issues/6
+"""
+
+import re
+
+# Unicode double quote variants to normalize to ASCII " (U+0022).
+#
+# U+201C " LEFT DOUBLE QUOTATION MARK
+# U+201D " RIGHT DOUBLE QUOTATION MARK
+# U+201E „ DOUBLE LOW-9 QUOTATION MARK
+# U+201F ‟ DOUBLE HIGH-REVERSED-9 QUOTATION MARK
+# U+00AB « LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+# U+00BB » RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+# U+2033 ″ DOUBLE PRIME
+# U+301D 〝 REVERSED DOUBLE PRIME QUOTATION MARK
+# U+301E 〞 DOUBLE PRIME QUOTATION MARK
+# U+301F 〟 LOW DOUBLE PRIME QUOTATION MARK
+# U+FF02 " FULLWIDTH QUOTATION MARK
+DOUBLE_QUOTE_PATTERN = re.compile(
+    '[\u201c\u201d\u201e\u201f\u00ab\u00bb\u2033\u301d\u301e\u301f\uff02]'
+)
+
+# Unicode single quote variants to normalize to ASCII ' (U+0027).
+#
+# U+2018 ' LEFT SINGLE QUOTATION MARK
+# U+2019 ' RIGHT SINGLE QUOTATION MARK
+# U+201A ‚ SINGLE LOW-9 QUOTATION MARK
+# U+201B ‛ SINGLE HIGH-REVERSED-9 QUOTATION MARK
+# U+2039 ‹ SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+# U+203A › SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+# U+2032 ′ PRIME
+# U+FF07 ' FULLWIDTH APOSTROPHE
+# U+0060 ` GRAVE ACCENT (used as opening quote in some e-texts)
+# U+00B4 ´ ACUTE ACCENT (used as closing quote in some e-texts)
+SINGLE_QUOTE_PATTERN = re.compile(
+    '[\u2018\u2019\u201a\u201b\u2039\u203a\u2032\uff07\u0060\u00b4]'
+)
+
+
+def normalize_quotes(text: str) -> str:
+    """Replace all unicode quote variants with their ASCII equivalents.
+
+    Double quote variants are normalized to ASCII " (U+0022).
+    Single quote variants are normalized to ASCII ' (U+0027).
+
+    Args:
+        text: Input text potentially containing unicode quotes.
+
+    Returns:
+        Text with all quote variants replaced by ASCII equivalents.
+
+    Example:
+        >>> normalize_quotes('\u201cHello,\u201d she said.')
+        '"Hello," she said.'
+        >>> normalize_quotes('It\u2019s fine.')
+        "It's fine."
+
+    Related GitHub Issues:
+        #5 - Normalize quotes and group open-quote sentences in unwrap mode
+        https://github.com/craigtrim/fast-sentence-segment/issues/5
+
+        #6 - Review findings from Issue #5
+        https://github.com/craigtrim/fast-sentence-segment/issues/6
+    """
+    text = DOUBLE_QUOTE_PATTERN.sub('"', text)
+    text = SINGLE_QUOTE_PATTERN.sub("'", text)
+    return text
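The normalization matters for the quote-count heuristic above: `group_quoted_sentences` only counts ASCII `"`, so curly quotes would never toggle the open/close state. A small sketch:

```python
# Sketch: curly quotes are invisible to the ASCII quote counter used by
# group_quoted_sentences, so normalize first.
from fast_sentence_segment.dmo import normalize_quotes

curly = '\u201cYou shall not pass!\u201d he cried.'
ascii_text = normalize_quotes(curly)

print(curly.count('"'))        # 0 -- curly quotes do not toggle quote state
print(ascii_text.count('"'))   # 2 -- after normalization the span is balanced
print(ascii_text)              # "You shall not pass!" he cried.
```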
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/spacy_doc_segmenter.py
RENAMED
@@ -23,18 +23,31 @@ class SpacyDocSegmenter(BaseObject):
 
     @staticmethod
     def _append_period(a_sentence: str) -> str:
+        """Append a period if the sentence lacks terminal punctuation.
+
+        Checks for terminal punctuation (. ? ! :) after stripping any
+        trailing quote characters (" '). This prevents a spurious period
+        from being appended to sentences like:
+            'He said "Hello."' -> unchanged (not 'He said "Hello.".')
+
+        Related GitHub Issue:
+            #7 - Spurious trailing period appended after sentence-final
+                 closing quote
+            https://github.com/craigtrim/fast-sentence-segment/issues/7
+
+        Args:
+            a_sentence: A sentence that may or may not have terminal
+                punctuation.
+
+        Returns:
+            The sentence with a period appended if it lacked terminal
+            punctuation, otherwise unchanged.
         """
-
-
-        :
-
-        ""
-        __blacklist = [':', '?', '!']
-        if not a_sentence.strip().endswith('.'):
-            for ch in __blacklist:
-                if not a_sentence.endswith(ch):
-                    return f"{a_sentence}."
-        return a_sentence
+        # Strip trailing quotes to inspect the actual punctuation
+        stripped = a_sentence.strip().rstrip('"\'')
+        if stripped and stripped[-1] in '.?!:':
+            return a_sentence
+        return f"{a_sentence}."
 
     @staticmethod
     def _is_valid_sentence(a_sentence: str) -> bool:
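The rewritten `_append_period` can be exercised in isolation since it is a staticmethod; importing the class is assumed to require spaCy, as elsewhere in the package:

```python
# Sketch: the new terminal-punctuation check strips trailing quotes before
# deciding whether to append a period.
from fast_sentence_segment.dmo import SpacyDocSegmenter

print(SpacyDocSegmenter._append_period('He said "Hello."'))   # unchanged
print(SpacyDocSegmenter._append_period('She asked "Why?"'))   # unchanged
print(SpacyDocSegmenter._append_period('A bare fragment'))    # 'A bare fragment.'
```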
fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/strip_trailing_period_after_quote.py
ADDED
@@ -0,0 +1,70 @@
+# -*- coding: UTF-8 -*-
+"""Strip spurious trailing periods appended after sentence-final closing quotes.
+
+The spaCy segmenter's _append_period method can produce sentences like:
+    'He said "Hello.".'   (spurious trailing period)
+    'She asked "Why?".'   (spurious trailing period)
+    'He yelled "Stop!".'  (spurious trailing period)
+
+This post-processor removes the trailing period when the sentence ends
+with a closing double quote preceded by terminal punctuation.
+
+Related GitHub Issue:
+    #7 - Spurious trailing period appended after sentence-final
+         closing quote
+    https://github.com/craigtrim/fast-sentence-segment/issues/7
+"""
+
+import re
+
+from fast_sentence_segment.core import BaseObject
+
+# Matches a sentence that ends with terminal punctuation (. ? !)
+# followed by a closing double quote, followed by a spurious period.
+# The fix strips the final period.
+_SPURIOUS_PERIOD_PATTERN = re.compile(r'([.?!]")\.$')
+
+
+class StripTrailingPeriodAfterQuote(BaseObject):
+    """Strip spurious trailing periods after sentence-final closing quotes.
+
+    Detects sentences ending with patterns like:
+        ."."  ->  ."
+        ?"."  ->  ?"
+        !"."  ->  !"
+
+    Applied as a post-processing step in the sentence segmentation
+    pipeline, after spaCy segmentation and after the existing
+    PostProcessStructure step.
+
+    Related GitHub Issue:
+        #7 - Spurious trailing period appended after sentence-final
+             closing quote
+        https://github.com/craigtrim/fast-sentence-segment/issues/7
+    """
+
+    def __init__(self):
+        """
+        Created:
+            29-Jan-2026
+        """
+        BaseObject.__init__(self, __name__)
+
+    def process(self, sentences: list) -> list:
+        """Remove spurious trailing periods after closing quotes.
+
+        Args:
+            sentences: List of segmented sentences.
+
+        Returns:
+            List of sentences with spurious trailing periods removed.
+
+        Example:
+            >>> proc = StripTrailingPeriodAfterQuote()
+            >>> proc.process(['He said "Hello.".', 'She waved.'])
+            ['He said "Hello."', 'She waved.']
+        """
+        return [
+            _SPURIOUS_PERIOD_PATTERN.sub(r'\1', sentence)
+            for sentence in sentences
+        ]
fast_sentence_segment-1.3.0/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py
ADDED
@@ -0,0 +1,34 @@
+# -*- coding: UTF-8 -*-
+"""Unwrap hard-wrapped text (e.g., Project Gutenberg e-texts).
+
+Joins lines within paragraphs into continuous strings while
+preserving paragraph boundaries (blank lines).
+"""
+
+import re
+
+
+def unwrap_hard_wrapped_text(text: str) -> str:
+    """Unwrap hard-wrapped paragraphs into continuous lines.
+
+    Splits on blank lines to identify paragraphs, then joins
+    lines within each paragraph into a single string with
+    single spaces.
+
+    Args:
+        text: Raw text with hard-wrapped lines.
+
+    Returns:
+        Text with paragraphs unwrapped into continuous strings,
+        separated by double newlines.
+    """
+    blocks = re.split(r'\n\s*\n', text)
+    unwrapped = []
+
+    for block in blocks:
+        lines = block.splitlines()
+        joined = ' '.join(line.strip() for line in lines if line.strip())
+        if joined:
+            unwrapped.append(joined)
+
+    return '\n\n'.join(unwrapped)
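A small sketch of the unwrap helper on a Gutenberg-style hard-wrapped passage (the text itself is illustrative):

```python
# Sketch: paragraph-internal newlines are joined with spaces; the blank
# line between paragraphs is kept as a paragraph boundary.
from fast_sentence_segment.dmo import unwrap_hard_wrapped_text

raw = (
    "It is a truth universally\n"
    "acknowledged, that a single man\n"
    "in possession of a good fortune...\n"
    "\n"
    "However little known the feelings\n"
    "of such a man may be...\n"
)

print(unwrap_hard_wrapped_text(raw))
# It is a truth universally acknowledged, that a single man in possession of a good fortune...
#
# However little known the feelings of such a man may be...
```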
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/perform_sentence_segmentation.py
RENAMED
@@ -17,6 +17,7 @@ from fast_sentence_segment.dmo import NumberedListNormalizer
 from fast_sentence_segment.dmo import QuestionExclamationSplitter
 from fast_sentence_segment.dmo import SpacyDocSegmenter
 from fast_sentence_segment.dmo import PostProcessStructure
+from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote
 
 
 class PerformSentenceSegmentation(BaseObject):
@@ -55,6 +56,7 @@ class PerformSentenceSegmentation(BaseObject):
         self._question_exclamation_splitter = QuestionExclamationSplitter().process
         self._title_name_merger = TitleNameMerger().process
         self._post_process = PostProcessStructure().process
+        self._strip_trailing_period = StripTrailingPeriodAfterQuote().process
 
     def _denormalize(self, text: str) -> str:
         """ Restore normalized placeholders to original form """
@@ -129,6 +131,9 @@ class PerformSentenceSegmentation(BaseObject):
 
         sentences = self._post_process(sentences)
 
+        # Strip spurious trailing periods after closing quotes (issue #7)
+        sentences = self._strip_trailing_period(sentences)
+
         sentences = [
             self._normalize_numbered_lists(x, denormalize=True)
             for x in sentences
{fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/pyproject.toml
RENAMED
@@ -11,14 +11,26 @@ description = "Fast and Efficient Sentence Segmentation"
 license = "MIT"
 name = "fast-sentence-segment"
 readme = "README.md"
-version = "1.2.1"
+version = "1.3.0"
 
 keywords = ["nlp", "text", "preprocess", "segment"]
 repository = "https://github.com/craigtrim/fast-sentence-segment"
 
 classifiers = [
-    "Development Status ::
+    "Development Status :: 5 - Production/Stable",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: MIT License",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
     "Topic :: Software Development :: Libraries :: Python Modules",
+    "Topic :: Text Processing :: Linguistic",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Operating System :: OS Independent",
+    "Typing :: Typed",
 ]
 
 [tool.poetry.urls]
@@ -36,6 +48,7 @@ ruff = "*"
 
 [tool.poetry.scripts]
 segment = "fast_sentence_segment.cli:main"
+segment-file = "fast_sentence_segment.cli:file_main"
 
 [tool.poetry.build]
 generate-setup-file = true
fast_sentence_segment-1.3.0/setup.py
ADDED
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+from setuptools import setup
+
+packages = \
+['fast_sentence_segment',
+ 'fast_sentence_segment.bp',
+ 'fast_sentence_segment.core',
+ 'fast_sentence_segment.dmo',
+ 'fast_sentence_segment.svc']
+
+package_data = \
+{'': ['*']}
+
+install_requires = \
+['spacy>=3.8.0,<4.0.0']
+
+entry_points = \
+{'console_scripts': ['segment = fast_sentence_segment.cli:main',
+                     'segment-file = fast_sentence_segment.cli:file_main']}
+
+setup_kwargs = {
+    'name': 'fast-sentence-segment',
+    'version': '1.3.0',
+    'description': 'Fast and Efficient Sentence Segmentation',
'long_description': '# Fast Sentence Segmentation\n\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://opensource.org/licenses/MIT)\n[](https://spacy.io/)\n[](https://pepy.tech/project/fast-sentence-segment)\n[](https://pepy.tech/project/fast-sentence-segment)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\n```bash\n# Inline text\nsegment "Gandalf paused... You shall not pass! 
The Balrog roared."\n\n# Pipe from stdin\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n\n# Numbered output\nsegment -n -f silmarillion.txt\n\n# File-to-file (one sentence per line)\nsegment-file --input-file book.txt --output-file sentences.txt\n\n# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)\nsegment-file --input-file book.txt --output-file sentences.txt --unwrap\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Commands\n\n| Command | Description |\n|---------|-------------|\n| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |\n| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
+    'author': 'Craig Trim',
+    'author_email': 'craigtrim@gmail.com',
+    'maintainer': 'Craig Trim',
+    'maintainer_email': 'craigtrim@gmail.com',
+    'url': 'https://github.com/craigtrim/fast-sentence-segment',
+    'packages': packages,
+    'package_data': package_data,
+    'install_requires': install_requires,
+    'entry_points': entry_points,
+    'python_requires': '>=3.9,<4.0',
+}
+
+
+setup(**setup_kwargs)
fast_sentence_segment-1.2.1/fast_sentence_segment/__init__.py
REMOVED
@@ -1,18 +0,0 @@
-from .bp import *
-from .svc import *
-from .dmo import *
-
-from .bp.segmenter import Segmenter
-
-segment = Segmenter().input_text
-
-
-def segment_text(input_text: str, flatten: bool = False) -> list:
-    results = segment(input_text)
-
-    if flatten:
-        flat = []
-        [[flat.append(y) for y in x] for x in results]
-        return flat
-
-    return results
fast_sentence_segment-1.2.1/fast_sentence_segment/cli.py
REMOVED
@@ -1,56 +0,0 @@
-# -*- coding: UTF-8 -*-
-"""CLI for fast-sentence-segment."""
-
-import argparse
-import logging
-import sys
-
-from fast_sentence_segment import segment_text
-
-logging.disable(logging.CRITICAL)
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        prog="segment",
-        description="Segment text into sentences",
-    )
-    parser.add_argument(
-        "text",
-        nargs="?",
-        help="Text to segment (or use stdin)",
-    )
-    parser.add_argument(
-        "-f", "--file",
-        help="Read text from file",
-    )
-    parser.add_argument(
-        "-n", "--numbered",
-        action="store_true",
-        help="Number output lines",
-    )
-    args = parser.parse_args()
-
-    # Get input text
-    if args.file:
-        with open(args.file, "r", encoding="utf-8") as f:
-            text = f.read()
-    elif args.text:
-        text = args.text
-    elif not sys.stdin.isatty():
-        text = sys.stdin.read()
-    else:
-        parser.print_help()
-        sys.exit(1)
-
-    # Segment and output
-    sentences = segment_text(text.strip(), flatten=True)
-    for i, sentence in enumerate(sentences, 1):
-        if args.numbered:
-            print(f"{i}. {sentence}")
-        else:
-            print(sentence)
-
-
-if __name__ == "__main__":
-    main()
fast_sentence_segment-1.2.1/setup.py
REMOVED
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-from setuptools import setup
-
-packages = \
-['fast_sentence_segment',
- 'fast_sentence_segment.bp',
- 'fast_sentence_segment.core',
- 'fast_sentence_segment.dmo',
- 'fast_sentence_segment.svc']
-
-package_data = \
-{'': ['*']}
-
-install_requires = \
-['spacy>=3.8.0,<4.0.0']
-
-entry_points = \
-{'console_scripts': ['segment = fast_sentence_segment.cli:main']}
-
-setup_kwargs = {
-    'name': 'fast-sentence-segment',
-    'version': '1.2.1',
-    'description': 'Fast and Efficient Sentence Segmentation',
'long_description': '# Fast Sentence Segmentation\n\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://opensource.org/licenses/MIT)\n[](https://spacy.io/)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\nSegment text directly from the terminal:\n\n```bash\n# Direct text input\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n```\n\n```\nHave you seen Dr. 
Who?\nIt\'s brilliant!\n```\n\n```bash\n# Numbered output\nsegment -n "Gandalf paused... You shall not pass! The Balrog roared."\n```\n\n```\n1. Gandalf paused...\n2. You shall not pass!\n3. The Balrog roared.\n```\n\n```bash\n# From file\nsegment -f silmarillion.txt\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False` | `list` | Main entry point for segmentation |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Options\n\n| Option | Description |\n|--------|-------------|\n| `text` | Text to segment (positional argument) |\n| `-f, --file` | Read text from file |\n| `-n, --numbered` | Number output lines |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
-    'author': 'Craig Trim',
-    'author_email': 'craigtrim@gmail.com',
-    'maintainer': 'Craig Trim',
-    'maintainer_email': 'craigtrim@gmail.com',
-    'url': 'https://github.com/craigtrim/fast-sentence-segment',
-    'packages': packages,
-    'package_data': package_data,
-    'install_requires': install_requires,
-    'entry_points': entry_points,
-    'python_requires': '>=3.9,<4.0',
-}
-
-
-setup(**setup_kwargs)
Files renamed without changes:

- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/LICENSE
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/bp/__init__.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/bp/segmenter.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/__init__.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/base_object.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/core/stopwatch.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviation_merger.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/abbreviation_splitter.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/bullet_point_cleaner.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/ellipsis_normalizer.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/newlines_to_periods.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/numbered_list_normalizer.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/post_process_sentences.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/question_exclamation_splitter.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/dmo/title_name_merger.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/__init__.py
- {fast_sentence_segment-1.2.1 → fast_sentence_segment-1.3.0}/fast_sentence_segment/svc/perform_paragraph_segmentation.py