fast-sentence-segment 1.4.4.tar.gz → 1.4.5.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/PKG-INFO +3 -4
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/README.md +0 -1
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/__init__.py +25 -6
- fast_sentence_segment-1.4.5/fast_sentence_segment/cli.py +298 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/__init__.py +2 -0
- fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/dialog_formatter.py +255 -0
- fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/ocr_artifact_fixer.py +70 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/svc/perform_sentence_segmentation.py +5 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/pyproject.toml +1 -1
- fast_sentence_segment-1.4.5/setup.py +39 -0
- fast_sentence_segment-1.4.4/fast_sentence_segment/cli.py +0 -194
- fast_sentence_segment-1.4.4/setup.py +0 -39
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/LICENSE +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/bp/__init__.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/bp/segmenter.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/core/__init__.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/core/base_object.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/core/stopwatch.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/abbreviation_merger.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/abbreviation_splitter.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/abbreviations.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/bullet_point_cleaner.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/dehyphenator.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/ellipsis_normalizer.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/group_quoted_sentences.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/newlines_to_periods.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/normalize_quotes.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/numbered_list_normalizer.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/post_process_sentences.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/question_exclamation_splitter.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/spacy_doc_segmenter.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/strip_trailing_period_after_quote.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/title_name_merger.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/svc/__init__.py +0 -0
- {fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/svc/perform_paragraph_segmentation.py +0 -0
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/PKG-INFO RENAMED
@@ -1,9 +1,9 @@
-Metadata-Version: 2.
+Metadata-Version: 2.1
 Name: fast-sentence-segment
-Version: 1.4.4
+Version: 1.4.5
 Summary: Fast and Efficient Sentence Segmentation
+Home-page: https://github.com/craigtrim/fast-sentence-segment
 License: MIT
-License-File: LICENSE
 Keywords: nlp,text,preprocess,segment
 Author: Craig Trim
 Author-email: craigtrim@gmail.com
@@ -33,7 +33,6 @@ Description-Content-Type: text/markdown
 
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://pypi.org/project/fast-sentence-segment/)
-[](https://github.com/craigtrim/fast-sentence-segment/actions/workflows/ci.yml)
 [](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)
 [](https://opensource.org/licenses/MIT)
 [](https://github.com/astral-sh/ruff)
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/README.md RENAMED
@@ -2,7 +2,6 @@
 
 [](https://pypi.org/project/fast-sentence-segment/)
 [](https://pypi.org/project/fast-sentence-segment/)
-[](https://github.com/craigtrim/fast-sentence-segment/actions/workflows/ci.yml)
 [](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)
 [](https://opensource.org/licenses/MIT)
 [](https://github.com/astral-sh/ruff)
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/__init__.py RENAMED
@@ -1,3 +1,5 @@
+from typing import List, Optional, Union
+
 from .bp import *
 from .svc import *
 from .dmo import *
@@ -5,6 +7,7 @@ from .dmo import *
 from .bp.segmenter import Segmenter
 from .dmo.unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
 from .dmo.normalize_quotes import normalize_quotes
+from .dmo.dialog_formatter import format_dialog
 
 segment = Segmenter().input_text
 
@@ -14,7 +17,8 @@ def segment_text(
     flatten: bool = False,
     unwrap: bool = False,
     normalize: bool = True,
-
+    format: Optional[str] = None,
+) -> Union[List, str]:
     """Segment text into sentences.
 
     Args:
@@ -26,14 +30,23 @@ def segment_text(
         normalize: If True (default), normalize unicode quote variants
             to ASCII equivalents before segmenting. Ensures consistent
             quote characters for downstream processing.
+        format: Optional output format. Supported values:
+            - None (default): Return list of sentences/paragraphs
+            - "dialog": Return formatted string with dialog-aware
+              paragraph grouping (keeps multi-sentence quotes together,
+              adds paragraph breaks between speakers)
 
     Returns:
-        List of sentences (if flatten=True) or list
-        groups, each containing a list of sentences.
+        If format is None: List of sentences (if flatten=True) or list
+        of paragraph groups, each containing a list of sentences.
+        If format="dialog": Formatted string with paragraph breaks.
 
-    Related GitHub
+    Related GitHub Issues:
         #6 - Review findings from Issue #5
         https://github.com/craigtrim/fast-sentence-segment/issues/6
+
+        #10 - feat: Add --format flag for dialog-aware paragraph formatting
+        https://github.com/craigtrim/fast-sentence-segment/issues/10
     """
     if unwrap:
         input_text = unwrap_hard_wrapped_text(input_text)
@@ -43,9 +56,15 @@ def segment_text(
 
     results = segment(input_text)
 
+    # Flatten to list of sentences
+    flat = []
+    [[flat.append(y) for y in x] for x in results]
+
+    # Apply formatting if requested
+    if format == "dialog":
+        return format_dialog(flat)
+
     if flatten:
-        flat = []
-        [[flat.append(y) for y in x] for x in results]
         return flat
 
     return results
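The new `format` parameter is the main API change in this release. A minimal usage sketch based on the signature above (the sample text is illustrative, not taken from the package's tests):

```python
from fast_sentence_segment import segment_text

text = '"Hello," said Jack. "How are you today? I hope you are well." "I am fine," replied Mary.'

# Default behavior is unchanged: nested lists of sentences, grouped by paragraph
paragraphs = segment_text(text)

# New in 1.4.5: format="dialog" returns a single formatted string instead,
# with blank lines between speakers and mid-quote sentences kept together
print(segment_text(text, format="dialog"))
```

Note that when `format="dialog"` is passed, the function returns the formatted string before the `if flatten` branch is reached, so `flatten` is effectively ignored in that mode.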
fast_sentence_segment-1.4.5/fast_sentence_segment/cli.py ADDED
@@ -0,0 +1,298 @@
+# -*- coding: UTF-8 -*-
+"""CLI for fast-sentence-segment."""
+
+import argparse
+import itertools
+import logging
+import os
+import sys
+import threading
+import time
+
+from fast_sentence_segment import segment_text
+from fast_sentence_segment.dmo.group_quoted_sentences import format_grouped_sentences
+
+logging.disable(logging.CRITICAL)
+
+# ANSI color codes
+BOLD = "\033[1m"
+DIM = "\033[2m"
+CYAN = "\033[36m"
+GREEN = "\033[32m"
+YELLOW = "\033[33m"
+RESET = "\033[0m"
+
+
+class Spinner:
+    """Animated spinner for long-running operations."""
+
+    def __init__(self, message: str):
+        self.message = message
+        self.running = False
+        self.thread = None
+        self.frames = itertools.cycle(["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"])
+
+    def _spin(self):
+        while self.running:
+            frame = next(self.frames)
+            print(f"\r {YELLOW}{frame}{RESET} {self.message}", end="", flush=True)
+            time.sleep(0.08)
+
+    def __enter__(self):
+        self.running = True
+        self.thread = threading.Thread(target=self._spin)
+        self.thread.start()
+        return self
+
+    def __exit__(self, *args):
+        self.running = False
+        if self.thread:
+            self.thread.join()
+        print(f"\r {' ' * (len(self.message) + 4)}\r", end="", flush=True)
+
+
+def _header(title: str):
+    print(f"\n{BOLD}{CYAN}{title}{RESET}")
+    print(f"{DIM}{'─' * 40}{RESET}")
+
+
+def _param(label: str, value: str):
+    print(f" {DIM}{label}:{RESET} {value}")
+
+
+def _done(msg: str):
+    print(f"\n {GREEN}✓{RESET} {msg}")
+
+
+def _file_size(path: str) -> str:
+    size = os.path.getsize(path)
+    if size < 1024:
+        return f"{size} B"
+    elif size < 1024 * 1024:
+        return f"{size / 1024:.1f} KB"
+    return f"{size / (1024 * 1024):.1f} MB"
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        prog="segment",
+        description="Segment text into sentences",
+    )
+    parser.add_argument(
+        "text",
+        nargs="?",
+        help="Text to segment (or use stdin)",
+    )
+    parser.add_argument(
+        "-f", "--file",
+        help="Read text from file",
+    )
+    parser.add_argument(
+        "-n", "--numbered",
+        action="store_true",
+        help="Number output lines",
+    )
+    parser.add_argument(
+        "--unwrap",
+        action="store_true",
+        help="Unwrap hard-wrapped lines and dehyphenate split words",
+    )
+    parser.add_argument(
+        "--format",
+        action="store_true",
+        help="Format output with dialog-aware paragraph grouping",
+    )
+    args = parser.parse_args()
+
+    # Get input text
+    if args.file:
+        with open(args.file, "r", encoding="utf-8") as f:
+            text = f.read()
+    elif args.text:
+        text = args.text
+    elif not sys.stdin.isatty():
+        text = sys.stdin.read()
+    else:
+        parser.print_help()
+        sys.exit(1)
+
+    # Segment and output
+    result = segment_text(
+        text.strip(), flatten=True, unwrap=args.unwrap,
+        format="dialog" if args.format else None
+    )
+
+    # If format is used, result is a string
+    if args.format:
+        print(result)
+    else:
+        # Result is a list of sentences
+        for i, sentence in enumerate(result, 1):
+            if args.numbered:
+                print(f"{i}. {sentence}")
+            else:
+                print(sentence)
+
+
+def _generate_output_path(input_path: str) -> str:
+    """Generate output path by inserting -clean before extension."""
+    base, ext = os.path.splitext(input_path)
+    return f"{base}-clean{ext}"
+
+
+def _process_single_file(
+    input_file: str, output_file: str, unwrap: bool, normalize: bool, format: str = None
+):
+    """Process a single file and write output."""
+    # Show configuration
+    _param("Input", input_file)
+    _param("Output", output_file)
+    _param("Size", _file_size(input_file))
+    _param("Unwrap", "enabled" if unwrap else "disabled")
+    _param("Normalize quotes", "disabled" if not normalize else "enabled")
+    _param("Format", format if format else "default (one sentence per line)")
+    print()
+
+    # Step 1: Read file
+    print(f" {YELLOW}→{RESET} Reading input file...")
+    with open(input_file, "r", encoding="utf-8") as f:
+        text = f.read()
+    print(f" {GREEN}✓{RESET} Read {len(text):,} characters")
+
+    # Step 2: Segment text
+    print(f" {YELLOW}→{RESET} Segmenting text...", end="", flush=True)
+    start = time.perf_counter()
+    result = segment_text(
+        text.strip(), flatten=True, unwrap=unwrap, normalize=normalize, format=format,
+    )
+    elapsed = time.perf_counter() - start
+
+    # Step 3: Write output
+    if format:
+        # Format mode returns a string
+        sentence_count = result.count("\n") + 1 if result else 0
+        print(f"\r {GREEN}✓{RESET} Segmented text ({elapsed:.2f}s)")
+        with open(output_file, "w", encoding="utf-8") as f:
+            f.write(result + "\n")
+        print(f" {GREEN}✓{RESET} Written formatted output to {output_file}")
+    else:
+        # Default mode returns a list
+        sentences = result
+        print(f"\r {GREEN}✓{RESET} Segmented into {len(sentences):,} sentences ({elapsed:.2f}s)")
+        total = len(sentences)
+        with open(output_file, "w", encoding="utf-8") as f:
+            if unwrap:
+                f.write(format_grouped_sentences(sentences) + "\n")
+                print(f" {GREEN}✓{RESET} Written {total:,} sentences to {output_file}")
+            else:
+                for i, sentence in enumerate(sentences, 1):
+                    f.write(sentence + "\n")
+                    if i % 500 == 0 or i == total:
+                        pct = (i / total) * 100
+                        print(f"\r {YELLOW}→{RESET} Writing... {pct:.0f}% ({i:,}/{total:,})", end="", flush=True)
+                print(f"\r {GREEN}✓{RESET} Written {total:,} sentences to {output_file} ")
+
+
+def file_main():
+    parser = argparse.ArgumentParser(
+        prog="segment-file",
+        description="Segment a text file into sentences and write to an output file",
+    )
+    parser.add_argument(
+        "--input-file",
+        help="Path to input text file",
+    )
+    parser.add_argument(
+        "--input-dir",
+        help="Path to directory containing text files to process",
+    )
+    parser.add_argument(
+        "--output-file",
+        help="Path to output file (optional, defaults to input-file with -clean suffix)",
+    )
+    parser.add_argument(
+        "--unwrap", action="store_true",
+        help="Unwrap hard-wrapped lines (e.g., Project Gutenberg e-texts)",
+    )
+    parser.add_argument(
+        "--no-normalize-quotes", action="store_true",
+        help="Disable unicode quote normalization to ASCII equivalents",
+    )
+    parser.add_argument(
+        "--format",
+        action="store_true",
+        help="Format output with dialog-aware paragraph grouping",
+    )
+    args = parser.parse_args()
+
+    # Validate arguments
+    if not args.input_file and not args.input_dir:
+        print(f" {YELLOW}Error:{RESET} Either --input-file or --input-dir is required")
+        sys.exit(1)
+    if args.input_file and args.input_dir:
+        print(f" {YELLOW}Error:{RESET} Cannot specify both --input-file and --input-dir")
+        sys.exit(1)
+    if args.input_dir and args.output_file:
+        print(f" {YELLOW}Error:{RESET} --output-file cannot be used with --input-dir")
+        sys.exit(1)
+
+    normalize = not args.no_normalize_quotes
+
+    # Process directory
+    if args.input_dir:
+        if not os.path.isdir(args.input_dir):
+            print(f" {YELLOW}Error:{RESET} Directory not found: {args.input_dir}")
+            sys.exit(1)
+
+        # Find all .txt files
+        txt_files = sorted([
+            f for f in os.listdir(args.input_dir)
+            if f.endswith(".txt") and not f.endswith("-clean.txt")
+        ])
+
+        if not txt_files:
+            print(f" {YELLOW}Error:{RESET} No .txt files found in {args.input_dir}")
+            sys.exit(1)
+
+        _header("segment-file (batch)")
+        print(f" {DIM}Processing {len(txt_files)} files in directory{RESET}")
+        print()
+        _param("Directory", args.input_dir)
+        _param("Files", str(len(txt_files)))
+        _param("Unwrap", "enabled" if args.unwrap else "disabled")
+        _param("Normalize quotes", "disabled" if not normalize else "enabled")
+        _param("Format", "dialog" if args.format else "default (one sentence per line)")
+        print()
+
+        format_value = "dialog" if args.format else None
+        for i, filename in enumerate(txt_files, 1):
+            input_path = os.path.join(args.input_dir, filename)
+            output_path = _generate_output_path(input_path)
+            print(f" {BOLD}[{i}/{len(txt_files)}]{RESET} {filename}")
+            _process_single_file(input_path, output_path, args.unwrap, normalize, format_value)
+            print()
+
+        print(f" {GREEN}Done! Processed {len(txt_files)} files.{RESET}")
+        print()
+        return
+
+    # Process single file
+    if not os.path.isfile(args.input_file):
+        print(f" {YELLOW}Error:{RESET} File not found: {args.input_file}")
+        sys.exit(1)
+
+    output_file = args.output_file or _generate_output_path(args.input_file)
+
+    _header("segment-file")
+    print(f" {DIM}Segmenting text file into sentences{RESET}")
+    print()
+
+    format_value = "dialog" if args.format else None
+    _process_single_file(args.input_file, output_file, args.unwrap, normalize, format_value)
+
+    print(f"\n {GREEN}Done!{RESET}")
+    print()
+
+
+if __name__ == "__main__":
+    main()
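Shell sketches of the new flags wired up above; the file and directory names are placeholders:

```bash
# Dialog-aware formatting on inline text (new --format flag)
segment --format '"Hello," said Jack. "I am fine," replied Mary.'

# Batch mode (new --input-dir flag): processes every .txt in the directory,
# skipping *-clean.txt files, and writes each result next to its input as <name>-clean.txt
segment-file --input-dir ./books --unwrap --format
```

Note that the CLI's `--format` is a boolean switch that maps to `format="dialog"` internally, unlike the library's string-valued `format` parameter.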
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/dmo/__init__.py RENAMED
@@ -13,3 +13,5 @@ from .unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
 from .normalize_quotes import normalize_quotes
 from .group_quoted_sentences import group_quoted_sentences, format_grouped_sentences
 from .strip_trailing_period_after_quote import StripTrailingPeriodAfterQuote
+from .ocr_artifact_fixer import OcrArtifactFixer
+from .dialog_formatter import DialogFormatter, format_dialog
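With these re-exports, both the class and the function spelling of the formatter are importable from the `dmo` namespace; a quick sketch based on the code in this release:

```python
from fast_sentence_segment.dmo import DialogFormatter, format_dialog

# DialogFormatter.process delegates to format_dialog, so both produce the same output
formatter = DialogFormatter()
assert formatter.process(['"Hello," he said.']) == format_dialog(['"Hello," he said.'])
```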
fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/dialog_formatter.py ADDED
@@ -0,0 +1,255 @@
+# -*- coding: UTF-8 -*-
+"""Dialog-aware paragraph formatter for segmented text.
+
+Formats segmented sentences into readable paragraphs with intelligent
+grouping of dialog and narrative text. Keeps multi-sentence quoted speech
+together and adds paragraph breaks between different speakers.
+
+Related GitHub Issue:
+    #10 - feat: Add --format flag for dialog-aware paragraph formatting
+    https://github.com/craigtrim/fast-sentence-segment/issues/10
+
+Example:
+    >>> from fast_sentence_segment.dmo.dialog_formatter import format_dialog
+    >>> sentences = [
+    ...     '"Hello," said Jack.',
+    ...     '"How are you today?',
+    ...     'I hope you are well."',
+    ...     '"I am fine," replied Mary.',
+    ... ]
+    >>> print(format_dialog(sentences))
+    "Hello," said Jack.
+
+    "How are you today?
+    I hope you are well."
+
+    "I am fine," replied Mary.
+"""
+
+import re
+from typing import List
+
+from fast_sentence_segment.core import BaseObject
+
+
+# Quote characters to track for dialog detection
+DOUBLE_QUOTES = '"“”'
+SINGLE_QUOTES = "'‘’"
+ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
+
+
+def _count_quotes(text: str) -> int:
+    """Count quote characters in text (both single and double)."""
+    return sum(1 for c in text if c in ALL_QUOTES)
+
+
+def _starts_with_quote(text: str) -> bool:
+    """Check if text starts with a quote character."""
+    text = text.lstrip()
+    return text and text[0] in ALL_QUOTES
+
+
+def _ends_with_closing_quote(text: str) -> bool:
+    """Check if text ends with a closing quote (possibly followed by punctuation)."""
+    text = text.rstrip()
+    if not text:
+        return False
+    # Check last few characters for closing quote pattern
+    # e.g., '" or "' or .' or ."
+    for i in range(min(3, len(text)), 0, -1):
+        if text[-i] in ALL_QUOTES:
+            return True
+    return False
+
+
+def _is_complete_quote(text: str) -> bool:
+    """Check if text contains a complete (balanced) quote.
+
+    A complete quote has an even number of quote characters,
+    meaning all opened quotes are closed.
+    """
+    quote_count = _count_quotes(text)
+    return quote_count > 0 and quote_count % 2 == 0
+
+
+def _sentence_is_dialog_continuation(sentence: str, in_quote: bool) -> bool:
+    """Determine if sentence continues an open quote.
+
+    Args:
+        sentence: The sentence to check.
+        in_quote: Whether we're currently inside an unclosed quote.
+
+    Returns:
+        True if this sentence is a continuation of open dialog.
+    """
+    if in_quote:
+        return True
+    return False
+
+
+def _get_quote_delta(sentence: str) -> int:
+    """Get the net change in quote depth for a sentence.
+
+    Returns:
+        Positive if more quotes opened than closed,
+        negative if more closed than opened,
+        zero if balanced.
+    """
+    return _count_quotes(sentence) % 2
+
+
+class DialogFormatter(BaseObject):
+    """Formats segmented sentences with dialog-aware paragraph grouping.
+
+    This formatter analyzes sentence structure to intelligently group
+    text into paragraphs:
+
+    - Multi-sentence quoted speech stays together (same speaker)
+    - Paragraph breaks added between different speakers
+    - Narrative text grouped appropriately
+    - Handles both single and double quote styles
+
+    Example:
+        >>> formatter = DialogFormatter()
+        >>> sentences = ['"Hello," he said.', 'The door opened.']
+        >>> print(formatter.process(sentences))
+        "Hello," he said.
+
+        The door opened.
+    """
+
+    def __init__(self):
+        """Initialize the DialogFormatter."""
+        BaseObject.__init__(self, __name__)
+
+    def process(self, sentences: List[str]) -> str:
+        """Format sentences into dialog-aware paragraphs.
+
+        Args:
+            sentences: List of segmented sentences.
+
+        Returns:
+            Formatted string with appropriate paragraph breaks.
+        """
+        return format_dialog(sentences)
+
+
+def _is_narrative(sentence: str) -> bool:
+    """Check if a sentence is narrative (no quotes at start)."""
+    return not _starts_with_quote(sentence)
+
+
+def _ends_dialog_turn(sentence: str) -> bool:
+    """Check if a sentence ends a dialog turn.
+
+    A dialog turn ends when the sentence ends with a closing quote
+    followed by optional punctuation or dialog tag ending.
+    """
+    sentence = sentence.rstrip()
+    if not sentence:
+        return False
+
+    # Pattern: ends with quote + optional punctuation
+    # e.g., ." or .' or "' or '" or ," he said. etc.
+    # Check if there's a closing quote near the end
+    last_chars = sentence[-10:] if len(sentence) >= 10 else sentence
+
+    # Count quotes in last part - if odd from end, likely closes
+    for i, c in enumerate(reversed(last_chars)):
+        if c in ALL_QUOTES:
+            # Found a quote - check if it's likely a closer
+            # A closer is typically followed by punctuation or end
+            remaining = last_chars[len(last_chars) - i:]
+            if not remaining or all(ch in '.,!?;: ' for ch in remaining):
+                return True
+            # Also handle dialog tags: ,' he said.
+            if remaining and remaining[0] in '.,!?' and 'said' not in sentence.lower()[-20:]:
+                return True
+            break
+
+    return False
+
+
+def format_dialog(sentences: List[str]) -> str:
+    """Format sentences into dialog-aware paragraphs.
+
+    Groups sentences intelligently based on dialog structure:
+    - Sentences within an unclosed quote stay grouped
+    - Complete quoted sentences become their own paragraphs
+    - Narrative text is grouped together
+    - Paragraph breaks separate different speakers/turns
+
+    Args:
+        sentences: List of segmented sentences.
+
+    Returns:
+        Formatted string with paragraph breaks (double newlines)
+        between logical groups and single newlines within groups.
+
+    Example:
+        >>> sentences = [
+        ...     '"My dear sir," cried the man.',
+        ...     '"You had every reason to be carried away."',
+        ... ]
+        >>> print(format_dialog(sentences))
+        "My dear sir," cried the man.
+
+        "You had every reason to be carried away."
+    """
+    if not sentences:
+        return ""
+
+    paragraphs: List[List[str]] = []
+    current_para: List[str] = []
+    in_quote = False  # Track if we're inside an unclosed quote
+
+    for i, sentence in enumerate(sentences):
+        sentence = sentence.strip()
+        if not sentence:
+            continue
+
+        quote_count = _count_quotes(sentence)
+        starts_quote = _starts_with_quote(sentence)
+        is_narrative = _is_narrative(sentence)
+        is_complete = _is_complete_quote(sentence)
+
+        # Get info about previous sentence
+        prev_sentence = current_para[-1] if current_para else ""
+        prev_was_narrative = _is_narrative(prev_sentence) if prev_sentence else False
+        prev_was_complete = _is_complete_quote(prev_sentence) if prev_sentence else False
+
+        # Determine if this sentence starts a new paragraph
+        should_start_new_para = False
+
+        if not current_para:
+            # First sentence always starts a new paragraph
+            should_start_new_para = True
+        elif in_quote:
+            # Inside an open quote - continue current paragraph
+            should_start_new_para = False
+        elif starts_quote:
+            # New quote starting - always new paragraph
+            should_start_new_para = True
+        elif is_narrative and prev_was_complete and not prev_was_narrative:
+            # Narrative after complete dialog - new paragraph
+            should_start_new_para = True
+        elif is_narrative and not prev_was_narrative and _ends_dialog_turn(prev_sentence):
+            # Narrative after dialog that ends a turn - new paragraph
+            should_start_new_para = True
+
+        if should_start_new_para and current_para:
+            paragraphs.append(current_para)
+            current_para = []
+
+        current_para.append(sentence)
+
+        # Update quote tracking
+        if quote_count % 2 == 1:
+            in_quote = not in_quote
+
+    # Don't forget the last paragraph
+    if current_para:
+        paragraphs.append(current_para)
+
+    # Format: join sentences in paragraph with newline, paragraphs with double newline
+    return "\n\n".join("\n".join(para) for para in paragraphs)
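The grouping logic hinges on quote parity rather than a full parser: a sentence with an odd number of quote characters toggles the formatter's `in_quote` state, so everything inside an unclosed quote stays in the current paragraph. A standalone sketch of that rule (not part of the package):

```python
def toggles_quote_state(sentence: str, quotes: str = "\"'") -> bool:
    """True if this sentence flips the open-quote state (odd quote count)."""
    return sum(1 for c in sentence if c in quotes) % 2 == 1

assert toggles_quote_state('"How are you today?')        # opens a quote
assert toggles_quote_state('I hope you are well."')      # closes it
assert not toggles_quote_state('"I am fine," he said.')  # balanced, no change
```

One consequence of counting both quote styles together is that an apostrophe in a contraction (e.g. "don't") contributes to the parity count, which can affect grouping on unquoted narrative text.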
fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/ocr_artifact_fixer.py ADDED
@@ -0,0 +1,70 @@
+# -*- coding: UTF-8 -*-
+"""Fix common OCR/text extraction artifacts.
+
+Ebook text files often contain artifacts where common word pairs
+are incorrectly joined. This module fixes known patterns.
+
+Related GitHub Issue:
+    #9 - Fix common OCR/cleaning artifacts (Iam, witha)
+    https://github.com/craigtrim/fast-sentence-segment/issues/9
+"""
+
+from fast_sentence_segment.core import BaseObject
+
+# Known OCR artifact patterns: (pattern, replacement)
+# All patterns include surrounding spaces to ensure exact word boundaries
+_OCR_ARTIFACTS = [
+    (" Iam ", " I am "),
+    (" Ihave ", " I have "),
+    (" ihave ", " I have "),
+    (" Ithink ", " I think "),
+    (" ithink ", " I think "),
+    (" anda ", " and a "),
+    (" witha ", " with a "),
+    (" sucha ", " such a "),
+    (" aliquid ", " a liquid "),
+]
+
+
+class OcrArtifactFixer(BaseObject):
+    """Fix common OCR/text extraction artifacts.
+
+    Detects and corrects known patterns where words are incorrectly
+    joined during OCR or text extraction processes.
+
+    Related GitHub Issue:
+        #9 - Fix common OCR/cleaning artifacts (Iam, witha)
+        https://github.com/craigtrim/fast-sentence-segment/issues/9
+    """
+
+    def __init__(self):
+        """Change Log
+
+        Created:
+            3-Feb-2026
+            craigtrim@gmail.com
+            *   fix common OCR/cleaning artifacts
+                https://github.com/craigtrim/fast-sentence-segment/issues/9
+        """
+        BaseObject.__init__(self, __name__)
+
+    @staticmethod
+    def process(input_text: str) -> str:
+        """Fix known OCR artifact patterns.
+
+        Args:
+            input_text: Text that may contain OCR artifacts.
+
+        Returns:
+            Text with known OCR artifacts corrected.
+
+        Examples:
+            >>> OcrArtifactFixer.process("Jack, Iam so happy")
+            'Jack, I am so happy'
+            >>> OcrArtifactFixer.process("horizon witha hint")
+            'horizon with a hint'
+        """
+        for pattern, replacement in _OCR_ARTIFACTS:
+            if pattern in input_text:
+                input_text = input_text.replace(pattern, replacement)
+        return input_text
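A usage sketch for the fixer above; because every pattern is space-delimited, an artifact at the very start or end of the string falls outside the patterns and is left unchanged:

```python
from fast_sentence_segment.dmo import OcrArtifactFixer

print(OcrArtifactFixer.process("Jack, Iam so happy"))  # Jack, I am so happy
print(OcrArtifactFixer.process("horizon witha hint"))  # horizon with a hint
print(OcrArtifactFixer.process("Iam at the start"))    # unchanged: no leading space
```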
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/fast_sentence_segment/svc/perform_sentence_segmentation.py RENAMED
@@ -55,6 +55,7 @@ from fast_sentence_segment.dmo import SpacyDocSegmenter
 from fast_sentence_segment.dmo import PostProcessStructure
 from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote
 from fast_sentence_segment.dmo import Dehyphenator
+from fast_sentence_segment.dmo import OcrArtifactFixer
 
 
 class PerformSentenceSegmentation(BaseObject):
@@ -84,6 +85,7 @@ class PerformSentenceSegmentation(BaseObject):
         self.__nlp = _load_spacy_model("en_core_web_sm")
 
         self._dehyphenate = Dehyphenator.process
+        self._fix_ocr_artifacts = OcrArtifactFixer.process
         self._newlines_to_periods = NewlinesToPeriods.process
         self._normalize_numbered_lists = NumberedListNormalizer().process
         self._normalize_ellipses = EllipsisNormalizer().process
@@ -138,6 +140,9 @@ class PerformSentenceSegmentation(BaseObject):
         # Must happen before newlines are converted to periods
         input_text = self._dehyphenate(input_text)
 
+        # Fix common OCR artifacts (issue #9)
+        input_text = self._fix_ocr_artifacts(input_text)
+
         input_text = self._normalize_numbered_lists(input_text)
         input_text = self._normalize_ellipses(input_text)
 
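Condensing the three hunks above, the pre-processing chain now reads roughly as follows (attribute names as they appear in the diff; surrounding steps elided):

```python
# inside PerformSentenceSegmentation -- order matters:
input_text = self._dehyphenate(input_text)        # must precede newline-to-period conversion
input_text = self._fix_ocr_artifacts(input_text)  # new in 1.4.5 (issue #9)
input_text = self._normalize_numbered_lists(input_text)
input_text = self._normalize_ellipses(input_text)
```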
{fast_sentence_segment-1.4.4 → fast_sentence_segment-1.4.5}/pyproject.toml RENAMED
@@ -11,7 +11,7 @@ description = "Fast and Efficient Sentence Segmentation"
 license = "MIT"
 name = "fast-sentence-segment"
 readme = "README.md"
-version = "1.4.4"
+version = "1.4.5"
 
 keywords = ["nlp", "text", "preprocess", "segment"]
 repository = "https://github.com/craigtrim/fast-sentence-segment"
fast_sentence_segment-1.4.5/setup.py ADDED
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+from setuptools import setup
+
+packages = \
+['fast_sentence_segment',
+ 'fast_sentence_segment.bp',
+ 'fast_sentence_segment.core',
+ 'fast_sentence_segment.dmo',
+ 'fast_sentence_segment.svc']
+
+package_data = \
+{'': ['*']}
+
+install_requires = \
+['spacy>=3.8.0,<4.0.0']
+
+entry_points = \
+{'console_scripts': ['segment = fast_sentence_segment.cli:main',
+                     'segment-file = fast_sentence_segment.cli:file_main']}
+
+setup_kwargs = {
+    'name': 'fast-sentence-segment',
+    'version': '1.4.5',
+    'description': 'Fast and Efficient Sentence Segmentation',
+    'long_description': '# Fast Sentence Segmentation\n\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)\n[](https://opensource.org/licenses/MIT)\n[](https://github.com/astral-sh/ruff)\n[](https://pepy.tech/project/fast-sentence-segment)\n[](https://pepy.tech/project/fast-sentence-segment)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\n```bash\n# Inline text\nsegment "Gandalf paused... You shall not pass! The Balrog roared."\n\n# Pipe from stdin\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n\n# Numbered output\nsegment -n -f silmarillion.txt\n\n# File-to-file (one sentence per line)\nsegment-file --input-file book.txt --output-file sentences.txt\n\n# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)\nsegment-file --input-file book.txt --output-file sentences.txt --unwrap\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Commands\n\n| Command | Description |\n|---------|-------------|\n| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |\n| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
+    'author': 'Craig Trim',
+    'author_email': 'craigtrim@gmail.com',
+    'maintainer': 'Craig Trim',
+    'maintainer_email': 'craigtrim@gmail.com',
+    'url': 'https://github.com/craigtrim/fast-sentence-segment',
+    'packages': packages,
+    'package_data': package_data,
+    'install_requires': install_requires,
+    'entry_points': entry_points,
+    'python_requires': '>=3.9,<3.13',
+}
+
+
+setup(**setup_kwargs)
fast_sentence_segment-1.4.4/fast_sentence_segment/cli.py DELETED
@@ -1,194 +0,0 @@
-# -*- coding: UTF-8 -*-
-"""CLI for fast-sentence-segment."""
-
-import argparse
-import itertools
-import logging
-import os
-import sys
-import threading
-import time
-
-from fast_sentence_segment import segment_text
-from fast_sentence_segment.dmo.group_quoted_sentences import format_grouped_sentences
-
-logging.disable(logging.CRITICAL)
-
-# ANSI color codes
-BOLD = "\033[1m"
-DIM = "\033[2m"
-CYAN = "\033[36m"
-GREEN = "\033[32m"
-YELLOW = "\033[33m"
-RESET = "\033[0m"
-
-
-class Spinner:
-    """Animated spinner for long-running operations."""
-
-    def __init__(self, message: str):
-        self.message = message
-        self.running = False
-        self.thread = None
-        self.frames = itertools.cycle(["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"])
-
-    def _spin(self):
-        while self.running:
-            frame = next(self.frames)
-            print(f"\r {YELLOW}{frame}{RESET} {self.message}", end="", flush=True)
-            time.sleep(0.08)
-
-    def __enter__(self):
-        self.running = True
-        self.thread = threading.Thread(target=self._spin)
-        self.thread.start()
-        return self
-
-    def __exit__(self, *args):
-        self.running = False
-        if self.thread:
-            self.thread.join()
-        print(f"\r {' ' * (len(self.message) + 4)}\r", end="", flush=True)
-
-
-def _header(title: str):
-    print(f"\n{BOLD}{CYAN}{title}{RESET}")
-    print(f"{DIM}{'─' * 40}{RESET}")
-
-
-def _param(label: str, value: str):
-    print(f" {DIM}{label}:{RESET} {value}")
-
-
-def _done(msg: str):
-    print(f"\n {GREEN}✓{RESET} {msg}")
-
-
-def _file_size(path: str) -> str:
-    size = os.path.getsize(path)
-    if size < 1024:
-        return f"{size} B"
-    elif size < 1024 * 1024:
-        return f"{size / 1024:.1f} KB"
-    return f"{size / (1024 * 1024):.1f} MB"
-
-
-def main():
-    parser = argparse.ArgumentParser(
-        prog="segment",
-        description="Segment text into sentences",
-    )
-    parser.add_argument(
-        "text",
-        nargs="?",
-        help="Text to segment (or use stdin)",
-    )
-    parser.add_argument(
-        "-f", "--file",
-        help="Read text from file",
-    )
-    parser.add_argument(
-        "-n", "--numbered",
-        action="store_true",
-        help="Number output lines",
-    )
-    parser.add_argument(
-        "--unwrap",
-        action="store_true",
-        help="Unwrap hard-wrapped lines and dehyphenate split words",
-    )
-    args = parser.parse_args()
-
-    # Get input text
-    if args.file:
-        with open(args.file, "r", encoding="utf-8") as f:
-            text = f.read()
-    elif args.text:
-        text = args.text
-    elif not sys.stdin.isatty():
-        text = sys.stdin.read()
-    else:
-        parser.print_help()
-        sys.exit(1)
-
-    # Segment and output
-    sentences = segment_text(text.strip(), flatten=True, unwrap=args.unwrap)
-    for i, sentence in enumerate(sentences, 1):
-        if args.numbered:
-            print(f"{i}. {sentence}")
-        else:
-            print(sentence)
-
-
-def file_main():
-    parser = argparse.ArgumentParser(
-        prog="segment-file",
-        description="Segment a text file into sentences and write to an output file",
-    )
-    parser.add_argument(
-        "--input-file", required=True,
-        help="Path to input text file",
-    )
-    parser.add_argument(
-        "--output-file", required=True,
-        help="Path to output file",
-    )
-    parser.add_argument(
-        "--unwrap", action="store_true",
-        help="Unwrap hard-wrapped lines (e.g., Project Gutenberg e-texts)",
-    )
-    parser.add_argument(
-        "--no-normalize-quotes", action="store_true",
-        help="Disable unicode quote normalization to ASCII equivalents",
-    )
-    args = parser.parse_args()
-
-    # Echo command immediately
-    _header("segment-file")
-    print(f" {DIM}Segmenting text file into sentences{RESET}")
-    print()
-
-    # Show configuration
-    _param("Input", args.input_file)
-    _param("Output", args.output_file)
-    _param("Size", _file_size(args.input_file))
-    _param("Unwrap", "enabled" if args.unwrap else "disabled")
-    _param("Normalize quotes", "disabled" if args.no_normalize_quotes else "enabled")
-    print()
-
-    # Step 1: Read file
-    print(f" {YELLOW}→{RESET} Reading input file...")
-    with open(args.input_file, "r", encoding="utf-8") as f:
-        text = f.read()
-    print(f" {GREEN}✓{RESET} Read {len(text):,} characters")
-
-    # Step 2: Segment text
-    print(f" {YELLOW}→{RESET} Segmenting text...", end="", flush=True)
-    start = time.perf_counter()
-    normalize = not args.no_normalize_quotes
-    sentences = segment_text(
-        text.strip(), flatten=True, unwrap=args.unwrap, normalize=normalize,
-    )
-    elapsed = time.perf_counter() - start
-    print(f"\r {GREEN}✓{RESET} Segmented into {len(sentences):,} sentences ({elapsed:.2f}s)")
-
-    # Step 3: Write output
-    total = len(sentences)
-    with open(args.output_file, "w", encoding="utf-8") as f:
-        if args.unwrap:
-            f.write(format_grouped_sentences(sentences) + "\n")
-            print(f" {GREEN}✓{RESET} Written {total:,} sentences to {args.output_file}")
-        else:
-            for i, sentence in enumerate(sentences, 1):
-                f.write(sentence + "\n")
-                if i % 500 == 0 or i == total:
-                    pct = (i / total) * 100
-                    print(f"\r {YELLOW}→{RESET} Writing... {pct:.0f}% ({i:,}/{total:,})", end="", flush=True)
-            print(f"\r {GREEN}✓{RESET} Written {total:,} sentences to {args.output_file} ")
-
-    print(f"\n {GREEN}Done!{RESET}")
-    print()
-
-
-if __name__ == "__main__":
-    main()
fast_sentence_segment-1.4.4/setup.py DELETED
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-from setuptools import setup
-
-packages = \
-['fast_sentence_segment',
- 'fast_sentence_segment.bp',
- 'fast_sentence_segment.core',
- 'fast_sentence_segment.dmo',
- 'fast_sentence_segment.svc']
-
-package_data = \
-{'': ['*']}
-
-install_requires = \
-['spacy>=3.8.0,<4.0.0']
-
-entry_points = \
-{'console_scripts': ['segment = fast_sentence_segment.cli:main',
-                     'segment-file = fast_sentence_segment.cli:file_main']}
-
-setup_kwargs = {
-    'name': 'fast-sentence-segment',
-    'version': '1.4.4',
-    'description': 'Fast and Efficient Sentence Segmentation',
-    'long_description': '# Fast Sentence Segmentation\n\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://pypi.org/project/fast-sentence-segment/)\n[](https://github.com/craigtrim/fast-sentence-segment/actions/workflows/ci.yml)\n[](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)\n[](https://opensource.org/licenses/MIT)\n[](https://github.com/astral-sh/ruff)\n[](https://pepy.tech/project/fast-sentence-segment)\n[](https://pepy.tech/project/fast-sentence-segment)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\n```bash\n# Inline text\nsegment "Gandalf paused... You shall not pass! The Balrog roared."\n\n# Pipe from stdin\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n\n# Numbered output\nsegment -n -f silmarillion.txt\n\n# File-to-file (one sentence per line)\nsegment-file --input-file book.txt --output-file sentences.txt\n\n# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)\nsegment-file --input-file book.txt --output-file sentences.txt --unwrap\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Commands\n\n| Command | Description |\n|---------|-------------|\n| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |\n| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
-    'author': 'Craig Trim',
-    'author_email': 'craigtrim@gmail.com',
-    'maintainer': 'Craig Trim',
-    'maintainer_email': 'craigtrim@gmail.com',
-    'url': 'https://github.com/craigtrim/fast-sentence-segment',
-    'packages': packages,
-    'package_data': package_data,
-    'install_requires': install_requires,
-    'entry_points': entry_points,
-    'python_requires': '>=3.9,<3.13',
-}
-
-
-setup(**setup_kwargs)