fast-sentence-segment 1.4.5.tar.gz → 1.5.3.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/PKG-INFO +7 -4
  2. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/README.md +6 -3
  3. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/cli.py +24 -16
  4. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/dialog_formatter.py +121 -5
  5. fast_sentence_segment-1.5.3/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py +319 -0
  6. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/svc/perform_sentence_segmentation.py +14 -13
  7. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/pyproject.toml +1 -1
  8. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/setup.py +2 -2
  9. fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py +0 -75
  10. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/LICENSE +0 -0
  11. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/__init__.py +0 -0
  12. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/bp/__init__.py +0 -0
  13. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/bp/segmenter.py +0 -0
  14. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/core/__init__.py +0 -0
  15. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/core/base_object.py +0 -0
  16. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/core/stopwatch.py +0 -0
  17. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/__init__.py +0 -0
  18. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/abbreviation_merger.py +0 -0
  19. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/abbreviation_splitter.py +0 -0
  20. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/abbreviations.py +0 -0
  21. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/bullet_point_cleaner.py +0 -0
  22. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/dehyphenator.py +0 -0
  23. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/ellipsis_normalizer.py +0 -0
  24. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/group_quoted_sentences.py +0 -0
  25. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/newlines_to_periods.py +0 -0
  26. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/normalize_quotes.py +0 -0
  27. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/numbered_list_normalizer.py +0 -0
  28. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/ocr_artifact_fixer.py +0 -0
  29. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/post_process_sentences.py +0 -0
  30. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/question_exclamation_splitter.py +0 -0
  31. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/spacy_doc_segmenter.py +0 -0
  32. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/strip_trailing_period_after_quote.py +0 -0
  33. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/dmo/title_name_merger.py +0 -0
  34. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/svc/__init__.py +0 -0
  35. {fast_sentence_segment-1.4.5 → fast_sentence_segment-1.5.3}/fast_sentence_segment/svc/perform_paragraph_segmentation.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: fast-sentence-segment
- Version: 1.4.5
+ Version: 1.5.3
  Summary: Fast and Efficient Sentence Segmentation
  Home-page: https://github.com/craigtrim/fast-sentence-segment
  License: MIT
@@ -164,21 +164,24 @@ segment-file --input-file book.txt --output-file sentences.txt

  # Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
  segment-file --input-file book.txt --output-file sentences.txt --unwrap
+
+ # Dialog-aware formatting (implies --unwrap)
+ segment -f book.txt --format
  ```

  ## API Reference

  | Function | Parameters | Returns | Description |
  |----------|------------|---------|-------------|
- | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
+ | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False`, `format: str = None` | `list` or `str` | Main entry point for segmentation. Use `format="dialog"` for dialog-aware output. |
  | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |

  ### CLI Commands

  | Command | Description |
  |---------|-------------|
- | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
- | `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
+ | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output, `--format` for dialog-aware paragraph grouping. |
+ | `segment-file --input-file IN --output-file OUT [--unwrap] [--format]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts, `--format` for dialog-aware formatting. |

  ## Why Nested Lists?

README.md
@@ -133,21 +133,24 @@ segment-file --input-file book.txt --output-file sentences.txt

  # Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
  segment-file --input-file book.txt --output-file sentences.txt --unwrap
+
+ # Dialog-aware formatting (implies --unwrap)
+ segment -f book.txt --format
  ```

  ## API Reference

  | Function | Parameters | Returns | Description |
  |----------|------------|---------|-------------|
- | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
+ | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False`, `format: str = None` | `list` or `str` | Main entry point for segmentation. Use `format="dialog"` for dialog-aware output. |
  | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |

  ### CLI Commands

  | Command | Description |
  |---------|-------------|
- | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
- | `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
+ | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output, `--format` for dialog-aware paragraph grouping. |
+ | `segment-file --input-file IN --output-file OUT [--unwrap] [--format]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts, `--format` for dialog-aware formatting. |

  ## Why Nested Lists?

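Taken together, the updated README table and the new CLI flag map onto a call like the following (a minimal sketch based on the documented 1.5.3 API; with `format="dialog"` the return value is a single formatted string rather than a list):

```python
from fast_sentence_segment import segment_text

text = open("book.txt", encoding="utf-8").read()

# Unchanged: nested lists of sentences, grouped by paragraph
paragraphs = segment_text(text)

# New in 1.5.x: dialog-aware output as one formatted string
formatted = segment_text(text, flatten=True, unwrap=True, format="dialog")
print(formatted)
```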
fast_sentence_segment/cli.py
@@ -100,10 +100,13 @@ def main():
  parser.add_argument(
  "--format",
  action="store_true",
- help="Format output with dialog-aware paragraph grouping",
+ help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
  )
  args = parser.parse_args()

+ # --format implies --unwrap
+ unwrap = args.unwrap or args.format
+
  # Get input text
  if args.file:
  with open(args.file, "r", encoding="utf-8") as f:
@@ -118,7 +121,7 @@ def main():

  # Segment and output
  result = segment_text(
- text.strip(), flatten=True, unwrap=args.unwrap,
+ text.strip(), flatten=True, unwrap=unwrap,
  format="dialog" if args.format else None
  )

@@ -170,7 +173,6 @@ def _process_single_file(
  # Step 3: Write output
  if format:
  # Format mode returns a string
- sentence_count = result.count("\n") + 1 if result else 0
  print(f"\r {GREEN}✓{RESET} Segmented text ({elapsed:.2f}s)")
  with open(output_file, "w", encoding="utf-8") as f:
  f.write(result + "\n")
@@ -221,10 +223,13 @@ def file_main():
  parser.add_argument(
  "--format",
  action="store_true",
- help="Format output with dialog-aware paragraph grouping",
+ help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
  )
  args = parser.parse_args()

+ # --format implies --unwrap
+ unwrap = args.unwrap or args.format
+
  # Validate arguments
  if not args.input_file and not args.input_dir:
  print(f" {YELLOW}Error:{RESET} Either --input-file or --input-dir is required")
@@ -240,36 +245,37 @@ def file_main():

  # Process directory
  if args.input_dir:
- if not os.path.isdir(args.input_dir):
- print(f" {YELLOW}Error:{RESET} Directory not found: {args.input_dir}")
+ input_dir = os.path.expanduser(args.input_dir)
+ if not os.path.isdir(input_dir):
+ print(f" {YELLOW}Error:{RESET} Directory not found: {input_dir}")
  sys.exit(1)

  # Find all .txt files
  txt_files = sorted([
- f for f in os.listdir(args.input_dir)
+ f for f in os.listdir(input_dir)
  if f.endswith(".txt") and not f.endswith("-clean.txt")
  ])

  if not txt_files:
- print(f" {YELLOW}Error:{RESET} No .txt files found in {args.input_dir}")
+ print(f" {YELLOW}Error:{RESET} No .txt files found in {input_dir}")
  sys.exit(1)

  _header("segment-file (batch)")
  print(f" {DIM}Processing {len(txt_files)} files in directory{RESET}")
  print()
- _param("Directory", args.input_dir)
+ _param("Directory", input_dir)
  _param("Files", str(len(txt_files)))
- _param("Unwrap", "enabled" if args.unwrap else "disabled")
+ _param("Unwrap", "enabled" if unwrap else "disabled")
  _param("Normalize quotes", "disabled" if not normalize else "enabled")
  _param("Format", "dialog" if args.format else "default (one sentence per line)")
  print()

  format_value = "dialog" if args.format else None
  for i, filename in enumerate(txt_files, 1):
- input_path = os.path.join(args.input_dir, filename)
+ input_path = os.path.join(input_dir, filename)
  output_path = _generate_output_path(input_path)
  print(f" {BOLD}[{i}/{len(txt_files)}]{RESET} {filename}")
- _process_single_file(input_path, output_path, args.unwrap, normalize, format_value)
+ _process_single_file(input_path, output_path, unwrap, normalize, format_value)
  print()

  print(f" {GREEN}Done! Processed {len(txt_files)} files.{RESET}")
@@ -277,18 +283,20 @@ def file_main():
  return

  # Process single file
- if not os.path.isfile(args.input_file):
- print(f" {YELLOW}Error:{RESET} File not found: {args.input_file}")
+ input_file = os.path.expanduser(args.input_file)
+ if not os.path.isfile(input_file):
+ print(f" {YELLOW}Error:{RESET} File not found: {input_file}")
  sys.exit(1)

- output_file = args.output_file or _generate_output_path(args.input_file)
+ output_file = args.output_file or _generate_output_path(input_file)
+ output_file = os.path.expanduser(output_file)

  _header("segment-file")
  print(f" {DIM}Segmenting text file into sentences{RESET}")
  print()

  format_value = "dialog" if args.format else None
- _process_single_file(args.input_file, output_file, args.unwrap, normalize, format_value)
+ _process_single_file(input_file, output_file, unwrap, normalize, format_value)

  print(f"\n {GREEN}Done!{RESET}")
  print()
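Condensed, the cli.py changes above amount to the following flag resolution (a simplified sketch; argparse setup and file I/O omitted):

```python
# Sketch of the new flag interplay in main()/file_main(), condensed from the hunks above
unwrap = args.unwrap or args.format              # --format now implies --unwrap
format_value = "dialog" if args.format else None

result = segment_text(
    text.strip(), flatten=True, unwrap=unwrap,
    format=format_value,
)
# With --format, result is a single formatted string; otherwise a list of sentences.
```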
fast_sentence_segment/dmo/dialog_formatter.py
@@ -37,16 +37,129 @@ DOUBLE_QUOTES = '""\""'
  SINGLE_QUOTES = "'''"
  ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES

+ # Known elision words (case-insensitive)
+ # These are specific words where an apostrophe replaces omitted letters at the start
+ KNOWN_ELISION_WORDS = {
+ # 'it' elisions
+ "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+ # Archaic oaths (very specific words)
+ "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+ # Common elisions with a-/be- prefix dropped
+ "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+ "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+ "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+ "yond", "cept", "scaped", "specially", "splain", "spect",
+ # Cockney/dialect h-dropping (common words and their forms)
+ "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+ "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+ "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+ "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+ "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+ "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+ "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+ "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+ # Cockney th-dropping
+ "at", "ese", "ey", "ose", "ough", "rough",
+ # Other prefix elisions
+ "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+ "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+ "thout", "til", "till", "un",
+ # Modern colloquial
+ "kay", "sup", "dya", "ja", "yer", "copter",
+ # Musical
+ "cello",
+ # Year abbreviations handled separately (digits)
+ # 'member (remember)
+ "member",
+ }
+
+
+ def _is_elision(text: str, pos: int) -> bool:
+ """Check if apostrophe at position is a word-initial elision.
+
+ Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+ Dialog quotes like 'Hello!' surround quoted speech.
+
+ Args:
+ text: The full text.
+ pos: Position of the apostrophe character.
+
+ Returns:
+ True if this appears to be an elision, not a dialog quote.
+ """
+ if pos >= len(text) - 1:
+ return False
+
+ next_char = text[pos + 1]
+
+ # If followed by a digit, it's a year abbreviation ('99, '20s)
+ if next_char.isdigit():
+ return True
+
+ # Extract the word after the apostrophe (letters only, up to non-letter)
+ word_start = pos + 1
+ word_end = word_start
+ while word_end < len(text) and text[word_end].isalpha():
+ word_end += 1
+
+ if word_end == word_start:
+ return False # No letters after apostrophe
+
+ word = text[word_start:word_end].lower()
+
+ # Check if it's a known elision word
+ return word in KNOWN_ELISION_WORDS
+

  def _count_quotes(text: str) -> int:
- """Count quote characters in text (both single and double)."""
- return sum(1 for c in text if c in ALL_QUOTES)
+ """Count actual quote characters in text, excluding apostrophes.
+
+ Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+ and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+ because they don't indicate dialog boundaries.
+
+ A quote character is considered an apostrophe (not a quote) if:
+ - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+ - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+ """
+ count = 0
+ for i, c in enumerate(text):
+ if c not in ALL_QUOTES:
+ continue
+
+ # Check if this is a mid-word apostrophe (contraction/possessive)
+ prev_is_letter = i > 0 and text[i - 1].isalpha()
+ next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+ if prev_is_letter and next_is_letter:
+ # Mid-word apostrophe (contraction/possessive) - don't count
+ continue
+
+ # Check if this is a word-initial elision
+ prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+ if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+ # Word-initial elision - don't count
+ continue
+
+ count += 1
+ return count


  def _starts_with_quote(text: str) -> bool:
- """Check if text starts with a quote character."""
+ """Check if text starts with a dialog quote (not an elision).
+
+ Returns True only for actual dialog openings, not for elisions
+ like 'tis, 'twas, 'cello, etc.
+ """
  text = text.lstrip()
- return text and text[0] in ALL_QUOTES
+ if not text or text[0] not in ALL_QUOTES:
+ return False
+
+ # Check if this is an elision rather than a dialog quote
+ if text[0] in SINGLE_QUOTES and _is_elision(text, 0):
+ return False
+
+ return True


  def _ends_with_closing_quote(text: str) -> bool:
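To make the new apostrophe handling concrete, here are a few worked examples of what `_count_quotes` should return under the rules added above (illustrative only, not taken from the package's tests):

```python
# Illustrative expectations for the new _count_quotes rules (sketch, not package tests)
_count_quotes('"Hello," she said.')   # 2 - opening and closing double quotes
_count_quotes("don't and Jack's")     # 0 - mid-word apostrophes are skipped
_count_quotes("'Twas the night")      # 0 - word-initial elision, not a dialog quote
_count_quotes("'Run!' he cried.")     # 2 - single-quoted dialog is still counted
```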
@@ -211,7 +324,6 @@ def format_dialog(sentences: List[str]) -> str:
  quote_count = _count_quotes(sentence)
  starts_quote = _starts_with_quote(sentence)
  is_narrative = _is_narrative(sentence)
- is_complete = _is_complete_quote(sentence)

  # Get info about previous sentence
  prev_sentence = current_para[-1] if current_para else ""
@@ -230,6 +342,10 @@ def format_dialog(sentences: List[str]) -> str:
  elif starts_quote:
  # New quote starting - always new paragraph
  should_start_new_para = True
+ elif is_narrative and prev_was_narrative:
+ # Consecutive narrative sentences - each gets its own paragraph
+ # This gives clean ebook formatting with paragraph breaks
+ should_start_new_para = True
  elif is_narrative and prev_was_complete and not prev_was_narrative:
  # Narrative after complete dialog - new paragraph
  should_start_new_para = True
fast_sentence_segment-1.5.3/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py (new file)
@@ -0,0 +1,319 @@
+ # -*- coding: UTF-8 -*-
+ """Unwrap hard-wrapped text (e.g., Project Gutenberg e-texts).
+
+ Joins lines within paragraphs into continuous strings while
+ preserving paragraph boundaries (blank lines). Also dehyphenates
+ words that were split across lines for typesetting.
+
+ Related GitHub Issues:
+ #8 - Add dehyphenation support for words split across lines
+ https://github.com/craigtrim/fast-sentence-segment/issues/8
+
+ #12 - Unwrap fails when hard-wrap splits across blank line
+ https://github.com/craigtrim/fast-sentence-segment/issues/12
+
+ #14 - Make unwrap quote-aware to join text across blank lines inside open quotes
+ https://github.com/craigtrim/fast-sentence-segment/issues/14
+ """
+
+ import re
+
+ # Quote characters to track for dialog detection
+ # Includes straight and curly/smart quotes
+ DOUBLE_QUOTES = '"""\u201c\u201d' # " " " " "
+ SINGLE_QUOTES = "'''\u2018\u2019" # ' ' ' ' '
+ ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
+
+ # Known elision words where an apostrophe replaces omitted letters at word start
+ # These should NOT be counted as dialog quotes
+ KNOWN_ELISION_WORDS = {
+ # 'it' elisions
+ "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+ # Archaic oaths
+ "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+ # Common elisions with a-/be- prefix dropped
+ "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+ "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+ "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+ "yond", "cept", "scaped", "specially", "splain", "spect",
+ # Cockney/dialect h-dropping
+ "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+ "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+ "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+ "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+ "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+ "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+ "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+ "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+ # Cockney th-dropping
+ "ese", "ey", "ose", "ough", "rough",
+ # Other prefix elisions
+ "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+ "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+ "thout", "til", "till", "un",
+ # Modern colloquial
+ "kay", "sup", "dya", "ja", "yer", "copter",
+ # Musical
+ "cello",
+ # 'member (remember)
+ "member",
+ }
+
+ # Pattern to match hyphenated word breaks at end of line:
+ # - A single hyphen (not -- em-dash)
+ # - Followed by newline and optional whitespace
+ # - Followed by a lowercase letter (continuation of word)
+ _HYPHEN_LINE_BREAK_PATTERN = re.compile(r'(?<!-)-\n\s*([a-z])')
+
+
+ def _is_elision(text: str, pos: int) -> bool:
+ """Check if apostrophe at position is a word-initial elision.
+
+ Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+ Dialog quotes like 'Hello!' surround quoted speech.
+
+ Args:
+ text: The full text.
+ pos: Position of the apostrophe character.
+
+ Returns:
+ True if this appears to be an elision, not a dialog quote.
+ """
+ if pos >= len(text) - 1:
+ return False
+
+ next_char = text[pos + 1]
+
+ # If followed by a digit, it's a year abbreviation ('99, '20s)
+ if next_char.isdigit():
+ return True
+
+ # Extract the word after the apostrophe (letters only, up to non-letter)
+ word_start = pos + 1
+ word_end = word_start
+ while word_end < len(text) and text[word_end].isalpha():
+ word_end += 1
+
+ if word_end == word_start:
+ return False # No letters after apostrophe
+
+ word = text[word_start:word_end].lower()
+
+ # Check if it's a known elision word
+ return word in KNOWN_ELISION_WORDS
+
+
+ def _count_quotes(text: str) -> int:
+ """Count actual quote characters in text, excluding apostrophes.
+
+ Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+ and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+ because they don't indicate dialog boundaries.
+
+ A quote character is considered an apostrophe (not a quote) if:
+ - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+ - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+
+ Args:
+ text: The text to count quotes in.
+
+ Returns:
+ Number of actual quote characters (not apostrophes).
+ """
+ count = 0
+ for i, c in enumerate(text):
+ if c not in ALL_QUOTES:
+ continue
+
+ # Check if this is a mid-word apostrophe (contraction/possessive)
+ prev_is_letter = i > 0 and text[i - 1].isalpha()
+ next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+ if prev_is_letter and next_is_letter:
+ # Mid-word apostrophe (contraction/possessive) - don't count
+ continue
+
+ # Check if this is a word-initial elision
+ prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+ if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+ # Word-initial elision - don't count
+ continue
+
+ count += 1
+ return count
+
+
+ def _is_inside_open_quote(text: str) -> bool:
+ """Check if text ends with an unclosed quote.
+
+ Counts quote characters (excluding apostrophes) and returns True
+ if the count is odd, meaning there's an open quote.
+
+ Args:
+ text: The text to check.
+
+ Returns:
+ True if there's an unclosed quote at the end of text.
+ """
+ return _count_quotes(text) % 2 == 1
+
+ # Characters that indicate end of sentence
+ _SENTENCE_END_PUNCT = {'.', '?', '!'}
+
+
+ def _ends_with_sentence_punct(text: str) -> bool:
+ """Check if text ends with sentence-ending punctuation.
+
+ Handles trailing quotes/parens: 'He said "Hello."' -> True
+ Handles ellipsis: 'He wondered...' -> True
+
+ Args:
+ text: The text to check.
+
+ Returns:
+ True if text ends with . ? ! or ... (possibly followed by quotes/parens).
+ """
+ if not text:
+ return False
+
+ # Strip trailing whitespace and quotes/parens (including curly quotes)
+ stripped = text.rstrip()
+ trailing_chars = {'"', "'", ')', ']', '\u201d', '\u2019'} # " ' ) ] " '
+ while stripped and stripped[-1] in trailing_chars:
+ stripped = stripped[:-1]
+
+ if not stripped:
+ return False
+
+ return stripped[-1] in _SENTENCE_END_PUNCT
+
+
+ def _dehyphenate_block(block: str) -> str:
+ """Remove hyphens from words split across lines.
+
+ Detects the pattern of a word fragment ending with a hyphen
+ at the end of a line, followed by the word continuation
+ starting with a lowercase letter on the next line.
+
+ Examples:
+ "bot-\ntle" -> "bottle"
+ "cham-\n bermaid" -> "chambermaid"
+
+ Args:
+ block: A paragraph block that may contain hyphenated line breaks.
+
+ Returns:
+ The block with hyphenated word breaks rejoined.
+ """
+ return _HYPHEN_LINE_BREAK_PATTERN.sub(r'\1', block)
+
+
+ def unwrap_hard_wrapped_text(text: str) -> str:
+ """Unwrap hard-wrapped paragraphs into continuous lines.
+
+ Splits on blank lines to identify paragraphs, then joins
+ lines within each paragraph into a single string with
+ single spaces. Also dehyphenates words that were split
+ across lines for typesetting purposes.
+
+ Special handling for spurious blank lines (issue #12):
+ When a single blank line appears mid-sentence (previous line
+ doesn't end with .?! and next line starts lowercase), the
+ text is joined rather than treated as a paragraph break.
+
+ Quote-aware joining (issue #14):
+ When we're inside an open quote (odd number of quote characters),
+ join across blank lines even if the previous line ends with
+ sentence punctuation and the next starts uppercase. This keeps
+ multi-sentence dialog together.
+
+ Examples:
+ >>> unwrap_hard_wrapped_text("a bot-\ntle of wine")
+ 'a bottle of wine'
+ >>> unwrap_hard_wrapped_text("line one\nline two")
+ 'line one line two'
+ >>> unwrap_hard_wrapped_text("His colour\n\nmounted;")
+ 'His colour mounted;'
+ >>> unwrap_hard_wrapped_text("'First.\n\nSecond.'")
+ "'First. Second.'"
+
+ Args:
+ text: Raw text with hard-wrapped lines.
+
+ Returns:
+ Text with paragraphs unwrapped into continuous strings,
+ separated by double newlines, with hyphenated words rejoined.
+ """
+ lines = text.splitlines()
+ paragraphs: list[list[str]] = []
+ current_para_lines: list[str] = []
+ blank_line_count = 0
+
+ for line in lines:
+ stripped = line.strip()
+
+ if not stripped:
+ # Blank line (or whitespace-only)
+ blank_line_count += 1
+ else:
+ # Non-blank line
+ if current_para_lines and blank_line_count > 0:
+ # We have previous content and saw blank line(s)
+ # Build the current paragraph text to check ending
+ prev_para = ' '.join(ln.strip() for ln in current_para_lines if ln.strip())
+
+ # Issue #12: Join across single blank line if:
+ # 1. Exactly one blank line
+ # 2. Previous paragraph doesn't end with sentence punctuation
+ # 3. Current line starts with lowercase
+ issue_12_join = (
+ blank_line_count == 1
+ and prev_para
+ and not _ends_with_sentence_punct(prev_para)
+ and stripped[0].islower()
+ )
+
+ # Issue #14: Join across blank line if inside open quote
+ # Even if previous ends with sentence punct and next starts uppercase,
+ # we should join if we're inside an unclosed quote
+ issue_14_join = (
+ blank_line_count == 1
+ and prev_para
+ and _is_inside_open_quote(prev_para)
+ )
+
+ should_join = issue_12_join or issue_14_join
+
+ if should_join:
+ # Treat as continuation of current paragraph
+ current_para_lines.append(line)
+ else:
+ # Finish current paragraph, start new one
+ paragraphs.append(current_para_lines)
+ current_para_lines = [line]
+ else:
+ # No blank lines seen, add to current paragraph
+ current_para_lines.append(line)
+
+ blank_line_count = 0
+
+ # Don't forget the last paragraph
+ if current_para_lines:
+ paragraphs.append(current_para_lines)
+
+ # Process each paragraph: dehyphenate and join lines
+ unwrapped = []
+ for para_lines in paragraphs:
+ block = '\n'.join(para_lines)
+ # First, dehyphenate words split across lines
+ block = _dehyphenate_block(block)
+ # Then join remaining lines with spaces
+ joined_lines = block.splitlines()
+ joined = ' '.join(ln.strip() for ln in joined_lines if ln.strip())
+ # Normalize multiple spaces to single space (OCR artifacts, formatting)
+ while '  ' in joined:
+ joined = joined.replace('  ', ' ')
+ if joined:
+ unwrapped.append(joined)
+
+ return '\n\n'.join(unwrapped)
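A short usage sketch for the new module (the import path is assumed from the file listing; the expected output follows the docstring examples above):

```python
from fast_sentence_segment.dmo.unwrap_hard_wrapped_text import unwrap_hard_wrapped_text

raw = (
    "It was a dark and stormy night; the rain fell in tor-\n"
    "rents, except at occasional intervals.\n"
    "\n"
    "A new paragraph starts here."
)

print(unwrap_hard_wrapped_text(raw))
# It was a dark and stormy night; the rain fell in torrents, except at occasional intervals.
#
# A new paragraph starts here.
```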
fast_sentence_segment/svc/perform_sentence_segmentation.py
@@ -43,19 +43,20 @@ def _load_spacy_model(model_name: str = "en_core_web_sm"):

  return spacy.load(model_name)

- from fast_sentence_segment.dmo import AbbreviationMerger
- from fast_sentence_segment.dmo import AbbreviationSplitter
- from fast_sentence_segment.dmo import TitleNameMerger
- from fast_sentence_segment.dmo import EllipsisNormalizer
- from fast_sentence_segment.dmo import NewlinesToPeriods
- from fast_sentence_segment.dmo import BulletPointCleaner
- from fast_sentence_segment.dmo import NumberedListNormalizer
- from fast_sentence_segment.dmo import QuestionExclamationSplitter
- from fast_sentence_segment.dmo import SpacyDocSegmenter
- from fast_sentence_segment.dmo import PostProcessStructure
- from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote
- from fast_sentence_segment.dmo import Dehyphenator
- from fast_sentence_segment.dmo import OcrArtifactFixer
+ # Imports after lazy spacy loading function (intentional)
+ from fast_sentence_segment.dmo import AbbreviationMerger # noqa: E402
+ from fast_sentence_segment.dmo import AbbreviationSplitter # noqa: E402
+ from fast_sentence_segment.dmo import TitleNameMerger # noqa: E402
+ from fast_sentence_segment.dmo import EllipsisNormalizer # noqa: E402
+ from fast_sentence_segment.dmo import NewlinesToPeriods # noqa: E402
+ from fast_sentence_segment.dmo import BulletPointCleaner # noqa: E402
+ from fast_sentence_segment.dmo import NumberedListNormalizer # noqa: E402
+ from fast_sentence_segment.dmo import QuestionExclamationSplitter # noqa: E402
+ from fast_sentence_segment.dmo import SpacyDocSegmenter # noqa: E402
+ from fast_sentence_segment.dmo import PostProcessStructure # noqa: E402
+ from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote # noqa: E402
+ from fast_sentence_segment.dmo import Dehyphenator # noqa: E402
+ from fast_sentence_segment.dmo import OcrArtifactFixer # noqa: E402


  class PerformSentenceSegmentation(BaseObject):
pyproject.toml
@@ -11,7 +11,7 @@ description = "Fast and Efficient Sentence Segmentation"
  license = "MIT"
  name = "fast-sentence-segment"
  readme = "README.md"
- version = "1.4.5"
+ version = "1.5.3"

  keywords = ["nlp", "text", "preprocess", "segment"]
  repository = "https://github.com/craigtrim/fast-sentence-segment"
setup.py
@@ -20,9 +20,9 @@ entry_points = \

  setup_kwargs = {
  'name': 'fast-sentence-segment',
- 'version': '1.4.5',
+ 'version': '1.5.3',
  'description': 'Fast and Efficient Sentence Segmentation',
- 'long_description': '# Fast Sentence Segmentation\n\n[![PyPI version](https://img.shields.io/pypi/v/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)\n[![Python versions](https://img.shields.io/pypi/pyversions/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)\n[![Tests](https://img.shields.io/badge/tests-664-brightgreen)](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\n[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)\n[![Downloads](https://static.pepy.tech/badge/fast-sentence-segment)](https://pepy.tech/project/fast-sentence-segment)\n[![Downloads/Month](https://static.pepy.tech/badge/fast-sentence-segment/month)](https://pepy.tech/project/fast-sentence-segment)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. 
The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\n```bash\n# Inline text\nsegment "Gandalf paused... You shall not pass! The Balrog roared."\n\n# Pipe from stdin\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n\n# Numbered output\nsegment -n -f silmarillion.txt\n\n# File-to-file (one sentence per line)\nsegment-file --input-file book.txt --output-file sentences.txt\n\n# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)\nsegment-file --input-file book.txt --output-file sentences.txt --unwrap\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Commands\n\n| Command | Description |\n|---------|-------------|\n| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |\n| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
+ 'long_description': '# Fast Sentence Segmentation\n\n[![PyPI version](https://img.shields.io/pypi/v/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)\n[![Python versions](https://img.shields.io/pypi/pyversions/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)\n[![Tests](https://img.shields.io/badge/tests-664-brightgreen)](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)\n[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)\n[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)\n[![Downloads](https://static.pepy.tech/badge/fast-sentence-segment)](https://pepy.tech/project/fast-sentence-segment)\n[![Downloads/Month](https://static.pepy.tech/badge/fast-sentence-segment/month)](https://pepy.tech/project/fast-sentence-segment)\n\nFast and efficient sentence segmentation using spaCy with surgical post-processing fixes. Handles complex edge cases like abbreviations (Dr., Mr., etc.), ellipses, quoted text, and multi-paragraph documents.\n\n## Why This Library?\n\n1. **Keep it local**: LLM API calls cost money and send your data to third parties. Run sentence segmentation entirely on your machine.\n2. **spaCy perfected**: spaCy is a great local model, but it makes mistakes. This library fixes most of spaCy\'s shortcomings.\n\n## Features\n\n- **Paragraph-aware segmentation**: Returns sentences grouped by paragraph\n- **Abbreviation handling**: Correctly handles "Dr.", "Mr.", "etc.", "p.m.", "a.m." without false splits\n- **Ellipsis preservation**: Keeps `...` intact while detecting sentence boundaries\n- **Question/exclamation splitting**: Properly splits on `?` and `!` followed by capital letters\n- **Cached processing**: LRU cache for repeated text processing\n- **Flexible output**: Nested lists (by paragraph) or flattened list of sentences\n- **Bullet point & numbered list normalization**: Cleans common list formats\n- **CLI tool**: Command-line interface for quick segmentation\n\n## Installation\n\n```bash\npip install fast-sentence-segment\n```\n\nAfter installation, download the spaCy model:\n\n```bash\npython -m spacy download en_core_web_sm\n```\n\n## Quick Start\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = "Do you like Dr. Who? I prefer Dr. Strange! Mr. T is also cool."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "Do you like Dr. Who?",\n "I prefer Dr. Strange!",\n "Mr. T is also cool."\n]\n```\n\nNotice how "Dr. Who?" stays together as a single sentence—the library correctly recognizes that a title followed by a single-word name ending in `?` or `!` is a name reference, not a sentence boundary.\n\n## Usage\n\n### Basic Segmentation\n\nThe `segment_text` function returns a list of lists, where each inner list represents a paragraph containing its sentences:\n\n```python\nfrom fast_sentence_segment import segment_text\n\ntext = """Gandalf spoke softly. "All we have to decide is what to do with the time given us."\n\nFrodo nodded. 
The weight of the Ring pressed against his chest."""\n\nresults = segment_text(text)\n```\n\n```json\n[\n [\n "Gandalf spoke softly.",\n "\\"All we have to decide is what to do with the time given us.\\"."\n ],\n [\n "Frodo nodded.",\n "The weight of the Ring pressed against his chest."\n ]\n]\n```\n\n### Flattened Output\n\nIf you don\'t need paragraph boundaries, use the `flatten` parameter:\n\n```python\ntext = "At 9 a.m. the hobbits set out. By 3 p.m. they reached Rivendell. Mr. Frodo was exhausted."\n\nresults = segment_text(text, flatten=True)\n```\n\n```json\n[\n "At 9 a.m. the hobbits set out.",\n "By 3 p.m. they reached Rivendell.",\n "Mr. Frodo was exhausted."\n]\n```\n\n### Direct Segmenter Access\n\nFor more control, use the `Segmenter` class directly:\n\n```python\nfrom fast_sentence_segment import Segmenter\n\nsegmenter = Segmenter()\nresults = segmenter.input_text("Your text here.")\n```\n\n### Command Line Interface\n\n```bash\n# Inline text\nsegment "Gandalf paused... You shall not pass! The Balrog roared."\n\n# Pipe from stdin\necho "Have you seen Dr. Who? It\'s brilliant!" | segment\n\n# Numbered output\nsegment -n -f silmarillion.txt\n\n# File-to-file (one sentence per line)\nsegment-file --input-file book.txt --output-file sentences.txt\n\n# Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)\nsegment-file --input-file book.txt --output-file sentences.txt --unwrap\n\n# Dialog-aware formatting (implies --unwrap)\nsegment -f book.txt --format\n```\n\n## API Reference\n\n| Function | Parameters | Returns | Description |\n|----------|------------|---------|-------------|\n| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False`, `format: str = None` | `list` or `str` | Main entry point for segmentation. Use `format="dialog"` for dialog-aware output. |\n| `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |\n\n### CLI Commands\n\n| Command | Description |\n|---------|-------------|\n| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output, `--format` for dialog-aware paragraph grouping. |\n| `segment-file --input-file IN --output-file OUT [--unwrap] [--format]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts, `--format` for dialog-aware formatting. |\n\n## Why Nested Lists?\n\nThe segmentation process preserves document structure by segmenting into both paragraphs and sentences. Each outer list represents a paragraph, and each inner list contains that paragraph\'s sentences. This is useful for:\n\n- Document structure analysis\n- Paragraph-level processing\n- Maintaining original text organization\n\nUse `flatten=True` when you only need sentences without paragraph context.\n\n## Requirements\n\n- Python 3.9+\n- spaCy 3.8+\n- en_core_web_sm spaCy model\n\n## How It Works\n\nThis library uses spaCy for initial sentence segmentation, then applies surgical post-processing fixes for cases where spaCy\'s default behavior is incorrect:\n\n1. **Pre-processing**: Normalize numbered lists, preserve ellipses with placeholders\n2. **spaCy segmentation**: Use spaCy\'s sentence boundary detection\n3. **Post-processing**: Split on abbreviation boundaries, handle `?`/`!` + capital patterns\n4. **Denormalization**: Restore placeholders to original text\n\n## License\n\nMIT License - see [LICENSE](LICENSE) for details.\n\n## Contributing\n\nContributions are welcome! Please feel free to submit a Pull Request.\n\n1. 
Fork the repository\n2. Create your feature branch (`git checkout -b feature/amazing-feature`)\n3. Run tests (`make test`)\n4. Commit your changes\n5. Push to the branch\n6. Open a Pull Request\n',
  'author': 'Craig Trim',
  'author_email': 'craigtrim@gmail.com',
  'maintainer': 'Craig Trim',
fast_sentence_segment-1.4.5/fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py (removed; superseded by the new version above)
@@ -1,75 +0,0 @@
- # -*- coding: UTF-8 -*-
- """Unwrap hard-wrapped text (e.g., Project Gutenberg e-texts).
-
- Joins lines within paragraphs into continuous strings while
- preserving paragraph boundaries (blank lines). Also dehyphenates
- words that were split across lines for typesetting.
-
- Related GitHub Issue:
- #8 - Add dehyphenation support for words split across lines
- https://github.com/craigtrim/fast-sentence-segment/issues/8
- """
-
- import re
-
- # Pattern to match hyphenated word breaks at end of line:
- # - A single hyphen (not -- em-dash)
- # - Followed by newline and optional whitespace
- # - Followed by a lowercase letter (continuation of word)
- _HYPHEN_LINE_BREAK_PATTERN = re.compile(r'(?<!-)-\n\s*([a-z])')
-
-
- def _dehyphenate_block(block: str) -> str:
- """Remove hyphens from words split across lines.
-
- Detects the pattern of a word fragment ending with a hyphen
- at the end of a line, followed by the word continuation
- starting with a lowercase letter on the next line.
-
- Examples:
- "bot-\ntle" -> "bottle"
- "cham-\n bermaid" -> "chambermaid"
-
- Args:
- block: A paragraph block that may contain hyphenated line breaks.
-
- Returns:
- The block with hyphenated word breaks rejoined.
- """
- return _HYPHEN_LINE_BREAK_PATTERN.sub(r'\1', block)
-
-
- def unwrap_hard_wrapped_text(text: str) -> str:
- """Unwrap hard-wrapped paragraphs into continuous lines.
-
- Splits on blank lines to identify paragraphs, then joins
- lines within each paragraph into a single string with
- single spaces. Also dehyphenates words that were split
- across lines for typesetting purposes.
-
- Examples:
- >>> unwrap_hard_wrapped_text("a bot-\ntle of wine")
- 'a bottle of wine'
- >>> unwrap_hard_wrapped_text("line one\nline two")
- 'line one line two'
-
- Args:
- text: Raw text with hard-wrapped lines.
-
- Returns:
- Text with paragraphs unwrapped into continuous strings,
- separated by double newlines, with hyphenated words rejoined.
- """
- blocks = re.split(r'\n\s*\n', text)
- unwrapped = []
-
- for block in blocks:
- # First, dehyphenate words split across lines
- block = _dehyphenate_block(block)
- # Then join remaining lines with spaces
- lines = block.splitlines()
- joined = ' '.join(line.strip() for line in lines if line.strip())
- if joined:
- unwrapped.append(joined)
-
- return '\n\n'.join(unwrapped)