fast_sentence_segment-1.4.5-py3-none-any.whl → fast_sentence_segment-1.5.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
fast_sentence_segment/cli.py

@@ -100,10 +100,13 @@ def main():
     parser.add_argument(
         "--format",
         action="store_true",
-        help="Format output with dialog-aware paragraph grouping",
+        help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
     )
     args = parser.parse_args()
 
+    # --format implies --unwrap
+    unwrap = args.unwrap or args.format
+
     # Get input text
     if args.file:
         with open(args.file, "r", encoding="utf-8") as f:
@@ -118,7 +121,7 @@ def main():
 
     # Segment and output
     result = segment_text(
-        text.strip(), flatten=True, unwrap=args.unwrap,
+        text.strip(), flatten=True, unwrap=unwrap,
         format="dialog" if args.format else None
     )
 
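
The two hunks above wire `--format` to imply `--unwrap`. A minimal, standalone argparse sketch of the same pattern (only the two relevant flags are declared; everything else in the real parser is omitted):

```python
# Minimal sketch of the new flag implication.
import argparse

parser = argparse.ArgumentParser(prog="segment")
parser.add_argument("--unwrap", action="store_true")
parser.add_argument(
    "--format",
    action="store_true",
    help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
)

args = parser.parse_args(["--format"])
unwrap = args.unwrap or args.format  # --format implies --unwrap
assert unwrap is True
```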
@@ -170,7 +173,6 @@ def _process_single_file(
     # Step 3: Write output
     if format:
         # Format mode returns a string
-        sentence_count = result.count("\n") + 1 if result else 0
         print(f"\r {GREEN}✓{RESET} Segmented text ({elapsed:.2f}s)")
         with open(output_file, "w", encoding="utf-8") as f:
             f.write(result + "\n")
@@ -221,10 +223,13 @@ def file_main():
     parser.add_argument(
         "--format",
         action="store_true",
-        help="Format output with dialog-aware paragraph grouping",
+        help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
    )
     args = parser.parse_args()
 
+    # --format implies --unwrap
+    unwrap = args.unwrap or args.format
+
     # Validate arguments
     if not args.input_file and not args.input_dir:
         print(f" {YELLOW}Error:{RESET} Either --input-file or --input-dir is required")
@@ -240,36 +245,37 @@ def file_main():
 
     # Process directory
     if args.input_dir:
-        if not os.path.isdir(args.input_dir):
-            print(f" {YELLOW}Error:{RESET} Directory not found: {args.input_dir}")
+        input_dir = os.path.expanduser(args.input_dir)
+        if not os.path.isdir(input_dir):
+            print(f" {YELLOW}Error:{RESET} Directory not found: {input_dir}")
             sys.exit(1)
 
         # Find all .txt files
         txt_files = sorted([
-            f for f in os.listdir(args.input_dir)
+            f for f in os.listdir(input_dir)
             if f.endswith(".txt") and not f.endswith("-clean.txt")
         ])
 
         if not txt_files:
-            print(f" {YELLOW}Error:{RESET} No .txt files found in {args.input_dir}")
+            print(f" {YELLOW}Error:{RESET} No .txt files found in {input_dir}")
             sys.exit(1)
 
         _header("segment-file (batch)")
         print(f" {DIM}Processing {len(txt_files)} files in directory{RESET}")
         print()
-        _param("Directory", args.input_dir)
+        _param("Directory", input_dir)
         _param("Files", str(len(txt_files)))
-        _param("Unwrap", "enabled" if args.unwrap else "disabled")
+        _param("Unwrap", "enabled" if unwrap else "disabled")
         _param("Normalize quotes", "disabled" if not normalize else "enabled")
         _param("Format", "dialog" if args.format else "default (one sentence per line)")
         print()
 
         format_value = "dialog" if args.format else None
         for i, filename in enumerate(txt_files, 1):
-            input_path = os.path.join(args.input_dir, filename)
+            input_path = os.path.join(input_dir, filename)
             output_path = _generate_output_path(input_path)
             print(f" {BOLD}[{i}/{len(txt_files)}]{RESET} {filename}")
-            _process_single_file(input_path, output_path, args.unwrap, normalize, format_value)
+            _process_single_file(input_path, output_path, unwrap, normalize, format_value)
             print()
 
         print(f" {GREEN}Done! Processed {len(txt_files)} files.{RESET}")
@@ -277,18 +283,20 @@ def file_main():
         return
 
     # Process single file
-    if not os.path.isfile(args.input_file):
-        print(f" {YELLOW}Error:{RESET} File not found: {args.input_file}")
+    input_file = os.path.expanduser(args.input_file)
+    if not os.path.isfile(input_file):
+        print(f" {YELLOW}Error:{RESET} File not found: {input_file}")
         sys.exit(1)
 
-    output_file = args.output_file or _generate_output_path(args.input_file)
+    output_file = args.output_file or _generate_output_path(input_file)
+    output_file = os.path.expanduser(output_file)
 
     _header("segment-file")
     print(f" {DIM}Segmenting text file into sentences{RESET}")
     print()
 
     format_value = "dialog" if args.format else None
-    _process_single_file(args.input_file, output_file, args.unwrap, normalize, format_value)
+    _process_single_file(input_file, output_file, unwrap, normalize, format_value)
 
     print(f"\n {GREEN}Done!{RESET}")
     print()
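
Both `file_main()` hunks route user-supplied paths through `os.path.expanduser`, so `~`-prefixed paths now resolve before the existence checks. A quick stdlib-only illustration (the path itself is hypothetical):

```python
import os

# open()/os.path.isfile() do not resolve a leading "~"; it must be
# expanded explicitly, which is what the hunks above add.
raw = "~/books/moby-dick.txt"        # hypothetical path
resolved = os.path.expanduser(raw)   # e.g. /home/alice/books/moby-dick.txt
print(os.path.isfile(raw))           # False unless a literal "./~" directory exists
print(os.path.isfile(resolved))      # True if the file actually exists
```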
fast_sentence_segment/dmo/dialog_formatter.py

@@ -37,16 +37,129 @@ DOUBLE_QUOTES = '""\""'
 SINGLE_QUOTES = "'''"
 ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
 
+# Known elision words (case-insensitive)
+# These are specific words where an apostrophe replaces omitted letters at the start
+KNOWN_ELISION_WORDS = {
+    # 'it' elisions
+    "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+    # Archaic oaths (very specific words)
+    "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+    # Common elisions with a-/be- prefix dropped
+    "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+    "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+    "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+    "yond", "cept", "scaped", "specially", "splain", "spect",
+    # Cockney/dialect h-dropping (common words and their forms)
+    "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+    "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+    "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+    "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+    "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+    "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+    "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+    "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+    # Cockney th-dropping
+    "at", "ese", "ey", "ose", "ough", "rough",
+    # Other prefix elisions
+    "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+    "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+    "thout", "til", "till", "un",
+    # Modern colloquial
+    "kay", "sup", "dya", "ja", "yer", "copter",
+    # Musical
+    "cello",
+    # Year abbreviations handled separately (digits)
+    # 'member (remember)
+    "member",
+}
+
+
+def _is_elision(text: str, pos: int) -> bool:
+    """Check if apostrophe at position is a word-initial elision.
+
+    Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+    Dialog quotes like 'Hello!' surround quoted speech.
+
+    Args:
+        text: The full text.
+        pos: Position of the apostrophe character.
+
+    Returns:
+        True if this appears to be an elision, not a dialog quote.
+    """
+    if pos >= len(text) - 1:
+        return False
+
+    next_char = text[pos + 1]
+
+    # If followed by a digit, it's a year abbreviation ('99, '20s)
+    if next_char.isdigit():
+        return True
+
+    # Extract the word after the apostrophe (letters only, up to non-letter)
+    word_start = pos + 1
+    word_end = word_start
+    while word_end < len(text) and text[word_end].isalpha():
+        word_end += 1
+
+    if word_end == word_start:
+        return False  # No letters after apostrophe
+
+    word = text[word_start:word_end].lower()
+
+    # Check if it's a known elision word
+    return word in KNOWN_ELISION_WORDS
+
 
 def _count_quotes(text: str) -> int:
-    """Count quote characters in text (both single and double)."""
-    return sum(1 for c in text if c in ALL_QUOTES)
+    """Count actual quote characters in text, excluding apostrophes.
+
+    Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+    and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+    because they don't indicate dialog boundaries.
+
+    A quote character is considered an apostrophe (not a quote) if:
+    - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+    - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+    """
+    count = 0
+    for i, c in enumerate(text):
+        if c not in ALL_QUOTES:
+            continue
+
+        # Check if this is a mid-word apostrophe (contraction/possessive)
+        prev_is_letter = i > 0 and text[i - 1].isalpha()
+        next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+        if prev_is_letter and next_is_letter:
+            # Mid-word apostrophe (contraction/possessive) - don't count
+            continue
+
+        # Check if this is a word-initial elision
+        prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+        if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+            # Word-initial elision - don't count
+            continue
+
+        count += 1
+    return count
 
 
 def _starts_with_quote(text: str) -> bool:
-    """Check if text starts with a quote character."""
+    """Check if text starts with a dialog quote (not an elision).
+
+    Returns True only for actual dialog openings, not for elisions
+    like 'tis, 'twas, 'cello, etc.
+    """
     text = text.lstrip()
-    return text and text[0] in ALL_QUOTES
+    if not text or text[0] not in ALL_QUOTES:
+        return False
+
+    # Check if this is an elision rather than a dialog quote
+    if text[0] in SINGLE_QUOTES and _is_elision(text, 0):
+        return False
+
+    return True
 
 
 def _ends_with_closing_quote(text: str) -> bool:
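
Assuming the 1.5.3 wheel keeps these private helpers in `fast_sentence_segment/dmo/dialog_formatter.py` (the module whose hash changes in the RECORD below), the new counting rules should behave as follows. An illustrative sketch, not documented API:

```python
# Hypothetical usage sketch: _count_quotes and _starts_with_quote are
# private helpers; the import path is inferred from the wheel's RECORD.
from fast_sentence_segment.dmo.dialog_formatter import (
    _count_quotes,
    _starts_with_quote,
)

assert _count_quotes("don't fret, Jack's here") == 0  # mid-word apostrophes
assert _count_quotes('"Hello," he said.') == 2        # real dialog quotes
assert _count_quotes("'Tis the '99 vintage") == 0     # elision + year abbreviation
assert _starts_with_quote("'Tis true.") is False      # elision, not dialog
assert _starts_with_quote("'Hello!' she cried.") is True
```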
@@ -211,7 +324,6 @@ def format_dialog(sentences: List[str]) -> str:
         quote_count = _count_quotes(sentence)
         starts_quote = _starts_with_quote(sentence)
         is_narrative = _is_narrative(sentence)
-        is_complete = _is_complete_quote(sentence)
 
         # Get info about previous sentence
         prev_sentence = current_para[-1] if current_para else ""
@@ -230,6 +342,10 @@ def format_dialog(sentences: List[str]) -> str:
         elif starts_quote:
             # New quote starting - always new paragraph
             should_start_new_para = True
+        elif is_narrative and prev_was_narrative:
+            # Consecutive narrative sentences - each gets its own paragraph
+            # This gives clean ebook formatting with paragraph breaks
+            should_start_new_para = True
         elif is_narrative and prev_was_complete and not prev_was_narrative:
             # Narrative after complete dialog - new paragraph
             should_start_new_para = True
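
The new branch slots into the middle of the paragraph-break cascade. A standalone paraphrase of the branch order visible in these hunks (the real `format_dialog` has additional branches not shown in this diff):

```python
# Compressed paraphrase of the break decision; flag names mirror format_dialog.
def should_break(starts_quote: bool, is_narrative: bool,
                 prev_was_narrative: bool, prev_was_complete: bool) -> bool:
    if starts_quote:
        return True  # new quote: always a new paragraph
    if is_narrative and prev_was_narrative:
        return True  # new in 1.5.3: narrative after narrative breaks too
    if is_narrative and prev_was_complete and not prev_was_narrative:
        return True  # narrative after complete dialog
    return False

# "He stood." followed by "He waited." -> each now gets its own paragraph
assert should_break(False, True, True, False) is True
```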
fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py

@@ -5,13 +5,60 @@ Joins lines within paragraphs into continuous strings while
 preserving paragraph boundaries (blank lines). Also dehyphenates
 words that were split across lines for typesetting.
 
-Related GitHub Issue:
+Related GitHub Issues:
     #8 - Add dehyphenation support for words split across lines
     https://github.com/craigtrim/fast-sentence-segment/issues/8
+
+    #12 - Unwrap fails when hard-wrap splits across blank line
+    https://github.com/craigtrim/fast-sentence-segment/issues/12
+
+    #14 - Make unwrap quote-aware to join text across blank lines inside open quotes
+    https://github.com/craigtrim/fast-sentence-segment/issues/14
 """
 
 import re
 
+# Quote characters to track for dialog detection
+# Includes straight and curly/smart quotes
+DOUBLE_QUOTES = '"""\u201c\u201d'  # straight + curly double quotes
+SINGLE_QUOTES = "'''\u2018\u2019"  # straight + curly single quotes
+ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
+
+# Known elision words where an apostrophe replaces omitted letters at word start
+# These should NOT be counted as dialog quotes
+KNOWN_ELISION_WORDS = {
+    # 'it' elisions
+    "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+    # Archaic oaths
+    "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+    # Common elisions with a-/be- prefix dropped
+    "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+    "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+    "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+    "yond", "cept", "scaped", "specially", "splain", "spect",
+    # Cockney/dialect h-dropping
+    "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+    "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+    "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+    "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+    "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+    "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+    "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+    "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+    # Cockney th-dropping
+    "ese", "ey", "ose", "ough", "rough",
+    # Other prefix elisions
+    "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+    "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+    "thout", "til", "till", "un",
+    # Modern colloquial
+    "kay", "sup", "dya", "ja", "yer", "copter",
+    # Musical
+    "cello",
+    # 'member (remember)
+    "member",
+}
+
 # Pattern to match hyphenated word breaks at end of line:
 # - A single hyphen (not -- em-dash)
 # - Followed by newline and optional whitespace
@@ -19,6 +66,128 @@ import re
 _HYPHEN_LINE_BREAK_PATTERN = re.compile(r'(?<!-)-\n\s*([a-z])')
 
 
+def _is_elision(text: str, pos: int) -> bool:
+    """Check if apostrophe at position is a word-initial elision.
+
+    Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+    Dialog quotes like 'Hello!' surround quoted speech.
+
+    Args:
+        text: The full text.
+        pos: Position of the apostrophe character.
+
+    Returns:
+        True if this appears to be an elision, not a dialog quote.
+    """
+    if pos >= len(text) - 1:
+        return False
+
+    next_char = text[pos + 1]
+
+    # If followed by a digit, it's a year abbreviation ('99, '20s)
+    if next_char.isdigit():
+        return True
+
+    # Extract the word after the apostrophe (letters only, up to non-letter)
+    word_start = pos + 1
+    word_end = word_start
+    while word_end < len(text) and text[word_end].isalpha():
+        word_end += 1
+
+    if word_end == word_start:
+        return False  # No letters after apostrophe
+
+    word = text[word_start:word_end].lower()
+
+    # Check if it's a known elision word
+    return word in KNOWN_ELISION_WORDS
+
+
+def _count_quotes(text: str) -> int:
+    """Count actual quote characters in text, excluding apostrophes.
+
+    Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+    and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+    because they don't indicate dialog boundaries.
+
+    A quote character is considered an apostrophe (not a quote) if:
+    - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+    - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+
+    Args:
+        text: The text to count quotes in.
+
+    Returns:
+        Number of actual quote characters (not apostrophes).
+    """
+    count = 0
+    for i, c in enumerate(text):
+        if c not in ALL_QUOTES:
+            continue
+
+        # Check if this is a mid-word apostrophe (contraction/possessive)
+        prev_is_letter = i > 0 and text[i - 1].isalpha()
+        next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+        if prev_is_letter and next_is_letter:
+            # Mid-word apostrophe (contraction/possessive) - don't count
+            continue
+
+        # Check if this is a word-initial elision
+        prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+        if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+            # Word-initial elision - don't count
+            continue
+
+        count += 1
+    return count
+
+
+def _is_inside_open_quote(text: str) -> bool:
+    """Check if text ends with an unclosed quote.
+
+    Counts quote characters (excluding apostrophes) and returns True
+    if the count is odd, meaning there's an open quote.
+
+    Args:
+        text: The text to check.
+
+    Returns:
+        True if there's an unclosed quote at the end of text.
+    """
+    return _count_quotes(text) % 2 == 1
+
+
+# Characters that indicate end of sentence
+_SENTENCE_END_PUNCT = {'.', '?', '!'}
+
+
+def _ends_with_sentence_punct(text: str) -> bool:
+    """Check if text ends with sentence-ending punctuation.
+
+    Handles trailing quotes/parens: 'He said "Hello."' -> True
+    Handles ellipsis: 'He wondered...' -> True
+
+    Args:
+        text: The text to check.
+
+    Returns:
+        True if text ends with . ? ! or ... (possibly followed by quotes/parens).
+    """
+    if not text:
+        return False
+
+    # Strip trailing whitespace and quotes/parens (including curly quotes)
+    stripped = text.rstrip()
+    trailing_chars = {'"', "'", ')', ']', '\u201d', '\u2019'}
+    while stripped and stripped[-1] in trailing_chars:
+        stripped = stripped[:-1]
+
+    if not stripped:
+        return False
+
+    return stripped[-1] in _SENTENCE_END_PUNCT
+
+
 def _dehyphenate_block(block: str) -> str:
     """Remove hyphens from words split across lines.
 
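
Taken together, these two predicates drive the joining decisions further down. An illustrative sketch of their expected behavior, assuming the private helpers stay importable from this module path (inferred from the RECORD):

```python
# Hypothetical usage sketch of the private helpers added above.
from fast_sentence_segment.dmo.unwrap_hard_wrapped_text import (
    _ends_with_sentence_punct,
    _is_inside_open_quote,
)

assert _ends_with_sentence_punct('He said "Hello."') is True  # punct inside quotes
assert _ends_with_sentence_punct("He wondered...") is True    # ellipsis
assert _ends_with_sentence_punct("His colour") is False       # mid-sentence
assert _is_inside_open_quote('"First sentence.') is True      # one unclosed quote
assert _is_inside_open_quote('"Done," he said.') is False     # balanced quotes
```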
@@ -47,11 +216,26 @@ def unwrap_hard_wrapped_text(text: str) -> str:
     single spaces. Also dehyphenates words that were split
     across lines for typesetting purposes.
 
+    Special handling for spurious blank lines (issue #12):
+    When a single blank line appears mid-sentence (previous line
+    doesn't end with .?! and next line starts lowercase), the
+    text is joined rather than treated as a paragraph break.
+
+    Quote-aware joining (issue #14):
+    When we're inside an open quote (odd number of quote characters),
+    join across blank lines even if the previous line ends with
+    sentence punctuation and the next starts uppercase. This keeps
+    multi-sentence dialog together.
+
     Examples:
         >>> unwrap_hard_wrapped_text("a bot-\\ntle of wine")
         'a bottle of wine'
         >>> unwrap_hard_wrapped_text("line one\\nline two")
         'line one line two'
+        >>> unwrap_hard_wrapped_text("His colour\\n\\nmounted;")
+        'His colour mounted;'
+        >>> unwrap_hard_wrapped_text("'First.\\n\\nSecond.'")
+        "'First. Second.'"
 
     Args:
         text: Raw text with hard-wrapped lines.
@@ -60,15 +244,75 @@ def unwrap_hard_wrapped_text(text: str) -> str:
         Text with paragraphs unwrapped into continuous strings,
         separated by double newlines, with hyphenated words rejoined.
     """
-    blocks = re.split(r'\n\s*\n', text)
-    unwrapped = []
+    lines = text.splitlines()
+    paragraphs: list[list[str]] = []
+    current_para_lines: list[str] = []
+    blank_line_count = 0
+
+    for line in lines:
+        stripped = line.strip()
+
+        if not stripped:
+            # Blank line (or whitespace-only)
+            blank_line_count += 1
+        else:
+            # Non-blank line
+            if current_para_lines and blank_line_count > 0:
+                # We have previous content and saw blank line(s)
+                # Build the current paragraph text to check ending
+                prev_para = ' '.join(ln.strip() for ln in current_para_lines if ln.strip())
 
-    for block in blocks:
+                # Issue #12: Join across single blank line if:
+                # 1. Exactly one blank line
+                # 2. Previous paragraph doesn't end with sentence punctuation
+                # 3. Current line starts with lowercase
+                issue_12_join = (
+                    blank_line_count == 1
+                    and prev_para
+                    and not _ends_with_sentence_punct(prev_para)
+                    and stripped[0].islower()
+                )
+
+                # Issue #14: Join across blank line if inside open quote
+                # Even if previous ends with sentence punct and next starts uppercase,
+                # we should join if we're inside an unclosed quote
+                issue_14_join = (
+                    blank_line_count == 1
+                    and prev_para
+                    and _is_inside_open_quote(prev_para)
+                )
+
+                should_join = issue_12_join or issue_14_join
+
+                if should_join:
+                    # Treat as continuation of current paragraph
+                    current_para_lines.append(line)
+                else:
+                    # Finish current paragraph, start new one
+                    paragraphs.append(current_para_lines)
+                    current_para_lines = [line]
+            else:
+                # No blank lines seen, add to current paragraph
+                current_para_lines.append(line)
+
+            blank_line_count = 0
+
+    # Don't forget the last paragraph
+    if current_para_lines:
+        paragraphs.append(current_para_lines)
+
+    # Process each paragraph: dehyphenate and join lines
+    unwrapped = []
+    for para_lines in paragraphs:
+        block = '\n'.join(para_lines)
         # First, dehyphenate words split across lines
         block = _dehyphenate_block(block)
         # Then join remaining lines with spaces
-        lines = block.splitlines()
-        joined = ' '.join(line.strip() for line in lines if line.strip())
+        joined_lines = block.splitlines()
+        joined = ' '.join(ln.strip() for ln in joined_lines if ln.strip())
+        # Normalize multiple spaces to single space (OCR artifacts, formatting)
+        while '  ' in joined:
+            joined = joined.replace('  ', ' ')
         if joined:
             unwrapped.append(joined)
 
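
End to end, the rewritten unwrapper should reproduce the doctest examples from the docstring above. A hedged usage sketch (import path inferred from the RECORD; the `\n\n` paragraph separator is taken from the docstring, not shown in this diff):

```python
# Sketch of the new joining behavior, using the doctest examples above.
from fast_sentence_segment.dmo.unwrap_hard_wrapped_text import (
    unwrap_hard_wrapped_text,
)

# Issue #12: a spurious blank line mid-sentence is joined
assert unwrap_hard_wrapped_text("His colour\n\nmounted;") == "His colour mounted;"
# Issue #14: a blank line inside an open quote is joined
assert unwrap_hard_wrapped_text("'First.\n\nSecond.'") == "'First. Second.'"
# Real paragraph breaks (sentence punct + uppercase, no open quote) survive
assert unwrap_hard_wrapped_text("One.\n\nTwo.") == "One.\n\nTwo."
```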
fast_sentence_segment/svc/perform_sentence_segmentation.py

@@ -43,19 +43,20 @@ def _load_spacy_model(model_name: str = "en_core_web_sm"):
 
     return spacy.load(model_name)
 
-from fast_sentence_segment.dmo import AbbreviationMerger
-from fast_sentence_segment.dmo import AbbreviationSplitter
-from fast_sentence_segment.dmo import TitleNameMerger
-from fast_sentence_segment.dmo import EllipsisNormalizer
-from fast_sentence_segment.dmo import NewlinesToPeriods
-from fast_sentence_segment.dmo import BulletPointCleaner
-from fast_sentence_segment.dmo import NumberedListNormalizer
-from fast_sentence_segment.dmo import QuestionExclamationSplitter
-from fast_sentence_segment.dmo import SpacyDocSegmenter
-from fast_sentence_segment.dmo import PostProcessStructure
-from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote
-from fast_sentence_segment.dmo import Dehyphenator
-from fast_sentence_segment.dmo import OcrArtifactFixer
+# Imports after lazy spacy loading function (intentional)
+from fast_sentence_segment.dmo import AbbreviationMerger  # noqa: E402
+from fast_sentence_segment.dmo import AbbreviationSplitter  # noqa: E402
+from fast_sentence_segment.dmo import TitleNameMerger  # noqa: E402
+from fast_sentence_segment.dmo import EllipsisNormalizer  # noqa: E402
+from fast_sentence_segment.dmo import NewlinesToPeriods  # noqa: E402
+from fast_sentence_segment.dmo import BulletPointCleaner  # noqa: E402
+from fast_sentence_segment.dmo import NumberedListNormalizer  # noqa: E402
+from fast_sentence_segment.dmo import QuestionExclamationSplitter  # noqa: E402
+from fast_sentence_segment.dmo import SpacyDocSegmenter  # noqa: E402
+from fast_sentence_segment.dmo import PostProcessStructure  # noqa: E402
+from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote  # noqa: E402
+from fast_sentence_segment.dmo import Dehyphenator  # noqa: E402
+from fast_sentence_segment.dmo import OcrArtifactFixer  # noqa: E402
 
 
 class PerformSentenceSegmentation(BaseObject):
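
This hunk is lint housekeeping: the imports deliberately sit below `_load_spacy_model`, so each line now suppresses flake8's E402. A tiny sketch of the same idiom with stand-in names:

```python
# Sketch with stand-in names: a definition that must come first stays
# above, and each late import carries "# noqa: E402" so flake8 knows
# the ordering is deliberate, not an oversight.
def lazy_loader():
    import json  # deferred: only imported when lazy_loader() runs
    return json.loads("{}")

import sys  # noqa: E402  # intentionally below the function definition

print(lazy_loader(), sys.version_info.major)
```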
fast_sentence_segment-1.5.3.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: fast-sentence-segment
-Version: 1.4.5
+Version: 1.5.3
 Summary: Fast and Efficient Sentence Segmentation
 Home-page: https://github.com/craigtrim/fast-sentence-segment
 License: MIT
@@ -164,21 +164,24 @@ segment-file --input-file book.txt --output-file sentences.txt
 
 # Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
 segment-file --input-file book.txt --output-file sentences.txt --unwrap
+
+# Dialog-aware formatting (implies --unwrap)
+segment -f book.txt --format
 ```
 
 ## API Reference
 
 | Function | Parameters | Returns | Description |
 |----------|------------|---------|-------------|
-| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
+| `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False`, `format: str = None` | `list` or `str` | Main entry point for segmentation. Use `format="dialog"` for dialog-aware output. |
 | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |
 
 ### CLI Commands
 
 | Command | Description |
 |---------|-------------|
-| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
-| `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
+| `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output, `--format` for dialog-aware paragraph grouping. |
+| `segment-file --input-file IN --output-file OUT [--unwrap] [--format]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts, `--format` for dialog-aware formatting. |
 
 ## Why Nested Lists?
 
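
The updated API table can be exercised like this; a sketch assuming the top-level `segment_text` export shown in the README, with expected outputs as comments rather than guarantees:

```python
# Assumes the top-level export documented above; outputs in the
# comments are expectations, not captured program output.
from fast_sentence_segment import segment_text

flat = segment_text("Hello world. How are you?", flatten=True)
print(flat)  # expected: ['Hello world.', 'How are you?']

formatted = segment_text(
    '"Stop!" he cried. She ran on.',
    flatten=True,
    unwrap=True,
    format="dialog",
)
print(type(formatted))  # expected: str (dialog-aware paragraphs)
```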
fast_sentence_segment-1.5.3.dist-info/RECORD

@@ -1,7 +1,7 @@
 fast_sentence_segment/__init__.py,sha256=DI7cyxtqnWd_5lrtGXqkIm8Aje0h55nGAHGgP6zXRyE,2278
 fast_sentence_segment/bp/__init__.py,sha256=j2-WfQ9WwVuXeGSjvV6XLVwEdvau8sdAQe4Pa4DrYi8,33
 fast_sentence_segment/bp/segmenter.py,sha256=UW6DguPgA56h-pPYRsfJhjIzBe40j6NdjkwYxamASyA,1928
-fast_sentence_segment/cli.py,sha256=8AA_V3hcOVL8ENXboFDuJTimFbHKK5KbjzIeS9Cs-xA,9568
+fast_sentence_segment/cli.py,sha256=WFhHTYrHUP0PqXeJ-6bR6hlXsV4ql190c2S2nBf-uCA,9775
 fast_sentence_segment/core/__init__.py,sha256=uoBersYyVStJ5a8zJpQz1GDGaloEdAv2jGHw1292hRM,108
 fast_sentence_segment/core/base_object.py,sha256=AYr7yzusIwawjbKdvcv4yTEnhmx6M583kDZzhzPOmq4,635
 fast_sentence_segment/core/stopwatch.py,sha256=hE6hMz2q6rduaKi58KZmiAL-lRtyh_wWCANhl4KLkRQ,879
@@ -11,7 +11,7 @@ fast_sentence_segment/dmo/abbreviation_splitter.py,sha256=03mSyJcLooNyIjXx6mPlrn
 fast_sentence_segment/dmo/abbreviations.py,sha256=CGJrJDo6pmYd3pTNEQbdOo8N6tnkCnwyL2X7Si663Os,2530
 fast_sentence_segment/dmo/bullet_point_cleaner.py,sha256=WOZQRWXiiyRi8rOuEIw36EmkaXmATHL9_Dxb2rderw4,1606
 fast_sentence_segment/dmo/dehyphenator.py,sha256=6BJTie7tClRAifeiW8V2CdAAbcbknhtqmKylAdRZ7ko,1776
-fast_sentence_segment/dmo/dialog_formatter.py,sha256=C5r3t3vY6ZbU26g0JGIk8BRlXhMn1LGPljw_WPTuCSg,8276
+fast_sentence_segment/dmo/dialog_formatter.py,sha256=CLtXGTRBC4PmH47vQ4WSlWIFi5r4rtJ7mFxs4xyBcmg,12824
 fast_sentence_segment/dmo/ellipsis_normalizer.py,sha256=lHs9dLFfKJe-2vFNe17Hik90g3_kXX347OzGP_IOT08,1521
 fast_sentence_segment/dmo/group_quoted_sentences.py,sha256=Ifh_kUwi7sMbzbZvrTgEKkzWe50AafUDhVKVPR9h7wQ,5092
 fast_sentence_segment/dmo/newlines_to_periods.py,sha256=PUrXreqZWiITINfoJL5xRRlXJH6noH0cdXtW1EqAh8I,1517
@@ -23,12 +23,12 @@ fast_sentence_segment/dmo/question_exclamation_splitter.py,sha256=cRsWRu8zb6wOWG
 fast_sentence_segment/dmo/spacy_doc_segmenter.py,sha256=Kb65TYMhrbpTYEey5vb7TyhCjUHVxmugHYIeKkntCwk,5147
 fast_sentence_segment/dmo/strip_trailing_period_after_quote.py,sha256=wYkoLy5XJKZIblJXBvDAB8-a81UTQOhOf2u91wjJWUw,2259
 fast_sentence_segment/dmo/title_name_merger.py,sha256=zbG04_VjwM8TtT8LhavvmZqIZL_2xgT2OTxWkK_Zt1s,5133
-fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py,sha256=V1T5RsJBaII_iGJMyWvv6rb2mny8pnVd428oVZL0n5I,2457
+fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py,sha256=UlWQSb6H8hGwCu719mYEuc4rBzH68wGWwuRH9c4S_xw,11573
 fast_sentence_segment/svc/__init__.py,sha256=9B12mXxBnlalH4OAm1AMLwUMa-RLi2ilv7qhqv26q7g,144
 fast_sentence_segment/svc/perform_paragraph_segmentation.py,sha256=zLKw9rSzb0NNfx4MyEeoGrHwhxTtH5oDrYcAL2LMVHY,1378
-fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=vpZ7lyOKxIAgHRk-rfgNI4BtAC38vU5UbYokkw8JhK8,7639
-fast_sentence_segment-1.4.5.dist-info/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
-fast_sentence_segment-1.4.5.dist-info/METADATA,sha256=1xZQ63f20VnbsLC_leQ40nIXLRbSkfUwnbxZmtBGYo0,7785
-fast_sentence_segment-1.4.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-fast_sentence_segment-1.4.5.dist-info/entry_points.txt,sha256=Zc8OwFKj3ofnjy5ZIFqHzDkIEWweV1AP1xap1ZFGD8M,107
-fast_sentence_segment-1.4.5.dist-info/RECORD,,
+fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=I58wdpoRqa79qrn-GxZhqPSg7DF-5curtLhIBBdi33E,7879
+fast_sentence_segment-1.5.3.dist-info/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
+fast_sentence_segment-1.5.3.dist-info/METADATA,sha256=0TIiLm5CuM0-_h1Rr_CWHeBlkcYsFS2t5-NWgHdpwUI,8038
+fast_sentence_segment-1.5.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+fast_sentence_segment-1.5.3.dist-info/entry_points.txt,sha256=Zc8OwFKj3ofnjy5ZIFqHzDkIEWweV1AP1xap1ZFGD8M,107
+fast_sentence_segment-1.5.3.dist-info/RECORD,,