fast-sentence-segment: 1.4.4-py3-none-any.whl → 1.5.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,3 +1,5 @@
+ from typing import List, Optional, Union
+
  from .bp import *
  from .svc import *
  from .dmo import *
@@ -5,6 +7,7 @@ from .dmo import *
  from .bp.segmenter import Segmenter
  from .dmo.unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
  from .dmo.normalize_quotes import normalize_quotes
+ from .dmo.dialog_formatter import format_dialog

  segment = Segmenter().input_text

@@ -14,7 +17,8 @@ def segment_text(
  flatten: bool = False,
  unwrap: bool = False,
  normalize: bool = True,
- ) -> list:
+ format: Optional[str] = None,
+ ) -> Union[List, str]:
  """Segment text into sentences.

  Args:
@@ -26,14 +30,23 @@ def segment_text(
  normalize: If True (default), normalize unicode quote variants
  to ASCII equivalents before segmenting. Ensures consistent
  quote characters for downstream processing.
+ format: Optional output format. Supported values:
+ - None (default): Return list of sentences/paragraphs
+ - "dialog": Return formatted string with dialog-aware
+ paragraph grouping (keeps multi-sentence quotes together,
+ adds paragraph breaks between speakers)

  Returns:
- List of sentences (if flatten=True) or list of paragraph
- groups, each containing a list of sentences.
+ If format is None: List of sentences (if flatten=True) or list
+ of paragraph groups, each containing a list of sentences.
+ If format="dialog": Formatted string with paragraph breaks.

- Related GitHub Issue:
+ Related GitHub Issues:
  #6 - Review findings from Issue #5
  https://github.com/craigtrim/fast-sentence-segment/issues/6
+
+ #10 - feat: Add --format flag for dialog-aware paragraph formatting
+ https://github.com/craigtrim/fast-sentence-segment/issues/10
  """
  if unwrap:
  input_text = unwrap_hard_wrapped_text(input_text)
@@ -43,9 +56,15 @@

  results = segment(input_text)

+ # Flatten to list of sentences
+ flat = []
+ [[flat.append(y) for y in x] for x in results]
+
+ # Apply formatting if requested
+ if format == "dialog":
+ return format_dialog(flat)
+
  if flatten:
- flat = []
- [[flat.append(y) for y in x] for x in results]
  return flat

  return results
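
A minimal usage sketch of the new `format` parameter, based on the signature and docstring above (the sample text is illustrative):

```python
from fast_sentence_segment import segment_text

text = '"Hello," said Jack. "I am fine," replied Mary.'

groups = segment_text(text)                      # nested: paragraph groups of sentences
sentences = segment_text(text, flatten=True)     # flat list of sentences
formatted = segment_text(text, format="dialog")  # str with dialog-aware paragraph breaks
print(formatted)
```

Note the return type: with `format="dialog"` the function returns a `str`, otherwise a `list`, which is what the new `Union[List, str]` annotation reflects.
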
@@ -97,8 +97,16 @@ def main():
  action="store_true",
  help="Unwrap hard-wrapped lines and dehyphenate split words",
  )
+ parser.add_argument(
+ "--format",
+ action="store_true",
+ help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
+ )
  args = parser.parse_args()

+ # --format implies --unwrap
+ unwrap = args.unwrap or args.format
+
  # Get input text
  if args.file:
  with open(args.file, "r", encoding="utf-8") as f:
@@ -112,12 +120,79 @@
  sys.exit(1)

  # Segment and output
- sentences = segment_text(text.strip(), flatten=True, unwrap=args.unwrap)
- for i, sentence in enumerate(sentences, 1):
- if args.numbered:
- print(f"{i}. {sentence}")
- else:
- print(sentence)
+ result = segment_text(
+ text.strip(), flatten=True, unwrap=unwrap,
+ format="dialog" if args.format else None
+ )
+
+ # If format is used, result is a string
+ if args.format:
+ print(result)
+ else:
+ # Result is a list of sentences
+ for i, sentence in enumerate(result, 1):
+ if args.numbered:
+ print(f"{i}. {sentence}")
+ else:
+ print(sentence)
+
+
+ def _generate_output_path(input_path: str) -> str:
+ """Generate output path by inserting -clean before extension."""
+ base, ext = os.path.splitext(input_path)
+ return f"{base}-clean{ext}"
+
+
+ def _process_single_file(
+ input_file: str, output_file: str, unwrap: bool, normalize: bool, format: str = None
+ ):
+ """Process a single file and write output."""
+ # Show configuration
+ _param("Input", input_file)
+ _param("Output", output_file)
+ _param("Size", _file_size(input_file))
+ _param("Unwrap", "enabled" if unwrap else "disabled")
+ _param("Normalize quotes", "disabled" if not normalize else "enabled")
+ _param("Format", format if format else "default (one sentence per line)")
+ print()
+
+ # Step 1: Read file
+ print(f" {YELLOW}→{RESET} Reading input file...")
+ with open(input_file, "r", encoding="utf-8") as f:
+ text = f.read()
+ print(f" {GREEN}✓{RESET} Read {len(text):,} characters")
+
+ # Step 2: Segment text
+ print(f" {YELLOW}→{RESET} Segmenting text...", end="", flush=True)
+ start = time.perf_counter()
+ result = segment_text(
+ text.strip(), flatten=True, unwrap=unwrap, normalize=normalize, format=format,
+ )
+ elapsed = time.perf_counter() - start
+
+ # Step 3: Write output
+ if format:
+ # Format mode returns a string
+ print(f"\r {GREEN}✓{RESET} Segmented text ({elapsed:.2f}s)")
+ with open(output_file, "w", encoding="utf-8") as f:
+ f.write(result + "\n")
+ print(f" {GREEN}✓{RESET} Written formatted output to {output_file}")
+ else:
+ # Default mode returns a list
+ sentences = result
+ print(f"\r {GREEN}✓{RESET} Segmented into {len(sentences):,} sentences ({elapsed:.2f}s)")
+ total = len(sentences)
+ with open(output_file, "w", encoding="utf-8") as f:
+ if unwrap:
+ f.write(format_grouped_sentences(sentences) + "\n")
+ print(f" {GREEN}✓{RESET} Written {total:,} sentences to {output_file}")
+ else:
+ for i, sentence in enumerate(sentences, 1):
+ f.write(sentence + "\n")
+ if i % 500 == 0 or i == total:
+ pct = (i / total) * 100
+ print(f"\r {YELLOW}→{RESET} Writing... {pct:.0f}% ({i:,}/{total:,})", end="", flush=True)
+ print(f"\r {GREEN}✓{RESET} Written {total:,} sentences to {output_file} ")


  def file_main():
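
The `-clean` naming behavior of the new `_generate_output_path` helper, shown as a sketch (the assertions are traced from the code above):

```python
import os

def _generate_output_path(input_path: str) -> str:
    # mirrors the helper added to cli.py above
    base, ext = os.path.splitext(input_path)
    return f"{base}-clean{ext}"

assert _generate_output_path("book.txt") == "book-clean.txt"
assert _generate_output_path("texts/story.md") == "texts/story-clean.md"
```

This suffix is also why the batch mode added below skips files ending in `-clean.txt`: re-running over a directory will not reprocess its own output.
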
@@ -126,12 +201,16 @@ def file_main():
  description="Segment a text file into sentences and write to an output file",
  )
  parser.add_argument(
- "--input-file", required=True,
+ "--input-file",
  help="Path to input text file",
  )
  parser.add_argument(
- "--output-file", required=True,
- help="Path to output file",
+ "--input-dir",
+ help="Path to directory containing text files to process",
+ )
+ parser.add_argument(
+ "--output-file",
+ help="Path to output file (optional, defaults to input-file with -clean suffix)",
  )
  parser.add_argument(
  "--unwrap", action="store_true",
@@ -141,50 +220,83 @@ def file_main():
  "--no-normalize-quotes", action="store_true",
  help="Disable unicode quote normalization to ASCII equivalents",
  )
+ parser.add_argument(
+ "--format",
+ action="store_true",
+ help="Format output with dialog-aware paragraph grouping (implies --unwrap)",
+ )
  args = parser.parse_args()

- # Echo command immediately
- _header("segment-file")
- print(f" {DIM}Segmenting text file into sentences{RESET}")
- print()
+ # --format implies --unwrap
+ unwrap = args.unwrap or args.format

- # Show configuration
- _param("Input", args.input_file)
- _param("Output", args.output_file)
- _param("Size", _file_size(args.input_file))
- _param("Unwrap", "enabled" if args.unwrap else "disabled")
- _param("Normalize quotes", "disabled" if args.no_normalize_quotes else "enabled")
- print()
-
- # Step 1: Read file
- print(f" {YELLOW}→{RESET} Reading input file...")
- with open(args.input_file, "r", encoding="utf-8") as f:
- text = f.read()
- print(f" {GREEN}✓{RESET} Read {len(text):,} characters")
+ # Validate arguments
+ if not args.input_file and not args.input_dir:
+ print(f" {YELLOW}Error:{RESET} Either --input-file or --input-dir is required")
+ sys.exit(1)
+ if args.input_file and args.input_dir:
+ print(f" {YELLOW}Error:{RESET} Cannot specify both --input-file and --input-dir")
+ sys.exit(1)
+ if args.input_dir and args.output_file:
+ print(f" {YELLOW}Error:{RESET} --output-file cannot be used with --input-dir")
+ sys.exit(1)

- # Step 2: Segment text
- print(f" {YELLOW}→{RESET} Segmenting text...", end="", flush=True)
- start = time.perf_counter()
  normalize = not args.no_normalize_quotes
- sentences = segment_text(
- text.strip(), flatten=True, unwrap=args.unwrap, normalize=normalize,
- )
- elapsed = time.perf_counter() - start
- print(f"\r {GREEN}✓{RESET} Segmented into {len(sentences):,} sentences ({elapsed:.2f}s)")

- # Step 3: Write output
- total = len(sentences)
- with open(args.output_file, "w", encoding="utf-8") as f:
- if args.unwrap:
- f.write(format_grouped_sentences(sentences) + "\n")
- print(f" {GREEN}✓{RESET} Written {total:,} sentences to {args.output_file}")
- else:
- for i, sentence in enumerate(sentences, 1):
- f.write(sentence + "\n")
- if i % 500 == 0 or i == total:
- pct = (i / total) * 100
- print(f"\r {YELLOW}→{RESET} Writing... {pct:.0f}% ({i:,}/{total:,})", end="", flush=True)
- print(f"\r {GREEN}✓{RESET} Written {total:,} sentences to {args.output_file} ")
+ # Process directory
+ if args.input_dir:
+ input_dir = os.path.expanduser(args.input_dir)
+ if not os.path.isdir(input_dir):
+ print(f" {YELLOW}Error:{RESET} Directory not found: {input_dir}")
+ sys.exit(1)
+
+ # Find all .txt files
+ txt_files = sorted([
+ f for f in os.listdir(input_dir)
+ if f.endswith(".txt") and not f.endswith("-clean.txt")
+ ])
+
+ if not txt_files:
+ print(f" {YELLOW}Error:{RESET} No .txt files found in {input_dir}")
+ sys.exit(1)
+
+ _header("segment-file (batch)")
+ print(f" {DIM}Processing {len(txt_files)} files in directory{RESET}")
+ print()
+ _param("Directory", input_dir)
+ _param("Files", str(len(txt_files)))
+ _param("Unwrap", "enabled" if unwrap else "disabled")
+ _param("Normalize quotes", "disabled" if not normalize else "enabled")
+ _param("Format", "dialog" if args.format else "default (one sentence per line)")
+ print()
+
+ format_value = "dialog" if args.format else None
+ for i, filename in enumerate(txt_files, 1):
+ input_path = os.path.join(input_dir, filename)
+ output_path = _generate_output_path(input_path)
+ print(f" {BOLD}[{i}/{len(txt_files)}]{RESET} {filename}")
+ _process_single_file(input_path, output_path, unwrap, normalize, format_value)
+ print()
+
+ print(f" {GREEN}Done! Processed {len(txt_files)} files.{RESET}")
+ print()
+ return
+
+ # Process single file
+ input_file = os.path.expanduser(args.input_file)
+ if not os.path.isfile(input_file):
+ print(f" {YELLOW}Error:{RESET} File not found: {input_file}")
+ sys.exit(1)
+
+ output_file = args.output_file or _generate_output_path(input_file)
+ output_file = os.path.expanduser(output_file)
+
+ _header("segment-file")
+ print(f" {DIM}Segmenting text file into sentences{RESET}")
+ print()
+
+ format_value = "dialog" if args.format else None
+ _process_single_file(input_file, output_file, unwrap, normalize, format_value)

  print(f"\n {GREEN}Done!{RESET}")
  print()
@@ -13,3 +13,5 @@ from .unwrap_hard_wrapped_text import unwrap_hard_wrapped_text
  from .normalize_quotes import normalize_quotes
  from .group_quoted_sentences import group_quoted_sentences, format_grouped_sentences
  from .strip_trailing_period_after_quote import StripTrailingPeriodAfterQuote
+ from .ocr_artifact_fixer import OcrArtifactFixer
+ from .dialog_formatter import DialogFormatter, format_dialog
@@ -0,0 +1,371 @@
+ # -*- coding: UTF-8 -*-
+ """Dialog-aware paragraph formatter for segmented text.
+
+ Formats segmented sentences into readable paragraphs with intelligent
+ grouping of dialog and narrative text. Keeps multi-sentence quoted speech
+ together and adds paragraph breaks between different speakers.
+
+ Related GitHub Issue:
+ #10 - feat: Add --format flag for dialog-aware paragraph formatting
+ https://github.com/craigtrim/fast-sentence-segment/issues/10
+
+ Example:
+ >>> from fast_sentence_segment.dmo.dialog_formatter import format_dialog
+ >>> sentences = [
+ ... '"Hello," said Jack.',
+ ... '"How are you today?',
+ ... 'I hope you are well."',
+ ... '"I am fine," replied Mary.',
+ ... ]
+ >>> print(format_dialog(sentences))
+ "Hello," said Jack.
+
+ "How are you today?
+ I hope you are well."
+
+ "I am fine," replied Mary.
+ """
+
+ import re
+ from typing import List
+
+ from fast_sentence_segment.core import BaseObject
+
+
+ # Quote characters to track for dialog detection
+ DOUBLE_QUOTES = '"\u201c\u201d'
+ SINGLE_QUOTES = "'\u2018\u2019"
+ ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
+
+ # Known elision words (case-insensitive)
+ # These are specific words where an apostrophe replaces omitted letters at the start
+ KNOWN_ELISION_WORDS = {
+ # 'it' elisions
+ "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+ # Archaic oaths (very specific words)
+ "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+ # Common elisions with a-/be- prefix dropped
+ "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+ "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+ "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+ "yond", "cept", "scaped", "specially", "splain", "spect",
+ # Cockney/dialect h-dropping (common words and their forms)
+ "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+ "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+ "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+ "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+ "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+ "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+ "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+ "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+ # Cockney th-dropping
+ "at", "ese", "ey", "ose", "ough", "rough",
+ # Other prefix elisions
+ "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+ "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+ "thout", "til", "till", "un",
+ # Modern colloquial
+ "kay", "sup", "dya", "ja", "yer", "copter",
+ # Musical
+ "cello",
+ # Year abbreviations handled separately (digits)
+ # 'member (remember)
+ "member",
+ }
+
+
+ def _is_elision(text: str, pos: int) -> bool:
+ """Check if apostrophe at position is a word-initial elision.
+
+ Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+ Dialog quotes like 'Hello!' surround quoted speech.
+
+ Args:
+ text: The full text.
+ pos: Position of the apostrophe character.
+
+ Returns:
+ True if this appears to be an elision, not a dialog quote.
+ """
+ if pos >= len(text) - 1:
+ return False
+
+ next_char = text[pos + 1]
+
+ # If followed by a digit, it's a year abbreviation ('99, '20s)
+ if next_char.isdigit():
+ return True
+
+ # Extract the word after the apostrophe (letters only, up to non-letter)
+ word_start = pos + 1
+ word_end = word_start
+ while word_end < len(text) and text[word_end].isalpha():
+ word_end += 1
+
+ if word_end == word_start:
+ return False # No letters after apostrophe
+
+ word = text[word_start:word_end].lower()
+
+ # Check if it's a known elision word
+ return word in KNOWN_ELISION_WORDS
+
+
+ def _count_quotes(text: str) -> int:
+ """Count actual quote characters in text, excluding apostrophes.
+
+ Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+ and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+ because they don't indicate dialog boundaries.
+
+ A quote character is considered an apostrophe (not a quote) if:
+ - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+ - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+ """
+ count = 0
+ for i, c in enumerate(text):
+ if c not in ALL_QUOTES:
+ continue
+
+ # Check if this is a mid-word apostrophe (contraction/possessive)
+ prev_is_letter = i > 0 and text[i - 1].isalpha()
+ next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+ if prev_is_letter and next_is_letter:
+ # Mid-word apostrophe (contraction/possessive) - don't count
+ continue
+
+ # Check if this is a word-initial elision
+ prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+ if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+ # Word-initial elision - don't count
+ continue
+
+ count += 1
+ return count
+
+
+ def _starts_with_quote(text: str) -> bool:
+ """Check if text starts with a dialog quote (not an elision).
+
+ Returns True only for actual dialog openings, not for elisions
+ like 'tis, 'twas, 'cello, etc.
+ """
+ text = text.lstrip()
+ if not text or text[0] not in ALL_QUOTES:
+ return False
+
+ # Check if this is an elision rather than a dialog quote
+ if text[0] in SINGLE_QUOTES and _is_elision(text, 0):
+ return False
+
+ return True
+
+
+ def _ends_with_closing_quote(text: str) -> bool:
+ """Check if text ends with a closing quote (possibly followed by punctuation)."""
+ text = text.rstrip()
+ if not text:
+ return False
+ # Check last few characters for closing quote pattern
+ # e.g., '" or "' or .' or ."
+ for i in range(min(3, len(text)), 0, -1):
+ if text[-i] in ALL_QUOTES:
+ return True
+ return False
+
+
+ def _is_complete_quote(text: str) -> bool:
+ """Check if text contains a complete (balanced) quote.
+
+ A complete quote has an even number of quote characters,
+ meaning all opened quotes are closed.
+ """
+ quote_count = _count_quotes(text)
+ return quote_count > 0 and quote_count % 2 == 0
+
+
+ def _sentence_is_dialog_continuation(sentence: str, in_quote: bool) -> bool:
+ """Determine if sentence continues an open quote.
+
+ Args:
+ sentence: The sentence to check.
+ in_quote: Whether we're currently inside an unclosed quote.
+
+ Returns:
+ True if this sentence is a continuation of open dialog.
+ """
+ if in_quote:
+ return True
+ return False
+
+
+ def _get_quote_delta(sentence: str) -> int:
+ """Get the net change in quote depth for a sentence.
+
+ Returns:
+ Positive if more quotes opened than closed,
+ negative if more closed than opened,
+ zero if balanced.
+ """
+ return _count_quotes(sentence) % 2
+
+
+ class DialogFormatter(BaseObject):
+ """Formats segmented sentences with dialog-aware paragraph grouping.
+
+ This formatter analyzes sentence structure to intelligently group
+ text into paragraphs:
+
+ - Multi-sentence quoted speech stays together (same speaker)
+ - Paragraph breaks added between different speakers
+ - Narrative text grouped appropriately
+ - Handles both single and double quote styles
+
+ Example:
+ >>> formatter = DialogFormatter()
+ >>> sentences = ['"Hello," he said.', 'The door opened.']
+ >>> print(formatter.process(sentences))
+ "Hello," he said.
+
+ The door opened.
+ """
+
+ def __init__(self):
+ """Initialize the DialogFormatter."""
+ BaseObject.__init__(self, __name__)
+
+ def process(self, sentences: List[str]) -> str:
+ """Format sentences into dialog-aware paragraphs.
+
+ Args:
+ sentences: List of segmented sentences.
+
+ Returns:
+ Formatted string with appropriate paragraph breaks.
+ """
+ return format_dialog(sentences)
+
+
+ def _is_narrative(sentence: str) -> bool:
+ """Check if a sentence is narrative (no quotes at start)."""
+ return not _starts_with_quote(sentence)
+
+
+ def _ends_dialog_turn(sentence: str) -> bool:
+ """Check if a sentence ends a dialog turn.
+
+ A dialog turn ends when the sentence ends with a closing quote
+ followed by optional punctuation or dialog tag ending.
+ """
+ sentence = sentence.rstrip()
+ if not sentence:
+ return False
+
+ # Pattern: ends with quote + optional punctuation
+ # e.g., ." or .' or "' or '" or ," he said. etc.
+ # Check if there's a closing quote near the end
+ last_chars = sentence[-10:] if len(sentence) >= 10 else sentence
+
+ # Count quotes in last part - if odd from end, likely closes
+ for i, c in enumerate(reversed(last_chars)):
+ if c in ALL_QUOTES:
+ # Found a quote - check if it's likely a closer
+ # A closer is typically followed by punctuation or end
+ remaining = last_chars[len(last_chars) - i:]
+ if not remaining or all(ch in '.,!?;: ' for ch in remaining):
+ return True
+ # Also handle dialog tags: ,' he said.
+ if remaining and remaining[0] in '.,!?' and 'said' not in sentence.lower()[-20:]:
+ return True
+ break
+
+ return False
+
+
+ def format_dialog(sentences: List[str]) -> str:
+ """Format sentences into dialog-aware paragraphs.
+
+ Groups sentences intelligently based on dialog structure:
+ - Sentences within an unclosed quote stay grouped
+ - Complete quoted sentences become their own paragraphs
+ - Narrative text is grouped together
+ - Paragraph breaks separate different speakers/turns
+
+ Args:
+ sentences: List of segmented sentences.
+
+ Returns:
+ Formatted string with paragraph breaks (double newlines)
+ between logical groups and single newlines within groups.
+
+ Example:
+ >>> sentences = [
+ ... '"My dear sir," cried the man.',
+ ... '"You had every reason to be carried away."',
+ ... ]
+ >>> print(format_dialog(sentences))
+ "My dear sir," cried the man.
+
+ "You had every reason to be carried away."
+ """
+ if not sentences:
+ return ""
+
+ paragraphs: List[List[str]] = []
+ current_para: List[str] = []
+ in_quote = False # Track if we're inside an unclosed quote
+
+ for i, sentence in enumerate(sentences):
+ sentence = sentence.strip()
+ if not sentence:
+ continue
+
+ quote_count = _count_quotes(sentence)
+ starts_quote = _starts_with_quote(sentence)
+ is_narrative = _is_narrative(sentence)
+
+ # Get info about previous sentence
+ prev_sentence = current_para[-1] if current_para else ""
+ prev_was_narrative = _is_narrative(prev_sentence) if prev_sentence else False
+ prev_was_complete = _is_complete_quote(prev_sentence) if prev_sentence else False
+
+ # Determine if this sentence starts a new paragraph
+ should_start_new_para = False
+
+ if not current_para:
+ # First sentence always starts a new paragraph
+ should_start_new_para = True
+ elif in_quote:
+ # Inside an open quote - continue current paragraph
+ should_start_new_para = False
+ elif starts_quote:
+ # New quote starting - always new paragraph
+ should_start_new_para = True
+ elif is_narrative and prev_was_narrative:
+ # Consecutive narrative sentences - each gets its own paragraph
+ # This gives clean ebook formatting with paragraph breaks
+ should_start_new_para = True
+ elif is_narrative and prev_was_complete and not prev_was_narrative:
+ # Narrative after complete dialog - new paragraph
+ should_start_new_para = True
+ elif is_narrative and not prev_was_narrative and _ends_dialog_turn(prev_sentence):
+ # Narrative after dialog that ends a turn - new paragraph
+ should_start_new_para = True
+
+ if should_start_new_para and current_para:
+ paragraphs.append(current_para)
+ current_para = []
+
+ current_para.append(sentence)
+
+ # Update quote tracking
+ if quote_count % 2 == 1:
+ in_quote = not in_quote
+
+ # Don't forget the last paragraph
+ if current_para:
+ paragraphs.append(current_para)
+
+ # Format: join sentences in paragraph with newline, paragraphs with double newline
+ return "\n\n".join("\n".join(para) for para in paragraphs)
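
To make the grouping rules concrete, a small worked example against the functions defined in this file (the input is illustrative; expected behavior is traced from the code above):

```python
from fast_sentence_segment.dmo.dialog_formatter import _count_quotes, format_dialog

# Apostrophes and elisions are not treated as dialog boundaries:
assert _count_quotes('"Don\'t," he said.') == 2  # only the two double quotes count
assert _count_quotes("'Tis done.") == 0          # "tis" is a known elision

# Quote parity keeps an open quote's sentences in one paragraph:
print(format_dialog([
    '"How are you today?',    # one quote: opens dialog
    'I hope you are well."',  # one quote: closes it; stays in the same paragraph
    'He waited.',             # narrative: starts a new paragraph
]))
```

One reading note: `_get_quote_delta` documents a positive/negative/zero result, but `_count_quotes(sentence) % 2` can only be 0 or 1; `format_dialog` tracks open/close parity directly, so the helper acts as a parity flag rather than a signed depth change.
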
@@ -0,0 +1,70 @@
+ # -*- coding: UTF-8 -*-
+ """Fix common OCR/text extraction artifacts.
+
+ Ebook text files often contain artifacts where common word pairs
+ are incorrectly joined. This module fixes known patterns.
+
+ Related GitHub Issue:
+ #9 - Fix common OCR/cleaning artifacts (Iam, witha)
+ https://github.com/craigtrim/fast-sentence-segment/issues/9
+ """
+
+ from fast_sentence_segment.core import BaseObject
+
+ # Known OCR artifact patterns: (pattern, replacement)
+ # All patterns include surrounding spaces to ensure exact word boundaries
+ _OCR_ARTIFACTS = [
+ (" Iam ", " I am "),
+ (" Ihave ", " I have "),
+ (" ihave ", " I have "),
+ (" Ithink ", " I think "),
+ (" ithink ", " I think "),
+ (" anda ", " and a "),
+ (" witha ", " with a "),
+ (" sucha ", " such a "),
+ (" aliquid ", " a liquid "),
+ ]
+
+
+ class OcrArtifactFixer(BaseObject):
+ """Fix common OCR/text extraction artifacts.
+
+ Detects and corrects known patterns where words are incorrectly
+ joined during OCR or text extraction processes.
+
+ Related GitHub Issue:
+ #9 - Fix common OCR/cleaning artifacts (Iam, witha)
+ https://github.com/craigtrim/fast-sentence-segment/issues/9
+ """
+
+ def __init__(self):
+ """Change Log
+
+ Created:
+ 3-Feb-2026
+ craigtrim@gmail.com
+ * fix common OCR/cleaning artifacts
+ https://github.com/craigtrim/fast-sentence-segment/issues/9
+ """
+ BaseObject.__init__(self, __name__)
+
+ @staticmethod
+ def process(input_text: str) -> str:
+ """Fix known OCR artifact patterns.
+
+ Args:
+ input_text: Text that may contain OCR artifacts.
+
+ Returns:
+ Text with known OCR artifacts corrected.
+
+ Examples:
+ >>> OcrArtifactFixer.process("Jack, Iam so happy")
+ 'Jack, I am so happy'
+ >>> OcrArtifactFixer.process("horizon witha hint")
+ 'horizon with a hint'
+ """
+ for pattern, replacement in _OCR_ARTIFACTS:
+ if pattern in input_text:
+ input_text = input_text.replace(pattern, replacement)
+ return input_text
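
A behavioral note on the pattern table above: every pattern carries surrounding spaces, so an artifact at the very start or end of the text is left untouched. A quick illustration:

```python
from fast_sentence_segment.dmo import OcrArtifactFixer

assert OcrArtifactFixer.process("Jack, Iam so happy") == "Jack, I am so happy"
assert OcrArtifactFixer.process("Iam so happy") == "Iam so happy"  # no leading space to match
```
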
@@ -5,13 +5,60 @@ Joins lines within paragraphs into continuous strings while
  preserving paragraph boundaries (blank lines). Also dehyphenates
  words that were split across lines for typesetting.

- Related GitHub Issue:
+ Related GitHub Issues:
  #8 - Add dehyphenation support for words split across lines
  https://github.com/craigtrim/fast-sentence-segment/issues/8
+
+ #12 - Unwrap fails when hard-wrap splits across blank line
+ https://github.com/craigtrim/fast-sentence-segment/issues/12
+
+ #14 - Make unwrap quote-aware to join text across blank lines inside open quotes
+ https://github.com/craigtrim/fast-sentence-segment/issues/14
  """

  import re

+ # Quote characters to track for dialog detection
+ # Includes straight and curly/smart quotes
+ DOUBLE_QUOTES = '"""\u201c\u201d' # " " " “ ”
+ SINGLE_QUOTES = "'''\u2018\u2019" # ' ' ' ‘ ’
+ ALL_QUOTES = DOUBLE_QUOTES + SINGLE_QUOTES
+
+ # Known elision words where an apostrophe replaces omitted letters at word start
+ # These should NOT be counted as dialog quotes
+ KNOWN_ELISION_WORDS = {
+ # 'it' elisions
+ "tis", "twas", "twere", "twill", "twould", "taint", "tother",
+ # Archaic oaths
+ "sblood", "sdeath", "swounds", "sbodikins", "slid", "strewth", "zounds",
+ # Common elisions with a-/be- prefix dropped
+ "bout", "bove", "cross", "fore", "fraid", "gainst", "live", "loft", "lone",
+ "long", "mid", "midst", "mong", "mongst", "neath", "round", "sleep", "tween",
+ "twixt", "wake", "ware", "way", "cause", "cuz", "coz", "hind", "low", "side",
+ "yond", "cept", "scaped", "specially", "splain", "spect",
+ # Cockney/dialect h-dropping
+ "e", "em", "er", "ere", "im", "is", "ave", "avin", "ead", "ear", "eard",
+ "eart", "eaven", "eavens", "eavy", "eck", "edge", "eel", "eight", "ell",
+ "elp", "en", "ero", "igh", "ill", "imself", "int", "it", "itch", "obby",
+ "old", "ole", "oliday", "oller", "ollow", "oly", "ome", "onest", "oney",
+ "onor", "onour", "ood", "ook", "oop", "ope", "orizon", "orn", "orrible",
+ "orse", "ospital", "ot", "otel", "our", "ouse", "ow", "owever", "uge",
+ "undred", "ungry", "unt", "urry", "urt", "usband", "alf", "all", "am",
+ "and", "andsome", "appen", "appy", "ard", "arm", "at", "ate",
+ # Cockney th-dropping
+ "ese", "ey", "ose", "ough", "rough",
+ # Other prefix elisions
+ "count", "fter", "gain", "gin", "less", "nother", "nough", "nuff", "pears",
+ "pon", "prentice", "scuse", "spite", "spose", "stead", "tarnal", "tend",
+ "thout", "til", "till", "un",
+ # Modern colloquial
+ "kay", "sup", "dya", "ja", "yer", "copter",
+ # Musical
+ "cello",
+ # 'member (remember)
+ "member",
+ }
+
  # Pattern to match hyphenated word breaks at end of line:
  # - A single hyphen (not -- em-dash)
  # - Followed by newline and optional whitespace
@@ -19,6 +66,128 @@ import re
  _HYPHEN_LINE_BREAK_PATTERN = re.compile(r'(?<!-)-\n\s*([a-z])')


+ def _is_elision(text: str, pos: int) -> bool:
+ """Check if apostrophe at position is a word-initial elision.
+
+ Elisions like 'tis, 'twas, 'cello, 'em replace omitted letters at word start.
+ Dialog quotes like 'Hello!' surround quoted speech.
+
+ Args:
+ text: The full text.
+ pos: Position of the apostrophe character.
+
+ Returns:
+ True if this appears to be an elision, not a dialog quote.
+ """
+ if pos >= len(text) - 1:
+ return False
+
+ next_char = text[pos + 1]
+
+ # If followed by a digit, it's a year abbreviation ('99, '20s)
+ if next_char.isdigit():
+ return True
+
+ # Extract the word after the apostrophe (letters only, up to non-letter)
+ word_start = pos + 1
+ word_end = word_start
+ while word_end < len(text) and text[word_end].isalpha():
+ word_end += 1
+
+ if word_end == word_start:
+ return False # No letters after apostrophe
+
+ word = text[word_start:word_end].lower()
+
+ # Check if it's a known elision word
+ return word in KNOWN_ELISION_WORDS
+
+
+ def _count_quotes(text: str) -> int:
+ """Count actual quote characters in text, excluding apostrophes.
+
+ Apostrophes in contractions (don't, can't), possessives (Jack's, Joselito's),
+ and word-initial elisions ('tis, 'twas, 'cello, 'em) are NOT counted as quotes
+ because they don't indicate dialog boundaries.
+
+ A quote character is considered an apostrophe (not a quote) if:
+ - It's preceded by a letter AND followed by a letter (mid-word: don't, Joselito's)
+ - It's a word-initial elision ('tis, 'Twas, 'cello, '99)
+
+ Args:
+ text: The text to count quotes in.
+
+ Returns:
+ Number of actual quote characters (not apostrophes).
+ """
+ count = 0
+ for i, c in enumerate(text):
+ if c not in ALL_QUOTES:
+ continue
+
+ # Check if this is a mid-word apostrophe (contraction/possessive)
+ prev_is_letter = i > 0 and text[i - 1].isalpha()
+ next_is_letter = i < len(text) - 1 and text[i + 1].isalpha()
+
+ if prev_is_letter and next_is_letter:
+ # Mid-word apostrophe (contraction/possessive) - don't count
+ continue
+
+ # Check if this is a word-initial elision
+ prev_is_word_boundary = i == 0 or not text[i - 1].isalnum()
+ if prev_is_word_boundary and c in SINGLE_QUOTES and _is_elision(text, i):
+ # Word-initial elision - don't count
+ continue
+
+ count += 1
+ return count
+
+
+ def _is_inside_open_quote(text: str) -> bool:
+ """Check if text ends with an unclosed quote.
+
+ Counts quote characters (excluding apostrophes) and returns True
+ if the count is odd, meaning there's an open quote.
+
+ Args:
+ text: The text to check.
+
+ Returns:
+ True if there's an unclosed quote at the end of text.
+ """
+ return _count_quotes(text) % 2 == 1
+
+ # Characters that indicate end of sentence
+ _SENTENCE_END_PUNCT = {'.', '?', '!'}
+
+
+ def _ends_with_sentence_punct(text: str) -> bool:
+ """Check if text ends with sentence-ending punctuation.
+
+ Handles trailing quotes/parens: 'He said "Hello."' -> True
+ Handles ellipsis: 'He wondered...' -> True
+
+ Args:
+ text: The text to check.
+
+ Returns:
+ True if text ends with . ? ! or ... (possibly followed by quotes/parens).
+ """
+ if not text:
+ return False
+
+ # Strip trailing whitespace and quotes/parens (including curly quotes)
+ stripped = text.rstrip()
+ trailing_chars = {'"', "'", ')', ']', '\u201d', '\u2019'} # " ' ) ] ” ’
+ while stripped and stripped[-1] in trailing_chars:
+ stripped = stripped[:-1]
+
+ if not stripped:
+ return False
+
+ return stripped[-1] in _SENTENCE_END_PUNCT


  def _dehyphenate_block(block: str) -> str:
  """Remove hyphens from words split across lines.

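A few illustrative calls against the helpers above (results traced by hand from the logic shown):

```python
from fast_sentence_segment.dmo.unwrap_hard_wrapped_text import (
    _ends_with_sentence_punct, _is_elision, _is_inside_open_quote)

assert _is_elision("'tis true", 0) is True                    # "tis" is a known elision
assert _is_inside_open_quote('"He began to speak') is True    # one unclosed double quote
assert _ends_with_sentence_punct('He said "Hello."') is True  # trailing quote stripped first
assert _ends_with_sentence_punct('His colour') is False       # no terminal punctuation
```
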
@@ -47,11 +216,26 @@ def unwrap_hard_wrapped_text(text: str) -> str:
  single spaces. Also dehyphenates words that were split
  across lines for typesetting purposes.

+ Special handling for spurious blank lines (issue #12):
+ When a single blank line appears mid-sentence (previous line
+ doesn't end with .?! and next line starts lowercase), the
+ text is joined rather than treated as a paragraph break.
+
+ Quote-aware joining (issue #14):
+ When we're inside an open quote (odd number of quote characters),
+ join across blank lines even if the previous line ends with
+ sentence punctuation and the next starts uppercase. This keeps
+ multi-sentence dialog together.
+
  Examples:
  >>> unwrap_hard_wrapped_text("a bot-\\ntle of wine")
  'a bottle of wine'
  >>> unwrap_hard_wrapped_text("line one\\nline two")
  'line one line two'
+ >>> unwrap_hard_wrapped_text("His colour\\n\\nmounted;")
+ 'His colour mounted;'
+ >>> unwrap_hard_wrapped_text("'First.\\n\\nSecond.'")
+ "'First. Second.'"

  Args:
  text: Raw text with hard-wrapped lines.
@@ -60,15 +244,75 @@ def unwrap_hard_wrapped_text(text: str) -> str:
  Text with paragraphs unwrapped into continuous strings,
  separated by double newlines, with hyphenated words rejoined.
  """
- blocks = re.split(r'\n\s*\n', text)
- unwrapped = []
+ lines = text.splitlines()
+ paragraphs: list[list[str]] = []
+ current_para_lines: list[str] = []
+ blank_line_count = 0
+
+ for line in lines:
+ stripped = line.strip()
+
+ if not stripped:
+ # Blank line (or whitespace-only)
+ blank_line_count += 1
+ else:
+ # Non-blank line
+ if current_para_lines and blank_line_count > 0:
+ # We have previous content and saw blank line(s)
+ # Build the current paragraph text to check ending
+ prev_para = ' '.join(ln.strip() for ln in current_para_lines if ln.strip())

- for block in blocks:
+ # Issue #12: Join across single blank line if:
+ # 1. Exactly one blank line
+ # 2. Previous paragraph doesn't end with sentence punctuation
+ # 3. Current line starts with lowercase
+ issue_12_join = (
+ blank_line_count == 1
+ and prev_para
+ and not _ends_with_sentence_punct(prev_para)
+ and stripped[0].islower()
+ )
+
+ # Issue #14: Join across blank line if inside open quote
+ # Even if previous ends with sentence punct and next starts uppercase,
+ # we should join if we're inside an unclosed quote
+ issue_14_join = (
+ blank_line_count == 1
+ and prev_para
+ and _is_inside_open_quote(prev_para)
+ )
+
+ should_join = issue_12_join or issue_14_join
+
+ if should_join:
+ # Treat as continuation of current paragraph
+ current_para_lines.append(line)
+ else:
+ # Finish current paragraph, start new one
+ paragraphs.append(current_para_lines)
+ current_para_lines = [line]
+ else:
+ # No blank lines seen, add to current paragraph
+ current_para_lines.append(line)
+
+ blank_line_count = 0
+
+ # Don't forget the last paragraph
+ if current_para_lines:
+ paragraphs.append(current_para_lines)
+
+ # Process each paragraph: dehyphenate and join lines
+ unwrapped = []
+ for para_lines in paragraphs:
+ block = '\n'.join(para_lines)
  # First, dehyphenate words split across lines
  block = _dehyphenate_block(block)
  # Then join remaining lines with spaces
- lines = block.splitlines()
- joined = ' '.join(line.strip() for line in lines if line.strip())
+ joined_lines = block.splitlines()
+ joined = ' '.join(ln.strip() for ln in joined_lines if ln.strip())
+ # Normalize multiple spaces to single space (OCR artifacts, formatting)
+ while '  ' in joined:
+ joined = joined.replace('  ', ' ')
  if joined:
  unwrapped.append(joined)

@@ -43,18 +43,20 @@ def _load_spacy_model(model_name: str = "en_core_web_sm"):

  return spacy.load(model_name)

- from fast_sentence_segment.dmo import AbbreviationMerger
- from fast_sentence_segment.dmo import AbbreviationSplitter
- from fast_sentence_segment.dmo import TitleNameMerger
- from fast_sentence_segment.dmo import EllipsisNormalizer
- from fast_sentence_segment.dmo import NewlinesToPeriods
- from fast_sentence_segment.dmo import BulletPointCleaner
- from fast_sentence_segment.dmo import NumberedListNormalizer
- from fast_sentence_segment.dmo import QuestionExclamationSplitter
- from fast_sentence_segment.dmo import SpacyDocSegmenter
- from fast_sentence_segment.dmo import PostProcessStructure
- from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote
- from fast_sentence_segment.dmo import Dehyphenator
+ # Imports after lazy spacy loading function (intentional)
+ from fast_sentence_segment.dmo import AbbreviationMerger # noqa: E402
+ from fast_sentence_segment.dmo import AbbreviationSplitter # noqa: E402
+ from fast_sentence_segment.dmo import TitleNameMerger # noqa: E402
+ from fast_sentence_segment.dmo import EllipsisNormalizer # noqa: E402
+ from fast_sentence_segment.dmo import NewlinesToPeriods # noqa: E402
+ from fast_sentence_segment.dmo import BulletPointCleaner # noqa: E402
+ from fast_sentence_segment.dmo import NumberedListNormalizer # noqa: E402
+ from fast_sentence_segment.dmo import QuestionExclamationSplitter # noqa: E402
+ from fast_sentence_segment.dmo import SpacyDocSegmenter # noqa: E402
+ from fast_sentence_segment.dmo import PostProcessStructure # noqa: E402
+ from fast_sentence_segment.dmo import StripTrailingPeriodAfterQuote # noqa: E402
+ from fast_sentence_segment.dmo import Dehyphenator # noqa: E402
+ from fast_sentence_segment.dmo import OcrArtifactFixer # noqa: E402


  class PerformSentenceSegmentation(BaseObject):
@@ -84,6 +86,7 @@ class PerformSentenceSegmentation(BaseObject):
  self.__nlp = _load_spacy_model("en_core_web_sm")

  self._dehyphenate = Dehyphenator.process
+ self._fix_ocr_artifacts = OcrArtifactFixer.process
  self._newlines_to_periods = NewlinesToPeriods.process
  self._normalize_numbered_lists = NumberedListNormalizer().process
  self._normalize_ellipses = EllipsisNormalizer().process
@@ -138,6 +141,9 @@ class PerformSentenceSegmentation(BaseObject):
  # Must happen before newlines are converted to periods
  input_text = self._dehyphenate(input_text)

+ # Fix common OCR artifacts (issue #9)
+ input_text = self._fix_ocr_artifacts(input_text)
+
  input_text = self._normalize_numbered_lists(input_text)
  input_text = self._normalize_ellipses(input_text)

@@ -1,9 +1,9 @@
- Metadata-Version: 2.4
+ Metadata-Version: 2.1
  Name: fast-sentence-segment
- Version: 1.4.4
+ Version: 1.5.3
  Summary: Fast and Efficient Sentence Segmentation
+ Home-page: https://github.com/craigtrim/fast-sentence-segment
  License: MIT
- License-File: LICENSE
  Keywords: nlp,text,preprocess,segment
  Author: Craig Trim
  Author-email: craigtrim@gmail.com
@@ -33,7 +33,6 @@ Description-Content-Type: text/markdown

  [![PyPI version](https://img.shields.io/pypi/v/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)
  [![Python versions](https://img.shields.io/pypi/pyversions/fast-sentence-segment.svg)](https://pypi.org/project/fast-sentence-segment/)
- [![CI](https://img.shields.io/github/actions/workflow/status/craigtrim/fast-sentence-segment/ci.yml?branch=master&label=CI)](https://github.com/craigtrim/fast-sentence-segment/actions/workflows/ci.yml)
  [![Tests](https://img.shields.io/badge/tests-664-brightgreen)](https://github.com/craigtrim/fast-sentence-segment/tree/master/tests)
  [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
  [![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
@@ -165,21 +164,24 @@ segment-file --input-file book.txt --output-file sentences.txt

  # Unwrap hard-wrapped e-texts (Project Gutenberg, etc.)
  segment-file --input-file book.txt --output-file sentences.txt --unwrap
+
+ # Dialog-aware formatting (implies --unwrap)
+ segment -f book.txt --format
  ```

  ## API Reference

  | Function | Parameters | Returns | Description |
  |----------|------------|---------|-------------|
- | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False` | `list` | Main entry point for segmentation |
+ | `segment_text()` | `input_text: str`, `flatten: bool = False`, `unwrap: bool = False`, `format: str = None` | `list` or `str` | Main entry point for segmentation. Use `format="dialog"` for dialog-aware output. |
  | `Segmenter.input_text()` | `input_text: str` | `list[list[str]]` | Cached paragraph-aware segmentation |

  ### CLI Commands

  | Command | Description |
  |---------|-------------|
- | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output. |
- | `segment-file --input-file IN --output-file OUT [--unwrap]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts. |
+ | `segment [text]` | Segment text from argument, `-f FILE`, or stdin. Use `-n` for numbered output, `--format` for dialog-aware paragraph grouping. |
+ | `segment-file --input-file IN --output-file OUT [--unwrap] [--format]` | Segment a file and write one sentence per line. Use `--unwrap` for hard-wrapped e-texts, `--format` for dialog-aware formatting. |

  ## Why Nested Lists?

@@ -1,32 +1,34 @@
- fast_sentence_segment/__init__.py,sha256=jeb4yCy89ivyqbo-4ldJLquPAG_XR_33Q7nrDjqPxvE,1465
+ fast_sentence_segment/__init__.py,sha256=DI7cyxtqnWd_5lrtGXqkIm8Aje0h55nGAHGgP6zXRyE,2278
  fast_sentence_segment/bp/__init__.py,sha256=j2-WfQ9WwVuXeGSjvV6XLVwEdvau8sdAQe4Pa4DrYi8,33
  fast_sentence_segment/bp/segmenter.py,sha256=UW6DguPgA56h-pPYRsfJhjIzBe40j6NdjkwYxamASyA,1928
- fast_sentence_segment/cli.py,sha256=vr1Gh-pq4bIPcnhUF6c7ckGdEfoyrI_r0XcrJrIfjEA,5640
+ fast_sentence_segment/cli.py,sha256=WFhHTYrHUP0PqXeJ-6bR6hlXsV4ql190c2S2nBf-uCA,9775
  fast_sentence_segment/core/__init__.py,sha256=uoBersYyVStJ5a8zJpQz1GDGaloEdAv2jGHw1292hRM,108
  fast_sentence_segment/core/base_object.py,sha256=AYr7yzusIwawjbKdvcv4yTEnhmx6M583kDZzhzPOmq4,635
  fast_sentence_segment/core/stopwatch.py,sha256=hE6hMz2q6rduaKi58KZmiAL-lRtyh_wWCANhl4KLkRQ,879
- fast_sentence_segment/dmo/__init__.py,sha256=N0lLHVn6zKeg6h1LIfoc4XeXPUY-uSbMT45dP2_vn8M,862
+ fast_sentence_segment/dmo/__init__.py,sha256=emn-F46GMpR5dTQXbhMWGexj7sOfWWuTpRWiENjIadQ,972
  fast_sentence_segment/dmo/abbreviation_merger.py,sha256=tCXM6yCfMryJvMIVWIxP_EocoibZi8vohFzJ5tvMYr0,4432
  fast_sentence_segment/dmo/abbreviation_splitter.py,sha256=03mSyJcLooNyIjXx6mPlrnjmKgZW-uhUIqG4U-MbIGw,2981
  fast_sentence_segment/dmo/abbreviations.py,sha256=CGJrJDo6pmYd3pTNEQbdOo8N6tnkCnwyL2X7Si663Os,2530
  fast_sentence_segment/dmo/bullet_point_cleaner.py,sha256=WOZQRWXiiyRi8rOuEIw36EmkaXmATHL9_Dxb2rderw4,1606
  fast_sentence_segment/dmo/dehyphenator.py,sha256=6BJTie7tClRAifeiW8V2CdAAbcbknhtqmKylAdRZ7ko,1776
+ fast_sentence_segment/dmo/dialog_formatter.py,sha256=CLtXGTRBC4PmH47vQ4WSlWIFi5r4rtJ7mFxs4xyBcmg,12824
  fast_sentence_segment/dmo/ellipsis_normalizer.py,sha256=lHs9dLFfKJe-2vFNe17Hik90g3_kXX347OzGP_IOT08,1521
  fast_sentence_segment/dmo/group_quoted_sentences.py,sha256=Ifh_kUwi7sMbzbZvrTgEKkzWe50AafUDhVKVPR9h7wQ,5092
  fast_sentence_segment/dmo/newlines_to_periods.py,sha256=PUrXreqZWiITINfoJL5xRRlXJH6noH0cdXtW1EqAh8I,1517
  fast_sentence_segment/dmo/normalize_quotes.py,sha256=mr53qo_tj_I9XzElOKjUQvCtDQh7mBCGy-iqsHZDX14,2881
  fast_sentence_segment/dmo/numbered_list_normalizer.py,sha256=q0sOCW8Jkn2vTXlUcVhmDvYES3yvJx1oUVl_8y7eL4E,1672
+ fast_sentence_segment/dmo/ocr_artifact_fixer.py,sha256=lhU6Nfp4_g5yChm1zxgwM5R5ixk3pzhrk1qEgNJa8Hc,2139
  fast_sentence_segment/dmo/post_process_sentences.py,sha256=5jxG3TmFjxIExMPLhnCB5JT1lXQvFU9r4qQGoATGrWk,916
  fast_sentence_segment/dmo/question_exclamation_splitter.py,sha256=cRsWRu8zb6wOWG-BjMahHfz4YGutKiV9lW7dE-q3tgc,2006
  fast_sentence_segment/dmo/spacy_doc_segmenter.py,sha256=Kb65TYMhrbpTYEey5vb7TyhCjUHVxmugHYIeKkntCwk,5147
  fast_sentence_segment/dmo/strip_trailing_period_after_quote.py,sha256=wYkoLy5XJKZIblJXBvDAB8-a81UTQOhOf2u91wjJWUw,2259
  fast_sentence_segment/dmo/title_name_merger.py,sha256=zbG04_VjwM8TtT8LhavvmZqIZL_2xgT2OTxWkK_Zt1s,5133
- fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py,sha256=V1T5RsJBaII_iGJMyWvv6rb2mny8pnVd428oVZL0n5I,2457
+ fast_sentence_segment/dmo/unwrap_hard_wrapped_text.py,sha256=UlWQSb6H8hGwCu719mYEuc4rBzH68wGWwuRH9c4S_xw,11573
  fast_sentence_segment/svc/__init__.py,sha256=9B12mXxBnlalH4OAm1AMLwUMa-RLi2ilv7qhqv26q7g,144
  fast_sentence_segment/svc/perform_paragraph_segmentation.py,sha256=zLKw9rSzb0NNfx4MyEeoGrHwhxTtH5oDrYcAL2LMVHY,1378
- fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=mAJEPWqNQFbnlj7Rb7yiXIRHCAdlgsN0jAbg7e2qpMU,7421
- fast_sentence_segment-1.4.4.dist-info/METADATA,sha256=8EZrIvdE8CWTW64_HqkMH8eF6CzXs6UDdaFjcce7LTA,7947
- fast_sentence_segment-1.4.4.dist-info/WHEEL,sha256=kJCRJT_g0adfAJzTx2GUMmS80rTJIVHRCfG0DQgLq3o,88
- fast_sentence_segment-1.4.4.dist-info/entry_points.txt,sha256=Zc8OwFKj3ofnjy5ZIFqHzDkIEWweV1AP1xap1ZFGD8M,107
- fast_sentence_segment-1.4.4.dist-info/licenses/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
- fast_sentence_segment-1.4.4.dist-info/RECORD,,
+ fast_sentence_segment/svc/perform_sentence_segmentation.py,sha256=I58wdpoRqa79qrn-GxZhqPSg7DF-5curtLhIBBdi33E,7879
+ fast_sentence_segment-1.5.3.dist-info/LICENSE,sha256=vou5JCLAT5nHcsUv-AkjUYAihYfN9mwPDXxV2DHyHBo,1067
+ fast_sentence_segment-1.5.3.dist-info/METADATA,sha256=0TIiLm5CuM0-_h1Rr_CWHeBlkcYsFS2t5-NWgHdpwUI,8038
+ fast_sentence_segment-1.5.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ fast_sentence_segment-1.5.3.dist-info/entry_points.txt,sha256=Zc8OwFKj3ofnjy5ZIFqHzDkIEWweV1AP1xap1ZFGD8M,107
+ fast_sentence_segment-1.5.3.dist-info/RECORD,,
@@ -1,4 +1,4 @@
  Wheel-Version: 1.0
- Generator: poetry-core 2.3.1
+ Generator: poetry-core 1.9.0
  Root-Is-Purelib: true
  Tag: py3-none-any