pystylometry 1.3.0-py3-none-any.whl → 1.3.5-py3-none-any.whl
This diff compares the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- pystylometry/__init__.py +42 -3
- pystylometry/_types.py +205 -3
- pystylometry/cli.py +321 -0
- pystylometry/lexical/__init__.py +5 -1
- pystylometry/lexical/repetition.py +506 -0
- pystylometry/lexical/ttr.py +288 -97
- pystylometry-1.3.5.dist-info/LICENSE +21 -0
- pystylometry-1.3.5.dist-info/METADATA +78 -0
- {pystylometry-1.3.0.dist-info → pystylometry-1.3.5.dist-info}/RECORD +11 -9
- {pystylometry-1.3.0.dist-info → pystylometry-1.3.5.dist-info}/WHEEL +1 -1
- {pystylometry-1.3.0.dist-info → pystylometry-1.3.5.dist-info}/entry_points.txt +1 -0
- pystylometry-1.3.0.dist-info/METADATA +0 -136
pystylometry/__init__.py
CHANGED
@@ -40,14 +40,13 @@ Usage:
     print(result.pattern_confidence)
 """

+from . import lexical  # noqa: E402
 from ._types import AnalysisResult
+from .tokenizer import TokenizationStats, Tokenizer, TokenMetadata

 # Version
 __version__ = "0.1.0"

-# Core exports - always available
-from . import lexical
-
 # Optional exports - may raise ImportError if dependencies not installed
 try:
     from . import readability  # noqa: F401
@@ -87,6 +86,41 @@ _CONSISTENCY_AVAILABLE = True
 _STYLISTIC_AVAILABLE = True


+def tokenize(text: str, **kwargs: object) -> list[str]:
+    """Tokenize text using the stylometric tokenizer.
+
+    Convenience wrapper around Tokenizer.tokenize(). All keyword arguments
+    are forwarded to the Tokenizer constructor.
+
+    Args:
+        text: Input text to tokenize.
+        **kwargs: Options forwarded to Tokenizer (lowercase, strip_numbers,
+            expand_contractions, etc.).
+
+    Returns:
+        List of token strings.
+
+    Example:
+        >>> from pystylometry import tokenize
+        >>> tokenize("Hello, world! It's a test.")
+        ['hello', 'world', "it's", 'a', 'test']
+    """
+    return Tokenizer(**kwargs).tokenize(text)  # type: ignore[arg-type]
+
+
+def tokenize_with_metadata(text: str, **kwargs: object) -> list[TokenMetadata]:
+    """Tokenize text and return tokens with positional and type metadata.
+
+    Args:
+        text: Input text to tokenize.
+        **kwargs: Options forwarded to Tokenizer.
+
+    Returns:
+        List of TokenMetadata objects.
+    """
+    return Tokenizer(**kwargs).tokenize_with_metadata(text)  # type: ignore[arg-type]
+
+
 def analyze(
     text: str,
     lexical_metrics: bool = True,
@@ -225,6 +259,11 @@ __all__ = [
     "__version__",
     "analyze",
     "get_available_modules",
+    "tokenize",
+    "tokenize_with_metadata",
+    "Tokenizer",
+    "TokenMetadata",
+    "TokenizationStats",
     "lexical",
 ]

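Taken together, the two wrappers above expose tokenization at the package top level without constructing a Tokenizer by hand. A minimal usage sketch, using only names and outputs shown in the docstrings above (the metadata fields match what tokenize_cli() serializes further down):

    from pystylometry import tokenize, tokenize_with_metadata

    # List-of-strings form, per the docstring example
    print(tokenize("Hello, world! It's a test."))
    # ['hello', 'world', "it's", 'a', 'test']

    # Metadata form: token text, character offsets, and token type,
    # the same fields the CLI emits with --json --metadata
    for meta in tokenize_with_metadata("Hello, world!"):
        print(meta.token, meta.start, meta.end, meta.token_type)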
pystylometry/_types.py
CHANGED
@@ -23,7 +23,7 @@ from __future__ import annotations

 import statistics
 from dataclasses import dataclass
-from typing import Any
+from typing import Any, Optional

 # ===== Distribution and Chunking =====
 # Related to GitHub Issue #27: Native chunked analysis with Distribution dataclass
@@ -316,8 +316,8 @@ class HapaxLexiconResult:
 class TTRResult:
     """Result from Type-Token Ratio (TTR) analysis.

-
-
+    Measures vocabulary richness through the ratio of unique words (types)
+    to total words (tokens).

     All numeric metrics include both a mean value (convenient access) and
     a full distribution with per-chunk values and statistics.
@@ -370,6 +370,208 @@ class TTRResult:
     metadata: dict[str, Any]


+@dataclass
+class TTRAggregateResult:
+    """Aggregated TTR statistics for a collection of texts.
+
+    Computes group-level summary statistics (mean, std, min, max, median)
+    across multiple ``TTRResult`` objects. Useful for comparative analysis
+    across authors, genres, or time periods.
+
+    Related GitHub Issue:
+        #43 - Inline stylometry-ttr into pystylometry (remove external dependency)
+        https://github.com/craigtrim/pystylometry/issues/43
+
+    Example:
+        >>> from pystylometry.lexical import compute_ttr, TTRAggregator
+        >>> results = [compute_ttr(t) for t in texts]
+        >>> agg = TTRAggregator()
+        >>> stats = agg.aggregate(results, group_id="Austen")
+        >>> stats.ttr_mean
+        0.412
+    """
+
+    group_id: str
+    text_count: int
+    total_words: int
+
+    # Raw TTR statistics
+    ttr_mean: float
+    ttr_std: float
+    ttr_min: float
+    ttr_max: float
+    ttr_median: float
+
+    # Root TTR (Guiraud's index) statistics
+    root_ttr_mean: float
+    root_ttr_std: float
+
+    # Log TTR (Herdan's C) statistics
+    log_ttr_mean: float
+    log_ttr_std: float
+
+    # STTR statistics (None if no texts had enough words for STTR)
+    sttr_mean: Optional[float]
+    sttr_std: Optional[float]
+
+    # Delta std mean (None if no texts had delta metrics)
+    delta_std_mean: Optional[float]
+
+    metadata: dict[str, Any]
+
+
+# ===== Repetition Detection Results =====
+# Related to GitHub Issue #28: Verbal tics detection for slop analysis
+# https://github.com/craigtrim/pystylometry/issues/28
+
+
+@dataclass
+class RepetitiveWord:
+    """A single word flagged as abnormally repetitive.
+
+    The repetition_score is the ratio of observed count to expected count
+    based on the word's frequency in the British National Corpus (BNC).
+    Higher scores indicate stronger overrepresentation.
+
+    Related GitHub Issue:
+        #28 - Verbal tics detection for slop analysis
+        https://github.com/craigtrim/pystylometry/issues/28
+
+    Attributes:
+        word: The flagged word (lowercased).
+        count: Observed count in the text.
+        expected_count: Expected count based on BNC relative frequency × text length.
+            0.0 if word not found in BNC.
+        repetition_score: count / expected_count. float('inf') if expected_count is 0.
+        bnc_bucket: BNC frequency bucket (1-100, 1=most frequent). None if not in BNC.
+        chunk_counts: Per-chunk occurrence counts (for distribution analysis).
+        distribution_entropy: Shannon entropy of the word's chunk distribution.
+            Low entropy = suspiciously even spread (model tic).
+            High entropy = clustered usage (human writing about a specific scene).
+        distribution_variance: Variance of per-chunk counts.
+    """
+
+    word: str
+    count: int
+    expected_count: float
+    repetition_score: float
+    bnc_bucket: int | None
+    chunk_counts: list[int]
+    distribution_entropy: float
+    distribution_variance: float
+
+
+@dataclass
+class RepetitiveUnigramsResult:
+    """Result from repetitive unigram detection.
+
+    Identifies content words that appear far more frequently than expected
+    based on their frequency in the British National Corpus (BNC, ~100M tokens).
+    This is a key indicator of AI-generated "slop" where models exhibit verbal
+    tics — repeating certain words with suspicious regularity.
+
+    Related GitHub Issue:
+        #28 - Verbal tics detection for slop analysis
+        https://github.com/craigtrim/pystylometry/issues/28
+
+    The slop_score provides a single aggregate metric:
+        slop_score = flagged_words_per_10k × mean_repetition_score
+
+    Where:
+        - flagged_words_per_10k = count of flagged words / (total content words / 10000)
+        - mean_repetition_score = mean repetition_score across all flagged words
+
+    Higher slop_score = more likely AI-generated verbal tics.
+
+    References:
+        British National Corpus Consortium. (2007). The British National Corpus,
+        version 3 (BNC XML Edition). http://www.natcorp.ox.ac.uk/
+
+    Example:
+        >>> result = compute_repetitive_unigrams(text)
+        >>> for w in result.repetitive_words[:5]:
+        ...     print(f"{w.word}: {w.count}x (expected {w.expected_count:.1f}, "
+        ...           f"score {w.repetition_score:.1f})")
+        shimmered: 23x (expected 0.1, score 266.2)
+        >>> result.slop_score
+        42.7
+    """
+
+    repetitive_words: list[RepetitiveWord]  # Sorted by repetition_score descending
+    total_content_words: int
+    flagged_count: int  # Number of words exceeding threshold
+    flagged_words_per_10k: float  # flagged_count / (total_content_words / 10000)
+    mean_repetition_score: float  # Mean score across flagged words
+    slop_score: float  # Aggregate: flagged_words_per_10k × mean_repetition_score
+    total_content_words_dist: Distribution
+    chunk_size: int
+    chunk_count: int
+    metadata: dict[str, Any]
+
+
+@dataclass
+class RepetitiveNgram:
+    """A single n-gram flagged as abnormally repetitive.
+
+    Content n-grams (bigrams, trigrams, etc.) should rarely repeat verbatim
+    in natural writing. N-grams that repeat beyond a length-scaled threshold
+    are flagged.
+
+    Related GitHub Issue:
+        #28 - Verbal tics detection for slop analysis
+        https://github.com/craigtrim/pystylometry/issues/28
+
+    Attributes:
+        ngram: The flagged n-gram as a tuple of words.
+        count: Observed count in the text.
+        frequency_per_10k: Occurrences per 10,000 n-grams.
+        chunk_counts: Per-chunk occurrence counts.
+        distribution_entropy: Shannon entropy of the n-gram's chunk distribution.
+        distribution_variance: Variance of per-chunk counts.
+    """
+
+    ngram: tuple[str, ...]
+    count: int
+    frequency_per_10k: float
+    chunk_counts: list[int]
+    distribution_entropy: float
+    distribution_variance: float
+
+
+@dataclass
+class RepetitiveNgramsResult:
+    """Result from repetitive n-gram detection.
+
+    Detects bigrams, trigrams, or higher-order n-grams that repeat more than
+    expected within the text. No external corpus is required — content n-grams
+    should not repeat verbatim often in natural writing.
+
+    N-grams composed entirely of function words (e.g., "of the", "in a") are
+    excluded since their repetition is expected.
+
+    Related GitHub Issue:
+        #28 - Verbal tics detection for slop analysis
+        https://github.com/craigtrim/pystylometry/issues/28
+
+    Example:
+        >>> result = compute_repetitive_ngrams(text, n=2)
+        >>> for ng in result.repetitive_ngrams[:5]:
+        ...     print(f"{' '.join(ng.ngram)}: {ng.count}x "
+        ...           f"({ng.frequency_per_10k:.1f} per 10k)")
+        uncomfortable truth: 8x (1.6 per 10k)
+    """
+
+    repetitive_ngrams: list[RepetitiveNgram]  # Sorted by count descending
+    n: int | tuple[int, ...]  # N-gram order(s) analyzed
+    total_ngrams: int
+    flagged_count: int
+    flagged_per_10k: float  # flagged_count / (total_ngrams / 10000)
+    total_ngrams_dist: Distribution
+    chunk_size: int
+    chunk_count: int
+    metadata: dict[str, Any]
+
+
 # ===== Readability Results =====


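The slop_score definition in RepetitiveUnigramsResult above is simple enough to verify by hand. A worked example with invented numbers (the words, counts, and expectations below are made up for illustration; nothing here is computed by the package):

    import math

    # A hypothetical 20,000-word text with two flagged words, each given
    # as (word, observed count, expected count from BNC frequency).
    total_content_words = 20_000
    flagged = [("shimmered", 23, 0.1), ("tapestry", 12, 0.2)]

    # repetition_score = count / expected_count (inf when expected_count is 0)
    scores = [c / e if e else math.inf for _, c, e in flagged]  # [230.0, 60.0]

    flagged_words_per_10k = len(flagged) / (total_content_words / 10_000)  # 1.0
    mean_repetition_score = sum(scores) / len(scores)  # 145.0

    # slop_score = flagged_words_per_10k × mean_repetition_score
    slop_score = flagged_words_per_10k * mean_repetition_score
    print(slop_score)  # 145.0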
pystylometry/cli.py
CHANGED
@@ -3,6 +3,7 @@
 Usage:
     pystylometry-drift <file> [--window-size=N] [--stride=N] [--mode=MODE] [--json]
     pystylometry-drift <file> --plot [output.png]
+    pystylometry-tokenize <file> [--json] [--metadata] [--stats]

 Example:
     pystylometry-drift manuscript.txt
@@ -10,6 +11,9 @@ Example:
     pystylometry-drift manuscript.txt --json
     pystylometry-drift manuscript.txt --plot
     pystylometry-drift manuscript.txt --plot drift_report.png
+    pystylometry-tokenize manuscript.txt
+    pystylometry-tokenize manuscript.txt --json --metadata
+    pystylometry-tokenize manuscript.txt --stats
 """

 from __future__ import annotations
@@ -423,5 +427,322 @@ The generated viewer includes:
     print()


+def tokenize_cli() -> None:
+    """CLI entry point for stylometric tokenization."""
+    parser = argparse.ArgumentParser(
+        prog="pystylometry-tokenize",
+        description="Tokenize text for stylometric analysis.",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+  pystylometry-tokenize manuscript.txt
+  pystylometry-tokenize manuscript.txt --json
+  pystylometry-tokenize manuscript.txt --json --metadata
+  pystylometry-tokenize manuscript.txt --stats
+  pystylometry-tokenize manuscript.txt -U --expand-contractions
+  pystylometry-tokenize manuscript.txt --min-length 3 --strip-numbers
+""",
+    )
+
+    parser.add_argument(
+        "file",
+        type=Path,
+        help="Path to text file to tokenize",
+    )
+
+    # Output mode
+    output_group = parser.add_argument_group("output")
+    output_group.add_argument(
+        "-j",
+        "--json",
+        action="store_true",
+        help="Output as JSON (list of strings, or list of objects with --metadata)",
+    )
+    output_group.add_argument(
+        "-m",
+        "--metadata",
+        action="store_true",
+        help="Include token type and position metadata (implies --json)",
+    )
+    output_group.add_argument(
+        "-s",
+        "--stats",
+        action="store_true",
+        help="Show tokenization statistics instead of tokens",
+    )
+
+    # Core behavior
+    behavior_group = parser.add_argument_group("behavior")
+    behavior_group.add_argument(
+        "-U",
+        "--no-lowercase",
+        action="store_true",
+        help="Preserve original case (default: lowercase)",
+    )
+    behavior_group.add_argument(
+        "-e",
+        "--expand-contractions",
+        action="store_true",
+        help="Expand contractions (it's -> it is)",
+    )
+    behavior_group.add_argument(
+        "-n",
+        "--strip-numbers",
+        action="store_true",
+        help="Remove numeric tokens",
+    )
+    behavior_group.add_argument(
+        "--keep-punctuation",
+        action="store_true",
+        help="Keep punctuation tokens (default: stripped)",
+    )
+
+    # Filtering
+    filter_group = parser.add_argument_group("filtering")
+    filter_group.add_argument(
+        "--min-length",
+        type=int,
+        default=1,
+        metavar="N",
+        help="Minimum token length (default: 1)",
+    )
+    filter_group.add_argument(
+        "--max-length",
+        type=int,
+        default=None,
+        metavar="N",
+        help="Maximum token length (default: unlimited)",
+    )
+    filter_group.add_argument(
+        "--preserve-urls",
+        action="store_true",
+        help="Keep URL tokens",
+    )
+    filter_group.add_argument(
+        "--preserve-emails",
+        action="store_true",
+        help="Keep email tokens",
+    )
+    filter_group.add_argument(
+        "--preserve-hashtags",
+        action="store_true",
+        help="Keep hashtag tokens",
+    )
+    filter_group.add_argument(
+        "--preserve-mentions",
+        action="store_true",
+        help="Keep @mention tokens",
+    )
+
+    # Advanced
+    advanced_group = parser.add_argument_group("advanced")
+    advanced_group.add_argument(
+        "--expand-abbreviations",
+        action="store_true",
+        help="Expand abbreviations (Dr. -> Doctor)",
+    )
+    advanced_group.add_argument(
+        "--strip-accents",
+        action="store_true",
+        help="Remove accents from characters",
+    )
+    advanced_group.add_argument(
+        "--no-clean",
+        action="store_true",
+        help="Skip text cleaning (italics, brackets, page markers)",
+    )
+    advanced_group.add_argument(
+        "--no-unicode-normalize",
+        action="store_true",
+        help="Skip unicode normalization",
+    )
+
+    args = parser.parse_args()
+
+    # --- ANSI colors ---
+    use_color = sys.stderr.isatty()
+
+    def _c(code: str, text: str) -> str:
+        return f"\033[{code}m{text}\033[0m" if use_color else text
+
+    bold = lambda t: _c("1", t)  # noqa: E731
+    dim = lambda t: _c("2", t)  # noqa: E731
+    cyan = lambda t: _c("36", t)  # noqa: E731
+    green = lambda t: _c("32", t)  # noqa: E731
+    yellow = lambda t: _c("33", t)  # noqa: E731
+
+    # --- Validate file ---
+    if not args.file.exists():
+        print(f"Error: File not found: {args.file}", file=sys.stderr)
+        sys.exit(1)
+
+    try:
+        text = args.file.read_text(encoding="utf-8")
+    except Exception as e:
+        print(f"Error reading file: {e}", file=sys.stderr)
+        sys.exit(1)
+
+    # --- Build Tokenizer kwargs ---
+    tokenizer_kwargs = {
+        "lowercase": not args.no_lowercase,
+        "min_length": args.min_length,
+        "max_length": args.max_length,
+        "strip_numbers": args.strip_numbers,
+        "strip_punctuation": not args.keep_punctuation,
+        "preserve_urls": args.preserve_urls,
+        "preserve_emails": args.preserve_emails,
+        "preserve_hashtags": args.preserve_hashtags,
+        "preserve_mentions": args.preserve_mentions,
+        "expand_contractions": args.expand_contractions,
+        "expand_abbreviations": args.expand_abbreviations,
+        "strip_accents": args.strip_accents,
+        "normalize_unicode": not args.no_unicode_normalize,
+        "clean_text": not args.no_clean,
+    }
+
+    # Collect active options for banner
+    active_opts = []
+    if args.no_lowercase:
+        active_opts.append("preserve case")
+    if args.expand_contractions:
+        active_opts.append("expand contractions")
+    if args.expand_abbreviations:
+        active_opts.append("expand abbreviations")
+    if args.strip_numbers:
+        active_opts.append("strip numbers")
+    if args.keep_punctuation:
+        active_opts.append("keep punctuation")
+    if args.strip_accents:
+        active_opts.append("strip accents")
+    if args.no_clean:
+        active_opts.append("skip cleaning")
+    if args.no_unicode_normalize:
+        active_opts.append("skip unicode normalization")
+    if args.preserve_urls:
+        active_opts.append("preserve URLs")
+    if args.preserve_emails:
+        active_opts.append("preserve emails")
+    if args.preserve_hashtags:
+        active_opts.append("preserve hashtags")
+    if args.preserve_mentions:
+        active_opts.append("preserve mentions")
+    if args.min_length > 1:
+        active_opts.append(f"min length {args.min_length}")
+    if args.max_length is not None:
+        active_opts.append(f"max length {args.max_length}")
+
+    # Determine output format
+    if args.stats:
+        output_format = "Statistics"
+    elif args.metadata:
+        output_format = "JSON (with metadata)"
+    elif args.json:
+        output_format = "JSON"
+    else:
+        output_format = "One token per line"
+
+    # --- Banner (to stderr so stdout stays pipeable) ---
+    char_count = len(text)
+    line_count = text.count("\n") + 1
+
+    banner = sys.stderr
+    print(file=banner)
+    print(f" {bold('PYSTYLOMETRY')} {dim('—')} {cyan('Stylometric Tokenizer')}", file=banner)
+    print(f" {dim('═' * 71)}", file=banner)
+    print(file=banner)
+    print(f" {bold('INPUT')}", file=banner)
+    print(f" {dim('─' * 71)}", file=banner)
+    print(f" File: {args.file}", file=banner)
+    print(f" Size: {char_count:,} characters / {line_count:,} lines", file=banner)
+    print(file=banner)
+    print(f" {bold('CONFIGURATION')}", file=banner)
+    print(f" {dim('─' * 71)}", file=banner)
+    print(f" Case: {'preserve' if args.no_lowercase else 'lowercase'}", file=banner)
+    print(
+        f" Punctuation: {'keep' if args.keep_punctuation else 'strip'}",
+        file=banner,
+    )
+    print(
+        f" Contractions: {'expand' if args.expand_contractions else 'preserve'}",
+        file=banner,
+    )
+    print(f" Numbers: {'strip' if args.strip_numbers else 'keep'}", file=banner)
+    if active_opts:
+        print(f" Active options: {', '.join(active_opts)}", file=banner)
+    print(file=banner)
+    print(f" {bold('OUTPUT')}", file=banner)
+    print(f" {dim('─' * 71)}", file=banner)
+    print(f" Format: {output_format}", file=banner)
+    print(file=banner)
+
+    # --- Tokenize ---
+    from pystylometry.tokenizer import Tokenizer
+
+    tokenizer = Tokenizer(**tokenizer_kwargs)
+
+    if args.stats:
+        stats = tokenizer.get_statistics(text)
+        print(f" {bold('RESULTS')}", file=banner)
+        print(f" {dim('─' * 71)}", file=banner)
+        print(f" Total tokens: {green(f'{stats.total_tokens:,}')}", file=banner)
+        print(f" Unique tokens: {green(f'{stats.unique_tokens:,}')}", file=banner)
+        print(f" Word tokens: {stats.word_tokens:,}", file=banner)
+        print(f" Number tokens: {stats.number_tokens:,}", file=banner)
+        print(f" Punctuation: {stats.punctuation_tokens:,}", file=banner)
+        print(f" URLs: {stats.url_tokens:,}", file=banner)
+        print(f" Emails: {stats.email_tokens:,}", file=banner)
+        print(f" Hashtags: {stats.hashtag_tokens:,}", file=banner)
+        print(f" Mentions: {stats.mention_tokens:,}", file=banner)
+        print(f" Avg length: {stats.average_token_length:.1f}", file=banner)
+        print(f" Min length: {stats.min_token_length}", file=banner)
+        print(f" Max length: {stats.max_token_length}", file=banner)
+        print(file=banner)
+
+        if args.json:
+            import dataclasses
+
+            print(json.dumps(dataclasses.asdict(stats), indent=2))
+
+    elif args.metadata or (args.json and args.metadata):
+        metadata_list = tokenizer.tokenize_with_metadata(text)
+        count = len(metadata_list)
+        print(
+            f" {yellow('Tokenizing...')} {green(f'{count:,}')} tokens extracted",
+            file=banner,
+        )
+        print(file=banner)
+        output = [
+            {
+                "token": m.token,
+                "start": m.start,
+                "end": m.end,
+                "type": m.token_type,
+            }
+            for m in metadata_list
+        ]
+        print(json.dumps(output, indent=2))
+
+    elif args.json:
+        tokens = tokenizer.tokenize(text)
+        count = len(tokens)
+        print(
+            f" {yellow('Tokenizing...')} {green(f'{count:,}')} tokens extracted",
+            file=banner,
+        )
+        print(file=banner)
+        print(json.dumps(tokens, indent=2))
+
+    else:
+        tokens = tokenizer.tokenize(text)
+        count = len(tokens)
+        print(
+            f" {yellow('Tokenizing...')} {green(f'{count:,}')} tokens extracted",
+            file=banner,
+        )
+        print(file=banner)
+        for token in tokens:
+            print(token)
+
+
 if __name__ == "__main__":
     drift_cli()
pystylometry/lexical/__init__.py
CHANGED
@@ -5,12 +5,14 @@ from .advanced_diversity import compute_hdd, compute_mattr, compute_msttr, compu
 from .function_words import compute_function_words
 from .hapax import compute_hapax_ratios, compute_hapax_with_lexicon_analysis
 from .mtld import compute_mtld
-from .ttr import compute_ttr
+from .repetition import compute_repetitive_ngrams, compute_repetitive_unigrams
+from .ttr import TTRAggregator, compute_ttr
 from .word_frequency_sophistication import compute_word_frequency_sophistication
 from .yule import compute_yule

 __all__ = [
     "compute_ttr",
+    "TTRAggregator",
     "compute_mtld",
     "compute_yule",
     "compute_hapax_ratios",
@@ -21,4 +23,6 @@ __all__ = [
     "compute_hdd",
     "compute_msttr",
     "compute_word_frequency_sophistication",
+    "compute_repetitive_unigrams",
+    "compute_repetitive_ngrams",
 ]