pystylometry-1.3.1-py3-none-any.whl → pystylometry-1.3.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pystylometry/__init__.py +42 -3
- pystylometry/_types.py +53 -3
- pystylometry/cli.py +695 -0
- pystylometry/lexical/__init__.py +4 -1
- pystylometry/lexical/bnc_frequency.py +309 -0
- pystylometry/lexical/ttr.py +288 -97
- pystylometry/viz/jsx/__init__.py +2 -0
- pystylometry/viz/jsx/bnc_frequency.py +495 -0
- {pystylometry-1.3.1.dist-info → pystylometry-1.3.6.dist-info}/METADATA +16 -3
- {pystylometry-1.3.1.dist-info → pystylometry-1.3.6.dist-info}/RECORD +13 -11
- {pystylometry-1.3.1.dist-info → pystylometry-1.3.6.dist-info}/entry_points.txt +2 -0
- {pystylometry-1.3.1.dist-info → pystylometry-1.3.6.dist-info}/LICENSE +0 -0
- {pystylometry-1.3.1.dist-info → pystylometry-1.3.6.dist-info}/WHEEL +0 -0
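The pystylometry/cli.py +695 and entry_points.txt +2 rows indicate that 1.3.6 ships a new command-line interface and registers it as a console script. The summary above does not show the added entry-point lines themselves, so the script name is unknown here; after installing 1.3.6, the standard-library metadata API can list what actually got registered (a minimal sketch, assuming nothing beyond the package name):

# List console scripts whose target callable lives in pystylometry.
from importlib.metadata import entry_points

for ep in entry_points(group="console_scripts"):
    if ep.module.startswith("pystylometry"):
        print(ep.name, "->", ep.value)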
pystylometry/lexical/ttr.py
CHANGED

@@ -1,149 +1,340 @@
-"""Type-Token Ratio (TTR) analysis
+"""Type-Token Ratio (TTR) analysis with native chunked computation.
 
-
-
+Computes multiple TTR variants for measuring lexical diversity (vocabulary
+richness). All metrics are computed per-chunk and wrapped in Distribution
+objects for stylometric fingerprinting.
 
-
+Previously delegated to the external ``stylometry-ttr`` package; now
+computed inline using only the Python standard library (``math`` and
+``statistics``).
+
+Related GitHub Issues:
     #27 - Native chunked analysis with Distribution dataclass
     https://github.com/craigtrim/pystylometry/issues/27
+
+    #43 - Inline stylometry-ttr into pystylometry (remove external dependency)
+    https://github.com/craigtrim/pystylometry/issues/43
+
+References:
+    Guiraud, P. (1960). Problèmes et méthodes de la statistique linguistique.
+    Herdan, G. (1960). Type-token Mathematics: A Textbook of Mathematical
+        Linguistics. Mouton.
+    Johnson, W. (1944). Studies in language behavior: I. A program of research.
+        Psychological Monographs, 56(2), 1-15.
 """
 
 from __future__ import annotations
 
-
+import math
+import statistics
+from typing import Optional
+
+from .._types import Distribution, TTRAggregateResult, TTRResult, make_distribution
+from ..tokenizer import Tokenizer
+
+# ---------------------------------------------------------------------------
+# Internal helpers
+# ---------------------------------------------------------------------------
+
+# Minimum words required before STTR computation is meaningful.
+# With fewer words we cannot form at least two full chunks, so the
+# standardised metric would be unreliable.
+_MIN_WORDS_FOR_STTR = 2000
+
+
+def _compute_chunk_ttrs(tokens: list[str], chunk_size: int) -> list[float]:
+    """Compute per-chunk raw TTR values for non-overlapping chunks.
+
+    Only full-sized chunks are included so that every TTR is measured on the
+    same token count, keeping the standardised metric unbiased.
+
+    Args:
+        tokens: Full token list.
+        chunk_size: Number of tokens per chunk.
+
+    Returns:
+        List of per-chunk TTR values (may be empty if too few tokens).
+    """
+    total = len(tokens)
+    chunk_ttrs: list[float] = []
+    for i in range(0, total - chunk_size + 1, chunk_size):
+        chunk = tokens[i : i + chunk_size]
+        chunk_ttrs.append(len(set(chunk)) / chunk_size)
+    return chunk_ttrs
+
 
+def _compute_deltas(
+    chunk_ttrs: list[float],
+) -> tuple[Optional[float], Optional[float], Optional[float], Optional[float]]:
+    """Compute delta metrics: TTR(n) - TTR(n-1) for consecutive chunks.
 
-
+    Delta metrics capture chunk-to-chunk vocabulary variability:
+    - delta_mean: average change (positive = expanding vocabulary)
+    - delta_std: volatility of change (stylometric fingerprint)
+    - delta_min: largest negative swing
+    - delta_max: largest positive swing
+
+    Args:
+        chunk_ttrs: Per-chunk TTR values (needs >= 2 values).
+
+    Returns:
+        Tuple of (delta_mean, delta_std, delta_min, delta_max).
+        All ``None`` when fewer than 2 chunks are available.
     """
-
+    if len(chunk_ttrs) < 2:
+        return None, None, None, None
+
+    deltas = [chunk_ttrs[i] - chunk_ttrs[i - 1] for i in range(1, len(chunk_ttrs))]
+    d_mean = statistics.mean(deltas)
+    d_std = statistics.stdev(deltas) if len(deltas) > 1 else 0.0
+    return d_mean, d_std, min(deltas), max(deltas)
+
+
+# ---------------------------------------------------------------------------
+# Public API
+# ---------------------------------------------------------------------------
+
 
-
-
-
+def compute_ttr(
+    text: str,
+    text_id: str | None = None,
+    chunk_size: int = 1000,
+) -> TTRResult:
+    """Compute Type-Token Ratio (TTR) metrics for vocabulary richness.
+
+    Tokenises the input with pystylometry's ``Tokenizer`` (lowercase, words
+    only), then computes five TTR-family metrics. Each metric is computed
+    per-chunk and the full per-chunk distribution is exposed via a
+    ``Distribution`` object for stylometric fingerprinting.
 
     Metrics computed:
-
-
-
-
-
+    - **Raw TTR**: ``unique_words / total_words``
+    - **Root TTR** (Guiraud's index): ``unique_words / sqrt(total_words)``
+    - **Log TTR** (Herdan's C): ``log(unique_words) / log(total_words)``
+    - **STTR**: Mean TTR across fixed-size chunks (reduces length bias).
+      Only computed when the text has >= 2000 words.
+    - **Delta Std**: Std-dev of chunk-to-chunk TTR change (vocabulary
+      consistency). Only computed when >= 2 chunks are available.
 
-    Related GitHub
+    Related GitHub Issues:
        #27 - Native chunked analysis with Distribution dataclass
        https://github.com/craigtrim/pystylometry/issues/27
 
+       #43 - Inline stylometry-ttr into pystylometry
+       https://github.com/craigtrim/pystylometry/issues/43
+
     References:
-        Guiraud, P. (1960). Problèmes et méthodes de la statistique
+        Guiraud, P. (1960). Problèmes et méthodes de la statistique
+        linguistique.
         Herdan, G. (1960). Type-token Mathematics: A Textbook of Mathematical
         Linguistics. Mouton.
-        Johnson, W. (1944). Studies in language behavior: I. A program of
-        Psychological Monographs, 56(2), 1-15.
+        Johnson, W. (1944). Studies in language behavior: I. A program of
+        research. Psychological Monographs, 56(2), 1-15.
 
     Args:
-        text: Input text to
-        text_id: Optional identifier for the text (
-        chunk_size: Number of words per chunk
-
-            so this parameter is included for API consistency but actual chunking
-            behavior is delegated to stylometry-ttr.
+        text: Input text to analyse.
+        text_id: Optional identifier for the text (stored in metadata).
+        chunk_size: Number of words per chunk for STTR and per-chunk
+            distributions (default: 1000).
 
     Returns:
-        TTRResult with all TTR variants
-        objects for stylometric fingerprinting.
+        TTRResult with all TTR variants, Distribution objects, and metadata.
 
     Example:
         >>> result = compute_ttr("The quick brown fox jumps over the lazy dog.")
         >>> print(f"Raw TTR: {result.ttr:.3f}")
-        Raw TTR:
+        Raw TTR: 1.000
        >>> print(f"Root TTR: {result.root_ttr:.3f}")
-        Root TTR:
-        >>> print(f"STTR: {result.sttr:.3f}")
-        STTR: 1.000
+        Root TTR: 3.000
 
        >>> # With text identifier
        >>> result = compute_ttr("Sample text here.", text_id="sample-001")
        >>> print(result.metadata["text_id"])
        sample-001
     """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    root_ttr_val = ttr_result.root_ttr
-    log_ttr_val = ttr_result.log_ttr
-    sttr_val = ttr_result.sttr if ttr_result.sttr is not None else 0.0
-    delta_std_val = ttr_result.delta_std if ttr_result.delta_std is not None else 0.0
-
-    # Create single-value distributions from stylometry-ttr results
-    # The stylometry-ttr package handles its own internal chunking for STTR
-    # so we wrap the aggregate results in Distribution objects
-    ttr_dist = (
-        make_distribution([ttr_val])
-        if ttr_val is not None
-        else Distribution(
-            values=[], mean=float("nan"), median=float("nan"), std=0.0, range=0.0, iqr=0.0
+    # Tokenise using pystylometry's own tokenizer (lowercase, words only)
+    tokenizer = Tokenizer(lowercase=True, strip_punctuation=True)
+    tokens = tokenizer.tokenize(text)
+
+    total_words = len(tokens)
+
+    # --- empty / trivial text --------------------------------------------------
+    if total_words == 0:
+        empty_dist = Distribution(
+            values=[],
+            mean=float("nan"),
+            median=float("nan"),
+            std=0.0,
+            range=0.0,
+            iqr=0.0,
         )
-
-
-
-
-
-
+        return TTRResult(
+            total_words=0,
+            unique_words=0,
+            ttr=0.0,
+            root_ttr=0.0,
+            log_ttr=0.0,
+            sttr=0.0,
+            delta_std=0.0,
+            ttr_dist=empty_dist,
+            root_ttr_dist=empty_dist,
+            log_ttr_dist=empty_dist,
+            sttr_dist=empty_dist,
+            delta_std_dist=empty_dist,
+            chunk_size=chunk_size,
+            chunk_count=0,
+            metadata={
+                "text_id": text_id or "",
+                "sttr_available": False,
+                "delta_std_available": False,
+            },
        )
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # --- global metrics --------------------------------------------------------
+    unique_words = len(set(tokens))
+    ttr_val = unique_words / total_words
+    root_ttr_val = unique_words / math.sqrt(total_words)
+    log_ttr_val = math.log(unique_words) / math.log(total_words) if total_words > 1 else 0.0
+
+    # --- per-chunk metrics -----------------------------------------------------
+    chunk_ttrs = _compute_chunk_ttrs(tokens, chunk_size)
+    chunk_count = len(chunk_ttrs)
+
+    # STTR: mean TTR across chunks (only meaningful with enough text)
+    sttr_available = total_words >= _MIN_WORDS_FOR_STTR and chunk_count >= 1
+    if sttr_available:
+        sttr_val = statistics.mean(chunk_ttrs)
+    else:
+        sttr_val = 0.0
+
+    # Delta metrics
+    delta_mean, delta_std_val, delta_min, delta_max = _compute_deltas(chunk_ttrs)
+    delta_std_available = delta_std_val is not None
+    if delta_std_val is None:
+        delta_std_val = 0.0
+
+    # --- build Distribution objects --------------------------------------------
+    # For per-chunk distributions: compute root_ttr and log_ttr per chunk as well
+    if chunk_count >= 1:
+        ttr_dist = make_distribution(chunk_ttrs)
+
+        # Root TTR per chunk: for each chunk of chunk_size tokens,
+        # root_ttr = unique / sqrt(chunk_size)
+        root_ttr_chunks = [
+            len(set(tokens[i : i + chunk_size])) / math.sqrt(chunk_size)
+            for i in range(0, total_words - chunk_size + 1, chunk_size)
+        ]
+        root_ttr_dist = make_distribution(root_ttr_chunks)
+
+        # Log TTR per chunk
+        log_ttr_chunks = []
+        for i in range(0, total_words - chunk_size + 1, chunk_size):
+            chunk = tokens[i : i + chunk_size]
+            u = len(set(chunk))
+            t = len(chunk)
+            val = math.log(u) / math.log(t) if t > 1 else 0.0
+            log_ttr_chunks.append(val)
+        log_ttr_dist = make_distribution(log_ttr_chunks)
+
+        sttr_dist = (
+            make_distribution(chunk_ttrs) if sttr_available else make_distribution([sttr_val])
        )
-
-
-        make_distribution([delta_std_val])
-        if ttr_result.delta_std is not None
-        else Distribution(
-            values=[], mean=float("nan"), median=float("nan"), std=0.0, range=0.0, iqr=0.0
+        delta_std_dist = (
+            make_distribution([delta_std_val]) if delta_std_available else make_distribution([0.0])
        )
-
+    else:
+        # Not enough text for any chunks — wrap globals in single-value dists
+        ttr_dist = make_distribution([ttr_val])
+        root_ttr_dist = make_distribution([root_ttr_val])
+        log_ttr_dist = make_distribution([log_ttr_val])
+        sttr_dist = make_distribution([sttr_val])
+        delta_std_dist = make_distribution([0.0])
 
-    # Convert to our TTRResult dataclass
     return TTRResult(
-        total_words=
-        unique_words=
-        ttr=ttr_val
-        root_ttr=root_ttr_val
-        log_ttr=log_ttr_val
-        sttr=sttr_val,
-        delta_std=delta_std_val,
+        total_words=total_words,
+        unique_words=unique_words,
+        ttr=round(ttr_val, 6),
+        root_ttr=round(root_ttr_val, 4),
+        log_ttr=round(log_ttr_val, 6),
+        sttr=round(sttr_val, 6),
+        delta_std=round(delta_std_val, 6),
        ttr_dist=ttr_dist,
        root_ttr_dist=root_ttr_dist,
        log_ttr_dist=log_ttr_dist,
        sttr_dist=sttr_dist,
        delta_std_dist=delta_std_dist,
        chunk_size=chunk_size,
-        chunk_count=
+        chunk_count=chunk_count if chunk_count >= 1 else 1,
        metadata={
            "text_id": text_id or "",
-            "
-            "
-            "delta_std_available": ttr_result.delta_std is not None,
+            "sttr_available": sttr_available,
+            "delta_std_available": delta_std_available,
        },
    )
+
+
+# ---------------------------------------------------------------------------
+# Aggregation
+# ---------------------------------------------------------------------------
+
+
+class TTRAggregator:
+    """Aggregate per-text TTR results into group-level statistics.
+
+    Useful for comparing vocabulary richness across authors, genres, or
+    time periods by computing summary statistics (mean, std, min, max,
+    median) over a collection of ``TTRResult`` objects.
+
+    Related GitHub Issue:
+        #43 - Inline stylometry-ttr into pystylometry
+        https://github.com/craigtrim/pystylometry/issues/43
+
+    Example:
+        >>> from pystylometry.lexical import compute_ttr, TTRAggregator
+        >>> results = [compute_ttr(t) for t in texts]
+        >>> agg = TTRAggregator()
+        >>> stats = agg.aggregate(results, group_id="Shakespeare")
+        >>> print(stats.ttr_mean)
+        0.412
+    """
+
+    def aggregate(self, results: list[TTRResult], group_id: str) -> TTRAggregateResult:
+        """Compute aggregate statistics from multiple TTR results.
+
+        Args:
+            results: List of per-text ``TTRResult`` objects.
+            group_id: Identifier for the group (e.g. author name).
+
+        Returns:
+            ``TTRAggregateResult`` with group-level statistics.
+
+        Raises:
+            ValueError: If *results* is empty.
+        """
+        if not results:
+            raise ValueError("Cannot aggregate empty results list")
+
+        ttrs = [r.ttr for r in results]
+        root_ttrs = [r.root_ttr for r in results]
+        log_ttrs = [r.log_ttr for r in results]
+        sttrs = [r.sttr for r in results if r.metadata.get("sttr_available")]
+        delta_stds = [r.delta_std for r in results if r.metadata.get("delta_std_available")]
+
+        return TTRAggregateResult(
+            group_id=group_id,
+            text_count=len(results),
+            total_words=sum(r.total_words for r in results),
+            ttr_mean=round(statistics.mean(ttrs), 6),
+            ttr_std=round(statistics.stdev(ttrs), 6) if len(ttrs) > 1 else 0.0,
+            ttr_min=round(min(ttrs), 6),
+            ttr_max=round(max(ttrs), 6),
+            ttr_median=round(statistics.median(ttrs), 6),
+            root_ttr_mean=round(statistics.mean(root_ttrs), 4),
+            root_ttr_std=round(statistics.stdev(root_ttrs), 4) if len(root_ttrs) > 1 else 0.0,
+            log_ttr_mean=round(statistics.mean(log_ttrs), 6),
+            log_ttr_std=round(statistics.stdev(log_ttrs), 6) if len(log_ttrs) > 1 else 0.0,
+            sttr_mean=round(statistics.mean(sttrs), 6) if sttrs else None,
+            sttr_std=round(statistics.stdev(sttrs), 6) if len(sttrs) > 1 else None,
+            delta_std_mean=round(statistics.mean(delta_stds), 6) if delta_stds else None,
+            metadata={"group_id": group_id, "text_count": len(results)},
+        )
pystylometry/viz/jsx/__init__.py
CHANGED

@@ -20,6 +20,7 @@ Example:
     >>> export_drift_viewer("drift_analyzer.html")
 """
 
+from .bnc_frequency import export_bnc_frequency_jsx
 from .report import export_drift_report_jsx
 from .timeline import export_drift_timeline_jsx
 from .viewer import export_drift_viewer
@@ -28,4 +29,5 @@ __all__ = [
     "export_drift_timeline_jsx",
     "export_drift_report_jsx",
     "export_drift_viewer",
+    "export_bnc_frequency_jsx",
 ]