@higher.archi/boe 1.0.25 → 1.0.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,513 @@
1
+ /**
2
+ * Sentiment Engine Strategy
3
+ *
4
+ * Core execution logic for all sentiment strategies:
5
+ * - token-level: Score individual tokens in the full text
6
+ * - document-level: Split into sentences, score each, average compounds
7
+ * - aspect-based: Match sentences to aspect terms, score per aspect
8
+ */
9
+
10
+ import type { IWorkingMemory, Fact } from '../../core';
11
+
12
+ import type {
13
+ CompiledSentimentRuleSet,
14
+ CompiledTokenLevelSentimentRuleSet,
15
+ CompiledDocumentLevelSentimentRuleSet,
16
+ CompiledAspectBasedSentimentRuleSet,
17
+ CompiledSentimentLexicon,
18
+ SentimentOptions,
19
+ SentimentResult,
20
+ SentenceResult,
21
+ AspectResult,
22
+ TokenScore,
23
+ TokenModifier
24
+ } from './types';
25
+ import { resolveSentimentLabel } from './types';
26
+
27
+ // ========================================
28
+ // Helpers
29
+ // ========================================
30
+
31
+ /** Split text into tokens on whitespace and punctuation boundaries */
32
+ function tokenize(text: string): string[] {
33
+ return text.match(/\w+(?:'\w+)?/g) ?? [];
34
+ }
35
+
36
+ /** Check if a token is ALL CAPS (must be >= 2 chars) */
37
+ function isAllCaps(token: string): boolean {
38
+ return token.length >= 2 && token === token.toUpperCase() && /[A-Z]/.test(token);
39
+ }
40
+
41
+ /** Count trailing exclamation marks in text */
42
+ function countTrailingExclamations(text: string): number {
43
+ const match = text.match(/!+\s*$/);
44
+ return match ? match[0].replace(/\s/g, '').length : 0;
45
+ }
46
+
47
+ /** Split text into sentences using a regex pattern */
48
+ function splitSentences(text: string, pattern: RegExp): string[] {
49
+ return text
50
+ .split(pattern)
51
+ .map(s => s.trim())
52
+ .filter(s => s.length > 0);
53
+ }
54
+
55
+ /** Sliding window multi-word idiom matching */
56
+ function scanIdioms(
57
+ tokens: string[],
58
+ idioms: Record<string, number>,
59
+ maxLen: number
60
+ ): { startIdx: number; endIdx: number; phrase: string; score: number }[] {
61
+ if (maxLen === 0) return [];
62
+
63
+ const matches: { startIdx: number; endIdx: number; phrase: string; score: number }[] = [];
64
+ const lowerTokens = tokens.map(t => t.toLowerCase());
65
+
66
+ for (let i = 0; i < lowerTokens.length; i++) {
67
+ // Try longest match first
68
+ for (let len = Math.min(maxLen, lowerTokens.length - i); len >= 2; len--) {
69
+ const phrase = lowerTokens.slice(i, i + len).join(' ');
70
+ if (phrase in idioms) {
71
+ matches.push({
72
+ startIdx: i,
73
+ endIdx: i + len - 1,
74
+ phrase,
75
+ score: idioms[phrase]
76
+ });
77
+ break; // longest match wins for this position
78
+ }
79
+ }
80
+ }
81
+
82
+ return matches;
83
+ }
84
+
85
/**
 * Core token scoring loop.
 *
 * Walks the token stream once and emits one TokenScore per token (or per
 * matched idiom), applying three heuristics:
 *   - negation:  a negator token arms a countdown; while armed, scored tokens
 *     have their sign flipped
 *   - boosters:  a booster token stores a multiplier that is consumed by the
 *     NEXT scored token/idiom
 *   - caps:      ALL-CAPS tokens are multiplied by `capsBoost` (plain tokens
 *     only, not idioms)
 * Negators, boosters, and unknown tokens still emit a neutral (0/0) TokenScore
 * so the output aligns with the input stream; tokens inside an idiom (past its
 * start position) emit nothing.
 *
 * NOTE(review): the countdown decrements only when a sentiment-bearing
 * token/idiom is scored — neutral tokens do not use up the window, so
 * `negationWindow` counts scored tokens, not raw tokens. Confirm intended.
 *
 * NOTE(review): the idiom branch applies negation before the booster while the
 * plain-token branch boosts before negating; the product is the same, only the
 * order of entries in `modifiers` differs.
 *
 * @param tokens         token stream from tokenize()
 * @param lexicon        compiled lexicon (lowercased keys)
 * @param capsBoost      multiplier for ALL-CAPS tokens
 * @param negationWindow countdown value armed by a negator token
 * @param onToken        optional callback invoked for every emitted TokenScore
 * @returns one TokenScore per emitted token/idiom, in input order
 */
function scoreTokens(
  tokens: string[],
  lexicon: CompiledSentimentLexicon,
  capsBoost: number,
  negationWindow: number,
  onToken?: (token: TokenScore) => void
): TokenScore[] {
  const results: TokenScore[] = [];

  // Pre-scan idioms so multi-word phrases override their member tokens.
  const idiomMatches = scanIdioms(tokens, lexicon.idioms, lexicon._idiomMaxLength);
  const idiomCovered = new Set<number>();
  for (const match of idiomMatches) {
    for (let i = match.startIdx; i <= match.endIdx; i++) {
      idiomCovered.add(i);
    }
  }

  // Insert idiom scores at their start positions.
  // NOTE(review): this map is consulted BEFORE the idiomCovered skip below, so
  // if the scanner ever returned overlapping matches, an idiom starting inside
  // another would still be scored.
  const idiomAtStart = new Map<number, { phrase: string; score: number }>();
  for (const match of idiomMatches) {
    idiomAtStart.set(match.startIdx, { phrase: match.phrase, score: match.score });
  }

  let negationCountdown = 0; // > 0 means "flip the sign of the next scored tokens"
  let pendingBooster = 0;    // 0 means "no booster armed"

  for (let i = 0; i < tokens.length; i++) {
    const token = tokens[i];
    const normalized = token.toLowerCase();

    // Check if this token starts an idiom: score the whole phrase as one unit.
    if (idiomAtStart.has(i)) {
      const idiom = idiomAtStart.get(i)!;
      let adjustedScore = idiom.score;
      const modifiers: TokenModifier[] = ['idiom'];

      if (negationCountdown > 0) {
        adjustedScore *= -1;
        modifiers.push('negated');
        negationCountdown--;
      }

      if (pendingBooster !== 0) {
        adjustedScore *= pendingBooster;
        modifiers.push('boosted');
        pendingBooster = 0; // booster is single-use
      }

      const result: TokenScore = {
        token: idiom.phrase,
        rawScore: idiom.score,
        adjustedScore,
        modifiers
      };
      results.push(result);
      if (onToken) onToken(result);
      continue;
    }

    // Skip tokens covered by an idiom (not at start position)
    if (idiomCovered.has(i)) continue;

    // Negator: arms (or re-arms) the countdown; emits a neutral placeholder.
    if (lexicon.negation.has(normalized)) {
      negationCountdown = negationWindow;
      const result: TokenScore = {
        token,
        rawScore: 0,
        adjustedScore: 0,
        modifiers: []
      };
      results.push(result);
      if (onToken) onToken(result);
      continue;
    }

    // Booster: arms the multiplier for the next scored token; neutral placeholder.
    if (normalized in lexicon.boosters) {
      pendingBooster = lexicon.boosters[normalized];
      const result: TokenScore = {
        token,
        rawScore: 0,
        adjustedScore: 0,
        modifiers: []
      };
      results.push(result);
      if (onToken) onToken(result);
      continue;
    }

    // Lexicon lookup; unknown tokens score 0.
    const rawScore = lexicon.lexicon[normalized] ?? 0;

    if (rawScore === 0) {
      // Neutral token: emit a placeholder; does NOT consume booster or countdown.
      const result: TokenScore = {
        token,
        rawScore: 0,
        adjustedScore: 0,
        modifiers: []
      };
      results.push(result);
      if (onToken) onToken(result);
      continue;
    }

    let adjustedScore = rawScore;
    const modifiers: TokenModifier[] = [];

    // Apply pending booster (single-use).
    if (pendingBooster !== 0) {
      adjustedScore *= pendingBooster;
      modifiers.push('boosted');
      pendingBooster = 0;
    }

    // Apply negation (consumes one unit of the window).
    if (negationCountdown > 0) {
      adjustedScore *= -1;
      modifiers.push('negated');
      negationCountdown--;
    }

    // Apply caps boost (plain tokens only; idioms never get this).
    if (isAllCaps(token)) {
      adjustedScore *= capsBoost;
      modifiers.push('caps');
    }

    const result: TokenScore = {
      token,
      rawScore,
      adjustedScore,
      modifiers
    };
    results.push(result);
    if (onToken) onToken(result);
  }

  return results;
}
227
+
228
+ /** Compute compound score: normalize via sum / sqrt(sum^2 + alpha), apply exclamation boost */
229
+ function computeCompound(
230
+ tokenScores: TokenScore[],
231
+ exclamationBoost: number,
232
+ exclamationCount: number
233
+ ): number {
234
+ const alpha = 15;
235
+ const sum = tokenScores.reduce((acc, t) => acc + t.adjustedScore, 0);
236
+ let compound = sum / Math.sqrt(sum * sum + alpha);
237
+
238
+ // Apply exclamation boost
239
+ if (exclamationCount > 0) {
240
+ const boost = Math.pow(exclamationBoost, Math.min(exclamationCount, 4));
241
+ compound = compound * boost;
242
+ }
243
+
244
+ // Clamp to [-1, 1]
245
+ return Math.max(-1, Math.min(1, compound));
246
+ }
247
+
248
+ /** Compute positive/negative/neutral proportions from token scores */
249
+ function computeProportions(tokenScores: TokenScore[]): { positive: number; negative: number; neutral: number } {
250
+ if (tokenScores.length === 0) {
251
+ return { positive: 0, negative: 0, neutral: 1 };
252
+ }
253
+
254
+ let pos = 0;
255
+ let neg = 0;
256
+ let neu = 0;
257
+
258
+ for (const t of tokenScores) {
259
+ if (t.adjustedScore > 0) pos++;
260
+ else if (t.adjustedScore < 0) neg++;
261
+ else neu++;
262
+ }
263
+
264
+ const total = tokenScores.length;
265
+ return {
266
+ positive: round(pos / total, 4),
267
+ negative: round(neg / total, 4),
268
+ neutral: round(neu / total, 4)
269
+ };
270
+ }
271
+
272
+ function round(value: number, decimals: number): number {
273
+ const factor = Math.pow(10, decimals);
274
+ return Math.round(value * factor) / factor;
275
+ }
276
+
277
+ // ========================================
278
+ // Executor
279
+ // ========================================
280
+
281
+ export class SentimentExecutor {
282
+ run(
283
+ ruleSet: CompiledSentimentRuleSet,
284
+ wm: IWorkingMemory,
285
+ options: SentimentOptions = {}
286
+ ): SentimentResult {
287
+ const startTime = performance.now();
288
+
289
+ // Gather all text facts from working memory
290
+ const facts = wm.getAll();
291
+ const texts = extractTexts(facts);
292
+ const fullText = texts.join(' ');
293
+
294
+ return this.scoreText(ruleSet, fullText, options, startTime);
295
+ }
296
+
297
+ /** Score a single text string directly (no WorkingMemory involved) */
298
+ scoreText(
299
+ ruleSet: CompiledSentimentRuleSet,
300
+ text: string,
301
+ options: SentimentOptions = {},
302
+ startTime: number = performance.now()
303
+ ): SentimentResult {
304
+ switch (ruleSet.strategy) {
305
+ case 'token-level':
306
+ return this.runTokenLevel(ruleSet, text, options, startTime);
307
+ case 'document-level':
308
+ return this.runDocumentLevel(ruleSet, text, options, startTime);
309
+ case 'aspect-based':
310
+ return this.runAspectBased(ruleSet, text, options, startTime);
311
+ default:
312
+ throw new Error(`Unknown sentiment strategy: '${(ruleSet as any).strategy}'`);
313
+ }
314
+ }
315
+
316
+ // ========================================
317
+ // Token-Level Strategy
318
+ // ========================================
319
+
320
+ private runTokenLevel(
321
+ ruleSet: CompiledTokenLevelSentimentRuleSet,
322
+ text: string,
323
+ options: SentimentOptions,
324
+ startTime: number
325
+ ): SentimentResult {
326
+ const tokens = tokenize(text);
327
+ const exclamationCount = countTrailingExclamations(text);
328
+ const tokenScores = scoreTokens(
329
+ tokens, ruleSet.lexicon, ruleSet.config.capsBoost,
330
+ ruleSet.config.negationWindow, options.onToken
331
+ );
332
+
333
+ const compound = computeCompound(tokenScores, ruleSet.config.exclamationBoost, exclamationCount);
334
+ const proportions = computeProportions(tokenScores);
335
+ const executionTimeMs = round((performance.now() - startTime) * 100, 0) / 100;
336
+
337
+ return {
338
+ compound: round(compound, 4),
339
+ label: resolveSentimentLabel(compound),
340
+ positive: proportions.positive,
341
+ negative: proportions.negative,
342
+ neutral: proportions.neutral,
343
+ tokens: tokenScores,
344
+ strategy: 'token-level',
345
+ executionTimeMs
346
+ };
347
+ }
348
+
349
+ // ========================================
350
+ // Document-Level Strategy
351
+ // ========================================
352
+
353
+ private runDocumentLevel(
354
+ ruleSet: CompiledDocumentLevelSentimentRuleSet,
355
+ text: string,
356
+ options: SentimentOptions,
357
+ startTime: number
358
+ ): SentimentResult {
359
+ const sentences = splitSentences(text, ruleSet.sentenceSplitPattern);
360
+
361
+ if (sentences.length === 0) {
362
+ const executionTimeMs = round((performance.now() - startTime) * 100, 0) / 100;
363
+ return {
364
+ compound: 0,
365
+ label: 'neutral',
366
+ positive: 0,
367
+ negative: 0,
368
+ neutral: 1,
369
+ sentences: [],
370
+ strategy: 'document-level',
371
+ executionTimeMs
372
+ };
373
+ }
374
+
375
+ const sentenceResults: SentenceResult[] = [];
376
+
377
+ for (const sentenceText of sentences) {
378
+ const tokens = tokenize(sentenceText);
379
+ const exclamationCount = countTrailingExclamations(sentenceText);
380
+ const tokenScores = scoreTokens(
381
+ tokens, ruleSet.lexicon, ruleSet.config.capsBoost,
382
+ ruleSet.config.negationWindow, options.onToken
383
+ );
384
+ const compound = computeCompound(tokenScores, ruleSet.config.exclamationBoost, exclamationCount);
385
+
386
+ sentenceResults.push({
387
+ text: sentenceText,
388
+ compound: round(compound, 4),
389
+ label: resolveSentimentLabel(compound),
390
+ tokens: tokenScores
391
+ });
392
+ }
393
+
394
+ // Overall compound is average of sentence compounds
395
+ const avgCompound = sentenceResults.reduce((sum, s) => sum + s.compound, 0) / sentenceResults.length;
396
+
397
+ // Aggregate proportions across all tokens
398
+ const allTokens = sentenceResults.flatMap(s => s.tokens);
399
+ const proportions = computeProportions(allTokens);
400
+ const executionTimeMs = round((performance.now() - startTime) * 100, 0) / 100;
401
+
402
+ return {
403
+ compound: round(avgCompound, 4),
404
+ label: resolveSentimentLabel(avgCompound),
405
+ positive: proportions.positive,
406
+ negative: proportions.negative,
407
+ neutral: proportions.neutral,
408
+ sentences: sentenceResults,
409
+ strategy: 'document-level',
410
+ executionTimeMs
411
+ };
412
+ }
413
+
414
+ // ========================================
415
+ // Aspect-Based Strategy
416
+ // ========================================
417
+
418
+ private runAspectBased(
419
+ ruleSet: CompiledAspectBasedSentimentRuleSet,
420
+ text: string,
421
+ options: SentimentOptions,
422
+ startTime: number
423
+ ): SentimentResult {
424
+ // Split into sentences using default pattern
425
+ const sentences = splitSentences(text, /[.!?]+/);
426
+
427
+ const aspectResults: AspectResult[] = [];
428
+
429
+ for (const aspect of ruleSet.aspects) {
430
+ const matchingSentences: SentenceResult[] = [];
431
+
432
+ for (const sentenceText of sentences) {
433
+ // Check if sentence contains the aspect term
434
+ if (!sentenceText.toLowerCase().includes(aspect)) continue;
435
+
436
+ const tokens = tokenize(sentenceText);
437
+ const exclamationCount = countTrailingExclamations(sentenceText);
438
+ const tokenScores = scoreTokens(
439
+ tokens, ruleSet.lexicon, ruleSet.config.capsBoost,
440
+ ruleSet.config.negationWindow, options.onToken
441
+ );
442
+ const compound = computeCompound(tokenScores, ruleSet.config.exclamationBoost, exclamationCount);
443
+
444
+ matchingSentences.push({
445
+ text: sentenceText,
446
+ compound: round(compound, 4),
447
+ label: resolveSentimentLabel(compound),
448
+ tokens: tokenScores
449
+ });
450
+ }
451
+
452
+ const aspectCompound = matchingSentences.length > 0
453
+ ? matchingSentences.reduce((sum, s) => sum + s.compound, 0) / matchingSentences.length
454
+ : 0;
455
+
456
+ aspectResults.push({
457
+ aspect,
458
+ compound: round(aspectCompound, 4),
459
+ label: resolveSentimentLabel(aspectCompound),
460
+ sentences: matchingSentences
461
+ });
462
+ }
463
+
464
+ // Overall compound is average of aspect compounds (only aspects with data)
465
+ const aspectsWithData = aspectResults.filter(a => a.sentences.length > 0);
466
+ const avgCompound = aspectsWithData.length > 0
467
+ ? aspectsWithData.reduce((sum, a) => sum + a.compound, 0) / aspectsWithData.length
468
+ : 0;
469
+
470
+ // Aggregate proportions from all aspect tokens
471
+ const allTokens = aspectResults.flatMap(a => a.sentences.flatMap(s => s.tokens));
472
+ const proportions = computeProportions(allTokens);
473
+ const executionTimeMs = round((performance.now() - startTime) * 100, 0) / 100;
474
+
475
+ return {
476
+ compound: round(avgCompound, 4),
477
+ label: resolveSentimentLabel(avgCompound),
478
+ positive: proportions.positive,
479
+ negative: proportions.negative,
480
+ neutral: proportions.neutral,
481
+ aspects: aspectResults,
482
+ strategy: 'aspect-based',
483
+ executionTimeMs
484
+ };
485
+ }
486
+ }
487
+
488
+ // ========================================
489
+ // Module-Level Helpers
490
+ // ========================================
491
+
492
+ /** Extract text content from facts - looks for text/content/body/message string fields */
493
+ function extractTexts(facts: Fact[]): string[] {
494
+ const texts: string[] = [];
495
+ for (const fact of facts) {
496
+ const data = fact.data;
497
+ if (typeof data === 'string') {
498
+ texts.push(data);
499
+ } else if (data && typeof data === 'object') {
500
+ // Try common text field names
501
+ for (const key of ['text', 'content', 'body', 'message', 'review', 'comment', 'feedback']) {
502
+ if (typeof (data as any)[key] === 'string') {
503
+ texts.push((data as any)[key]);
504
+ break;
505
+ }
506
+ }
507
+ }
508
+ }
509
+ return texts;
510
+ }
511
+
512
/** Shared module-level executor instance; the class holds no state, so reuse is safe. */
export const sentimentStrategy = new SentimentExecutor();
@@ -0,0 +1,198 @@
1
+ /**
2
+ * Sentiment Engine Types
3
+ *
4
+ * Text sentiment analysis engine that scores text using dictionary-lookup
5
+ * and heuristic rules. Supports token-level, document-level, and aspect-based
6
+ * analysis strategies with injectable lexicon configuration.
7
+ */
8
+
9
+ // ========================================
10
+ // Semantic Types
11
+ // ========================================
12
+
13
/** Discriminator for the sentiment rule-set unions (`strategy` field). */
export type SentimentStrategy = 'token-level' | 'document-level' | 'aspect-based';

/**
 * Resolved sentiment label.
 * NOTE(review): 'mixed' is declared but resolveSentimentLabel (below) never
 * returns it — confirm whether another producer assigns it or it can be dropped.
 */
export type SentimentLabel = 'positive' | 'neutral' | 'negative' | 'mixed';
18
+
19
+ // ========================================
20
+ // Lexicon Types
21
+ // ========================================
22
+
23
/**
 * Injectable lexicon configuration (user-facing, optional fields).
 * - `lexicon`:  token -> signed valence score (negative values = negative sentiment)
 * - `boosters`: token -> multiplier applied to the next scored token
 * - `negation`: tokens that flip the sign of subsequent scored tokens
 * - `idioms`:   multi-word phrase -> score; a matched phrase is scored as one unit
 */
export type SentimentLexicon = {
  lexicon: Record<string, number>;
  boosters?: Record<string, number>;
  negation?: string[];
  idioms?: Record<string, number>;
};

/**
 * Compiled lexicon with all fields required, keys lowercased.
 * `_idiomMaxLength` bounds the idiom scanner's sliding window; 0 disables
 * idiom scanning entirely. Presumably the word count of the longest idiom,
 * filled in by the compiler rather than supplied by users.
 */
export type CompiledSentimentLexicon = {
  lexicon: Record<string, number>;
  boosters: Record<string, number>;
  negation: Set<string>;
  idioms: Record<string, number>;
  _idiomMaxLength: number;
};
39
+
40
+ // ========================================
41
+ // Config Defaults
42
+ // ========================================
43
+
44
/** Default heuristic parameters; individual rule sets may override via `SentimentConfig`. */
export const SENTIMENT_DEFAULTS = {
  capsBoost: 1.2,        // score multiplier for ALL-CAPS tokens
  exclamationBoost: 1.1, // compound multiplier per trailing '!' (the engine caps the exponent)
  negationWindow: 3      // how many scored tokens a negator affects
} as const;

// ========================================
// Strategy-Specific Config Types
// ========================================

/** Per-rule-set overrides for SENTIMENT_DEFAULTS (all optional). */
export type SentimentConfig = {
  capsBoost?: number;
  exclamationBoost?: number;
  negationWindow?: number;
};
59
+
60
+ // ========================================
61
+ // Source RuleSet Types (Discriminated Union)
62
+ // ========================================
63
+
64
/**
 * Shared fields for all (uncompiled) sentiment rule sets.
 * `lexiconConfig` is the user-supplied lexicon; `config` overrides SENTIMENT_DEFAULTS.
 */
type SentimentRuleSetBase = {
  id: string;
  name?: string;
  mode: 'sentiment';
  lexiconConfig: SentimentLexicon;
  config?: SentimentConfig;
};

/** Token-level: score individual tokens in the full text. */
export type TokenLevelSentimentRuleSet = SentimentRuleSetBase & {
  strategy: 'token-level';
};

/**
 * Document-level: split into sentences, score each, average.
 * `sentenceSplitPattern` is a regex source string here; the compiled form
 * carries a real RegExp.
 */
export type DocumentLevelSentimentRuleSet = SentimentRuleSetBase & {
  strategy: 'document-level';
  sentenceSplitPattern?: string;
};

/** Aspect-based: match sentences to aspect terms, score per aspect. */
export type AspectBasedSentimentRuleSet = SentimentRuleSetBase & {
  strategy: 'aspect-based';
  aspects: string[];
};

/** Discriminated union of all source rule sets (tag: `strategy`). */
export type SentimentRuleSet =
  | TokenLevelSentimentRuleSet
  | DocumentLevelSentimentRuleSet
  | AspectBasedSentimentRuleSet;
93
+
94
+ // ========================================
95
+ // Compiled RuleSet Types
96
+ // ========================================
97
+
98
/**
 * Shared fields for compiled rule sets: the lexicon is fully normalized and
 * every config knob is resolved to a concrete number.
 */
type CompiledSentimentRuleSetBase = {
  id: string;
  name?: string;
  mode: 'sentiment';
  lexicon: CompiledSentimentLexicon;
  config: {
    capsBoost: number;
    exclamationBoost: number;
    negationWindow: number;
  };
};

/** Compiled form of TokenLevelSentimentRuleSet. */
export type CompiledTokenLevelSentimentRuleSet = CompiledSentimentRuleSetBase & {
  strategy: 'token-level';
};

/** Compiled form of DocumentLevelSentimentRuleSet; the split pattern is now a real RegExp. */
export type CompiledDocumentLevelSentimentRuleSet = CompiledSentimentRuleSetBase & {
  strategy: 'document-level';
  sentenceSplitPattern: RegExp;
};

/** Compiled form of AspectBasedSentimentRuleSet. */
export type CompiledAspectBasedSentimentRuleSet = CompiledSentimentRuleSetBase & {
  strategy: 'aspect-based';
  aspects: string[];
};

/** Discriminated union of all compiled rule sets (tag: `strategy`). */
export type CompiledSentimentRuleSet =
  | CompiledTokenLevelSentimentRuleSet
  | CompiledDocumentLevelSentimentRuleSet
  | CompiledAspectBasedSentimentRuleSet;
128
+
129
+ // ========================================
130
+ // Runtime Result Types
131
+ // ========================================
132
+
133
/** Modifier applied to a token score during the scoring loop. */
export type TokenModifier = 'negated' | 'boosted' | 'caps' | 'idiom';

/**
 * Per-token scoring detail.
 * `rawScore` is the unmodified lexicon/idiom valence; `adjustedScore` is the
 * value after the negation/booster/caps adjustments listed in `modifiers`.
 */
export type TokenScore = {
  token: string;
  rawScore: number;
  adjustedScore: number;
  modifiers: TokenModifier[];
};

/** Per-sentence result (document-level and aspect-based strategies). */
export type SentenceResult = {
  text: string;
  compound: number;   // rounded to 4 decimal places
  label: SentimentLabel;
  tokens: TokenScore[];
};

/** Per-aspect result (aspect-based only); `compound` averages the matching sentences. */
export type AspectResult = {
  aspect: string;
  compound: number;
  label: SentimentLabel;
  sentences: SentenceResult[];
};

/**
 * Full sentiment analysis result.
 * Exactly one of `tokens` / `sentences` / `aspects` is populated, matching
 * `strategy` ('token-level' / 'document-level' / 'aspect-based' respectively).
 */
export type SentimentResult = {
  compound: number;   // overall score in [-1, 1]
  label: SentimentLabel;
  positive: number;   // fraction of positively-scored tokens
  negative: number;   // fraction of negatively-scored tokens
  neutral: number;    // fraction of zero-scored tokens
  tokens?: TokenScore[];
  sentences?: SentenceResult[];
  aspects?: AspectResult[];
  strategy: SentimentStrategy;
  executionTimeMs: number;  // wall-clock time, rounded to 2 decimal places
};

/** Runtime options. */
export type SentimentOptions = {
  onToken?: (token: TokenScore) => void;  // invoked once per emitted TokenScore
};

/** Result from ingest() -- includes both the individual text score and the running aggregate */
export type IngestResult = SentimentResult & {
  /** Compound score for just the ingested text (not the aggregate) */
  itemCompound: number;
  /** Label for just the ingested text */
  itemLabel: SentimentLabel;
  /** Total number of texts ingested so far */
  totalIngested: number;
};
188
+
189
+ // ========================================
190
+ // Resolver Functions
191
+ // ========================================
192
+
193
+ /** Resolve a compound score (-1 to 1) to a sentiment label */
194
+ export function resolveSentimentLabel(compound: number): SentimentLabel {
195
+ if (compound > 0.05) return 'positive';
196
+ if (compound < -0.05) return 'negative';
197
+ return 'neutral';
198
+ }