@livekit/agents 0.4.6 → 0.5.1
- package/README.md +17 -0
- package/dist/audio.cjs +77 -0
- package/dist/audio.cjs.map +1 -0
- package/dist/audio.js +48 -37
- package/dist/audio.js.map +1 -1
- package/dist/cli.cjs +131 -0
- package/dist/cli.cjs.map +1 -0
- package/dist/cli.js +96 -122
- package/dist/cli.js.map +1 -1
- package/dist/generator.cjs +36 -0
- package/dist/generator.cjs.map +1 -0
- package/dist/generator.js +8 -22
- package/dist/generator.js.map +1 -1
- package/dist/http_server.cjs +72 -0
- package/dist/http_server.cjs.map +1 -0
- package/dist/http_server.d.ts +1 -1
- package/dist/http_server.js +44 -47
- package/dist/http_server.js.map +1 -1
- package/dist/index.cjs +78 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.js +26 -28
- package/dist/index.js.map +1 -1
- package/dist/ipc/job_executor.cjs +33 -0
- package/dist/ipc/job_executor.cjs.map +1 -0
- package/dist/ipc/job_executor.js +7 -4
- package/dist/ipc/job_executor.js.map +1 -1
- package/dist/ipc/job_main.cjs +147 -0
- package/dist/ipc/job_main.cjs.map +1 -0
- package/dist/ipc/job_main.d.ts +1 -1
- package/dist/ipc/job_main.js +103 -103
- package/dist/ipc/job_main.js.map +1 -1
- package/dist/ipc/message.cjs +17 -0
- package/dist/ipc/message.cjs.map +1 -0
- package/dist/ipc/message.js +0 -1
- package/dist/ipc/message.js.map +1 -1
- package/dist/ipc/proc_job_executor.cjs +174 -0
- package/dist/ipc/proc_job_executor.cjs.map +1 -0
- package/dist/ipc/proc_job_executor.js +130 -126
- package/dist/ipc/proc_job_executor.js.map +1 -1
- package/dist/ipc/proc_pool.cjs +126 -0
- package/dist/ipc/proc_pool.cjs.map +1 -0
- package/dist/ipc/proc_pool.js +93 -96
- package/dist/ipc/proc_pool.js.map +1 -1
- package/dist/job.cjs +230 -0
- package/dist/job.cjs.map +1 -0
- package/dist/job.d.ts +6 -1
- package/dist/job.d.ts.map +1 -1
- package/dist/job.js +195 -198
- package/dist/job.js.map +1 -1
- package/dist/llm/chat_context.cjs +131 -0
- package/dist/llm/chat_context.cjs.map +1 -0
- package/dist/llm/chat_context.js +98 -86
- package/dist/llm/chat_context.js.map +1 -1
- package/dist/llm/function_context.cjs +103 -0
- package/dist/llm/function_context.cjs.map +1 -0
- package/dist/llm/function_context.js +72 -81
- package/dist/llm/function_context.js.map +1 -1
- package/dist/llm/function_context.test.cjs +218 -0
- package/dist/llm/function_context.test.cjs.map +1 -0
- package/dist/llm/function_context.test.js +209 -210
- package/dist/llm/function_context.test.js.map +1 -1
- package/dist/llm/index.cjs +43 -0
- package/dist/llm/index.cjs.map +1 -0
- package/dist/llm/index.js +22 -6
- package/dist/llm/index.js.map +1 -1
- package/dist/llm/llm.cjs +76 -0
- package/dist/llm/llm.cjs.map +1 -0
- package/dist/llm/llm.js +48 -42
- package/dist/llm/llm.js.map +1 -1
- package/dist/log.cjs +57 -0
- package/dist/log.cjs.map +1 -0
- package/dist/log.js +27 -26
- package/dist/log.js.map +1 -1
- package/dist/multimodal/agent_playout.cjs +228 -0
- package/dist/multimodal/agent_playout.cjs.map +1 -0
- package/dist/multimodal/agent_playout.d.ts +1 -1
- package/dist/multimodal/agent_playout.js +193 -180
- package/dist/multimodal/agent_playout.js.map +1 -1
- package/dist/multimodal/index.cjs +25 -0
- package/dist/multimodal/index.cjs.map +1 -0
- package/dist/multimodal/index.js +2 -5
- package/dist/multimodal/index.js.map +1 -1
- package/dist/multimodal/multimodal_agent.cjs +404 -0
- package/dist/multimodal/multimodal_agent.cjs.map +1 -0
- package/dist/multimodal/multimodal_agent.d.ts +1 -1
- package/dist/multimodal/multimodal_agent.js +351 -330
- package/dist/multimodal/multimodal_agent.js.map +1 -1
- package/dist/pipeline/agent_output.cjs +172 -0
- package/dist/pipeline/agent_output.cjs.map +1 -0
- package/dist/pipeline/agent_output.js +136 -138
- package/dist/pipeline/agent_output.js.map +1 -1
- package/dist/pipeline/agent_playout.cjs +169 -0
- package/dist/pipeline/agent_playout.cjs.map +1 -0
- package/dist/pipeline/agent_playout.js +126 -136
- package/dist/pipeline/agent_playout.js.map +1 -1
- package/dist/pipeline/human_input.cjs +158 -0
- package/dist/pipeline/human_input.cjs.map +1 -0
- package/dist/pipeline/human_input.js +124 -125
- package/dist/pipeline/human_input.js.map +1 -1
- package/dist/pipeline/index.cjs +31 -0
- package/dist/pipeline/index.cjs.map +1 -0
- package/dist/pipeline/index.js +8 -4
- package/dist/pipeline/index.js.map +1 -1
- package/dist/pipeline/pipeline_agent.cjs +642 -0
- package/dist/pipeline/pipeline_agent.cjs.map +1 -0
- package/dist/pipeline/pipeline_agent.js +595 -651
- package/dist/pipeline/pipeline_agent.js.map +1 -1
- package/dist/pipeline/speech_handle.cjs +128 -0
- package/dist/pipeline/speech_handle.cjs.map +1 -0
- package/dist/pipeline/speech_handle.js +102 -100
- package/dist/pipeline/speech_handle.js.map +1 -1
- package/dist/plugin.cjs +46 -0
- package/dist/plugin.cjs.map +1 -0
- package/dist/plugin.js +20 -20
- package/dist/plugin.js.map +1 -1
- package/dist/stt/index.cjs +38 -0
- package/dist/stt/index.cjs.map +1 -0
- package/dist/stt/index.js +13 -5
- package/dist/stt/index.js.map +1 -1
- package/dist/stt/stream_adapter.cjs +87 -0
- package/dist/stt/stream_adapter.cjs.map +1 -0
- package/dist/stt/stream_adapter.js +58 -55
- package/dist/stt/stream_adapter.js.map +1 -1
- package/dist/stt/stt.cjs +98 -0
- package/dist/stt/stt.cjs.map +1 -0
- package/dist/stt/stt.js +63 -98
- package/dist/stt/stt.js.map +1 -1
- package/dist/tokenize/basic/basic.cjs +98 -0
- package/dist/tokenize/basic/basic.cjs.map +1 -0
- package/dist/tokenize/basic/basic.d.ts +1 -1
- package/dist/tokenize/basic/basic.d.ts.map +1 -1
- package/dist/tokenize/basic/basic.js +56 -45
- package/dist/tokenize/basic/basic.js.map +1 -1
- package/dist/tokenize/basic/hyphenator.cjs +425 -0
- package/dist/tokenize/basic/hyphenator.cjs.map +1 -0
- package/dist/tokenize/basic/hyphenator.js +66 -82
- package/dist/tokenize/basic/hyphenator.js.map +1 -1
- package/dist/tokenize/basic/index.cjs +35 -0
- package/dist/tokenize/basic/index.cjs.map +1 -0
- package/dist/tokenize/basic/index.js +7 -4
- package/dist/tokenize/basic/index.js.map +1 -1
- package/dist/tokenize/basic/paragraph.cjs +57 -0
- package/dist/tokenize/basic/paragraph.cjs.map +1 -0
- package/dist/tokenize/basic/paragraph.js +30 -35
- package/dist/tokenize/basic/paragraph.js.map +1 -1
- package/dist/tokenize/basic/sentence.cjs +89 -0
- package/dist/tokenize/basic/sentence.cjs.map +1 -0
- package/dist/tokenize/basic/sentence.d.ts.map +1 -1
- package/dist/tokenize/basic/sentence.js +62 -57
- package/dist/tokenize/basic/sentence.js.map +1 -1
- package/dist/tokenize/basic/word.cjs +44 -0
- package/dist/tokenize/basic/word.cjs.map +1 -0
- package/dist/tokenize/basic/word.js +17 -20
- package/dist/tokenize/basic/word.js.map +1 -1
- package/dist/tokenize/index.cjs +55 -0
- package/dist/tokenize/index.cjs.map +1 -0
- package/dist/tokenize/index.js +18 -7
- package/dist/tokenize/index.js.map +1 -1
- package/dist/tokenize/token_stream.cjs +164 -0
- package/dist/tokenize/token_stream.cjs.map +1 -0
- package/dist/tokenize/token_stream.js +133 -139
- package/dist/tokenize/token_stream.js.map +1 -1
- package/dist/tokenize/tokenizer.cjs +184 -0
- package/dist/tokenize/tokenizer.cjs.map +1 -0
- package/dist/tokenize/tokenizer.js +138 -99
- package/dist/tokenize/tokenizer.js.map +1 -1
- package/dist/tokenize/tokenizer.test.cjs +220 -0
- package/dist/tokenize/tokenizer.test.cjs.map +1 -0
- package/dist/tokenize/tokenizer.test.d.ts +2 -0
- package/dist/tokenize/tokenizer.test.d.ts.map +1 -0
- package/dist/tokenize/tokenizer.test.js +219 -0
- package/dist/tokenize/tokenizer.test.js.map +1 -0
- package/dist/transcription.cjs +131 -0
- package/dist/transcription.cjs.map +1 -0
- package/dist/transcription.js +99 -96
- package/dist/transcription.js.map +1 -1
- package/dist/tts/index.cjs +38 -0
- package/dist/tts/index.cjs.map +1 -0
- package/dist/tts/index.js +13 -5
- package/dist/tts/index.js.map +1 -1
- package/dist/tts/stream_adapter.cjs +78 -0
- package/dist/tts/stream_adapter.cjs.map +1 -0
- package/dist/tts/stream_adapter.js +50 -47
- package/dist/tts/stream_adapter.js.map +1 -1
- package/dist/tts/tts.cjs +127 -0
- package/dist/tts/tts.cjs.map +1 -0
- package/dist/tts/tts.js +90 -120
- package/dist/tts/tts.js.map +1 -1
- package/dist/utils.cjs +284 -0
- package/dist/utils.cjs.map +1 -0
- package/dist/utils.js +242 -247
- package/dist/utils.js.map +1 -1
- package/dist/vad.cjs +92 -0
- package/dist/vad.cjs.map +1 -0
- package/dist/vad.js +57 -52
- package/dist/vad.js.map +1 -1
- package/dist/version.cjs +29 -0
- package/dist/version.cjs.map +1 -0
- package/dist/version.js +4 -4
- package/dist/version.js.map +1 -1
- package/dist/worker.cjs +577 -0
- package/dist/worker.cjs.map +1 -0
- package/dist/worker.d.ts +1 -1
- package/dist/worker.d.ts.map +1 -1
- package/dist/worker.js +512 -484
- package/dist/worker.js.map +1 -1
- package/package.json +18 -8
- package/src/ipc/job_main.ts +66 -64
- package/src/job.ts +3 -2
- package/src/pipeline/pipeline_agent.ts +23 -23
- package/src/tokenize/basic/basic.ts +1 -1
- package/src/tokenize/basic/sentence.ts +14 -8
- package/src/tokenize/tokenizer.test.ts +255 -0
- package/src/worker.ts +1 -0
package/src/tokenize/tokenizer.test.ts
@@ -0,0 +1,255 @@
+// SPDX-FileCopyrightText: 2024 LiveKit, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+import { describe, expect, it } from 'vitest';
+import { SentenceTokenizer, WordTokenizer, hyphenateWord } from './basic/index.js';
+import { splitParagraphs } from './basic/paragraph.js';
+
+const TEXT =
+  'Hi! ' +
+  'LiveKit is a platform for live audio and video applications and services. ' +
+  'R.T.C stands for Real-Time Communication... again R.T.C. ' +
+  'Mr. Theo is testing the sentence tokenizer. ' +
+  'This is a test. Another test. ' +
+  'A short sentence. ' +
+  'A longer sentence that is longer than the previous sentence. ' +
+  'f(x) = x * 2.54 + 42. ' +
+  'Hey! Hi! Hello! ';
+
+const EXPECTED_MIN_20 = [
+  'Hi! LiveKit is a platform for live audio and video applications and services.',
+  'R.T.C stands for Real-Time Communication... again R.T.C.',
+  'Mr. Theo is testing the sentence tokenizer.',
+  'This is a test. Another test.',
+  'A short sentence. A longer sentence that is longer than the previous sentence.',
+  'f(x) = x * 2.54 + 42.',
+  'Hey! Hi! Hello!',
+];
+
+const WORDS_TEXT = 'This is a test. Blabla another test! multiple consecutive spaces: done';
+const WORDS_EXPECTED = [
+  'This',
+  'is',
+  'a',
+  'test',
+  'Blabla',
+  'another',
+  'test',
+  'multiple',
+  'consecutive',
+  'spaces',
+  'done',
+];
+
+const WORDS_PUNCT_TEXT =
+  'This is <phoneme alphabet="cmu-arpabet" ph="AE K CH UW AH L IY">actually</phoneme> tricky to handle.';
+const WORDS_PUNCT_EXPECTED = [
+  'This',
+  'is',
+  '<phoneme',
+  'alphabet="cmu-arpabet"',
+  'ph="AE',
+  'K',
+  'CH',
+  'UW',
+  'AH',
+  'L',
+  'IY">actually</phoneme>',
+  'tricky',
+  'to',
+  'handle.',
+];
+
+const HYPHENATOR_TEXT = ['Segment', 'expected', 'communication', 'window', 'welcome', 'bedroom'];
+const HYPHENATOR_EXPECTED = [
+  ['Seg', 'ment'],
+  ['ex', 'pect', 'ed'],
+  ['com', 'mu', 'ni', 'ca', 'tion'],
+  ['win', 'dow'],
+  ['wel', 'come'],
+  ['bed', 'room'],
+];
+
+const PARAGRAPH_TEST_CASES: [string, [string, number, number][]][] = [
+  ['Single paragraph.', [['Single paragraph.', 0, 17]]],
+  [
+    'Paragraph 1.\n\nParagraph 2.',
+    [
+      ['Paragraph 1.', 0, 12],
+      ['Paragraph 2.', 14, 26],
+    ],
+  ],
+  [
+    'Para 1.\n\nPara 2.\n\nPara 3.',
+    [
+      ['Para 1.', 0, 7],
+      ['Para 2.', 9, 16],
+      ['Para 3.', 18, 25],
+    ],
+  ],
+  ['\n\nParagraph with leading newlines.', [['Paragraph with leading newlines.', 2, 34]]],
+  ['Paragraph with trailing newlines.\n\n', [['Paragraph with trailing newlines.', 0, 33]]],
+  [
+    '\n\n  Paragraph with leading and trailing spaces.  \n\n',
+    [['Paragraph with leading and trailing spaces.', 4, 47]],
+  ],
+  [
+    'Para 1.\n\n\n\nPara 2.', // Multiple newlines between paragraphs
+    [
+      ['Para 1.', 0, 7],
+      ['Para 2.', 11, 18],
+    ],
+  ],
+  [
+    'Para 1.\n \n \nPara 2.', // Newlines with spaces between paragraphs
+    [
+      ['Para 1.', 0, 7],
+      ['Para 2.', 12, 19],
+    ],
+  ],
+  [
+    '', // Empty string
+    [],
+  ],
+  [
+    '\n\n\n', // Only newlines
+    [],
+  ],
+  [
+    'Line 1\nLine 2\nLine 3', // Single paragraph with newlines
+    [['Line 1\nLine 2\nLine 3', 0, 20]],
+  ],
+];
+
+describe('tokenizer', () => {
+  describe('SentenceTokenizer', () => {
+    const tokenizer = new SentenceTokenizer();
+
+    it('should tokenize sentences correctly', () => {
+      expect(tokenizer.tokenize(TEXT).every((x, i) => EXPECTED_MIN_20[i] === x)).toBeTruthy();
+    });
+
+    it('should stream tokenize sentences correctly', async () => {
+      const pattern = [1, 2, 4];
+      let text = TEXT;
+      const chunks = [];
+      const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)))
+        .fill(pattern)
+        .flat()
+        [Symbol.iterator]();
+
+      for (const size of patternIter) {
+        if (!text) break;
+        chunks.push(text.slice(undefined, size));
+        text = text.slice(size);
+      }
+      const stream = tokenizer.stream();
+      for (const chunk of chunks) {
+        stream.pushText(chunk);
+      }
+      stream.endInput();
+      stream.close();
+
+      for (const x of EXPECTED_MIN_20) {
+        await stream.next().then((value) => {
+          if (value.value) {
+            expect(value.value.token).toStrictEqual(x);
+          }
+        });
+      }
+    });
+  });
+  describe('WordTokenizer', () => {
+    const tokenizer = new WordTokenizer();
+
+    it('should tokenize words correctly', () => {
+      expect(tokenizer.tokenize(WORDS_TEXT).every((x, i) => WORDS_EXPECTED[i] === x)).toBeTruthy();
+    });
+
+    it('should stream tokenize words correctly', async () => {
+      const pattern = [1, 2, 4];
+      let text = WORDS_TEXT;
+      const chunks = [];
+      const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)))
+        .fill(pattern)
+        .flat()
+        [Symbol.iterator]();
+
+      for (const size of patternIter) {
+        if (!text) break;
+        chunks.push(text.slice(undefined, size));
+        text = text.slice(size);
+      }
+      const stream = tokenizer.stream();
+      for (const chunk of chunks) {
+        stream.pushText(chunk);
+      }
+      stream.endInput();
+      stream.close();
+
+      for (const x of WORDS_EXPECTED) {
+        await stream.next().then((value) => {
+          if (value.value) {
+            expect(value.value.token).toStrictEqual(x);
+          }
+        });
+      }
+    });
+
+    describe('punctuation handling', () => {
+      const tokenizerPunct = new WordTokenizer(false);
+
+      it('should tokenize words correctly', () => {
+        expect(
+          tokenizerPunct.tokenize(WORDS_PUNCT_TEXT).every((x, i) => WORDS_PUNCT_EXPECTED[i] === x),
+        ).toBeTruthy();
+      });
+
+      it('should stream tokenize words correctly', async () => {
+        const pattern = [1, 2, 4];
+        let text = WORDS_PUNCT_TEXT;
+        const chunks = [];
+        const patternIter = Array(
+          Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)),
+        )
+          .fill(pattern)
+          .flat()
+          [Symbol.iterator]();
+
+        for (const size of patternIter) {
+          if (!text) break;
+          chunks.push(text.slice(undefined, size));
+          text = text.slice(size);
+        }
+        const stream = tokenizerPunct.stream();
+        for (const chunk of chunks) {
+          stream.pushText(chunk);
+        }
+        stream.endInput();
+        stream.close();
+
+        for (const x of WORDS_PUNCT_EXPECTED) {
+          await stream.next().then((value) => {
+            if (value.value) {
+              expect(value.value.token).toStrictEqual(x);
+            }
+          });
+        }
+      });
+    });
+  });
+  describe('hyphenateWord', () => {
+    it('should hyphenate correctly', () => {
+      HYPHENATOR_TEXT.forEach((x, i) => {
+        expect(hyphenateWord(x)).toStrictEqual(HYPHENATOR_EXPECTED[i]);
+      });
+    });
+  });
+  describe('splitParagraphs', () => {
+    it('should tokenize paragraphs correctly', () => {
+      PARAGRAPH_TEST_CASES.forEach(([a, b]) => {
+        expect(splitParagraphs(a)).toStrictEqual(b);
+      });
+    });
+  });
+});