@livekit/agents 0.4.6 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +17 -0
- package/dist/audio.cjs +77 -0
- package/dist/audio.cjs.map +1 -0
- package/dist/audio.js +48 -37
- package/dist/audio.js.map +1 -1
- package/dist/cli.cjs +131 -0
- package/dist/cli.cjs.map +1 -0
- package/dist/cli.js +96 -122
- package/dist/cli.js.map +1 -1
- package/dist/generator.cjs +36 -0
- package/dist/generator.cjs.map +1 -0
- package/dist/generator.js +8 -22
- package/dist/generator.js.map +1 -1
- package/dist/http_server.cjs +72 -0
- package/dist/http_server.cjs.map +1 -0
- package/dist/http_server.d.ts +1 -1
- package/dist/http_server.js +44 -47
- package/dist/http_server.js.map +1 -1
- package/dist/index.cjs +78 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.js +26 -28
- package/dist/index.js.map +1 -1
- package/dist/ipc/job_executor.cjs +33 -0
- package/dist/ipc/job_executor.cjs.map +1 -0
- package/dist/ipc/job_executor.js +7 -4
- package/dist/ipc/job_executor.js.map +1 -1
- package/dist/ipc/job_main.cjs +147 -0
- package/dist/ipc/job_main.cjs.map +1 -0
- package/dist/ipc/job_main.d.ts +1 -1
- package/dist/ipc/job_main.js +103 -103
- package/dist/ipc/job_main.js.map +1 -1
- package/dist/ipc/message.cjs +17 -0
- package/dist/ipc/message.cjs.map +1 -0
- package/dist/ipc/message.js +0 -1
- package/dist/ipc/message.js.map +1 -1
- package/dist/ipc/proc_job_executor.cjs +174 -0
- package/dist/ipc/proc_job_executor.cjs.map +1 -0
- package/dist/ipc/proc_job_executor.js +130 -126
- package/dist/ipc/proc_job_executor.js.map +1 -1
- package/dist/ipc/proc_pool.cjs +126 -0
- package/dist/ipc/proc_pool.cjs.map +1 -0
- package/dist/ipc/proc_pool.js +93 -96
- package/dist/ipc/proc_pool.js.map +1 -1
- package/dist/job.cjs +230 -0
- package/dist/job.cjs.map +1 -0
- package/dist/job.d.ts +6 -1
- package/dist/job.d.ts.map +1 -1
- package/dist/job.js +195 -198
- package/dist/job.js.map +1 -1
- package/dist/llm/chat_context.cjs +131 -0
- package/dist/llm/chat_context.cjs.map +1 -0
- package/dist/llm/chat_context.js +98 -86
- package/dist/llm/chat_context.js.map +1 -1
- package/dist/llm/function_context.cjs +103 -0
- package/dist/llm/function_context.cjs.map +1 -0
- package/dist/llm/function_context.js +72 -81
- package/dist/llm/function_context.js.map +1 -1
- package/dist/llm/function_context.test.cjs +218 -0
- package/dist/llm/function_context.test.cjs.map +1 -0
- package/dist/llm/function_context.test.js +209 -210
- package/dist/llm/function_context.test.js.map +1 -1
- package/dist/llm/index.cjs +43 -0
- package/dist/llm/index.cjs.map +1 -0
- package/dist/llm/index.js +22 -6
- package/dist/llm/index.js.map +1 -1
- package/dist/llm/llm.cjs +76 -0
- package/dist/llm/llm.cjs.map +1 -0
- package/dist/llm/llm.js +48 -42
- package/dist/llm/llm.js.map +1 -1
- package/dist/log.cjs +57 -0
- package/dist/log.cjs.map +1 -0
- package/dist/log.js +27 -26
- package/dist/log.js.map +1 -1
- package/dist/multimodal/agent_playout.cjs +228 -0
- package/dist/multimodal/agent_playout.cjs.map +1 -0
- package/dist/multimodal/agent_playout.d.ts +1 -1
- package/dist/multimodal/agent_playout.js +193 -180
- package/dist/multimodal/agent_playout.js.map +1 -1
- package/dist/multimodal/index.cjs +25 -0
- package/dist/multimodal/index.cjs.map +1 -0
- package/dist/multimodal/index.js +2 -5
- package/dist/multimodal/index.js.map +1 -1
- package/dist/multimodal/multimodal_agent.cjs +404 -0
- package/dist/multimodal/multimodal_agent.cjs.map +1 -0
- package/dist/multimodal/multimodal_agent.d.ts +1 -1
- package/dist/multimodal/multimodal_agent.js +351 -330
- package/dist/multimodal/multimodal_agent.js.map +1 -1
- package/dist/pipeline/agent_output.cjs +172 -0
- package/dist/pipeline/agent_output.cjs.map +1 -0
- package/dist/pipeline/agent_output.js +136 -138
- package/dist/pipeline/agent_output.js.map +1 -1
- package/dist/pipeline/agent_playout.cjs +169 -0
- package/dist/pipeline/agent_playout.cjs.map +1 -0
- package/dist/pipeline/agent_playout.js +126 -136
- package/dist/pipeline/agent_playout.js.map +1 -1
- package/dist/pipeline/human_input.cjs +158 -0
- package/dist/pipeline/human_input.cjs.map +1 -0
- package/dist/pipeline/human_input.js +124 -125
- package/dist/pipeline/human_input.js.map +1 -1
- package/dist/pipeline/index.cjs +31 -0
- package/dist/pipeline/index.cjs.map +1 -0
- package/dist/pipeline/index.js +8 -4
- package/dist/pipeline/index.js.map +1 -1
- package/dist/pipeline/pipeline_agent.cjs +642 -0
- package/dist/pipeline/pipeline_agent.cjs.map +1 -0
- package/dist/pipeline/pipeline_agent.js +595 -651
- package/dist/pipeline/pipeline_agent.js.map +1 -1
- package/dist/pipeline/speech_handle.cjs +128 -0
- package/dist/pipeline/speech_handle.cjs.map +1 -0
- package/dist/pipeline/speech_handle.js +102 -100
- package/dist/pipeline/speech_handle.js.map +1 -1
- package/dist/plugin.cjs +46 -0
- package/dist/plugin.cjs.map +1 -0
- package/dist/plugin.js +20 -20
- package/dist/plugin.js.map +1 -1
- package/dist/stt/index.cjs +38 -0
- package/dist/stt/index.cjs.map +1 -0
- package/dist/stt/index.js +13 -5
- package/dist/stt/index.js.map +1 -1
- package/dist/stt/stream_adapter.cjs +87 -0
- package/dist/stt/stream_adapter.cjs.map +1 -0
- package/dist/stt/stream_adapter.js +58 -55
- package/dist/stt/stream_adapter.js.map +1 -1
- package/dist/stt/stt.cjs +98 -0
- package/dist/stt/stt.cjs.map +1 -0
- package/dist/stt/stt.js +63 -98
- package/dist/stt/stt.js.map +1 -1
- package/dist/tokenize/basic/basic.cjs +98 -0
- package/dist/tokenize/basic/basic.cjs.map +1 -0
- package/dist/tokenize/basic/basic.d.ts +1 -1
- package/dist/tokenize/basic/basic.d.ts.map +1 -1
- package/dist/tokenize/basic/basic.js +56 -45
- package/dist/tokenize/basic/basic.js.map +1 -1
- package/dist/tokenize/basic/hyphenator.cjs +425 -0
- package/dist/tokenize/basic/hyphenator.cjs.map +1 -0
- package/dist/tokenize/basic/hyphenator.js +66 -82
- package/dist/tokenize/basic/hyphenator.js.map +1 -1
- package/dist/tokenize/basic/index.cjs +35 -0
- package/dist/tokenize/basic/index.cjs.map +1 -0
- package/dist/tokenize/basic/index.js +7 -4
- package/dist/tokenize/basic/index.js.map +1 -1
- package/dist/tokenize/basic/paragraph.cjs +57 -0
- package/dist/tokenize/basic/paragraph.cjs.map +1 -0
- package/dist/tokenize/basic/paragraph.js +30 -35
- package/dist/tokenize/basic/paragraph.js.map +1 -1
- package/dist/tokenize/basic/sentence.cjs +89 -0
- package/dist/tokenize/basic/sentence.cjs.map +1 -0
- package/dist/tokenize/basic/sentence.d.ts.map +1 -1
- package/dist/tokenize/basic/sentence.js +62 -57
- package/dist/tokenize/basic/sentence.js.map +1 -1
- package/dist/tokenize/basic/word.cjs +44 -0
- package/dist/tokenize/basic/word.cjs.map +1 -0
- package/dist/tokenize/basic/word.js +17 -20
- package/dist/tokenize/basic/word.js.map +1 -1
- package/dist/tokenize/index.cjs +55 -0
- package/dist/tokenize/index.cjs.map +1 -0
- package/dist/tokenize/index.js +18 -7
- package/dist/tokenize/index.js.map +1 -1
- package/dist/tokenize/token_stream.cjs +164 -0
- package/dist/tokenize/token_stream.cjs.map +1 -0
- package/dist/tokenize/token_stream.js +133 -139
- package/dist/tokenize/token_stream.js.map +1 -1
- package/dist/tokenize/tokenizer.cjs +184 -0
- package/dist/tokenize/tokenizer.cjs.map +1 -0
- package/dist/tokenize/tokenizer.js +138 -99
- package/dist/tokenize/tokenizer.js.map +1 -1
- package/dist/tokenize/tokenizer.test.cjs +220 -0
- package/dist/tokenize/tokenizer.test.cjs.map +1 -0
- package/dist/tokenize/tokenizer.test.d.ts +2 -0
- package/dist/tokenize/tokenizer.test.d.ts.map +1 -0
- package/dist/tokenize/tokenizer.test.js +219 -0
- package/dist/tokenize/tokenizer.test.js.map +1 -0
- package/dist/transcription.cjs +131 -0
- package/dist/transcription.cjs.map +1 -0
- package/dist/transcription.js +99 -96
- package/dist/transcription.js.map +1 -1
- package/dist/tts/index.cjs +38 -0
- package/dist/tts/index.cjs.map +1 -0
- package/dist/tts/index.js +13 -5
- package/dist/tts/index.js.map +1 -1
- package/dist/tts/stream_adapter.cjs +78 -0
- package/dist/tts/stream_adapter.cjs.map +1 -0
- package/dist/tts/stream_adapter.js +50 -47
- package/dist/tts/stream_adapter.js.map +1 -1
- package/dist/tts/tts.cjs +127 -0
- package/dist/tts/tts.cjs.map +1 -0
- package/dist/tts/tts.js +90 -120
- package/dist/tts/tts.js.map +1 -1
- package/dist/utils.cjs +284 -0
- package/dist/utils.cjs.map +1 -0
- package/dist/utils.js +242 -247
- package/dist/utils.js.map +1 -1
- package/dist/vad.cjs +92 -0
- package/dist/vad.cjs.map +1 -0
- package/dist/vad.js +57 -52
- package/dist/vad.js.map +1 -1
- package/dist/version.cjs +29 -0
- package/dist/version.cjs.map +1 -0
- package/dist/version.js +4 -4
- package/dist/version.js.map +1 -1
- package/dist/worker.cjs +577 -0
- package/dist/worker.cjs.map +1 -0
- package/dist/worker.d.ts +1 -1
- package/dist/worker.d.ts.map +1 -1
- package/dist/worker.js +512 -484
- package/dist/worker.js.map +1 -1
- package/package.json +18 -8
- package/src/ipc/job_main.ts +66 -64
- package/src/job.ts +3 -2
- package/src/pipeline/pipeline_agent.ts +23 -23
- package/src/tokenize/basic/basic.ts +1 -1
- package/src/tokenize/basic/sentence.ts +14 -8
- package/src/tokenize/tokenizer.test.ts +255 -0
- package/src/worker.ts +1 -0

package/dist/tokenize/tokenizer.js
@@ -1,117 +1,156 @@
-
-
-
-
-
-
-
-
-
+import { AsyncIterableQueue } from "../utils.js";
+const PUNCTUATIONS = [
+  "!",
+  '"',
+  "#",
+  "$",
+  "%",
+  "&",
+  "'",
+  "(",
+  ")",
+  "*",
+  "+",
+  ",",
+  "-",
+  ".",
+  "/",
+  ":",
+  ";",
+  "<",
+  "=",
+  ">",
+  "?",
+  "@",
+  "[",
+  "\\",
+  "]",
+  "^",
+  "_",
+  "`",
+  "{",
+  "|",
+  "}",
+  "~",
+  "\xB1",
+  "\u2014",
+  "\u2018",
+  "\u2019",
+  "\u201C",
+  "\u201D",
+  "\u2026"
 ];
-
+class SentenceTokenizer {
 }
-
-
-
-
-
-
-
+class SentenceStream {
+  static FLUSH_SENTINEL = Symbol("FLUSH_SENTINEL");
+  input = new AsyncIterableQueue();
+  queue = new AsyncIterableQueue();
+  #closed = false;
+  get closed() {
+    return this.#closed;
+  }
+  /** Push a string of text to the tokenizer */
+  pushText(text) {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
-    if (this.input.closed) {
-      throw new Error('Input is closed');
-    }
-    if (this.#closed) {
-      throw new Error('Stream is closed');
-    }
-    this.input.put(text);
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
-
-
-
-
-
-
-      throw new Error('Stream is closed');
-    }
-    this.input.put(SentenceStream.FLUSH_SENTINEL);
+    this.input.put(text);
+  }
+  /** Flush the tokenizer, causing it to process all pending text */
+  flush() {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
-    if (this.input.closed) {
-      throw new Error('Input is closed');
-    }
-    if (this.#closed) {
-      throw new Error('Stream is closed');
-    }
-    this.input.close();
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
-
-
+    this.input.put(SentenceStream.FLUSH_SENTINEL);
+  }
+  /** Mark the input as ended and forbid additional pushes */
+  endInput() {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
-    this.input.close();
-    this.queue.close();
-    this.#closed = true;
-  }
-  [Symbol.asyncIterator]() {
-    return this;
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
+    this.input.close();
+  }
+  next() {
+    return this.queue.next();
+  }
+  /** Close both the input and output of the tokenizer stream */
+  close() {
+    this.input.close();
+    this.queue.close();
+    this.#closed = true;
+  }
+  [Symbol.asyncIterator]() {
+    return this;
+  }
 }
-
+class WordTokenizer {
 }
-
-
-
-
-
-
-
-
-
-
-
-
-    }
-    if (this.#closed) {
-      throw new Error('Stream is closed');
-    }
-    this.input.put(text);
+class WordStream {
+  static FLUSH_SENTINEL = Symbol("FLUSH_SENTINEL");
+  input = new AsyncIterableQueue();
+  queue = new AsyncIterableQueue();
+  #closed = false;
+  get closed() {
+    return this.#closed;
+  }
+  /** Push a string of text to the tokenizer */
+  pushText(text) {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
-    if (this.input.closed) {
-      throw new Error('Input is closed');
-    }
-    if (this.#closed) {
-      throw new Error('Stream is closed');
-    }
-    this.input.put(WordStream.FLUSH_SENTINEL);
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
-
-
-
-
-
-
-      throw new Error('Stream is closed');
-    }
-    this.input.close();
+    this.input.put(text);
+  }
+  /** Flush the tokenizer, causing it to process all pending text */
+  flush() {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
-
-
-
-
-
+    this.input.put(WordStream.FLUSH_SENTINEL);
+  }
+  /** Mark the input as ended and forbid additional pushes */
+  endInput() {
+    if (this.input.closed) {
+      throw new Error("Input is closed");
     }
-
-
+    if (this.#closed) {
+      throw new Error("Stream is closed");
     }
+    this.input.close();
+  }
+  next() {
+    return this.queue.next();
+  }
+  /** Close both the input and output of the tokenizer stream */
+  close() {
+    this.input.close();
+    this.queue.close();
+    this.#closed = true;
+  }
+  [Symbol.asyncIterator]() {
+    return this;
+  }
 }
+export {
+  PUNCTUATIONS,
+  SentenceStream,
+  SentenceTokenizer,
+  WordStream,
+  WordTokenizer
+};
 //# sourceMappingURL=tokenizer.js.map
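
The hunk above is the rewritten ESM build of the abstract tokenizer base classes. Here is a minimal sketch (not part of the diff) of how this streaming API is driven, based only on the methods visible above (pushText, endInput, next, and async iteration over TokenData); the import path mirrors the package's own tests and is an assumption for downstream code:

```ts
// Sketch only: import path follows tokenizer.test.ts; adjust for your setup.
import { SentenceTokenizer } from './basic/index.js';

const tokenizer = new SentenceTokenizer();
const stream = tokenizer.stream();

// Push incremental text, then mark the input as ended.
stream.pushText('Hi! LiveKit is a platform for live audio ');
stream.pushText('and video applications and services.');
stream.endInput();

// The stream returns itself from [Symbol.asyncIterator], so it can be
// consumed with for await; each result is a TokenData { segmentId, token }.
for await (const { token } of stream) {
  console.log(token);
}
```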

package/dist/tokenize/tokenizer.js.map
@@ -1 +1 @@
-{"version":3,"
+{"version":3,"sources":["../../src/tokenize/tokenizer.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2024 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport { AsyncIterableQueue } from '../utils.js';\n\n// prettier-ignore\nexport const PUNCTUATIONS = [\n '!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=',\n '>', '?', '@', '[', '\\\\', ']', '^', '_', '`', '{', '|', '}', '~', '±', '—', '‘', '’', '“', '”',\n '…',\n]\n\nexport interface TokenData {\n segmentId: string;\n token: string;\n}\n\nexport abstract class SentenceTokenizer {\n abstract tokenize(text: string, language?: string): string[];\n\n /**\n * Returns a {@link SentenceStream} that can be used to push strings and receive smaller segments.\n */\n abstract stream(): SentenceStream;\n}\n\nexport abstract class SentenceStream {\n protected static readonly FLUSH_SENTINEL = Symbol('FLUSH_SENTINEL');\n protected input = new AsyncIterableQueue<string | typeof SentenceStream.FLUSH_SENTINEL>();\n protected queue = new AsyncIterableQueue<TokenData>();\n #closed = false;\n\n get closed(): boolean {\n return this.#closed;\n }\n\n /** Push a string of text to the tokenizer */\n pushText(text: string) {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.put(text);\n }\n\n /** Flush the tokenizer, causing it to process all pending text */\n flush() {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.put(SentenceStream.FLUSH_SENTINEL);\n }\n\n /** Mark the input as ended and forbid additional pushes */\n endInput() {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.close();\n }\n\n next(): Promise<IteratorResult<TokenData>> {\n return this.queue.next();\n }\n\n /** Close both the input and output of the tokenizer stream */\n close() {\n this.input.close();\n this.queue.close();\n this.#closed = true;\n }\n\n [Symbol.asyncIterator](): SentenceStream {\n return this;\n }\n}\n\nexport abstract class WordTokenizer {\n abstract tokenize(text: string, language?: string): string[];\n\n /**\n * Returns a {@link WordStream} that can be used to push words and receive smaller segments.\n */\n abstract stream(): WordStream;\n}\n\nexport abstract class WordStream {\n protected static readonly FLUSH_SENTINEL = Symbol('FLUSH_SENTINEL');\n protected input = new AsyncIterableQueue<string | typeof WordStream.FLUSH_SENTINEL>();\n protected queue = new AsyncIterableQueue<TokenData>();\n #closed = false;\n\n get closed(): boolean {\n return this.#closed;\n }\n\n /** Push a string of text to the tokenizer */\n pushText(text: string) {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.put(text);\n }\n\n /** Flush the tokenizer, causing it to process all pending text */\n flush() {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.put(WordStream.FLUSH_SENTINEL);\n }\n\n /** Mark the input as ended and forbid additional pushes */\n endInput() {\n if (this.input.closed) {\n throw new Error('Input is closed');\n }\n if (this.#closed) {\n throw new Error('Stream is closed');\n }\n this.input.close();\n }\n\n next(): Promise<IteratorResult<TokenData>> {\n return this.queue.next();\n }\n\n /** Close both the input and output of the tokenizer stream */\n close() {\n this.input.close();\n this.queue.close();\n this.#closed = true;\n }\n\n [Symbol.asyncIterator](): WordStream {\n return this;\n }\n}\n"],"mappings":"AAGA,SAAS,0BAA0B;AAG5B,MAAM,eAAe;AAAA,EAC1B;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAC1F;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAM;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAAK;AAAA,EAC3F;AACF;AAOO,MAAe,kBAAkB;AAOxC;AAEO,MAAe,eAAe;AAAA,EACnC,OAA0B,iBAAiB,OAAO,gBAAgB;AAAA,EACxD,QAAQ,IAAI,mBAAkE;AAAA,EAC9E,QAAQ,IAAI,mBAA8B;AAAA,EACpD,UAAU;AAAA,EAEV,IAAI,SAAkB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA,EAGA,SAAS,MAAc;AACrB,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,IAAI,IAAI;AAAA,EACrB;AAAA;AAAA,EAGA,QAAQ;AACN,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,IAAI,eAAe,cAAc;AAAA,EAC9C;AAAA;AAAA,EAGA,WAAW;AACT,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,MAAM;AAAA,EACnB;AAAA,EAEA,OAA2C;AACzC,WAAO,KAAK,MAAM,KAAK;AAAA,EACzB;AAAA;AAAA,EAGA,QAAQ;AACN,SAAK,MAAM,MAAM;AACjB,SAAK,MAAM,MAAM;AACjB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,CAAC,OAAO,aAAa,IAAoB;AACvC,WAAO;AAAA,EACT;AACF;AAEO,MAAe,cAAc;AAOpC;AAEO,MAAe,WAAW;AAAA,EAC/B,OAA0B,iBAAiB,OAAO,gBAAgB;AAAA,EACxD,QAAQ,IAAI,mBAA8D;AAAA,EAC1E,QAAQ,IAAI,mBAA8B;AAAA,EACpD,UAAU;AAAA,EAEV,IAAI,SAAkB;AACpB,WAAO,KAAK;AAAA,EACd;AAAA;AAAA,EAGA,SAAS,MAAc;AACrB,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,IAAI,IAAI;AAAA,EACrB;AAAA;AAAA,EAGA,QAAQ;AACN,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,IAAI,WAAW,cAAc;AAAA,EAC1C;AAAA;AAAA,EAGA,WAAW;AACT,QAAI,KAAK,MAAM,QAAQ;AACrB,YAAM,IAAI,MAAM,iBAAiB;AAAA,IACnC;AACA,QAAI,KAAK,SAAS;AAChB,YAAM,IAAI,MAAM,kBAAkB;AAAA,IACpC;AACA,SAAK,MAAM,MAAM;AAAA,EACnB;AAAA,EAEA,OAA2C;AACzC,WAAO,KAAK,MAAM,KAAK;AAAA,EACzB;AAAA;AAAA,EAGA,QAAQ;AACN,SAAK,MAAM,MAAM;AACjB,SAAK,MAAM,MAAM;AACjB,SAAK,UAAU;AAAA,EACjB;AAAA,EAEA,CAAC,OAAO,aAAa,IAAgB;AACnC,WAAO;AAAA,EACT;AACF;","names":[]}

package/dist/tokenize/tokenizer.test.cjs
@@ -0,0 +1,220 @@
+"use strict";
+var import_vitest = require("vitest");
+var import_basic = require("./basic/index.cjs");
+var import_paragraph = require("./basic/paragraph.cjs");
+const TEXT = "Hi! LiveKit is a platform for live audio and video applications and services. R.T.C stands for Real-Time Communication... again R.T.C. Mr. Theo is testing the sentence tokenizer. This is a test. Another test. A short sentence. A longer sentence that is longer than the previous sentence. f(x) = x * 2.54 + 42. Hey! Hi! Hello! ";
+const EXPECTED_MIN_20 = [
+  "Hi! LiveKit is a platform for live audio and video applications and services.",
+  "R.T.C stands for Real-Time Communication... again R.T.C.",
+  "Mr. Theo is testing the sentence tokenizer.",
+  "This is a test. Another test.",
+  "A short sentence. A longer sentence that is longer than the previous sentence.",
+  "f(x) = x * 2.54 + 42.",
+  "Hey! Hi! Hello!"
+];
+const WORDS_TEXT = "This is a test. Blabla another test! multiple consecutive spaces: done";
+const WORDS_EXPECTED = [
+  "This",
+  "is",
+  "a",
+  "test",
+  "Blabla",
+  "another",
+  "test",
+  "multiple",
+  "consecutive",
+  "spaces",
+  "done"
+];
+const WORDS_PUNCT_TEXT = 'This is <phoneme alphabet="cmu-arpabet" ph="AE K CH UW AH L IY">actually</phoneme> tricky to handle.';
+const WORDS_PUNCT_EXPECTED = [
+  "This",
+  "is",
+  "<phoneme",
+  'alphabet="cmu-arpabet"',
+  'ph="AE',
+  "K",
+  "CH",
+  "UW",
+  "AH",
+  "L",
+  'IY">actually</phoneme>',
+  "tricky",
+  "to",
+  "handle."
+];
+const HYPHENATOR_TEXT = ["Segment", "expected", "communication", "window", "welcome", "bedroom"];
+const HYPHENATOR_EXPECTED = [
+  ["Seg", "ment"],
+  ["ex", "pect", "ed"],
+  ["com", "mu", "ni", "ca", "tion"],
+  ["win", "dow"],
+  ["wel", "come"],
+  ["bed", "room"]
+];
+const PARAGRAPH_TEST_CASES = [
+  ["Single paragraph.", [["Single paragraph.", 0, 17]]],
+  [
+    "Paragraph 1.\n\nParagraph 2.",
+    [
+      ["Paragraph 1.", 0, 12],
+      ["Paragraph 2.", 14, 26]
+    ]
+  ],
+  [
+    "Para 1.\n\nPara 2.\n\nPara 3.",
+    [
+      ["Para 1.", 0, 7],
+      ["Para 2.", 9, 16],
+      ["Para 3.", 18, 25]
+    ]
+  ],
+  ["\n\nParagraph with leading newlines.", [["Paragraph with leading newlines.", 2, 34]]],
+  ["Paragraph with trailing newlines.\n\n", [["Paragraph with trailing newlines.", 0, 33]]],
+  [
+    "\n\n Paragraph with leading and trailing spaces. \n\n",
+    [["Paragraph with leading and trailing spaces.", 4, 47]]
+  ],
+  [
+    "Para 1.\n\n\n\nPara 2.",
+    // Multiple newlines between paragraphs
+    [
+      ["Para 1.", 0, 7],
+      ["Para 2.", 11, 18]
+    ]
+  ],
+  [
+    "Para 1.\n \n \nPara 2.",
+    // Newlines with spaces between paragraphs
+    [
+      ["Para 1.", 0, 7],
+      ["Para 2.", 12, 19]
+    ]
+  ],
+  [
+    "",
+    // Empty string
+    []
+  ],
+  [
+    "\n\n\n",
+    // Only newlines
+    []
+  ],
+  [
+    "Line 1\nLine 2\nLine 3",
+    // Single paragraph with newlines
+    [["Line 1\nLine 2\nLine 3", 0, 20]]
+  ]
+];
+(0, import_vitest.describe)("tokenizer", () => {
+  (0, import_vitest.describe)("SentenceTokenizer", () => {
+    const tokenizer = new import_basic.SentenceTokenizer();
+    (0, import_vitest.it)("should tokenize sentences correctly", () => {
+      (0, import_vitest.expect)(tokenizer.tokenize(TEXT).every((x, i) => EXPECTED_MIN_20[i] === x)).toBeTruthy();
+    });
+    (0, import_vitest.it)("should stream tokenize sentences correctly", async () => {
+      const pattern = [1, 2, 4];
+      let text = TEXT;
+      const chunks = [];
+      const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0))).fill(pattern).flat()[Symbol.iterator]();
+      for (const size of patternIter) {
+        if (!text) break;
+        chunks.push(text.slice(void 0, size));
+        text = text.slice(size);
+      }
+      const stream = tokenizer.stream();
+      for (const chunk of chunks) {
+        stream.pushText(chunk);
+      }
+      stream.endInput();
+      stream.close();
+      for (const x of EXPECTED_MIN_20) {
+        await stream.next().then((value) => {
+          if (value.value) {
+            (0, import_vitest.expect)(value.value.token).toStrictEqual(x);
+          }
+        });
+      }
+    });
+  });
+  (0, import_vitest.describe)("WordTokenizer", () => {
+    const tokenizer = new import_basic.WordTokenizer();
+    (0, import_vitest.it)("should tokenize words correctly", () => {
+      (0, import_vitest.expect)(tokenizer.tokenize(WORDS_TEXT).every((x, i) => WORDS_EXPECTED[i] === x)).toBeTruthy();
+    });
+    (0, import_vitest.it)("should stream tokenize words correctly", async () => {
+      const pattern = [1, 2, 4];
+      let text = WORDS_TEXT;
+      const chunks = [];
+      const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0))).fill(pattern).flat()[Symbol.iterator]();
+      for (const size of patternIter) {
+        if (!text) break;
+        chunks.push(text.slice(void 0, size));
+        text = text.slice(size);
+      }
+      const stream = tokenizer.stream();
+      for (const chunk of chunks) {
+        stream.pushText(chunk);
+      }
+      stream.endInput();
+      stream.close();
+      for (const x of WORDS_EXPECTED) {
+        await stream.next().then((value) => {
+          if (value.value) {
+            (0, import_vitest.expect)(value.value.token).toStrictEqual(x);
+          }
+        });
+      }
+    });
+    (0, import_vitest.describe)("punctuation handling", () => {
+      const tokenizerPunct = new import_basic.WordTokenizer(false);
+      (0, import_vitest.it)("should tokenize words correctly", () => {
+        (0, import_vitest.expect)(
+          tokenizerPunct.tokenize(WORDS_PUNCT_TEXT).every((x, i) => WORDS_PUNCT_EXPECTED[i] === x)
+        ).toBeTruthy();
+      });
+      (0, import_vitest.it)("should stream tokenize words correctly", async () => {
+        const pattern = [1, 2, 4];
+        let text = WORDS_PUNCT_TEXT;
+        const chunks = [];
+        const patternIter = Array(
+          Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0))
+        ).fill(pattern).flat()[Symbol.iterator]();
+        for (const size of patternIter) {
+          if (!text) break;
+          chunks.push(text.slice(void 0, size));
+          text = text.slice(size);
+        }
+        const stream = tokenizerPunct.stream();
+        for (const chunk of chunks) {
+          stream.pushText(chunk);
+        }
+        stream.endInput();
+        stream.close();
+        for (const x of WORDS_PUNCT_EXPECTED) {
+          await stream.next().then((value) => {
+            if (value.value) {
+              (0, import_vitest.expect)(value.value.token).toStrictEqual(x);
+            }
+          });
+        }
+      });
+    });
+  });
+  (0, import_vitest.describe)("hyphenateWord", () => {
+    (0, import_vitest.it)("should hyphenate correctly", () => {
+      HYPHENATOR_TEXT.forEach((x, i) => {
+        (0, import_vitest.expect)((0, import_basic.hyphenateWord)(x)).toStrictEqual(HYPHENATOR_EXPECTED[i]);
+      });
+    });
+  });
+  (0, import_vitest.describe)("splitParagraphs", () => {
+    (0, import_vitest.it)("should tokenize paragraphs correctly", () => {
+      PARAGRAPH_TEST_CASES.forEach(([a, b]) => {
+        (0, import_vitest.expect)((0, import_paragraph.splitParagraphs)(a)).toStrictEqual(b);
+      });
+    });
+  });
+});
+//# sourceMappingURL=tokenizer.test.cjs.map
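
The new suite above also pins down the WordTokenizer punctuation flag: `new WordTokenizer(false)` keeps punctuation attached to tokens. A hedged illustration of the behavior those assertions encode, with inputs and outputs lifted from WORDS_EXPECTED and WORDS_PUNCT_EXPECTED above:

```ts
import { WordTokenizer } from './basic/index.js'; // path as in the test file; an assumption elsewhere

// Default construction strips punctuation from tokens:
new WordTokenizer().tokenize('This is a test.');
// -> ['This', 'is', 'a', 'test']

// Passing false preserves punctuation, which keeps markup such as the
// SSML <phoneme> tags in WORDS_PUNCT_TEXT intact:
new WordTokenizer(false).tokenize('tricky to handle.');
// -> ['tricky', 'to', 'handle.']
```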

package/dist/tokenize/tokenizer.test.cjs.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../../src/tokenize/tokenizer.test.ts"],"sourcesContent":["// SPDX-FileCopyrightText: 2024 LiveKit, Inc.\n//\n// SPDX-License-Identifier: Apache-2.0\nimport { describe, expect, it } from 'vitest';\nimport { SentenceTokenizer, WordTokenizer, hyphenateWord } from './basic/index.js';\nimport { splitParagraphs } from './basic/paragraph.js';\n\nconst TEXT =\n 'Hi! ' +\n 'LiveKit is a platform for live audio and video applications and services. ' +\n 'R.T.C stands for Real-Time Communication... again R.T.C. ' +\n 'Mr. Theo is testing the sentence tokenizer. ' +\n 'This is a test. Another test. ' +\n 'A short sentence. ' +\n 'A longer sentence that is longer than the previous sentence. ' +\n 'f(x) = x * 2.54 + 42. ' +\n 'Hey! Hi! Hello! ';\n\nconst EXPECTED_MIN_20 = [\n 'Hi! LiveKit is a platform for live audio and video applications and services.',\n 'R.T.C stands for Real-Time Communication... again R.T.C.',\n 'Mr. Theo is testing the sentence tokenizer.',\n 'This is a test. Another test.',\n 'A short sentence. A longer sentence that is longer than the previous sentence.',\n 'f(x) = x * 2.54 + 42.',\n 'Hey! Hi! Hello!',\n];\n\nconst WORDS_TEXT = 'This is a test. Blabla another test! multiple consecutive spaces: done';\nconst WORDS_EXPECTED = [\n 'This',\n 'is',\n 'a',\n 'test',\n 'Blabla',\n 'another',\n 'test',\n 'multiple',\n 'consecutive',\n 'spaces',\n 'done',\n];\n\nconst WORDS_PUNCT_TEXT =\n 'This is <phoneme alphabet=\"cmu-arpabet\" ph=\"AE K CH UW AH L IY\">actually</phoneme> tricky to handle.';\nconst WORDS_PUNCT_EXPECTED = [\n 'This',\n 'is',\n '<phoneme',\n 'alphabet=\"cmu-arpabet\"',\n 'ph=\"AE',\n 'K',\n 'CH',\n 'UW',\n 'AH',\n 'L',\n 'IY\">actually</phoneme>',\n 'tricky',\n 'to',\n 'handle.',\n];\n\nconst HYPHENATOR_TEXT = ['Segment', 'expected', 'communication', 'window', 'welcome', 'bedroom'];\nconst HYPHENATOR_EXPECTED = [\n ['Seg', 'ment'],\n ['ex', 'pect', 'ed'],\n ['com', 'mu', 'ni', 'ca', 'tion'],\n ['win', 'dow'],\n ['wel', 'come'],\n ['bed', 'room'],\n];\n\nconst PARAGRAPH_TEST_CASES: [string, [string, number, number][]][] = [\n ['Single paragraph.', [['Single paragraph.', 0, 17]]],\n [\n 'Paragraph 1.\\n\\nParagraph 2.',\n [\n ['Paragraph 1.', 0, 12],\n ['Paragraph 2.', 14, 26],\n ],\n ],\n [\n 'Para 1.\\n\\nPara 2.\\n\\nPara 3.',\n [\n ['Para 1.', 0, 7],\n ['Para 2.', 9, 16],\n ['Para 3.', 18, 25],\n ],\n ],\n ['\\n\\nParagraph with leading newlines.', [['Paragraph with leading newlines.', 2, 34]]],\n ['Paragraph with trailing newlines.\\n\\n', [['Paragraph with trailing newlines.', 0, 33]]],\n [\n '\\n\\n Paragraph with leading and trailing spaces. \\n\\n',\n [['Paragraph with leading and trailing spaces.', 4, 47]],\n ],\n [\n 'Para 1.\\n\\n\\n\\nPara 2.', // Multiple newlines between paragraphs\n [\n ['Para 1.', 0, 7],\n ['Para 2.', 11, 18],\n ],\n ],\n [\n 'Para 1.\\n \\n \\nPara 2.', // Newlines with spaces between paragraphs\n [\n ['Para 1.', 0, 7],\n ['Para 2.', 12, 19],\n ],\n ],\n [\n '', // Empty string\n [],\n ],\n [\n '\\n\\n\\n', // Only newlines\n [],\n ],\n [\n 'Line 1\\nLine 2\\nLine 3', // Single paragraph with newlines\n [['Line 1\\nLine 2\\nLine 3', 0, 20]],\n ],\n];\n\ndescribe('tokenizer', () => {\n describe('SentenceTokenizer', () => {\n const tokenizer = new SentenceTokenizer();\n\n it('should tokenize sentences correctly', () => {\n expect(tokenizer.tokenize(TEXT).every((x, i) => EXPECTED_MIN_20[i] === x)).toBeTruthy();\n });\n\n it('should stream tokenize sentences correctly', async () => {\n const pattern = [1, 2, 4];\n let text = TEXT;\n const chunks = [];\n const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)))\n .fill(pattern)\n .flat()\n [Symbol.iterator]();\n\n for (const size of patternIter) {\n if (!text) break;\n chunks.push(text.slice(undefined, size));\n text = text.slice(size);\n }\n const stream = tokenizer.stream();\n for (const chunk of chunks) {\n stream.pushText(chunk);\n }\n stream.endInput();\n stream.close();\n\n for (const x of EXPECTED_MIN_20) {\n await stream.next().then((value) => {\n if (value.value) {\n expect(value.value.token).toStrictEqual(x);\n }\n });\n }\n });\n });\n describe('WordTokenizer', () => {\n const tokenizer = new WordTokenizer();\n\n it('should tokenize words correctly', () => {\n expect(tokenizer.tokenize(WORDS_TEXT).every((x, i) => WORDS_EXPECTED[i] === x)).toBeTruthy();\n });\n\n it('should stream tokenize words correctly', async () => {\n const pattern = [1, 2, 4];\n let text = WORDS_TEXT;\n const chunks = [];\n const patternIter = Array(Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)))\n .fill(pattern)\n .flat()\n [Symbol.iterator]();\n\n for (const size of patternIter) {\n if (!text) break;\n chunks.push(text.slice(undefined, size));\n text = text.slice(size);\n }\n const stream = tokenizer.stream();\n for (const chunk of chunks) {\n stream.pushText(chunk);\n }\n stream.endInput();\n stream.close();\n\n for (const x of WORDS_EXPECTED) {\n await stream.next().then((value) => {\n if (value.value) {\n expect(value.value.token).toStrictEqual(x);\n }\n });\n }\n });\n\n describe('punctuation handling', () => {\n const tokenizerPunct = new WordTokenizer(false);\n\n it('should tokenize words correctly', () => {\n expect(\n tokenizerPunct.tokenize(WORDS_PUNCT_TEXT).every((x, i) => WORDS_PUNCT_EXPECTED[i] === x),\n ).toBeTruthy();\n });\n\n it('should stream tokenize words correctly', async () => {\n const pattern = [1, 2, 4];\n let text = WORDS_PUNCT_TEXT;\n const chunks = [];\n const patternIter = Array(\n Math.ceil(text.length / pattern.reduce((sum, num) => sum + num, 0)),\n )\n .fill(pattern)\n .flat()\n [Symbol.iterator]();\n\n for (const size of patternIter) {\n if (!text) break;\n chunks.push(text.slice(undefined, size));\n text = text.slice(size);\n }\n const stream = tokenizerPunct.stream();\n for (const chunk of chunks) {\n stream.pushText(chunk);\n }\n stream.endInput();\n stream.close();\n\n for (const x of WORDS_PUNCT_EXPECTED) {\n await stream.next().then((value) => {\n if (value.value) {\n expect(value.value.token).toStrictEqual(x);\n }\n });\n }\n });\n });\n });\n describe('hyphenateWord', () => {\n it('should hyphenate correctly', () => {\n HYPHENATOR_TEXT.forEach((x, i) => {\n expect(hyphenateWord(x)).toStrictEqual(HYPHENATOR_EXPECTED[i]);\n });\n });\n });\n describe('splitParagraphs', () => {\n it('should tokenize paragraphs correctly', () => {\n PARAGRAPH_TEST_CASES.forEach(([a, b]) => {\n expect(splitParagraphs(a)).toStrictEqual(b);\n });\n });\n });\n});\n"],"mappings":";AAGA,oBAAqC;AACrC,mBAAgE;AAChE,uBAAgC;AAEhC,MAAM,OACJ;AAUF,MAAM,kBAAkB;AAAA,EACtB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,MAAM,aAAa;AACnB,MAAM,iBAAiB;AAAA,EACrB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,MAAM,mBACJ;AACF,MAAM,uBAAuB;AAAA,EAC3B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,MAAM,kBAAkB,CAAC,WAAW,YAAY,iBAAiB,UAAU,WAAW,SAAS;AAC/F,MAAM,sBAAsB;AAAA,EAC1B,CAAC,OAAO,MAAM;AAAA,EACd,CAAC,MAAM,QAAQ,IAAI;AAAA,EACnB,CAAC,OAAO,MAAM,MAAM,MAAM,MAAM;AAAA,EAChC,CAAC,OAAO,KAAK;AAAA,EACb,CAAC,OAAO,MAAM;AAAA,EACd,CAAC,OAAO,MAAM;AAChB;AAEA,MAAM,uBAA+D;AAAA,EACnE,CAAC,qBAAqB,CAAC,CAAC,qBAAqB,GAAG,EAAE,CAAC,CAAC;AAAA,EACpD;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,gBAAgB,GAAG,EAAE;AAAA,MACtB,CAAC,gBAAgB,IAAI,EAAE;AAAA,IACzB;AAAA,EACF;AAAA,EACA;AAAA,IACE;AAAA,IACA;AAAA,MACE,CAAC,WAAW,GAAG,CAAC;AAAA,MAChB,CAAC,WAAW,GAAG,EAAE;AAAA,MACjB,CAAC,WAAW,IAAI,EAAE;AAAA,IACpB;AAAA,EACF;AAAA,EACA,CAAC,wCAAwC,CAAC,CAAC,oCAAoC,GAAG,EAAE,CAAC,CAAC;AAAA,EACtF,CAAC,yCAAyC,CAAC,CAAC,qCAAqC,GAAG,EAAE,CAAC,CAAC;AAAA,EACxF;AAAA,IACE;AAAA,IACA,CAAC,CAAC,+CAA+C,GAAG,EAAE,CAAC;AAAA,EACzD;AAAA,EACA;AAAA,IACE;AAAA;AAAA,IACA;AAAA,MACE,CAAC,WAAW,GAAG,CAAC;AAAA,MAChB,CAAC,WAAW,IAAI,EAAE;AAAA,IACpB;AAAA,EACF;AAAA,EACA;AAAA,IACE;AAAA;AAAA,IACA;AAAA,MACE,CAAC,WAAW,GAAG,CAAC;AAAA,MAChB,CAAC,WAAW,IAAI,EAAE;AAAA,IACpB;AAAA,EACF;AAAA,EACA;AAAA,IACE;AAAA;AAAA,IACA,CAAC;AAAA,EACH;AAAA,EACA;AAAA,IACE;AAAA;AAAA,IACA,CAAC;AAAA,EACH;AAAA,EACA;AAAA,IACE;AAAA;AAAA,IACA,CAAC,CAAC,0BAA0B,GAAG,EAAE,CAAC;AAAA,EACpC;AACF;AAAA,IAEA,wBAAS,aAAa,MAAM;AAC1B,8BAAS,qBAAqB,MAAM;AAClC,UAAM,YAAY,IAAI,+BAAkB;AAExC,0BAAG,uCAAuC,MAAM;AAC9C,gCAAO,UAAU,SAAS,IAAI,EAAE,MAAM,CAAC,GAAG,MAAM,gBAAgB,CAAC,MAAM,CAAC,CAAC,EAAE,WAAW;AAAA,IACxF,CAAC;AAED,0BAAG,8CAA8C,YAAY;AAC3D,YAAM,UAAU,CAAC,GAAG,GAAG,CAAC;AACxB,UAAI,OAAO;AACX,YAAM,SAAS,CAAC;AAChB,YAAM,cAAc,MAAM,KAAK,KAAK,KAAK,SAAS,QAAQ,OAAO,CAAC,KAAK,QAAQ,MAAM,KAAK,CAAC,CAAC,CAAC,EAC1F,KAAK,OAAO,EACZ,KAAK,EACL,OAAO,QAAQ,EAAE;AAEpB,iBAAW,QAAQ,aAAa;AAC9B,YAAI,CAAC,KAAM;AACX,eAAO,KAAK,KAAK,MAAM,QAAW,IAAI,CAAC;AACvC,eAAO,KAAK,MAAM,IAAI;AAAA,MACxB;AACA,YAAM,SAAS,UAAU,OAAO;AAChC,iBAAW,SAAS,QAAQ;AAC1B,eAAO,SAAS,KAAK;AAAA,MACvB;AACA,aAAO,SAAS;AAChB,aAAO,MAAM;AAEb,iBAAW,KAAK,iBAAiB;AAC/B,cAAM,OAAO,KAAK,EAAE,KAAK,CAAC,UAAU;AAClC,cAAI,MAAM,OAAO;AACf,sCAAO,MAAM,MAAM,KAAK,EAAE,cAAc,CAAC;AAAA,UAC3C;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAAA,EACH,CAAC;AACD,8BAAS,iBAAiB,MAAM;AAC9B,UAAM,YAAY,IAAI,2BAAc;AAEpC,0BAAG,mCAAmC,MAAM;AAC1C,gCAAO,UAAU,SAAS,UAAU,EAAE,MAAM,CAAC,GAAG,MAAM,eAAe,CAAC,MAAM,CAAC,CAAC,EAAE,WAAW;AAAA,IAC7F,CAAC;AAED,0BAAG,0CAA0C,YAAY;AACvD,YAAM,UAAU,CAAC,GAAG,GAAG,CAAC;AACxB,UAAI,OAAO;AACX,YAAM,SAAS,CAAC;AAChB,YAAM,cAAc,MAAM,KAAK,KAAK,KAAK,SAAS,QAAQ,OAAO,CAAC,KAAK,QAAQ,MAAM,KAAK,CAAC,CAAC,CAAC,EAC1F,KAAK,OAAO,EACZ,KAAK,EACL,OAAO,QAAQ,EAAE;AAEpB,iBAAW,QAAQ,aAAa;AAC9B,YAAI,CAAC,KAAM;AACX,eAAO,KAAK,KAAK,MAAM,QAAW,IAAI,CAAC;AACvC,eAAO,KAAK,MAAM,IAAI;AAAA,MACxB;AACA,YAAM,SAAS,UAAU,OAAO;AAChC,iBAAW,SAAS,QAAQ;AAC1B,eAAO,SAAS,KAAK;AAAA,MACvB;AACA,aAAO,SAAS;AAChB,aAAO,MAAM;AAEb,iBAAW,KAAK,gBAAgB;AAC9B,cAAM,OAAO,KAAK,EAAE,KAAK,CAAC,UAAU;AAClC,cAAI,MAAM,OAAO;AACf,sCAAO,MAAM,MAAM,KAAK,EAAE,cAAc,CAAC;AAAA,UAC3C;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF,CAAC;AAED,gCAAS,wBAAwB,MAAM;AACrC,YAAM,iBAAiB,IAAI,2BAAc,KAAK;AAE9C,4BAAG,mCAAmC,MAAM;AAC1C;AAAA,UACE,eAAe,SAAS,gBAAgB,EAAE,MAAM,CAAC,GAAG,MAAM,qBAAqB,CAAC,MAAM,CAAC;AAAA,QACzF,EAAE,WAAW;AAAA,MACf,CAAC;AAED,4BAAG,0CAA0C,YAAY;AACvD,cAAM,UAAU,CAAC,GAAG,GAAG,CAAC;AACxB,YAAI,OAAO;AACX,cAAM,SAAS,CAAC;AAChB,cAAM,cAAc;AAAA,UAClB,KAAK,KAAK,KAAK,SAAS,QAAQ,OAAO,CAAC,KAAK,QAAQ,MAAM,KAAK,CAAC,CAAC;AAAA,QACpE,EACG,KAAK,OAAO,EACZ,KAAK,EACL,OAAO,QAAQ,EAAE;AAEpB,mBAAW,QAAQ,aAAa;AAC9B,cAAI,CAAC,KAAM;AACX,iBAAO,KAAK,KAAK,MAAM,QAAW,IAAI,CAAC;AACvC,iBAAO,KAAK,MAAM,IAAI;AAAA,QACxB;AACA,cAAM,SAAS,eAAe,OAAO;AACrC,mBAAW,SAAS,QAAQ;AAC1B,iBAAO,SAAS,KAAK;AAAA,QACvB;AACA,eAAO,SAAS;AAChB,eAAO,MAAM;AAEb,mBAAW,KAAK,sBAAsB;AACpC,gBAAM,OAAO,KAAK,EAAE,KAAK,CAAC,UAAU;AAClC,gBAAI,MAAM,OAAO;AACf,wCAAO,MAAM,MAAM,KAAK,EAAE,cAAc,CAAC;AAAA,YAC3C;AAAA,UACF,CAAC;AAAA,QACH;AAAA,MACF,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AACD,8BAAS,iBAAiB,MAAM;AAC9B,0BAAG,8BAA8B,MAAM;AACrC,sBAAgB,QAAQ,CAAC,GAAG,MAAM;AAChC,sCAAO,4BAAc,CAAC,CAAC,EAAE,cAAc,oBAAoB,CAAC,CAAC;AAAA,MAC/D,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AACD,8BAAS,mBAAmB,MAAM;AAChC,0BAAG,wCAAwC,MAAM;AAC/C,2BAAqB,QAAQ,CAAC,CAAC,GAAG,CAAC,MAAM;AACvC,sCAAO,kCAAgB,CAAC,CAAC,EAAE,cAAc,CAAC;AAAA,MAC5C,CAAC;AAAA,IACH,CAAC;AAAA,EACH,CAAC;AACH,CAAC;","names":[]}

package/dist/tokenize/tokenizer.test.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"tokenizer.test.d.ts","sourceRoot":"","sources":["../../src/tokenize/tokenizer.test.ts"],"names":[],"mappings":""}