livekit-plugins-nltk 0.5.dev0__tar.gz → 0.6.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (16)
  1. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/PKG-INFO +2 -2
  2. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/__init__.py +1 -2
  3. livekit_plugins_nltk-0.6.0/livekit/plugins/nltk/sentence_tokenizer.py +74 -0
  4. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/version.py +1 -1
  5. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/PKG-INFO +2 -2
  6. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/requires.txt +1 -1
  7. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/setup.py +1 -1
  8. livekit_plugins_nltk-0.5.dev0/livekit/plugins/nltk/sentence_tokenizer.py +0 -143
  9. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/README.md +0 -0
  10. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/log.py +0 -0
  11. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/py.typed +0 -0
  12. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/SOURCES.txt +0 -0
  13. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/dependency_links.txt +0 -0
  14. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/top_level.txt +0 -0
  15. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/pyproject.toml +0 -0
  16. {livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/setup.cfg +0 -0
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: livekit-plugins-nltk
-Version: 0.5.dev0
+Version: 0.6.0
 Summary: Agent Framework plugin for NLTK-based text processing.
 Home-page: https://github.com/livekit/agents
 License: Apache-2.0
@@ -20,7 +20,7 @@ Requires-Python: >=3.9.0
 Description-Content-Type: text/markdown
 Requires-Dist: livekit~=0.11
 Requires-Dist: nltk<4,>=3
-Requires-Dist: livekit-agents~=0.6.dev0
+Requires-Dist: livekit-agents~=0.7.0
 
 # LiveKit Plugins NLTK
 
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/__init__.py
@@ -13,12 +13,11 @@
 # limitations under the License.
 
 
-from .sentence_tokenizer import SentenceStream, SentenceTokenizer
+from .sentence_tokenizer import SentenceTokenizer
 from .version import __version__
 
 __all__ = [
     "SentenceTokenizer",
-    "SentenceStream",
     "__version__",
 ]
 
livekit_plugins_nltk-0.6.0/livekit/plugins/nltk/sentence_tokenizer.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import dataclasses
+import functools
+from dataclasses import dataclass
+
+from livekit import agents
+
+import nltk  # type: ignore
+
+# nltk is using the punkt tokenizer
+# https://www.nltk.org/_modules/nltk/tokenize/punkt.html
+# this code is using a whitespace to concatenate small sentences together
+# (languages such as Chinese and Japanese are not yet supported)
+
+
+@dataclass
+class _TokenizerOptions:
+    language: str
+    min_sentence_len: int
+    stream_context_len: int
+
+
+class SentenceTokenizer(agents.tokenize.SentenceTokenizer):
+    def __init__(
+        self,
+        *,
+        language: str = "english",
+        min_sentence_len: int = 20,
+        stream_context_len: int = 10,
+    ) -> None:
+        super().__init__()
+        self._config = _TokenizerOptions(
+            language=language,
+            min_sentence_len=min_sentence_len,
+            stream_context_len=stream_context_len,
+        )
+
+    def _sanitize_options(self, language: str | None = None) -> _TokenizerOptions:
+        config = dataclasses.replace(self._config)
+        if language:
+            config.language = language
+        return config
+
+    def tokenize(self, *, text: str, language: str | None = None) -> list[str]:
+        config = self._sanitize_options(language=language)
+        sentences = nltk.tokenize.sent_tokenize(text, config.language)
+        new_sentences = []
+        buff = ""
+        for sentence in sentences:
+            buff += sentence + " "
+            if len(buff) - 1 >= config.min_sentence_len:
+                new_sentences.append(buff.rstrip())
+                buff = ""
+
+        if buff:
+            new_sentences.append(buff.rstrip())
+
+        return new_sentences
+
+    def stream(
+        self,
+        *,
+        language: str | None = None,
+    ) -> agents.tokenize.SentenceStream:
+        config = self._sanitize_options(language=language)
+        return agents.tokenize.BufferedTokenStream(
+            tokenizer=functools.partial(
+                nltk.tokenize.sent_tokenize,
+                language=config.language,
+            ),
+            min_token_len=self._config.min_sentence_len,
+            ctx_len=self._config.stream_context_len,
+        )
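
For context, a minimal usage sketch of the reworked tokenizer (the sample text, the explicit punkt download, and the print call are illustrative assumptions, not part of this diff):

import nltk
from livekit.plugins.nltk import SentenceTokenizer

nltk.download("punkt")  # assumption: the punkt model may not already be installed locally

tokenizer = SentenceTokenizer(language="english", min_sentence_len=20)
# tokenize() now returns plain strings; short sentences are merged until min_sentence_len is reached
sentences = tokenizer.tokenize(text="Hi there. Short one. A somewhat longer sentence follows here.")
print(sentences)

Streaming now goes through agents.tokenize.BufferedTokenStream instead of a plugin-specific stream class, which is why the old SentenceStream implementation is deleted at the end of this diff and its export disappears from __init__.py.
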
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit/plugins/nltk/version.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "0.5.dev0"
+__version__ = "0.6.0"
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: livekit-plugins-nltk
-Version: 0.5.dev0
+Version: 0.6.0
 Summary: Agent Framework plugin for NLTK-based text processing.
 Home-page: https://github.com/livekit/agents
 License: Apache-2.0
@@ -20,7 +20,7 @@ Requires-Python: >=3.9.0
 Description-Content-Type: text/markdown
 Requires-Dist: livekit~=0.11
 Requires-Dist: nltk<4,>=3
-Requires-Dist: livekit-agents~=0.6.dev0
+Requires-Dist: livekit-agents~=0.7.0
 
 # LiveKit Plugins NLTK
 
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/livekit_plugins_nltk.egg-info/requires.txt
@@ -1,3 +1,3 @@
 livekit~=0.11
 nltk<4,>=3
-livekit-agents~=0.6.dev0
+livekit-agents~=0.7.0
{livekit_plugins_nltk-0.5.dev0 → livekit_plugins_nltk-0.6.0}/setup.py
@@ -49,7 +49,7 @@ setuptools.setup(
     install_requires=[
         "livekit~=0.11",
         "nltk >= 3, < 4",
-        "livekit-agents~=0.6.dev0",
+        "livekit-agents~=0.7.0",
     ],
     package_data={
         "livekit.plugins.nltk": ["py.typed"],
livekit_plugins_nltk-0.5.dev0/livekit/plugins/nltk/sentence_tokenizer.py
@@ -1,143 +0,0 @@
-from __future__ import annotations
-
-import asyncio
-import dataclasses
-from dataclasses import dataclass
-from typing import List, Optional
-
-from livekit import agents
-
-import nltk  # type: ignore
-
-from .log import logger
-
-# nltk is using the punkt tokenizer
-# https://www.nltk.org/_modules/nltk/tokenize/punkt.html
-# this code is using a whitespace to concatenate small sentences together
-# (languages such as Chinese and Japanese are not yet supported)
-
-
-@dataclass
-class TokenizerOptions:
-    language: str
-    min_sentence_len: int
-    stream_context_len: int
-
-
-class SentenceTokenizer(agents.tokenize.SentenceTokenizer):
-    def __init__(
-        self,
-        language: str = "english",
-        min_sentence_len: int = 20,
-        stream_context_len: int = 10,
-    ) -> None:
-        super().__init__()
-        self._config = TokenizerOptions(
-            language=language,
-            min_sentence_len=min_sentence_len,
-            stream_context_len=stream_context_len,
-        )
-
-    def _sanitize_options(self, language: Optional[str] = None) -> TokenizerOptions:
-        config = dataclasses.replace(self._config)
-        if language:
-            config.language = language
-        return config
-
-    def tokenize(
-        self, *, text: str, language: Optional[str] = None
-    ) -> List[agents.tokenize.SegmentedSentence]:
-        config = self._sanitize_options(language=language)
-        sentences = nltk.tokenize.sent_tokenize(text, config.language)
-        new_sentences = []
-        buff = ""
-        for sentence in sentences:
-            buff += sentence + " "
-            if len(buff) - 1 >= config.min_sentence_len:
-                new_sentences.append(buff.rstrip())
-                buff = ""
-
-        if buff:
-            new_sentences.append(buff.rstrip())
-
-        return [agents.tokenize.SegmentedSentence(text=text) for text in new_sentences]
-
-    def stream(
-        self,
-        *,
-        language: Optional[str] = None,
-    ) -> agents.tokenize.SentenceStream:
-        config = self._sanitize_options(language=language)
-        return SentenceStream(
-            language=config.language,
-            min_sentence_len=config.min_sentence_len,
-            context_len=config.stream_context_len,
-        )
-
-
-class SentenceStream(agents.tokenize.SentenceStream):
-    def __init__(
-        self, *, language: str, min_sentence_len: int, context_len: int
-    ) -> None:
-        self._language = language
-        self._context_len = context_len
-        self._min_sentence_len = min_sentence_len
-        self._event_queue = asyncio.Queue[agents.tokenize.SegmentedSentence | None]()
-        self._closed = False
-
-        self._incomplete_sentences: List[str] = []  # <= min_sentence_len
-        self._buffer = ""
-
-    def push_text(self, text: str) -> None:
-        if self._closed:
-            logger.error("Cannot push text to closed stream")
-            return
-
-        for char in text:
-            self._buffer += char
-
-            if len(self._buffer) < self._context_len:
-                continue
-
-            sentences = nltk.tokenize.sent_tokenize(self._buffer, self._language)
-            if len(sentences) < 2:
-                continue
-
-            new_sentence = sentences[0]
-            self._incomplete_sentences.append(new_sentence)
-            s = " ".join(self._incomplete_sentences)
-
-            if len(s) >= self._min_sentence_len:
-                self._event_queue.put_nowait(agents.tokenize.SegmentedSentence(text=s))
-                self._incomplete_sentences = []
-
-            self._buffer = self._buffer[len(new_sentence) :].lstrip()
-
-    async def flush(self) -> None:
-        # try to segment the remaining data inside self._text_buffer
-        buff = " ".join(self._incomplete_sentences)
-        sentences = nltk.tokenize.sent_tokenize(self._buffer, self._language)
-        for sentence in sentences:
-            buff += " " + sentence
-            if len(buff) >= self._min_sentence_len:
-                await self._event_queue.put(
-                    agents.tokenize.SegmentedSentence(text=buff)
-                )
-                buff = ""
-
-        if buff:
-            await self._event_queue.put(agents.tokenize.SegmentedSentence(text=buff))
-
-    async def aclose(self) -> None:
-        self._closed = True
-        self._event_queue.put_nowait(None)
-
-    async def __anext__(self) -> agents.tokenize.SegmentedSentence:
-        event = await self._event_queue.get()
-        if event is None:
-            raise StopAsyncIteration
-
-        return event
-
-    def __aiter__(self) -> "SentenceStream":
-        return self