livekit-plugins-nvidia 1.2.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- livekit_plugins_nvidia-1.2.9/.gitignore +179 -0
- livekit_plugins_nvidia-1.2.9/PKG-INFO +42 -0
- livekit_plugins_nvidia-1.2.9/README.md +18 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/__init__.py +42 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/auth.py +26 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/log.py +3 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/stt.py +284 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/tts.py +199 -0
- livekit_plugins_nvidia-1.2.9/livekit/plugins/nvidia/version.py +15 -0
- livekit_plugins_nvidia-1.2.9/pyproject.toml +42 -0
|
@@ -0,0 +1,179 @@
|
|
|
1
|
+
**/.vscode
|
|
2
|
+
**/.DS_Store
|
|
3
|
+
|
|
4
|
+
# Byte-compiled / optimized / DLL files
|
|
5
|
+
__pycache__/
|
|
6
|
+
*.py[cod]
|
|
7
|
+
*$py.class
|
|
8
|
+
|
|
9
|
+
# C extensions
|
|
10
|
+
*.so
|
|
11
|
+
|
|
12
|
+
# Distribution / packaging
|
|
13
|
+
.Python
|
|
14
|
+
build/
|
|
15
|
+
develop-eggs/
|
|
16
|
+
dist/
|
|
17
|
+
downloads/
|
|
18
|
+
eggs/
|
|
19
|
+
.eggs/
|
|
20
|
+
lib/
|
|
21
|
+
lib64/
|
|
22
|
+
parts/
|
|
23
|
+
sdist/
|
|
24
|
+
var/
|
|
25
|
+
wheels/
|
|
26
|
+
share/python-wheels/
|
|
27
|
+
*.egg-info/
|
|
28
|
+
.installed.cfg
|
|
29
|
+
*.egg
|
|
30
|
+
MANIFEST
|
|
31
|
+
|
|
32
|
+
# PyInstaller
|
|
33
|
+
# Usually these files are written by a python script from a template
|
|
34
|
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
|
35
|
+
*.manifest
|
|
36
|
+
*.spec
|
|
37
|
+
|
|
38
|
+
# Installer logs
|
|
39
|
+
pip-log.txt
|
|
40
|
+
pip-delete-this-directory.txt
|
|
41
|
+
|
|
42
|
+
# Unit test / coverage reports
|
|
43
|
+
htmlcov/
|
|
44
|
+
.tox/
|
|
45
|
+
.nox/
|
|
46
|
+
.coverage
|
|
47
|
+
.coverage.*
|
|
48
|
+
.cache
|
|
49
|
+
nosetests.xml
|
|
50
|
+
coverage.xml
|
|
51
|
+
*.cover
|
|
52
|
+
*.py,cover
|
|
53
|
+
.hypothesis/
|
|
54
|
+
.pytest_cache/
|
|
55
|
+
cover/
|
|
56
|
+
|
|
57
|
+
# Translations
|
|
58
|
+
*.mo
|
|
59
|
+
*.pot
|
|
60
|
+
|
|
61
|
+
# Django stuff:
|
|
62
|
+
*.log
|
|
63
|
+
local_settings.py
|
|
64
|
+
db.sqlite3
|
|
65
|
+
db.sqlite3-journal
|
|
66
|
+
|
|
67
|
+
# Flask stuff:
|
|
68
|
+
instance/
|
|
69
|
+
.webassets-cache
|
|
70
|
+
|
|
71
|
+
# Scrapy stuff:
|
|
72
|
+
.scrapy
|
|
73
|
+
|
|
74
|
+
# Sphinx documentation
|
|
75
|
+
docs/_build/
|
|
76
|
+
|
|
77
|
+
# PyBuilder
|
|
78
|
+
.pybuilder/
|
|
79
|
+
target/
|
|
80
|
+
|
|
81
|
+
# Jupyter Notebook
|
|
82
|
+
.ipynb_checkpoints
|
|
83
|
+
|
|
84
|
+
# IPython
|
|
85
|
+
profile_default/
|
|
86
|
+
ipython_config.py
|
|
87
|
+
|
|
88
|
+
# pyenv
|
|
89
|
+
# For a library or package, you might want to ignore these files since the code is
|
|
90
|
+
# intended to run in multiple environments; otherwise, check them in:
|
|
91
|
+
# .python-version
|
|
92
|
+
|
|
93
|
+
# pipenv
|
|
94
|
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
|
95
|
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
|
96
|
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
|
97
|
+
# install all needed dependencies.
|
|
98
|
+
#Pipfile.lock
|
|
99
|
+
|
|
100
|
+
# poetry
|
|
101
|
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
|
102
|
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
|
103
|
+
# commonly ignored for libraries.
|
|
104
|
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
|
105
|
+
#poetry.lock
|
|
106
|
+
|
|
107
|
+
# pdm
|
|
108
|
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
|
109
|
+
#pdm.lock
|
|
110
|
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
|
111
|
+
# in version control.
|
|
112
|
+
# https://pdm.fming.dev/#use-with-ide
|
|
113
|
+
.pdm.toml
|
|
114
|
+
|
|
115
|
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
|
116
|
+
__pypackages__/
|
|
117
|
+
|
|
118
|
+
# Celery stuff
|
|
119
|
+
celerybeat-schedule
|
|
120
|
+
celerybeat.pid
|
|
121
|
+
|
|
122
|
+
# SageMath parsed files
|
|
123
|
+
*.sage.py
|
|
124
|
+
|
|
125
|
+
# Environments
|
|
126
|
+
.env
|
|
127
|
+
.venv
|
|
128
|
+
env/
|
|
129
|
+
venv/
|
|
130
|
+
ENV/
|
|
131
|
+
env.bak/
|
|
132
|
+
venv.bak/
|
|
133
|
+
|
|
134
|
+
# Spyder project settings
|
|
135
|
+
.spyderproject
|
|
136
|
+
.spyproject
|
|
137
|
+
|
|
138
|
+
# Rope project settings
|
|
139
|
+
.ropeproject
|
|
140
|
+
|
|
141
|
+
# mkdocs documentation
|
|
142
|
+
/site
|
|
143
|
+
|
|
144
|
+
# mypy
|
|
145
|
+
.mypy_cache/
|
|
146
|
+
.dmypy.json
|
|
147
|
+
dmypy.json
|
|
148
|
+
|
|
149
|
+
# trunk
|
|
150
|
+
.trunk/
|
|
151
|
+
|
|
152
|
+
# Pyre type checker
|
|
153
|
+
.pyre/
|
|
154
|
+
|
|
155
|
+
# pytype static type analyzer
|
|
156
|
+
.pytype/
|
|
157
|
+
|
|
158
|
+
# Cython debug symbols
|
|
159
|
+
cython_debug/
|
|
160
|
+
|
|
161
|
+
# PyCharm
|
|
162
|
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
|
163
|
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
|
164
|
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
|
165
|
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
|
166
|
+
.idea/
|
|
167
|
+
|
|
168
|
+
node_modules
|
|
169
|
+
|
|
170
|
+
credentials.json
|
|
171
|
+
pyrightconfig.json
|
|
172
|
+
docs/
|
|
173
|
+
|
|
174
|
+
# Database files
|
|
175
|
+
*.db
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
# Examples for development
|
|
179
|
+
examples/dev/*
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: livekit-plugins-nvidia
|
|
3
|
+
Version: 1.2.9
|
|
4
|
+
Summary: LiveKit Agents Plugin for services from nvidia
|
|
5
|
+
Project-URL: Documentation, https://docs.livekit.io
|
|
6
|
+
Project-URL: Website, https://livekit.io/
|
|
7
|
+
Project-URL: Source, https://github.com/livekit/agents
|
|
8
|
+
Author-email: LiveKit <hello@livekit.io>
|
|
9
|
+
License-Expression: Apache-2.0
|
|
10
|
+
Keywords: audio,livekit,nvidia,realtime,video,webrtc
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: Apache Software License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3 :: Only
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
17
|
+
Classifier: Topic :: Multimedia :: Sound/Audio
|
|
18
|
+
Classifier: Topic :: Multimedia :: Video
|
|
19
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
20
|
+
Requires-Python: >=3.9.0
|
|
21
|
+
Requires-Dist: livekit-agents>=1.2.18
|
|
22
|
+
Requires-Dist: nvidia-riva-client
|
|
23
|
+
Description-Content-Type: text/markdown
|
|
24
|
+
|
|
25
|
+
# NVIDIA plugin for LiveKit Agents
|
|
26
|
+
|
|
27
|
+
Support for [NVIDIA Riva](https://developer.nvidia.com/riva)'s speech AI services in LiveKit Agents.
|
|
28
|
+
|
|
29
|
+
More information is available in the [NVIDIA Riva documentation](https://developer.nvidia.com/riva).
|
|
30
|
+
|
|
31
|
+
## Installation
|
|
32
|
+
|
|
33
|
+
```bash
|
|
34
|
+
pip install livekit-plugins-nvidia
|
|
35
|
+
```
|
|
36
|
+
|
|
37
|
+
## Pre-requisites
|
|
38
|
+
|
|
39
|
+
You can either:
|
|
40
|
+
|
|
41
|
+
1. Use an API key from NVIDIA. It can be set as an environment variable: `NVIDIA_API_KEY`
|
|
42
|
+
2. Use you self hosted [Nim](https://developer.nvidia.com/nim) server.
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
# NVIDIA plugin for LiveKit Agents
|
|
2
|
+
|
|
3
|
+
Support for [NVIDIA Riva](https://developer.nvidia.com/riva)'s speech AI services in LiveKit Agents.
|
|
4
|
+
|
|
5
|
+
More information is available in the [NVIDIA Riva documentation](https://developer.nvidia.com/riva).
|
|
6
|
+
|
|
7
|
+
## Installation
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
pip install livekit-plugins-nvidia
|
|
11
|
+
```
|
|
12
|
+
|
|
13
|
+
## Pre-requisites
|
|
14
|
+
|
|
15
|
+
You can either:
|
|
16
|
+
|
|
17
|
+
1. Use an API key from NVIDIA. It can be set as an environment variable: `NVIDIA_API_KEY`
|
|
18
|
+
2. Use your self-hosted [NIM](https://developer.nvidia.com/nim) server.
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
# Copyright 2025 LiveKit, Inc.
|
|
2
|
+
#
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
from .stt import STT, SpeechStream
|
|
17
|
+
from .tts import TTS, SynthesizeStream
|
|
18
|
+
from .version import __version__
|
|
19
|
+
|
|
20
|
+
__all__ = ["STT", "SpeechStream", "TTS", "SynthesizeStream", "__version__"]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
from livekit.agents import Plugin
|
|
24
|
+
|
|
25
|
+
from .log import logger
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class NVIDIAPlugin(Plugin):
    """Plugin descriptor that registers this package with the agents framework."""

    def __init__(self) -> None:
        super().__init__(__name__, __version__, __package__, logger)


Plugin.register_plugin(NVIDIAPlugin())
|
|
34
|
+
|
|
35
|
+
# Cleanup docs of unexported modules: hide every module-level name that is
# not part of the public API (__all__) from pdoc-generated documentation.
_module = dir()
NOT_IN_ALL = [m for m in _module if m not in __all__]

__pdoc__ = {name: False for name in NOT_IN_ALL}
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
import riva.client
|
|
4
|
+
|
|
5
|
+
from livekit.agents.utils import is_given
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def create_riva_auth(
    *,
    api_key: Optional[str],
    function_id: str,
    server: str,
    use_ssl: bool = True,
) -> riva.client.Auth:
    """Build a ``riva.client.Auth`` for the given Riva/NVCF server.

    A Bearer ``authorization`` metadata entry is added only when an API key
    is supplied; the NVCF ``function-id`` entry is always forwarded.
    """
    metadata = []
    if is_given(api_key) and api_key:
        metadata.append(["authorization", f"Bearer {api_key}"])
    metadata.append(["function-id", function_id])

    return riva.client.Auth(uri=server, use_ssl=use_ssl, metadata_args=metadata)
|
|
@@ -0,0 +1,284 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import queue
|
|
5
|
+
import threading
|
|
6
|
+
from collections.abc import Generator
|
|
7
|
+
from dataclasses import dataclass
|
|
8
|
+
|
|
9
|
+
import riva.client
|
|
10
|
+
|
|
11
|
+
from livekit import rtc
|
|
12
|
+
from livekit.agents import (
|
|
13
|
+
DEFAULT_API_CONNECT_OPTIONS,
|
|
14
|
+
APIConnectOptions,
|
|
15
|
+
stt,
|
|
16
|
+
)
|
|
17
|
+
from livekit.agents.types import NOT_GIVEN, NotGivenOr
|
|
18
|
+
from livekit.agents.utils import AudioBuffer, is_given
|
|
19
|
+
|
|
20
|
+
from . import auth
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
@dataclass
class STTOptions:
    """Resolved configuration for an NVIDIA Riva STT session."""

    model: str  # Riva ASR model name
    function_id: str  # NVCF function id used to route the request
    punctuate: bool  # request automatic punctuation in transcripts
    language_code: str  # BCP-47 language code, e.g. "en-US"
    sample_rate: int  # input audio sample rate in Hz
    use_ssl: bool  # connect to the server over TLS
    server: str  # gRPC endpoint ("host:port")
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class STT(stt.STT):
    """Streaming speech-to-text using NVIDIA Riva (hosted NVCF or a local NIM)."""

    def __init__(
        self,
        *,
        model: str = "parakeet-1.1b-en-US-asr-streaming-silero-vad-sortformer",
        function_id: str = "1598d209-5e27-4d3c-8079-4751568b1081",
        punctuate: bool = True,
        language_code: str = "en-US",
        sample_rate: int = 16000,
        server: str = "grpc.nvcf.nvidia.com:443",
        use_ssl: bool = True,
        api_key: NotGivenOr[str] = NOT_GIVEN,
    ):
        """Create a new NVIDIA STT instance.

        Args:
            model: Riva ASR model name.
            function_id: NVCF function id routing the request (hosted service).
            punctuate: enable automatic punctuation in transcripts.
            language_code: BCP-47 language code, e.g. ``"en-US"``.
            sample_rate: input audio sample rate in Hz.
            server: gRPC endpoint of the Riva/NVCF service.
            use_ssl: connect over TLS (required for the hosted NVCF endpoint).
            api_key: NVIDIA API key; falls back to the ``NVIDIA_API_KEY``
                environment variable when not given.

        Raises:
            ValueError: if SSL is enabled but no API key could be resolved.
        """
        super().__init__(
            capabilities=stt.STTCapabilities(
                streaming=True,
                interim_results=True,
            ),
        )

        if is_given(api_key):
            self.nvidia_api_key = api_key
        else:
            self.nvidia_api_key = os.getenv("NVIDIA_API_KEY")
            if use_ssl and not self.nvidia_api_key:
                raise ValueError(
                    "NVIDIA_API_KEY is not set while using SSL. Either pass api_key parameter, set NVIDIA_API_KEY environment variable "
                    + "or disable SSL and use a locally hosted Riva NIM service."
                )

        # Lazy %-style arguments avoid string formatting when the log level
        # is disabled (and never leak into the message on formatting errors).
        logger.info("Initializing NVIDIA STT with model: %s, server: %s", model, server)
        logger.debug(
            "Function ID: %s, Language: %s, Sample rate: %s",
            function_id,
            language_code,
            sample_rate,
        )

        self._opts = STTOptions(
            model=model,
            function_id=function_id,
            punctuate=punctuate,
            language_code=language_code,
            sample_rate=sample_rate,
            server=server,
            use_ssl=use_ssl,
        )

    def _recognize_impl(
        self,
        buffer: AudioBuffer,
        *,
        language: NotGivenOr[str] = NOT_GIVEN,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> stt.SpeechEvent:
        # Non-streaming (batch) recognition is intentionally unsupported;
        # use stream() instead.
        raise NotImplementedError("Not implemented")

    def stream(
        self,
        *,
        language: NotGivenOr[str] = NOT_GIVEN,
        conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS,
    ) -> stt.RecognizeStream:
        """Open a new streaming recognition session.

        Args:
            language: overrides the language configured on this STT instance.
            conn_options: connection/retry options for the session.
        """
        effective_language = language if is_given(language) else self._opts.language_code
        return SpeechStream(stt=self, conn_options=conn_options, language=effective_language)

    def log_asr_models(self, asr_service: riva.client.ASRService) -> dict:
        """Return the server's streaming ("online") ASR models grouped and
        sorted by language code."""
        config_response = asr_service.stub.GetRivaSpeechRecognitionConfig(
            riva.client.RivaSpeechRecognitionConfigRequest()
        )

        asr_models: dict = {}
        for model_config in config_response.model_config:
            # Only streaming models are usable by SpeechStream.
            if model_config.parameters.get("type") == "online":
                language_code = model_config.parameters["language_code"]
                model = {"model": [model_config.model_name]}
                asr_models.setdefault(language_code, []).append(model)

        return dict(sorted(asr_models.items()))
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class SpeechStream(stt.SpeechStream):
    """Bridges LiveKit's async audio stream to Riva's blocking gRPC ASR API.

    Audio frames are forwarded through a thread-safe queue to a background
    thread that drives the Riva streaming recognizer; recognition results are
    marshalled back onto the event loop via ``call_soon_threadsafe``.
    """

    def __init__(self, *, stt: STT, conn_options: APIConnectOptions, language: str):
        super().__init__(stt=stt, conn_options=conn_options, sample_rate=stt._opts.sample_rate)
        self._stt = stt
        self._language = language

        # Hand-off buffer between the event loop (producer) and the
        # recognition thread (consumer). A None item is the end sentinel.
        self._audio_queue = queue.Queue()
        self._shutdown_event = threading.Event()
        self._recognition_thread = None

        self._speaking = False
        self._request_id = ""

        self._auth = auth.create_riva_auth(
            api_key=self._stt.nvidia_api_key,
            function_id=self._stt._opts.function_id,
            server=stt._opts.server,
            use_ssl=stt._opts.use_ssl,
        )
        self._asr_service = riva.client.ASRService(self._auth)

        self._event_loop = asyncio.get_running_loop()
        # Resolved by the recognition thread when it exits.
        self._done_fut = asyncio.Future()

    async def _run(self) -> None:
        """Main task: start the recognition thread and pump audio into it."""
        config = self._create_streaming_config()

        self._recognition_thread = threading.Thread(
            target=self._recognition_worker,
            args=(config,),
            name="nvidia-asr-recognition",
            daemon=True,
        )
        self._recognition_thread.start()

        try:
            await self._collect_audio()

        finally:
            # Unblock the generator so the worker thread can finish.
            self._audio_queue.put(None)
            await self._done_fut

    def _create_streaming_config(self) -> riva.client.StreamingRecognitionConfig:
        """Build the Riva streaming config from the parent STT options."""
        return riva.client.StreamingRecognitionConfig(
            config=riva.client.RecognitionConfig(
                encoding=riva.client.AudioEncoding.LINEAR_PCM,
                language_code=self._language,
                model=self._stt._opts.model,
                max_alternatives=1,
                enable_automatic_punctuation=self._stt._opts.punctuate,
                sample_rate_hertz=self._stt._opts.sample_rate,
                audio_channel_count=1,
            ),
            interim_results=True,
        )

    async def _collect_audio(self) -> None:
        """Forward incoming audio frames to the recognition thread's queue."""
        async for data in self._input_ch:
            if isinstance(data, rtc.AudioFrame):
                audio_bytes = data.data.tobytes()
                if audio_bytes:
                    self._audio_queue.put(audio_bytes)
            elif isinstance(data, self._FlushSentinel):
                break

    def _recognition_worker(self, config: riva.client.StreamingRecognitionConfig) -> None:
        """Thread target: drive the blocking Riva response stream to completion."""
        try:
            audio_generator = self._audio_chunk_generator()

            response_generator = self._asr_service.streaming_response_generator(
                audio_generator, config
            )

            for response in response_generator:
                self._handle_response(response)

        except Exception:
            logger.exception("Error in NVIDIA recognition thread")
        finally:
            self._event_loop.call_soon_threadsafe(self._done_fut.set_result, None)

    def _audio_chunk_generator(self) -> Generator[bytes, None, None]:
        """
        The nvidia riva SDK requires a blocking generator for realtime STT -
        so we wrap the audio queue in one. A falsy item (the None sentinel)
        terminates the stream.
        """
        while True:
            audio_chunk = self._audio_queue.get()

            if not audio_chunk:
                break

            yield audio_chunk

    def _handle_response(self, response) -> None:
        """Translate one Riva response into LiveKit speech events.

        Runs on the recognition thread; all events are dispatched to the
        event loop thread-safely.
        """
        try:
            if not hasattr(response, "results") or not response.results:
                return

            for result in response.results:
                if not hasattr(result, "alternatives") or not result.alternatives:
                    continue

                alternative = result.alternatives[0]
                transcript = getattr(alternative, "transcript", "")
                is_final = getattr(result, "is_final", False)

                if not transcript.strip():
                    continue

                self._request_id = f"nvidia-{id(response)}"

                if not self._speaking and transcript.strip():
                    self._speaking = True
                    self._event_loop.call_soon_threadsafe(
                        self._event_ch.send_nowait,
                        stt.SpeechEvent(type=stt.SpeechEventType.START_OF_SPEECH),
                    )

                speech_data = self._convert_to_speech_data(alternative)

                if is_final:
                    self._event_loop.call_soon_threadsafe(
                        self._event_ch.send_nowait,
                        stt.SpeechEvent(
                            type=stt.SpeechEventType.FINAL_TRANSCRIPT,
                            request_id=self._request_id,
                            alternatives=[speech_data],
                        ),
                    )

                    if self._speaking:
                        self._event_loop.call_soon_threadsafe(
                            self._event_ch.send_nowait,
                            stt.SpeechEvent(type=stt.SpeechEventType.END_OF_SPEECH),
                        )
                        # Bug fix: reset so the next utterance emits a fresh
                        # START_OF_SPEECH; previously it fired at most once
                        # per stream.
                        self._speaking = False
                else:
                    self._event_loop.call_soon_threadsafe(
                        self._event_ch.send_nowait,
                        stt.SpeechEvent(
                            type=stt.SpeechEventType.INTERIM_TRANSCRIPT,
                            request_id=self._request_id,
                            alternatives=[speech_data],
                        ),
                    )

        except Exception:
            logger.exception("Error handling response")

    def _convert_to_speech_data(self, alternative) -> stt.SpeechData:
        """Convert a Riva alternative into a LiveKit SpeechData.

        Word timings appear to be reported in milliseconds and are converted
        to seconds — TODO confirm against the Riva proto definition.
        """
        transcript = getattr(alternative, "transcript", "")
        confidence = getattr(alternative, "confidence", 0.0)
        words = getattr(alternative, "words", [])

        start_time = 0.0
        end_time = 0.0
        if words:
            start_time = getattr(words[0], "start_time", 0) / 1000.0
            end_time = getattr(words[-1], "end_time", 0) / 1000.0

        return stt.SpeechData(
            language=self._language,
            start_time=start_time,
            end_time=end_time,
            confidence=confidence,
            text=transcript,
        )
|
|
@@ -0,0 +1,199 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
import logging
|
|
3
|
+
import os
|
|
4
|
+
import queue
|
|
5
|
+
import threading
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
|
|
8
|
+
import riva.client
|
|
9
|
+
from riva.client.proto.riva_audio_pb2 import AudioEncoding
|
|
10
|
+
|
|
11
|
+
from livekit.agents import (
|
|
12
|
+
APIConnectOptions,
|
|
13
|
+
tokenize,
|
|
14
|
+
tts,
|
|
15
|
+
utils,
|
|
16
|
+
)
|
|
17
|
+
from livekit.agents.types import DEFAULT_API_CONNECT_OPTIONS
|
|
18
|
+
|
|
19
|
+
from . import auth
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass
class TTSOptions:
    """Resolved configuration for an NVIDIA Riva TTS session."""

    voice: str  # full voice name, e.g. "Magpie-Multilingual.EN-US.Leo"
    function_id: str  # NVCF function id used to route the request
    server: str  # gRPC endpoint ("host:port")
    sample_rate: int  # output audio sample rate in Hz
    use_ssl: bool  # connect to the server over TLS
    language_code: str  # BCP-47 language code, e.g. "en-US"
    word_tokenizer: tokenize.WordTokenizer | tokenize.SentenceTokenizer  # segments pushed text
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class TTS(tts.TTS):
    """Streaming text-to-speech using NVIDIA Riva (hosted NVCF or a local NIM)."""

    def __init__(
        self,
        *,
        server: str = "grpc.nvcf.nvidia.com:443",
        voice: str = "Magpie-Multilingual.EN-US.Leo",
        function_id: str = "877104f7-e885-42b9-8de8-f6e4c6303969",
        language_code: str = "en-US",
        use_ssl: bool = True,
        api_key: str | None = None,
        sample_rate: int = 16000,
    ):
        """Create a new NVIDIA TTS instance.

        Args:
            server: gRPC endpoint of the Riva/NVCF service.
            voice: full voice name to synthesize with.
            function_id: NVCF function id routing the request (hosted service).
            language_code: BCP-47 language code, e.g. ``"en-US"``.
            use_ssl: connect over TLS (required for the hosted NVCF endpoint).
            api_key: NVIDIA API key; falls back to the ``NVIDIA_API_KEY``
                environment variable when not given.
            sample_rate: output audio sample rate in Hz (previously fixed at
                16000; that remains the default).

        Raises:
            ValueError: if SSL is enabled but no API key could be resolved.
        """
        super().__init__(
            capabilities=tts.TTSCapabilities(streaming=True),
            sample_rate=sample_rate,
            num_channels=1,
        )

        if api_key:
            self.nvidia_api_key = api_key
        else:
            self.nvidia_api_key = os.getenv("NVIDIA_API_KEY")
            if use_ssl and not self.nvidia_api_key:
                raise ValueError(
                    "NVIDIA_API_KEY is not set while using SSL. Either pass api_key parameter, set NVIDIA_API_KEY environment variable "
                    + "or disable SSL and use a locally hosted Riva NIM service."
                )

        self._opts = TTSOptions(
            voice=voice,
            function_id=function_id,
            server=server,
            sample_rate=sample_rate,
            use_ssl=use_ssl,
            language_code=language_code,
            word_tokenizer=tokenize.blingfire.SentenceTokenizer(),
        )
        # Created lazily on first use — see _ensure_session().
        self._tts_service = None

    def _ensure_session(self) -> riva.client.SpeechSynthesisService:
        """Return the synthesis service, creating it on first use."""
        if not self._tts_service:
            riva_auth = auth.create_riva_auth(
                api_key=self.nvidia_api_key,
                function_id=self._opts.function_id,
                server=self._opts.server,
                use_ssl=self._opts.use_ssl,
            )
            self._tts_service = riva.client.SpeechSynthesisService(riva_auth)
        return self._tts_service

    def list_voices(self) -> dict:
        """Return the server's available voices grouped and sorted by
        language code, expanding subvoices into full voice names."""
        service = self._ensure_session()
        config_response = service.stub.GetRivaSynthesisConfig(
            riva.client.proto.riva_tts_pb2.RivaSynthesisConfigRequest()
        )
        tts_models: dict = {}
        for model_config in config_response.model_config:
            language_code = model_config.parameters.get("language_code", "unknown")
            voice_name = model_config.parameters.get("voice_name", "unknown")
            subvoices_str = model_config.parameters.get("subvoices", "")

            if subvoices_str:
                # subvoices look like "name:meta,name:meta,..." — keep names only.
                subvoices = [voice.split(":")[0] for voice in subvoices_str.split(",")]
                full_voice_names = [voice_name + "." + subvoice for subvoice in subvoices]
            else:
                full_voice_names = [voice_name]

            tts_models.setdefault(language_code, {"voices": []})["voices"].extend(
                full_voice_names
            )

        return dict(sorted(tts_models.items()))

    def synthesize(
        self, text: str, *, conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS
    ) -> tts.ChunkedStream:
        # One-shot synthesis is intentionally unsupported; use stream().
        raise NotImplementedError("Chunked synthesis is not supported for NVIDIA TTS")

    def stream(
        self, *, conn_options: APIConnectOptions = DEFAULT_API_CONNECT_OPTIONS
    ) -> tts.SynthesizeStream:
        """Open a new incremental synthesis session."""
        return SynthesizeStream(tts=self, conn_options=conn_options, opts=self._opts)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class SynthesizeStream(tts.SynthesizeStream):
    """Incremental TTS stream bridging async text input to Riva's blocking API.

    Text is segmented by the configured tokenizer, segments are handed to a
    worker thread over a queue, and synthesized audio is pushed back onto the
    event loop with ``call_soon_threadsafe``.
    """

    def __init__(self, *, tts: TTS, conn_options: APIConnectOptions, opts: TTSOptions):
        super().__init__(tts=tts, conn_options=conn_options)
        self._opts = opts
        self._context_id = utils.shortuuid()
        self._sent_tokenizer_stream = self._opts.word_tokenizer.stream()
        self._token_q = queue.Queue()
        self._event_loop = asyncio.get_running_loop()

    async def _run(self, output_emitter: tts.AudioEmitter) -> None:
        output_emitter.initialize(
            request_id=self._context_id,
            sample_rate=self._opts.sample_rate,
            num_channels=1,
            stream=True,
            mime_type="audio/pcm",
        )
        output_emitter.start_segment(segment_id=self._context_id)

        # Resolved by the worker thread when it exits.
        worker_done = asyncio.Future()

        async def _input_task() -> None:
            # Feed pushed text into the tokenizer; a flush sentinel forces
            # the tokenizer to emit any buffered segment.
            async for item in self._input_ch:
                if isinstance(item, self._FlushSentinel):
                    self._sent_tokenizer_stream.flush()
                    continue
                self._sent_tokenizer_stream.push_text(item)
            self._sent_tokenizer_stream.end_input()

        async def _process_segments() -> None:
            # Forward tokenized segments to the worker; None ends the stream.
            async for segment in self._sent_tokenizer_stream:
                self._token_q.put(segment)
            self._token_q.put(None)

        def _synthesize_worker() -> None:
            # Runs on its own thread: the Riva client API is blocking.
            try:
                service = self._tts._ensure_session()
                while True:
                    segment = self._token_q.get()
                    if not segment:
                        break
                    try:
                        responses = service.synthesize_online(
                            segment.token,
                            self._opts.voice,
                            self._opts.language_code,
                            sample_rate_hz=self._opts.sample_rate,
                            encoding=AudioEncoding.LINEAR_PCM,
                        )
                        for response in responses:
                            self._event_loop.call_soon_threadsafe(
                                output_emitter.push, response.audio
                            )
                    except Exception as e:
                        # Best-effort: keep synthesizing later segments.
                        logger.error(f"Error in synthesis: {e}")
                        continue
            finally:
                self._event_loop.call_soon_threadsafe(worker_done.set_result, None)

        synthesize_thread = threading.Thread(
            target=_synthesize_worker,
            name="nvidia-tts-synthesize",
            daemon=True,
        )
        synthesize_thread.start()

        pump_tasks = [
            asyncio.create_task(_input_task()),
            asyncio.create_task(_process_segments()),
        ]
        try:
            await asyncio.gather(*pump_tasks)
        finally:
            # Extra sentinel guarantees the worker unblocks even on error.
            self._token_q.put(None)
            await worker_done
            output_emitter.end_segment()
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
# Copyright 2023 LiveKit, Inc.
|
|
2
|
+
|
|
3
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
4
|
+
# you may not use this file except in compliance with the License.
|
|
5
|
+
# You may obtain a copy of the License at
|
|
6
|
+
#
|
|
7
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
8
|
+
#
|
|
9
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
10
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
11
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
12
|
+
# See the License for the specific language governing permissions and
|
|
13
|
+
# limitations under the License.
|
|
14
|
+
|
|
15
|
+
__version__ = "1.2.9"
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["hatchling"]
|
|
3
|
+
build-backend = "hatchling.build"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "livekit-plugins-nvidia"
|
|
7
|
+
dynamic = ["version"]
|
|
8
|
+
description = "LiveKit Agents Plugin for services from nvidia"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
license = "Apache-2.0"
|
|
11
|
+
requires-python = ">=3.9.0"
|
|
12
|
+
authors = [{ name = "LiveKit", email = "hello@livekit.io" }]
|
|
13
|
+
keywords = ["webrtc", "realtime", "audio", "video", "livekit", "nvidia"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Intended Audience :: Developers",
|
|
16
|
+
"License :: OSI Approved :: Apache Software License",
|
|
17
|
+
"Topic :: Multimedia :: Sound/Audio",
|
|
18
|
+
"Topic :: Multimedia :: Video",
|
|
19
|
+
"Topic :: Scientific/Engineering :: Artificial Intelligence",
|
|
20
|
+
"Programming Language :: Python :: 3",
|
|
21
|
+
"Programming Language :: Python :: 3.9",
|
|
22
|
+
"Programming Language :: Python :: 3.10",
|
|
23
|
+
"Programming Language :: Python :: 3 :: Only",
|
|
24
|
+
]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"livekit-agents>=1.2.18",
|
|
27
|
+
"nvidia-riva-client",
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.urls]
|
|
31
|
+
Documentation = "https://docs.livekit.io"
|
|
32
|
+
Website = "https://livekit.io/"
|
|
33
|
+
Source = "https://github.com/livekit/agents"
|
|
34
|
+
|
|
35
|
+
[tool.hatch.version]
|
|
36
|
+
path = "livekit/plugins/nvidia/version.py"
|
|
37
|
+
|
|
38
|
+
[tool.hatch.build.targets.wheel]
|
|
39
|
+
packages = ["livekit"]
|
|
40
|
+
|
|
41
|
+
[tool.hatch.build.targets.sdist]
|
|
42
|
+
include = ["/livekit"]
|