hyperneuronai 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hyperneuronai-0.1.0/PKG-INFO +85 -0
- hyperneuronai-0.1.0/README.md +55 -0
- hyperneuronai-0.1.0/hyperneuronai/__init__.py +91 -0
- hyperneuronai-0.1.0/hyperneuronai/_client.py +207 -0
- hyperneuronai-0.1.0/hyperneuronai/_hyperneuron.py +121 -0
- hyperneuronai-0.1.0/hyperneuronai/exceptions.py +56 -0
- hyperneuronai-0.1.0/hyperneuronai/resources/__init__.py +4 -0
- hyperneuronai-0.1.0/hyperneuronai/resources/telephony.py +185 -0
- hyperneuronai-0.1.0/hyperneuronai/resources/tts.py +222 -0
- hyperneuronai-0.1.0/hyperneuronai/types/__init__.py +18 -0
- hyperneuronai-0.1.0/hyperneuronai/types/telephony.py +49 -0
- hyperneuronai-0.1.0/hyperneuronai/types/tts.py +33 -0
- hyperneuronai-0.1.0/hyperneuronai.egg-info/PKG-INFO +85 -0
- hyperneuronai-0.1.0/hyperneuronai.egg-info/SOURCES.txt +17 -0
- hyperneuronai-0.1.0/hyperneuronai.egg-info/dependency_links.txt +1 -0
- hyperneuronai-0.1.0/hyperneuronai.egg-info/requires.txt +11 -0
- hyperneuronai-0.1.0/hyperneuronai.egg-info/top_level.txt +1 -0
- hyperneuronai-0.1.0/pyproject.toml +40 -0
- hyperneuronai-0.1.0/setup.cfg +4 -0
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: hyperneuronai
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Official Python SDK for HyperNeuron AI services
|
|
5
|
+
Author-email: HyperNeuron AI <support@hyperneuron.in>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://www.hyperneuronai.com
|
|
8
|
+
Project-URL: Repository, https://github.com/hyperneuronai/hyperneuronai-python
|
|
9
|
+
Keywords: ai,tts,voice,telephony,speech
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
19
|
+
Requires-Python: >=3.9
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
Requires-Dist: httpx>=0.27.0
|
|
22
|
+
Requires-Dist: pydantic>=2.0.0
|
|
23
|
+
Provides-Extra: audio
|
|
24
|
+
Requires-Dist: numpy>=1.24.0; extra == "audio"
|
|
25
|
+
Requires-Dist: scipy>=1.11.0; extra == "audio"
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=8.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
|
|
29
|
+
Requires-Dist: respx>=0.21; extra == "dev"
|
|
30
|
+
|
|
31
|
+
# hyperneuronai
|
|
32
|
+
|
|
33
|
+
Official Python SDK for [HyperNeuron AI](https://www.hyperneuronai.com) — TTS streaming and AI telephony services.
|
|
34
|
+
|
|
35
|
+
## Install
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
pip install hyperneuronai
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
## Quick start
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
import hyperneuronai
|
|
45
|
+
|
|
46
|
+
client = hyperneuronai.HyperNeuron(
|
|
47
|
+
api_key="hn_key_xxxx",
|
|
48
|
+
base_url="https://api.hyperneuronai.com",
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
# TTS — save to WAV
|
|
52
|
+
audio = client.tts.generate("Hello, world!", voice="sanjana")
|
|
53
|
+
with open("hello.wav", "wb") as f:
|
|
54
|
+
f.write(audio)
|
|
55
|
+
|
|
56
|
+
# TTS — stream in real time
|
|
57
|
+
for chunk in client.tts.stream("Hello, world!", voice="sanjana"):
|
|
58
|
+
your_speaker.write(chunk)
|
|
59
|
+
|
|
60
|
+
# One-way outbound call
|
|
61
|
+
call = client.telephony.outbound(
|
|
62
|
+
to="+919876543210",
|
|
63
|
+
text="Hi! Your order has shipped and arrives tomorrow.",
|
|
64
|
+
voice="sanjana",
|
|
65
|
+
)
|
|
66
|
+
print(call.call_uuid, call.state)
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Async
|
|
70
|
+
|
|
71
|
+
```python
|
|
72
|
+
import asyncio
|
|
73
|
+
import hyperneuronai
|
|
74
|
+
|
|
75
|
+
async def main():
|
|
76
|
+
async with hyperneuronai.AsyncHyperNeuron(api_key="hn_key_xxxx") as client:
|
|
77
|
+
audio = await client.tts.generate("Hello!")
|
|
78
|
+
call = await client.telephony.outbound(to="+919876543210", text="Hi there!")
|
|
79
|
+
|
|
80
|
+
asyncio.run(main())
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
## License
|
|
84
|
+
|
|
85
|
+
MIT
|
|
@@ -0,0 +1,55 @@
|
|
|
1
|
+
# hyperneuronai
|
|
2
|
+
|
|
3
|
+
Official Python SDK for [HyperNeuron AI](https://www.hyperneuronai.com) — TTS streaming and AI telephony services.
|
|
4
|
+
|
|
5
|
+
## Install
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
pip install hyperneuronai
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick start
|
|
12
|
+
|
|
13
|
+
```python
|
|
14
|
+
import hyperneuronai
|
|
15
|
+
|
|
16
|
+
client = hyperneuronai.HyperNeuron(
|
|
17
|
+
api_key="hn_key_xxxx",
|
|
18
|
+
base_url="https://api.hyperneuronai.com",
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
# TTS — save to WAV
|
|
22
|
+
audio = client.tts.generate("Hello, world!", voice="sanjana")
|
|
23
|
+
with open("hello.wav", "wb") as f:
|
|
24
|
+
f.write(audio)
|
|
25
|
+
|
|
26
|
+
# TTS — stream in real time
|
|
27
|
+
for chunk in client.tts.stream("Hello, world!", voice="sanjana"):
|
|
28
|
+
your_speaker.write(chunk)
|
|
29
|
+
|
|
30
|
+
# One-way outbound call
|
|
31
|
+
call = client.telephony.outbound(
|
|
32
|
+
to="+919876543210",
|
|
33
|
+
text="Hi! Your order has shipped and arrives tomorrow.",
|
|
34
|
+
voice="sanjana",
|
|
35
|
+
)
|
|
36
|
+
print(call.call_uuid, call.state)
|
|
37
|
+
```
|
|
38
|
+
|
|
39
|
+
## Async
|
|
40
|
+
|
|
41
|
+
```python
|
|
42
|
+
import asyncio
|
|
43
|
+
import hyperneuronai
|
|
44
|
+
|
|
45
|
+
async def main():
|
|
46
|
+
async with hyperneuronai.AsyncHyperNeuron(api_key="hn_key_xxxx") as client:
|
|
47
|
+
audio = await client.tts.generate("Hello!")
|
|
48
|
+
call = await client.telephony.outbound(to="+919876543210", text="Hi there!")
|
|
49
|
+
|
|
50
|
+
asyncio.run(main())
|
|
51
|
+
```
|
|
52
|
+
|
|
53
|
+
## License
|
|
54
|
+
|
|
55
|
+
MIT
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""
|
|
2
|
+
hyperneuronai — Official Python SDK for HyperNeuron AI services.
|
|
3
|
+
|
|
4
|
+
Quick start (sync)::
|
|
5
|
+
|
|
6
|
+
import hyperneuronai
|
|
7
|
+
|
|
8
|
+
client = hyperneuronai.HyperNeuron(api_key="hn_key_xxxx")
|
|
9
|
+
|
|
10
|
+
# TTS — save to file
|
|
11
|
+
audio = client.tts.generate("Hello, world!", voice="sanjana")
|
|
12
|
+
with open("hello.wav", "wb") as f:
|
|
13
|
+
f.write(audio)
|
|
14
|
+
|
|
15
|
+
# TTS — stream in real-time
|
|
16
|
+
for chunk in client.tts.stream("Hello, world!", voice="sanjana"):
|
|
17
|
+
your_speaker.write(chunk)
|
|
18
|
+
|
|
19
|
+
# Outbound call
|
|
20
|
+
call = client.telephony.call(
|
|
21
|
+
to="+919876543210",
|
|
22
|
+
voice="sanjana",
|
|
23
|
+
greeting="Hi! I'm calling to help you today.",
|
|
24
|
+
)
|
|
25
|
+
print(call.call_uuid)
|
|
26
|
+
|
|
27
|
+
Quick start (async)::
|
|
28
|
+
|
|
29
|
+
import asyncio
|
|
30
|
+
import hyperneuronai
|
|
31
|
+
|
|
32
|
+
async def main():
|
|
33
|
+
client = hyperneuronai.AsyncHyperNeuron(api_key="hn_key_xxxx")
|
|
34
|
+
|
|
35
|
+
audio = await client.tts.generate("Hello!", voice="sanjana")
|
|
36
|
+
|
|
37
|
+
async for chunk in client.tts.stream("Hello!"):
|
|
38
|
+
await speaker.write(chunk)
|
|
39
|
+
|
|
40
|
+
call = await client.telephony.call(to="+919876543210")
|
|
41
|
+
|
|
42
|
+
asyncio.run(main())
|
|
43
|
+
"""
|
|
44
|
+
|
|
45
|
+
from hyperneuronai._hyperneuron import HyperNeuron, AsyncHyperNeuron
|
|
46
|
+
from hyperneuronai.exceptions import (
|
|
47
|
+
HyperNeuronError,
|
|
48
|
+
APIError,
|
|
49
|
+
AuthenticationError,
|
|
50
|
+
PermissionError,
|
|
51
|
+
NotFoundError,
|
|
52
|
+
RateLimitError,
|
|
53
|
+
ServiceUnavailableError,
|
|
54
|
+
APIConnectionError,
|
|
55
|
+
)
|
|
56
|
+
from hyperneuronai.types import (
|
|
57
|
+
TTSRequest,
|
|
58
|
+
TTSResponse,
|
|
59
|
+
AudioFormat,
|
|
60
|
+
Voice,
|
|
61
|
+
CallRequest,
|
|
62
|
+
CallResponse,
|
|
63
|
+
CallStatus,
|
|
64
|
+
CallState,
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
__version__ = "0.1.0"  # NOTE(review): keep in sync with pyproject.toml / PKG-INFO

# Public API surface re-exported at package level.
__all__ = [
    # Clients
    "HyperNeuron",
    "AsyncHyperNeuron",
    # Exceptions
    "HyperNeuronError",
    "APIError",
    "AuthenticationError",
    # NOTE(review): shadows the builtin PermissionError for star-importers.
    "PermissionError",
    "NotFoundError",
    "RateLimitError",
    "ServiceUnavailableError",
    "APIConnectionError",
    # Types
    "TTSRequest",
    "TTSResponse",
    "AudioFormat",
    "Voice",
    "CallRequest",
    "CallResponse",
    "CallStatus",
    "CallState",
]
|
|
@@ -0,0 +1,207 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any, Iterator, AsyncIterator, Optional
|
|
4
|
+
import warnings
|
|
5
|
+
|
|
6
|
+
import httpx
|
|
7
|
+
|
|
8
|
+
from hyperneuronai.exceptions import APIConnectionError, _raise_for_status
|
|
9
|
+
|
|
10
|
+
_DEFAULT_TIMEOUT = httpx.Timeout(connect=10.0, read=120.0, write=30.0, pool=10.0)
|
|
11
|
+
_DEFAULT_BASE_URL = "https://api.hyperneuron.ai"
|
|
12
|
+
_MAX_RESPONSE_BYTES = 64 * 1024 * 1024 # 64 MB hard cap on non-streaming responses
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def _validate_base_url(url: str) -> None:
|
|
16
|
+
if url.startswith("http://"):
|
|
17
|
+
warnings.warn(
|
|
18
|
+
"base_url uses plain HTTP — your API key will be sent unencrypted. "
|
|
19
|
+
"Use HTTPS in production.",
|
|
20
|
+
stacklevel=3,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _auth_headers(api_key: str) -> dict[str, str]:
|
|
25
|
+
return {"Authorization": f"Bearer {api_key}"}
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# ── Sync ──────────────────────────────────────────────────────────────────────
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class SyncHTTPClient:
    """Synchronous HTTP transport shared by the resource classes.

    Wraps an ``httpx.Client`` and adds bearer-token auth, base-URL joining,
    translation of network failures into ``APIConnectionError``, and
    HTTP-status checking via ``_raise_for_status``.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = _DEFAULT_BASE_URL,
        timeout: Optional[httpx.Timeout] = None,
        http_client: Optional[httpx.Client] = None,
    ) -> None:
        _validate_base_url(base_url)
        self._api_key = api_key
        self._base_url = base_url.rstrip("/")
        # A caller-supplied client is used as-is (its own timeout/headers win).
        self._client = http_client or httpx.Client(
            timeout=timeout or _DEFAULT_TIMEOUT,
            headers={"User-Agent": "hyperneuronai-python/0.1.0"},
        )

    def _url(self, path: str) -> str:
        return f"{self._base_url}{path}"

    def _headers(self, extra: Optional[dict] = None) -> dict[str, str]:
        h = _auth_headers(self._api_key)
        if extra:
            h.update(extra)
        return h

    def post(self, path: str, *, json_body: Any = None) -> dict:
        """POST ``json_body`` as JSON and return the decoded response body."""
        try:
            r = self._client.post(
                self._url(path),
                json=json_body,
                headers=self._headers({"Content-Type": "application/json"}),
            )
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    def get(self, path: str) -> dict:
        """GET ``path`` and return the decoded JSON response body."""
        try:
            r = self._client.get(self._url(path), headers=self._headers())
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    def delete(self, path: str) -> dict:
        """DELETE ``path`` and return the decoded JSON response body."""
        try:
            r = self._client.delete(self._url(path), headers=self._headers())
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    def stream_bytes(self, path: str, *, json_body: Any = None) -> Iterator[bytes]:
        """POST ``json_body`` and yield the response body in 4 KiB chunks."""
        try:
            with self._client.stream(
                "POST",
                self._url(path),
                json=json_body,
                headers=self._headers({"Content-Type": "application/json"}),
            ) as r:
                if r.status_code >= 400:
                    # Bug fix: a streamed response must be read before
                    # .json()/.text are available; otherwise _check() would
                    # raise httpx.ResponseNotRead instead of our APIError.
                    r.read()
                self._check(r)
                yield from r.iter_bytes(chunk_size=4096)
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc

    def _check(self, r: httpx.Response) -> None:
        """Map 4xx/5xx responses to the appropriate APIError subclass."""
        if r.status_code >= 400:
            try:
                body = r.json()
            except Exception:
                body = {}
            # Truncate raw text to avoid leaking large server error pages.
            _raise_for_status(r.status_code, body, r.text[:300])

    def close(self) -> None:
        """Close the underlying connection pool."""
        self._client.close()

    def __enter__(self) -> "SyncHTTPClient":
        return self

    def __repr__(self) -> str:
        # Never echo the API key in debug output.
        return f"SyncHTTPClient(base_url={self._base_url!r}, api_key='***')"

    def __exit__(self, *_: Any) -> None:
        self.close()
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
# ── Async ─────────────────────────────────────────────────────────────────────
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
class AsyncHTTPClient:
    """Asynchronous HTTP transport shared by the async resource classes.

    Wraps an ``httpx.AsyncClient`` and adds bearer-token auth, base-URL
    joining, translation of network failures into ``APIConnectionError``,
    and HTTP-status checking via ``_raise_for_status``.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = _DEFAULT_BASE_URL,
        timeout: Optional[httpx.Timeout] = None,
        http_client: Optional[httpx.AsyncClient] = None,
    ) -> None:
        _validate_base_url(base_url)
        self._api_key = api_key
        self._base_url = base_url.rstrip("/")
        # A caller-supplied client is used as-is (its own timeout/headers win).
        self._client = http_client or httpx.AsyncClient(
            timeout=timeout or _DEFAULT_TIMEOUT,
            headers={"User-Agent": "hyperneuronai-python/0.1.0"},
        )

    def _url(self, path: str) -> str:
        return f"{self._base_url}{path}"

    def _headers(self, extra: Optional[dict] = None) -> dict[str, str]:
        h = _auth_headers(self._api_key)
        if extra:
            h.update(extra)
        return h

    async def post(self, path: str, *, json_body: Any = None) -> dict:
        """POST ``json_body`` as JSON and return the decoded response body."""
        try:
            r = await self._client.post(
                self._url(path),
                json=json_body,
                headers=self._headers({"Content-Type": "application/json"}),
            )
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    async def get(self, path: str) -> dict:
        """GET ``path`` and return the decoded JSON response body."""
        try:
            r = await self._client.get(self._url(path), headers=self._headers())
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    async def delete(self, path: str) -> dict:
        """DELETE ``path`` and return the decoded JSON response body."""
        try:
            r = await self._client.delete(self._url(path), headers=self._headers())
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc
        self._check(r)
        return r.json()

    async def stream_bytes(self, path: str, *, json_body: Any = None) -> AsyncIterator[bytes]:
        """POST ``json_body`` and yield the response body in 4 KiB chunks."""
        try:
            async with self._client.stream(
                "POST",
                self._url(path),
                json=json_body,
                headers=self._headers({"Content-Type": "application/json"}),
            ) as r:
                if r.status_code >= 400:
                    # Bug fix: a streamed response must be read before
                    # .json()/.text are available; otherwise _check() would
                    # raise httpx.ResponseNotRead instead of our APIError.
                    await r.aread()
                self._check(r)
                async for chunk in r.aiter_bytes(chunk_size=4096):
                    yield chunk
        except httpx.RequestError as exc:
            raise APIConnectionError(str(exc)) from exc

    def _check(self, r: httpx.Response) -> None:
        """Map 4xx/5xx responses to the appropriate APIError subclass."""
        if r.status_code >= 400:
            try:
                body = r.json()
            except Exception:
                body = {}
            # Truncate raw text to avoid leaking large server error pages.
            _raise_for_status(r.status_code, body, r.text[:300])

    def __repr__(self) -> str:
        # Never echo the API key in debug output.
        return f"AsyncHTTPClient(base_url={self._base_url!r}, api_key='***')"

    async def aclose(self) -> None:
        """Close the underlying connection pool."""
        await self._client.aclose()

    async def __aenter__(self) -> "AsyncHTTPClient":
        return self

    async def __aexit__(self, *_: Any) -> None:
        await self.aclose()
|
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from hyperneuronai._client import (
|
|
8
|
+
SyncHTTPClient,
|
|
9
|
+
AsyncHTTPClient,
|
|
10
|
+
_DEFAULT_BASE_URL,
|
|
11
|
+
_DEFAULT_TIMEOUT,
|
|
12
|
+
)
|
|
13
|
+
from hyperneuronai.resources.tts import TTS, AsyncTTS
|
|
14
|
+
from hyperneuronai.resources.telephony import Telephony, AsyncTelephony
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class HyperNeuron:
    """
    Synchronous HyperNeuron AI client.

    Parameters
    ----------
    api_key:
        Your HyperNeuron API key (starts with ``hn_key_``).
    base_url:
        Server base URL. Defaults to ``https://api.hyperneuron.ai``.
        Override to point at a self-hosted instance.
    timeout:
        Custom ``httpx.Timeout``. Defaults to 10 s connect / 120 s read.
    http_client:
        Bring your own ``httpx.Client`` (useful for proxies, custom SSL, etc.).
    """

    def __init__(
        self,
        api_key: str,
        *,
        base_url: str = _DEFAULT_BASE_URL,
        timeout: Optional[httpx.Timeout] = None,
        http_client: Optional[httpx.Client] = None,
    ) -> None:
        if not api_key:
            raise ValueError("api_key must not be empty.")
        transport = SyncHTTPClient(
            api_key=api_key,
            base_url=base_url,
            timeout=timeout,
            http_client=http_client,
        )
        self._http = transport
        # Both resource namespaces share the single transport/connection pool.
        self.tts = TTS(transport)
        self.telephony = Telephony(transport)

    def health(self) -> dict:
        """Check server health and model loading status."""
        return self._http.get("/health")

    def close(self) -> None:
        """Close the underlying HTTP connection pool."""
        self._http.close()

    def __enter__(self) -> "HyperNeuron":
        return self

    def __exit__(self, *_) -> None:
        self.close()

    def __repr__(self) -> str:
        # The key itself is deliberately masked.
        return f"HyperNeuron(base_url={self._http._base_url!r}, api_key='***')"
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class AsyncHyperNeuron:
    """
    Async HyperNeuron AI client.

    Parameters
    ----------
    api_key:
        Your HyperNeuron API key (starts with ``hn_key_``).
    base_url:
        Server base URL. Defaults to ``https://api.hyperneuron.ai``.
    timeout:
        Custom ``httpx.Timeout``.
    http_client:
        Bring your own ``httpx.AsyncClient``.
    """

    def __init__(
        self,
        api_key: str,
        *,
        base_url: str = _DEFAULT_BASE_URL,
        timeout: Optional[httpx.Timeout] = None,
        http_client: Optional[httpx.AsyncClient] = None,
    ) -> None:
        if not api_key:
            raise ValueError("api_key must not be empty.")
        transport = AsyncHTTPClient(
            api_key=api_key,
            base_url=base_url,
            timeout=timeout,
            http_client=http_client,
        )
        self._http = transport
        # Both resource namespaces share the single transport/connection pool.
        self.tts = AsyncTTS(transport)
        self.telephony = AsyncTelephony(transport)

    async def health(self) -> dict:
        """Check server health and model loading status."""
        return await self._http.get("/health")

    async def aclose(self) -> None:
        """Close the underlying HTTP connection pool."""
        await self._http.aclose()

    async def __aenter__(self) -> "AsyncHyperNeuron":
        return self

    async def __aexit__(self, *_) -> None:
        await self.aclose()

    def __repr__(self) -> str:
        # The key itself is deliberately masked.
        return f"AsyncHyperNeuron(base_url={self._http._base_url!r}, api_key='***')"
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Optional
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class HyperNeuronError(Exception):
|
|
7
|
+
"""Base exception for all HyperNeuron SDK errors."""
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class APIError(HyperNeuronError):
|
|
11
|
+
"""Raised when the API returns an error response."""
|
|
12
|
+
|
|
13
|
+
def __init__(self, message: str, status_code: int, body: Optional[dict] = None) -> None:
|
|
14
|
+
super().__init__(message)
|
|
15
|
+
self.status_code = status_code
|
|
16
|
+
self.body = body or {}
|
|
17
|
+
|
|
18
|
+
def __repr__(self) -> str:
|
|
19
|
+
return f"{type(self).__name__}(status_code={self.status_code}, message={str(self)!r})"
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class AuthenticationError(APIError):
|
|
23
|
+
"""401 — invalid or missing API key."""
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class PermissionError(APIError):
|
|
27
|
+
"""403 — API key lacks permission for this resource."""
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class NotFoundError(APIError):
|
|
31
|
+
"""404 — resource not found."""
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class RateLimitError(APIError):
|
|
35
|
+
"""429 — rate limit exceeded."""
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class ServiceUnavailableError(APIError):
|
|
39
|
+
"""503 — service temporarily unavailable (models loading, etc.)."""
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class APIConnectionError(HyperNeuronError):
|
|
43
|
+
"""Network-level error (timeout, DNS failure, etc.)."""
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _raise_for_status(status_code: int, body: dict, raw: str) -> None:
|
|
47
|
+
message = body.get("detail") or body.get("message") or raw or f"HTTP {status_code}"
|
|
48
|
+
cls_map = {
|
|
49
|
+
401: AuthenticationError,
|
|
50
|
+
403: PermissionError,
|
|
51
|
+
404: NotFoundError,
|
|
52
|
+
429: RateLimitError,
|
|
53
|
+
503: ServiceUnavailableError,
|
|
54
|
+
}
|
|
55
|
+
cls = cls_map.get(status_code, APIError)
|
|
56
|
+
raise cls(message, status_code, body)
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
from hyperneuronai._client import SyncHTTPClient, AsyncHTTPClient
|
|
7
|
+
from hyperneuronai.types.telephony import OutboundCallRequest, CallResponse, CallStatus
|
|
8
|
+
|
|
9
|
+
_UUID_RE = re.compile(r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$", re.I)
|
|
10
|
+
|
|
11
|
+
_AGENT_CALL_NOT_IMPLEMENTED = (
|
|
12
|
+
"agent_call() is not yet available in this SDK version. "
|
|
13
|
+
"It will be enabled in an upcoming release."
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def _validate_call_uuid(call_uuid: str) -> None:
|
|
18
|
+
if not _UUID_RE.match(call_uuid):
|
|
19
|
+
raise ValueError(f"Invalid call_uuid: {call_uuid!r}. Must be a UUID string.")
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def _validate_pagination(limit: int, offset: int) -> None:
|
|
23
|
+
if not (1 <= limit <= 100):
|
|
24
|
+
raise ValueError(f"limit must be between 1 and 100, got {limit}.")
|
|
25
|
+
if offset < 0:
|
|
26
|
+
raise ValueError(f"offset must be >= 0, got {offset}.")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
# ── Sync ──────────────────────────────────────────────────────────────────────
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class Telephony:
    """
    Synchronous telephony resource.

    Enabled
    -------
    ``outbound()`` — one-way call: synthesizes ``text`` via TTS and plays
    the resulting audio to the recipient; the called party cannot interact.

    Coming soon
    -----------
    ``agent_call()`` — two-way AI voice conversation. Currently raises
    ``NotImplementedError``.

    Usage::

        call = client.telephony.outbound(
            to="+919876543210",
            text="Hi! Your order has been shipped and will arrive tomorrow.",
            voice="sanjana",
        )
        print(call.call_uuid, call.state)

        status = client.telephony.status(call.call_uuid)
        client.telephony.end(call.call_uuid)
    """

    def __init__(self, http: SyncHTTPClient) -> None:
        self._http = http

    def outbound(
        self,
        to: str,
        text: str,
        *,
        voice: str = "sanjana",
        language: str = "en",
        metadata: Optional[dict] = None,
    ) -> CallResponse:
        """
        Place a one-way outbound call.

        The TTS model synthesizes ``text`` and the audio is played to the
        recipient; the called party cannot speak back.

        Parameters
        ----------
        to:
            Destination number in E.164 form, e.g. ``"+919876543210"``.
        text:
            Message to synthesize and deliver (1–4096 characters).
        voice:
            TTS voice name, e.g. ``"sanjana"``.
        language:
            BCP-47 language code for synthesis, e.g. ``"en"``, ``"hi"``.
        metadata:
            Arbitrary key/value pairs for your own tracking.
        """
        payload = OutboundCallRequest(
            to=to,
            text=text,
            voice=voice,
            language=language,
            metadata=metadata or {},
        ).model_dump()
        return CallResponse(**self._http.post("/telephony/outbound", json_body=payload))

    def agent_call(self) -> None:
        """Two-way AI voice call — not yet implemented."""
        raise NotImplementedError(_AGENT_CALL_NOT_IMPLEMENTED)

    def status(self, call_uuid: str) -> CallStatus:
        """Fetch the current status of a call."""
        _validate_call_uuid(call_uuid)
        return CallStatus(**self._http.get(f"/telephony/call/{call_uuid}"))

    def end(self, call_uuid: str) -> dict:
        """Terminate an active call immediately."""
        _validate_call_uuid(call_uuid)
        return self._http.delete(f"/telephony/call/{call_uuid}")

    def list_calls(self, limit: int = 20, offset: int = 0) -> list[CallStatus]:
        """List recent calls for this API key."""
        _validate_pagination(limit, offset)
        raw = self._http.get(f"/telephony/calls?limit={limit}&offset={offset}")
        # The server may return a bare list or wrap it under "calls".
        records = raw if isinstance(raw, list) else raw.get("calls", [])
        return [CallStatus(**record) for record in records]
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
# ── Async ─────────────────────────────────────────────────────────────────────
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class AsyncTelephony:
    """
    Async telephony resource.

    Usage::

        call = await client.telephony.outbound(
            to="+919876543210",
            text="Hi! Your order has been shipped and will arrive tomorrow.",
            voice="sanjana",
        )

        status = await client.telephony.status(call.call_uuid)
        await client.telephony.end(call.call_uuid)
    """

    def __init__(self, http: AsyncHTTPClient) -> None:
        self._http = http

    async def outbound(
        self,
        to: str,
        text: str,
        *,
        voice: str = "sanjana",
        language: str = "en",
        metadata: Optional[dict] = None,
    ) -> CallResponse:
        """Place a one-way outbound call (async)."""
        payload = OutboundCallRequest(
            to=to,
            text=text,
            voice=voice,
            language=language,
            metadata=metadata or {},
        ).model_dump()
        return CallResponse(**await self._http.post("/telephony/outbound", json_body=payload))

    async def agent_call(self) -> None:
        """Two-way AI voice call — not yet implemented."""
        raise NotImplementedError(_AGENT_CALL_NOT_IMPLEMENTED)

    async def status(self, call_uuid: str) -> CallStatus:
        """Fetch the current status of a call."""
        _validate_call_uuid(call_uuid)
        return CallStatus(**await self._http.get(f"/telephony/call/{call_uuid}"))

    async def end(self, call_uuid: str) -> dict:
        """Terminate an active call immediately."""
        _validate_call_uuid(call_uuid)
        return await self._http.delete(f"/telephony/call/{call_uuid}")

    async def list_calls(self, limit: int = 20, offset: int = 0) -> list[CallStatus]:
        """List recent calls for this API key."""
        _validate_pagination(limit, offset)
        raw = await self._http.get(f"/telephony/calls?limit={limit}&offset={offset}")
        # The server may return a bare list or wrap it under "calls".
        records = raw if isinstance(raw, list) else raw.get("calls", [])
        return [CallStatus(**record) for record in records]
|
|
@@ -0,0 +1,222 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import io
|
|
4
|
+
import struct
|
|
5
|
+
import wave
|
|
6
|
+
from typing import Iterator, AsyncIterator, Optional
|
|
7
|
+
|
|
8
|
+
from hyperneuronai._client import SyncHTTPClient, AsyncHTTPClient, _MAX_RESPONSE_BYTES
|
|
9
|
+
from hyperneuronai.exceptions import APIError
|
|
10
|
+
from hyperneuronai.types.tts import TTSRequest, TTSResponse, AudioFormat
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _make_wav_bytes(pcm_f32_bytes: bytes, sample_rate: int) -> bytes:
|
|
14
|
+
"""Convert raw float32-LE PCM bytes to a WAV file in memory."""
|
|
15
|
+
n_samples = len(pcm_f32_bytes) // 4
|
|
16
|
+
pcm_s16 = bytearray(n_samples * 2)
|
|
17
|
+
for i in range(n_samples):
|
|
18
|
+
sample_f32 = struct.unpack_from("<f", pcm_f32_bytes, i * 4)[0]
|
|
19
|
+
sample_s16 = max(-32768, min(32767, int(sample_f32 * 32767)))
|
|
20
|
+
struct.pack_into("<h", pcm_s16, i * 2, sample_s16)
|
|
21
|
+
buf = io.BytesIO()
|
|
22
|
+
with wave.open(buf, "wb") as wf:
|
|
23
|
+
wf.setnchannels(1)
|
|
24
|
+
wf.setsampwidth(2)
|
|
25
|
+
wf.setframerate(sample_rate)
|
|
26
|
+
wf.writeframes(bytes(pcm_s16))
|
|
27
|
+
return buf.getvalue()
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def _build_payload(request: TTSRequest) -> dict:
    """Serialize a TTSRequest into the JSON body sent to the server."""
    # exclude_none keeps unset optional fields (e.g. language) out of the
    # payload so the server applies its own defaults.
    return request.model_dump(exclude_none=True)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
# ── Sync ──────────────────────────────────────────────────────────────────────
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class TTS:
|
|
38
|
+
"""
|
|
39
|
+
Synchronous TTS resource.
|
|
40
|
+
|
|
41
|
+
Usage::
|
|
42
|
+
|
|
43
|
+
audio_bytes = client.tts.generate("Hello world", voice="sanjana")
|
|
44
|
+
with open("out.wav", "wb") as f:
|
|
45
|
+
f.write(audio_bytes)
|
|
46
|
+
|
|
47
|
+
for chunk in client.tts.stream("Hello world", voice="sanjana"):
|
|
48
|
+
speaker.play(chunk)
|
|
49
|
+
"""
|
|
50
|
+
|
|
51
|
+
def __init__(self, http: SyncHTTPClient) -> None:
    # Transport is created and owned by the parent HyperNeuron client;
    # this resource only borrows it.
    self._http = http
|
|
53
|
+
|
|
54
|
+
def generate(
|
|
55
|
+
self,
|
|
56
|
+
text: str,
|
|
57
|
+
*,
|
|
58
|
+
voice: str = "sanjana",
|
|
59
|
+
language: Optional[str] = None,
|
|
60
|
+
temperature: float = 0.65,
|
|
61
|
+
repetition_penalty: float = 1.1,
|
|
62
|
+
top_p: float = 0.95,
|
|
63
|
+
max_tokens: int = 2000,
|
|
64
|
+
format: AudioFormat = "wav",
|
|
65
|
+
sample_rate: int = 24000,
|
|
66
|
+
) -> bytes:
|
|
67
|
+
"""
|
|
68
|
+
Generate speech and return the complete audio as bytes.
|
|
69
|
+
|
|
70
|
+
Returns WAV bytes by default (``format="wav"``).
|
|
71
|
+
Use ``format="pcm_f32le"`` or ``format="pcm_s16le"`` for raw PCM.
|
|
72
|
+
"""
|
|
73
|
+
req = TTSRequest(
|
|
74
|
+
text=text,
|
|
75
|
+
voice=voice,
|
|
76
|
+
language=language,
|
|
77
|
+
temperature=temperature,
|
|
78
|
+
repetition_penalty=repetition_penalty,
|
|
79
|
+
top_p=top_p,
|
|
80
|
+
max_tokens=max_tokens,
|
|
81
|
+
format=format,
|
|
82
|
+
sample_rate=sample_rate,
|
|
83
|
+
)
|
|
84
|
+
chunks: list[bytes] = []
|
|
85
|
+
total = 0
|
|
86
|
+
for chunk in self._http.stream_bytes("/tts/stream", json_body=_build_payload(req)):
|
|
87
|
+
total += len(chunk)
|
|
88
|
+
if total > _MAX_RESPONSE_BYTES:
|
|
89
|
+
raise APIError(
|
|
90
|
+
f"TTS response exceeded {_MAX_RESPONSE_BYTES // (1024*1024)} MB limit.",
|
|
91
|
+
status_code=0,
|
|
92
|
+
)
|
|
93
|
+
chunks.append(chunk)
|
|
94
|
+
raw = b"".join(chunks)
|
|
95
|
+
if format == "wav" and not raw.startswith(b"RIFF"):
|
|
96
|
+
raw = _make_wav_bytes(raw, sample_rate)
|
|
97
|
+
return raw
|
|
98
|
+
|
|
99
|
+
def stream(
|
|
100
|
+
self,
|
|
101
|
+
text: str,
|
|
102
|
+
*,
|
|
103
|
+
voice: str = "sanjana",
|
|
104
|
+
language: Optional[str] = None,
|
|
105
|
+
temperature: float = 0.65,
|
|
106
|
+
repetition_penalty: float = 1.1,
|
|
107
|
+
top_p: float = 0.95,
|
|
108
|
+
max_tokens: int = 2000,
|
|
109
|
+
sample_rate: int = 24000,
|
|
110
|
+
) -> Iterator[bytes]:
|
|
111
|
+
"""
|
|
112
|
+
Stream speech audio chunks as they are generated.
|
|
113
|
+
|
|
114
|
+
Yields raw PCM float32-LE bytes. First chunk typically arrives within 200 ms.
|
|
115
|
+
Useful for real-time playback where you want minimal latency.
|
|
116
|
+
"""
|
|
117
|
+
req = TTSRequest(
|
|
118
|
+
text=text,
|
|
119
|
+
voice=voice,
|
|
120
|
+
language=language,
|
|
121
|
+
temperature=temperature,
|
|
122
|
+
repetition_penalty=repetition_penalty,
|
|
123
|
+
top_p=top_p,
|
|
124
|
+
max_tokens=max_tokens,
|
|
125
|
+
format="pcm_f32le",
|
|
126
|
+
sample_rate=sample_rate,
|
|
127
|
+
)
|
|
128
|
+
yield from self._http.stream_bytes("/tts/stream", json_body=_build_payload(req))
|
|
129
|
+
|
|
130
|
+
def voices(self) -> list[dict]:
|
|
131
|
+
"""Return available voices and their metadata."""
|
|
132
|
+
return self._http.get("/tts/voices")
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
# ── Async ─────────────────────────────────────────────────────────────────────
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
class AsyncTTS:
    """
    Async TTS resource.

    Usage::

        audio_bytes = await client.tts.generate("Hello world", voice="sanjana")

        async for chunk in client.tts.stream("Hello world", voice="sanjana"):
            await speaker.play(chunk)
    """

    def __init__(self, http: AsyncHTTPClient) -> None:
        # Shared HTTP transport owned by the parent client.
        self._http = http

    @staticmethod
    def _make_request(
        text: str,
        *,
        voice: str,
        language: Optional[str],
        temperature: float,
        repetition_penalty: float,
        top_p: float,
        max_tokens: int,
        format: AudioFormat,
        sample_rate: int,
    ) -> TTSRequest:
        """Build a validated TTSRequest; shared by generate() and stream()."""
        return TTSRequest(
            text=text,
            voice=voice,
            language=language,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            top_p=top_p,
            max_tokens=max_tokens,
            format=format,
            sample_rate=sample_rate,
        )

    async def _collect(self, req: TTSRequest) -> bytes:
        """Drain the /tts/stream response into a single bytes object.

        Raises:
            APIError: if the accumulated payload exceeds _MAX_RESPONSE_BYTES.
        """
        chunks: list[bytes] = []
        total = 0
        async for chunk in self._http.stream_bytes("/tts/stream", json_body=_build_payload(req)):
            total += len(chunk)
            # Check the cap incrementally so a runaway response is aborted
            # before it is fully buffered in memory.
            if total > _MAX_RESPONSE_BYTES:
                raise APIError(
                    f"TTS response exceeded {_MAX_RESPONSE_BYTES // (1024*1024)} MB limit.",
                    status_code=0,
                )
            chunks.append(chunk)
        return b"".join(chunks)

    async def generate(
        self,
        text: str,
        *,
        voice: str = "sanjana",
        language: Optional[str] = None,
        temperature: float = 0.65,
        repetition_penalty: float = 1.1,
        top_p: float = 0.95,
        max_tokens: int = 2000,
        format: AudioFormat = "wav",
        sample_rate: int = 24000,
    ) -> bytes:
        """Generate speech and return the complete audio as bytes.

        Returns WAV bytes by default (``format="wav"``); use
        ``format="pcm_f32le"`` or ``format="pcm_s16le"`` for raw PCM.
        """
        req = self._make_request(
            text,
            voice=voice,
            language=language,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            top_p=top_p,
            max_tokens=max_tokens,
            format=format,
            sample_rate=sample_rate,
        )
        raw = await self._collect(req)
        # If a WAV was requested but the payload has no RIFF header, wrap the
        # raw PCM locally (assumes the stream is float32-LE — TODO confirm
        # server contract).
        if format == "wav" and not raw.startswith(b"RIFF"):
            raw = _make_wav_bytes(raw, sample_rate)
        return raw

    async def stream(
        self,
        text: str,
        *,
        voice: str = "sanjana",
        language: Optional[str] = None,
        temperature: float = 0.65,
        repetition_penalty: float = 1.1,
        top_p: float = 0.95,
        max_tokens: int = 2000,
        sample_rate: int = 24000,
    ) -> AsyncIterator[bytes]:
        """Stream speech audio chunks (raw PCM float32-LE) as they are generated."""
        req = self._make_request(
            text,
            voice=voice,
            language=language,
            temperature=temperature,
            repetition_penalty=repetition_penalty,
            top_p=top_p,
            max_tokens=max_tokens,
            format="pcm_f32le",  # streaming always uses raw float32 PCM
            sample_rate=sample_rate,
        )
        async for chunk in self._http.stream_bytes("/tts/stream", json_body=_build_payload(req)):
            yield chunk

    async def voices(self) -> list[dict]:
        """Return available voices and their metadata."""
        return await self._http.get("/tts/voices")
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
from hyperneuronai.types.tts import TTSRequest, TTSResponse, AudioFormat, Voice
from hyperneuronai.types.telephony import (
    OutboundCallRequest,
    CallResponse,
    CallStatus,
    CallState,
)

# Bug fix: hyperneuronai.types.telephony defines OutboundCallRequest, not
# CallRequest, so importing the name "CallRequest" directly raised
# ImportError. Keep the long-advertised "CallRequest" name as an alias.
CallRequest = OutboundCallRequest

__all__ = [
    "TTSRequest",
    "TTSResponse",
    "AudioFormat",
    "Voice",
    "CallRequest",
    "OutboundCallRequest",
    "CallResponse",
    "CallStatus",
    "CallState",
]
|
|
@@ -0,0 +1,49 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from typing import Literal, Optional
|
|
5
|
+
from pydantic import BaseModel, Field, field_validator
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
# Lifecycle states a call can be in, as reported by the telephony API.
CallState = Literal["queued", "ringing", "in_progress", "completed", "failed", "cancelled"]

# E.164 phone number: leading "+", non-zero first digit, 7-15 digits total.
_E164_RE = re.compile(r"^\+[1-9]\d{6,14}$")
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class OutboundCallRequest(BaseModel):
    """Request model for a one-way outbound call.

    The destination number is validated against E.164 before the request
    is serialized and sent to the API.
    """

    to: str = Field(..., description="E.164 phone number, e.g. '+919876543210'.")
    text: str = Field(..., min_length=1, max_length=4096, description="Message text to synthesize and deliver.")
    voice: str = Field(default="sanjana")
    language: str = Field(default="en", description="BCP-47 language code or 'auto'.")
    metadata: dict = Field(
        default_factory=dict,
        description="Arbitrary key/value pairs for your own tracking.",
    )

    @field_validator("to")
    @classmethod
    def _validate_e164(cls, v: str) -> str:
        # Reject anything that is not "+" followed by 7-15 digits with a
        # non-zero leading digit (see _E164_RE).
        if not _E164_RE.match(v):
            raise ValueError(
                f"Phone number must be in E.164 format (e.g. '+919876543210'), got {v!r}."
            )
        return v
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class CallResponse(BaseModel):
    """Acknowledgement returned when an outbound call is created."""

    call_uuid: str  # server-assigned call identifier
    state: CallState  # current lifecycle state
    to: str  # destination phone number (E.164)
    voice: str  # voice used for synthesis
    created_at: str  # creation timestamp, as a string from the API
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class CallStatus(BaseModel):
    """Point-in-time status of a previously created call."""

    call_uuid: str  # server-assigned call identifier
    state: CallState  # current lifecycle state
    to: str  # destination phone number (E.164)
    duration_seconds: Optional[float] = None  # None until the call has a duration
    started_at: Optional[str] = None  # None if the call has not started
    ended_at: Optional[str] = None  # None if the call has not ended
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Literal, Optional
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
# Built-in voice identifiers accepted by the TTS endpoints.
Voice = Literal[
    "sanjana", "arjun", "priya", "rahul", "ananya",
    "leela", "dev", "kavya", "rohan", "meera",
]

# Supported output encodings: WAV container, or raw little-endian PCM
# (float32 or int16).
AudioFormat = Literal["wav", "pcm_f32le", "pcm_s16le"]
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class TTSRequest(BaseModel):
    """Validated request payload for speech synthesis."""

    text: str = Field(..., min_length=1, max_length=4096)  # text to synthesize
    voice: str = Field(default="sanjana")  # voice identifier (see Voice)
    language: Optional[str] = Field(default=None, description="BCP-47 language tag, e.g. 'en', 'hi'. None = auto.")
    temperature: float = Field(default=0.65, ge=0.0, le=1.0)  # sampling temperature
    repetition_penalty: float = Field(default=1.1, ge=1.0, le=2.0)  # discourages repeated tokens
    top_p: float = Field(default=0.95, ge=0.0, le=1.0)  # nucleus sampling cutoff
    max_tokens: int = Field(default=2000, ge=7, le=4096)  # generation length cap
    format: AudioFormat = Field(default="wav")  # output encoding
    sample_rate: int = Field(default=24000, ge=8000, le=48000, description="Output sample rate in Hz.")
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class TTSResponse(BaseModel):
    """Completed synthesis result, echoing the request's key parameters."""

    audio: bytes = Field(..., description="Raw audio bytes in the requested format.")
    format: AudioFormat  # encoding of the audio payload
    sample_rate: int  # sample rate of the audio in Hz
    duration_seconds: float  # length of the audio
    voice: str  # voice that was used
    text: str  # text that was synthesized
|
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: hyperneuronai
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Official Python SDK for HyperNeuron AI services
|
|
5
|
+
Author-email: HyperNeuron AI <support@hyperneuron.in>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://www.hyperneuronai.com
|
|
8
|
+
Project-URL: Repository, https://github.com/hyperneuronai/hyperneuronai-python
|
|
9
|
+
Keywords: ai,tts,voice,telephony,speech
|
|
10
|
+
Classifier: Development Status :: 3 - Alpha
|
|
11
|
+
Classifier: Intended Audience :: Developers
|
|
12
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
13
|
+
Classifier: Programming Language :: Python :: 3
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
18
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
19
|
+
Requires-Python: >=3.9
|
|
20
|
+
Description-Content-Type: text/markdown
|
|
21
|
+
Requires-Dist: httpx>=0.27.0
|
|
22
|
+
Requires-Dist: pydantic>=2.0.0
|
|
23
|
+
Provides-Extra: audio
|
|
24
|
+
Requires-Dist: numpy>=1.24.0; extra == "audio"
|
|
25
|
+
Requires-Dist: scipy>=1.11.0; extra == "audio"
|
|
26
|
+
Provides-Extra: dev
|
|
27
|
+
Requires-Dist: pytest>=8.0; extra == "dev"
|
|
28
|
+
Requires-Dist: pytest-asyncio>=0.23; extra == "dev"
|
|
29
|
+
Requires-Dist: respx>=0.21; extra == "dev"
|
|
30
|
+
|
|
31
|
+
# hyperneuronai
|
|
32
|
+
|
|
33
|
+
Official Python SDK for [HyperNeuron AI](https://www.hyperneuronai.com) — TTS streaming and AI telephony services.
|
|
34
|
+
|
|
35
|
+
## Install
|
|
36
|
+
|
|
37
|
+
```bash
|
|
38
|
+
pip install hyperneuronai
|
|
39
|
+
```
|
|
40
|
+
|
|
41
|
+
## Quick start
|
|
42
|
+
|
|
43
|
+
```python
|
|
44
|
+
import hyperneuronai
|
|
45
|
+
|
|
46
|
+
client = hyperneuronai.HyperNeuron(
|
|
47
|
+
api_key="hn_key_xxxx",
|
|
48
|
+
base_url="https://api.hyperneuronai.com",
|
|
49
|
+
)
|
|
50
|
+
|
|
51
|
+
# TTS — save to WAV
|
|
52
|
+
audio = client.tts.generate("Hello, world!", voice="sanjana")
|
|
53
|
+
with open("hello.wav", "wb") as f:
|
|
54
|
+
f.write(audio)
|
|
55
|
+
|
|
56
|
+
# TTS — stream in real time
|
|
57
|
+
for chunk in client.tts.stream("Hello, world!", voice="sanjana"):
|
|
58
|
+
your_speaker.write(chunk)
|
|
59
|
+
|
|
60
|
+
# One-way outbound call
|
|
61
|
+
call = client.telephony.outbound(
|
|
62
|
+
to="+919876543210",
|
|
63
|
+
text="Hi! Your order has shipped and arrives tomorrow.",
|
|
64
|
+
voice="sanjana",
|
|
65
|
+
)
|
|
66
|
+
print(call.call_uuid, call.state)
|
|
67
|
+
```
|
|
68
|
+
|
|
69
|
+
## Async
|
|
70
|
+
|
|
71
|
+
```python
|
|
72
|
+
import asyncio
|
|
73
|
+
import hyperneuronai
|
|
74
|
+
|
|
75
|
+
async def main():
|
|
76
|
+
async with hyperneuronai.AsyncHyperNeuron(api_key="hn_key_xxxx") as client:
|
|
77
|
+
audio = await client.tts.generate("Hello!")
|
|
78
|
+
call = await client.telephony.outbound(to="+919876543210", text="Hi there!")
|
|
79
|
+
|
|
80
|
+
asyncio.run(main())
|
|
81
|
+
```
|
|
82
|
+
|
|
83
|
+
## License
|
|
84
|
+
|
|
85
|
+
MIT
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
README.md
|
|
2
|
+
pyproject.toml
|
|
3
|
+
hyperneuronai/__init__.py
|
|
4
|
+
hyperneuronai/_client.py
|
|
5
|
+
hyperneuronai/_hyperneuron.py
|
|
6
|
+
hyperneuronai/exceptions.py
|
|
7
|
+
hyperneuronai.egg-info/PKG-INFO
|
|
8
|
+
hyperneuronai.egg-info/SOURCES.txt
|
|
9
|
+
hyperneuronai.egg-info/dependency_links.txt
|
|
10
|
+
hyperneuronai.egg-info/requires.txt
|
|
11
|
+
hyperneuronai.egg-info/top_level.txt
|
|
12
|
+
hyperneuronai/resources/__init__.py
|
|
13
|
+
hyperneuronai/resources/telephony.py
|
|
14
|
+
hyperneuronai/resources/tts.py
|
|
15
|
+
hyperneuronai/types/__init__.py
|
|
16
|
+
hyperneuronai/types/telephony.py
|
|
17
|
+
hyperneuronai/types/tts.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
hyperneuronai
|
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=68", "wheel"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "hyperneuronai"
|
|
7
|
+
version = "0.1.0"
|
|
8
|
+
description = "Official Python SDK for HyperNeuron AI services"
|
|
9
|
+
readme = "README.md"
|
|
10
|
+
requires-python = ">=3.9"
|
|
11
|
+
license = { text = "MIT" }
|
|
12
|
+
authors = [{ name = "HyperNeuron AI", email = "support@hyperneuron.in" }]
|
|
13
|
+
keywords = ["ai", "tts", "voice", "telephony", "speech"]
|
|
14
|
+
classifiers = [
|
|
15
|
+
"Development Status :: 3 - Alpha",
|
|
16
|
+
"Intended Audience :: Developers",
|
|
17
|
+
"License :: OSI Approved :: MIT License",
|
|
18
|
+
"Programming Language :: Python :: 3",
|
|
19
|
+
"Programming Language :: Python :: 3.9",
|
|
20
|
+
"Programming Language :: Python :: 3.10",
|
|
21
|
+
"Programming Language :: Python :: 3.11",
|
|
22
|
+
"Programming Language :: Python :: 3.12",
|
|
23
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
24
|
+
]
|
|
25
|
+
dependencies = [
|
|
26
|
+
"httpx>=0.27.0",
|
|
27
|
+
"pydantic>=2.0.0",
|
|
28
|
+
]
|
|
29
|
+
|
|
30
|
+
[project.optional-dependencies]
|
|
31
|
+
audio = ["numpy>=1.24.0", "scipy>=1.11.0"]
|
|
32
|
+
dev = ["pytest>=8.0", "pytest-asyncio>=0.23", "respx>=0.21"]
|
|
33
|
+
|
|
34
|
+
[project.urls]
|
|
35
|
+
Homepage = "https://www.hyperneuronai.com"
|
|
36
|
+
Repository = "https://github.com/hyperneuronai/hyperneuronai-python"
|
|
37
|
+
|
|
38
|
+
[tool.setuptools.packages.find]
|
|
39
|
+
where = ["."]
|
|
40
|
+
include = ["hyperneuronai*"]
|