@neuralbase/client 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +63 -0
- package/dist/index.d.mts +105 -0
- package/dist/index.d.ts +105 -0
- package/dist/index.js +233 -0
- package/dist/index.mjs +201 -0
- package/package.json +40 -0
package/README.md
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
# @neuralbase/client
|
|
2
|
+
|
|
3
|
+
Official client SDK for NeuralBase AI services.
|
|
4
|
+
|
|
5
|
+
## Installation
|
|
6
|
+
|
|
7
|
+
```bash
|
|
8
|
+
npm install @neuralbase/client
|
|
9
|
+
```
|
|
10
|
+
|
|
11
|
+
## Quick Start
|
|
12
|
+
|
|
13
|
+
```typescript
|
|
14
|
+
import { NeuralBase } from '@neuralbase/client';
|
|
15
|
+
|
|
16
|
+
const client = new NeuralBase({
|
|
17
|
+
baseURL: 'https://api.your-domain.com'
|
|
18
|
+
});
|
|
19
|
+
|
|
20
|
+
// Chat
|
|
21
|
+
const response = await client.chat.neural.ask({ question: 'Hello!' });
|
|
22
|
+
console.log(response.answer); // "Hello there!"
|
|
23
|
+
```
|
|
24
|
+
|
|
25
|
+
## API Reference
|
|
26
|
+
|
|
27
|
+
### `new NeuralBase(config)`
|
|
28
|
+
|
|
29
|
+
| Option | Type | Default | Description |
|
|
30
|
+
|---|---|---|---|
|
|
31
|
+
| `baseURL` | `string` | _Required_ | API endpoint |
|
|
32
|
+
| `timeout` | `number` | `30000` | Request timeout (ms) |
|
|
33
|
+
| `contextId` | `string` | `undefined` | Default RAG context ID |
|
|
34
|
+
|
|
35
|
+
### Methods
|
|
36
|
+
|
|
37
|
+
#### `client.chat.neural.ask(request)`
|
|
38
|
+
|
|
39
|
+
```typescript
|
|
40
|
+
interface ChatRequest {
|
|
41
|
+
question: string;
|
|
42
|
+
contextId?: string;
|
|
43
|
+
}
|
|
44
|
+
|
|
45
|
+
interface ChatResponse {
|
|
46
|
+
answer?: string; // HTML formatted
|
|
47
|
+
answerClean?: string; // Plain text
|
|
48
|
+
sources?: Source[]; // RAG citations
|
|
49
|
+
}
|
|
50
|
+
```
|
|
51
|
+
|
|
52
|
+
#### Voice
|
|
53
|
+
|
|
54
|
+
- `client.voice.stt.transcribe(audio: Blob | Buffer)`: Returns `{ text: string }`
|
|
55
|
+
- `client.voice.tts.generate(text: string, options: TTSOptions)`: Returns `ArrayBuffer` (WAV)
|
|
56
|
+
|
|
57
|
+
## Examples
|
|
58
|
+
|
|
59
|
+
See [examples/nextjs-sdk-test](../examples/nextjs-sdk-test) for a complete Next.js implementation including voice features.
|
|
60
|
+
|
|
61
|
+
## License
|
|
62
|
+
|
|
63
|
+
MIT
|
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
/** Constructor options for the NeuralBase client. */
interface NeuralBaseConfig {
  /** API endpoint prefix (e.g. "https://api.example.com"). */
  baseURL?: string;
  /** Default RAG context ID used when a chat call supplies none. */
  contextId?: string;
  /** Request timeout in milliseconds. */
  timeout?: number;
}
/** Input accepted by ChatModule.neural.ask(). */
interface NeuralChatOptions {
  /** The question to ask. */
  question: string;
  /** Per-call RAG context; overrides the client-level default. */
  contextId?: string;
}
/** Backend response for a neural chat question. */
interface ChatResponse {
  success?: boolean;
  /** Answer with HTML formatting. */
  answer?: string;
  /** Answer as plain text. */
  answerClean?: string;
  /** RAG citations supporting the answer. */
  sources?: {
    filename: string;
    score: number;
  }[];
}
/** Tuning options for text-to-speech generation. */
interface TTSOptions {
  voice?: string;
  language?: string;
}

declare class ChatModule {
  private client;
  constructor(client: NeuralBase);
  /**
   * Neural Chat (RAG)
   * Answers questions based on a specific knowledge context.
   * `ask` accepts an options object or a bare question string with an
   * optional context-ID override as the second argument.
   */
  get neural(): {
    ask: (options: NeuralChatOptions | string, contextIdOverride?: string) => Promise<ChatResponse>;
  };
}

declare class VoiceModule {
  private client;
  constructor(client: NeuralBase);
  /**
   * Speech-to-Text
   */
  get stt(): {
    /**
     * Transcribe raw PCM audio data/Blob
     */
    transcribe: (audioData: Buffer | Blob | ArrayBuffer) => Promise<{
      text: string;
    }>;
  };
  /**
   * Text-to-Speech
   */
  get tts(): {
    /**
     * Generate speech audio from text
     * Returns an ArrayBuffer of the WAV file
     */
    generate: (text: string, options?: TTSOptions) => Promise<ArrayBuffer>;
  };
}

/** Entry point of the SDK: owns configuration and the low-level HTTP helpers. */
declare class NeuralBase {
  readonly baseURL: string;
  /** Timeout applied to every request, in milliseconds. */
  readonly timeout: number;
  /** Default RAG context forwarded to chat calls. */
  readonly contextId?: string;
  readonly chat: ChatModule;
  readonly voice: VoiceModule;
  constructor(config?: NeuralBaseConfig);
  /**
   * Helper for GET requests
   */
  get<T>(path: string, options?: any): Promise<T>;
  /**
   * Helper for POST requests
   */
  post<T>(path: string, body: any, options?: any): Promise<T>;
  private getHeaders;
}

/** Browser microphone recorder built on the MediaRecorder API. */
declare class VoiceRecorder {
  private mediaRecorder;
  private audioChunks;
  private stream;
  constructor();
  start(): Promise<void>;
  /** Resolves with the recorded audio Blob. */
  stop(): Promise<Blob>;
  /** Releases the microphone and clears buffered chunks. */
  cleanup(): void;
  isRecording(): boolean;
}

/** Error thrown by the HTTP layer; carries status code and response payload. */
declare class NeuralBaseError extends Error {
  status?: number | undefined;
  data?: any | undefined;
  constructor(message: string, status?: number | undefined, data?: any | undefined);
}
/** Low-level options accepted by the request() helper. */
interface RequestOptions {
  method?: string;
  headers?: Record<string, string>;
  body?: any;
  timeout?: number;
  signal?: AbortSignal;
}
/** Low-level fetch wrapper used by the client; exported for advanced use. */
declare function request<T>(url: string, options?: RequestOptions): Promise<T>;

export { ChatModule, type ChatResponse, NeuralBase, type NeuralBaseConfig, NeuralBaseError, type NeuralChatOptions, type RequestOptions, type TTSOptions, VoiceModule, VoiceRecorder, request };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
/** Constructor options for the NeuralBase client. */
interface NeuralBaseConfig {
  /** API endpoint prefix (e.g. "https://api.example.com"). */
  baseURL?: string;
  /** Default RAG context ID used when a chat call supplies none. */
  contextId?: string;
  /** Request timeout in milliseconds. */
  timeout?: number;
}
/** Input accepted by ChatModule.neural.ask(). */
interface NeuralChatOptions {
  /** The question to ask. */
  question: string;
  /** Per-call RAG context; overrides the client-level default. */
  contextId?: string;
}
/** Backend response for a neural chat question. */
interface ChatResponse {
  success?: boolean;
  /** Answer with HTML formatting. */
  answer?: string;
  /** Answer as plain text. */
  answerClean?: string;
  /** RAG citations supporting the answer. */
  sources?: {
    filename: string;
    score: number;
  }[];
}
/** Tuning options for text-to-speech generation. */
interface TTSOptions {
  voice?: string;
  language?: string;
}

declare class ChatModule {
  private client;
  constructor(client: NeuralBase);
  /**
   * Neural Chat (RAG)
   * Answers questions based on a specific knowledge context.
   * `ask` accepts an options object or a bare question string with an
   * optional context-ID override as the second argument.
   */
  get neural(): {
    ask: (options: NeuralChatOptions | string, contextIdOverride?: string) => Promise<ChatResponse>;
  };
}

declare class VoiceModule {
  private client;
  constructor(client: NeuralBase);
  /**
   * Speech-to-Text
   */
  get stt(): {
    /**
     * Transcribe raw PCM audio data/Blob
     */
    transcribe: (audioData: Buffer | Blob | ArrayBuffer) => Promise<{
      text: string;
    }>;
  };
  /**
   * Text-to-Speech
   */
  get tts(): {
    /**
     * Generate speech audio from text
     * Returns an ArrayBuffer of the WAV file
     */
    generate: (text: string, options?: TTSOptions) => Promise<ArrayBuffer>;
  };
}

/** Entry point of the SDK: owns configuration and the low-level HTTP helpers. */
declare class NeuralBase {
  readonly baseURL: string;
  /** Timeout applied to every request, in milliseconds. */
  readonly timeout: number;
  /** Default RAG context forwarded to chat calls. */
  readonly contextId?: string;
  readonly chat: ChatModule;
  readonly voice: VoiceModule;
  constructor(config?: NeuralBaseConfig);
  /**
   * Helper for GET requests
   */
  get<T>(path: string, options?: any): Promise<T>;
  /**
   * Helper for POST requests
   */
  post<T>(path: string, body: any, options?: any): Promise<T>;
  private getHeaders;
}

/** Browser microphone recorder built on the MediaRecorder API. */
declare class VoiceRecorder {
  private mediaRecorder;
  private audioChunks;
  private stream;
  constructor();
  start(): Promise<void>;
  /** Resolves with the recorded audio Blob. */
  stop(): Promise<Blob>;
  /** Releases the microphone and clears buffered chunks. */
  cleanup(): void;
  isRecording(): boolean;
}

/** Error thrown by the HTTP layer; carries status code and response payload. */
declare class NeuralBaseError extends Error {
  status?: number | undefined;
  data?: any | undefined;
  constructor(message: string, status?: number | undefined, data?: any | undefined);
}
/** Low-level options accepted by the request() helper. */
interface RequestOptions {
  method?: string;
  headers?: Record<string, string>;
  body?: any;
  timeout?: number;
  signal?: AbortSignal;
}
/** Low-level fetch wrapper used by the client; exported for advanced use. */
declare function request<T>(url: string, options?: RequestOptions): Promise<T>;

export { ChatModule, type ChatResponse, NeuralBase, type NeuralBaseConfig, NeuralBaseError, type NeuralChatOptions, type RequestOptions, type TTSOptions, VoiceModule, VoiceRecorder, request };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,233 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/index.ts
|
|
21
|
+
var index_exports = {};
|
|
22
|
+
__export(index_exports, {
|
|
23
|
+
ChatModule: () => ChatModule,
|
|
24
|
+
NeuralBase: () => NeuralBase,
|
|
25
|
+
NeuralBaseError: () => NeuralBaseError,
|
|
26
|
+
VoiceModule: () => VoiceModule,
|
|
27
|
+
VoiceRecorder: () => VoiceRecorder,
|
|
28
|
+
request: () => request
|
|
29
|
+
});
|
|
30
|
+
module.exports = __toCommonJS(index_exports);
|
|
31
|
+
|
|
32
|
+
// src/core/request.ts
|
|
33
|
+
var NeuralBaseError = class extends Error {
|
|
34
|
+
constructor(message, status, data) {
|
|
35
|
+
super(message);
|
|
36
|
+
this.status = status;
|
|
37
|
+
this.data = data;
|
|
38
|
+
this.name = "NeuralBaseError";
|
|
39
|
+
}
|
|
40
|
+
};
|
|
41
|
+
async function request(url, options = {}) {
|
|
42
|
+
const { method = "GET", headers = {}, body, timeout = 3e4 } = options;
|
|
43
|
+
const controller = new AbortController();
|
|
44
|
+
const id = setTimeout(() => controller.abort(), timeout);
|
|
45
|
+
let requestBody = body;
|
|
46
|
+
if (body && typeof body === "object" && !(body instanceof FormData) && !(body instanceof Blob) && !(body instanceof ArrayBuffer) && !ArrayBuffer.isView(body)) {
|
|
47
|
+
requestBody = JSON.stringify(body);
|
|
48
|
+
}
|
|
49
|
+
try {
|
|
50
|
+
const response = await fetch(url, {
|
|
51
|
+
method,
|
|
52
|
+
headers,
|
|
53
|
+
body: requestBody,
|
|
54
|
+
signal: options.signal || controller.signal
|
|
55
|
+
});
|
|
56
|
+
clearTimeout(id);
|
|
57
|
+
if (!response.ok) {
|
|
58
|
+
let errorData;
|
|
59
|
+
try {
|
|
60
|
+
errorData = await response.json();
|
|
61
|
+
} catch {
|
|
62
|
+
errorData = { error: response.statusText };
|
|
63
|
+
}
|
|
64
|
+
const message = errorData.error || errorData.message || "Request failed";
|
|
65
|
+
throw new NeuralBaseError(message, response.status, errorData);
|
|
66
|
+
}
|
|
67
|
+
const contentType = response.headers.get("Content-Type") || "";
|
|
68
|
+
if (contentType.includes("audio/") || contentType.includes("application/octet-stream")) {
|
|
69
|
+
return await response.arrayBuffer();
|
|
70
|
+
}
|
|
71
|
+
return await response.json();
|
|
72
|
+
} catch (error) {
|
|
73
|
+
clearTimeout(id);
|
|
74
|
+
if (error instanceof NeuralBaseError) throw error;
|
|
75
|
+
if (error instanceof Error && error.name === "AbortError") {
|
|
76
|
+
throw new NeuralBaseError("Request timeout after " + timeout + "ms", 408);
|
|
77
|
+
}
|
|
78
|
+
throw new NeuralBaseError(error instanceof Error ? error.message : "Unknown error", 500);
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
// src/modules/chat.ts
|
|
83
|
+
var ChatModule = class {
|
|
84
|
+
constructor(client) {
|
|
85
|
+
this.client = client;
|
|
86
|
+
}
|
|
87
|
+
/**
|
|
88
|
+
* Neural Chat (RAG)
|
|
89
|
+
* Answers questions based on a specific knowledge context.
|
|
90
|
+
*/
|
|
91
|
+
get neural() {
|
|
92
|
+
return {
|
|
93
|
+
ask: (options, contextIdOverride) => {
|
|
94
|
+
const question = typeof options === "string" ? options : options.question;
|
|
95
|
+
const contextId = (typeof options === "object" ? options.contextId : contextIdOverride) || this.client.contextId;
|
|
96
|
+
return this.client.post("/chat/context", { question, contextId });
|
|
97
|
+
}
|
|
98
|
+
};
|
|
99
|
+
}
|
|
100
|
+
};
|
|
101
|
+
|
|
102
|
+
// src/modules/voice.ts
|
|
103
|
+
var VoiceModule = class {
|
|
104
|
+
constructor(client) {
|
|
105
|
+
this.client = client;
|
|
106
|
+
}
|
|
107
|
+
/**
|
|
108
|
+
* Speech-to-Text
|
|
109
|
+
*/
|
|
110
|
+
get stt() {
|
|
111
|
+
return {
|
|
112
|
+
/**
|
|
113
|
+
* Transcribe raw PCM audio data/Blob
|
|
114
|
+
*/
|
|
115
|
+
transcribe: (audioData) => {
|
|
116
|
+
return this.client.post("/stt", audioData, {
|
|
117
|
+
"Content-Type": "application/octet-stream"
|
|
118
|
+
});
|
|
119
|
+
}
|
|
120
|
+
};
|
|
121
|
+
}
|
|
122
|
+
/**
|
|
123
|
+
* Text-to-Speech
|
|
124
|
+
*/
|
|
125
|
+
get tts() {
|
|
126
|
+
return {
|
|
127
|
+
/**
|
|
128
|
+
* Generate speech audio from text
|
|
129
|
+
* Returns an ArrayBuffer of the WAV file
|
|
130
|
+
*/
|
|
131
|
+
generate: (text, options = {}) => {
|
|
132
|
+
return this.client.post("/tts", { text, ...options });
|
|
133
|
+
}
|
|
134
|
+
};
|
|
135
|
+
}
|
|
136
|
+
};
|
|
137
|
+
|
|
138
|
+
// src/client.ts
|
|
139
|
+
// src/client.ts
var NeuralBase = class {
  /**
   * NeuralBase API client.
   * @param {object} [config] - { baseURL = "/api", timeout = 30000, contextId }
   */
  constructor(config = {}) {
    // Strip a single trailing slash so path concatenation stays predictable.
    this.baseURL = (config.baseURL || "/api").replace(/\/$/, "");
    // FIX: use ?? instead of || so an explicit timeout of 0 is honored rather
    // than silently replaced by the 30s default.
    this.timeout = config.timeout ?? 3e4;
    this.contextId = config.contextId;
    this.chat = new ChatModule(this);
    this.voice = new VoiceModule(this);
  }
  /**
   * Helper for GET requests
   * @param {string} path - appended verbatim to baseURL
   * @param {object} [options] - extra request options merged over the defaults
   */
  async get(path, options = {}) {
    return request(`${this.baseURL}${path}`, {
      method: "GET",
      headers: this.getHeaders(),
      timeout: this.timeout,
      ...options
    });
  }
  /**
   * Helper for POST requests
   * NOTE: `options` is merged into the request HEADERS (existing callers pass
   * header maps such as { "Content-Type": "application/octet-stream" }).
   * @param {string} path - appended verbatim to baseURL
   * @param {*} body - plain objects are JSON-encoded by request()
   * @param {object} [options] - extra header entries
   */
  async post(path, body, options = {}) {
    // FIX: plain-object bodies are JSON.stringify'd by request(), but no
    // Content-Type was ever sent (getHeaders() returns {}), so servers could
    // not reliably parse the JSON. Default it here; getHeaders() and the
    // options spread come last so callers can still override it.
    const isJsonBody = body && typeof body === "object" && !(body instanceof FormData) && !(body instanceof Blob) && !(body instanceof ArrayBuffer) && !ArrayBuffer.isView(body);
    return request(`${this.baseURL}${path}`, {
      method: "POST",
      headers: {
        ...(isJsonBody ? { "Content-Type": "application/json" } : {}),
        ...this.getHeaders(),
        ...options
      },
      body,
      timeout: this.timeout
    });
  }
  // Hook point for subclasses/integrations to inject auth or custom headers.
  getHeaders() {
    return {};
  }
};
|
|
174
|
+
|
|
175
|
+
// src/modules/recorder.ts
|
|
176
|
+
// src/modules/recorder.ts
var VoiceRecorder = class {
  /**
   * Browser microphone recorder built on the MediaRecorder API.
   * Buffers encoded chunks and returns them as one Blob on stop().
   */
  constructor() {
    this.mediaRecorder = null;
    this.audioChunks = [];
    this.stream = null;
  }
  /**
   * Request microphone access and begin recording.
   * @throws {Error} when permission is denied or no device is available
   */
  async start() {
    try {
      this.stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // FIX: the hard-coded "audio/webm;codecs=opus" makes the MediaRecorder
      // constructor throw on browsers without webm support (notably Safari).
      // Probe for a supported type and fall back to the browser default.
      const preferred = ["audio/webm;codecs=opus", "audio/webm", "audio/mp4"];
      const mimeType = typeof MediaRecorder.isTypeSupported === "function" ? preferred.find((t) => MediaRecorder.isTypeSupported(t)) : preferred[0];
      this.mediaRecorder = new MediaRecorder(this.stream, mimeType ? { mimeType } : void 0);
      this.audioChunks = [];
      this.mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          this.audioChunks.push(event.data);
        }
      };
      this.mediaRecorder.start();
    } catch (error) {
      console.error("Error accessing microphone:", error);
      throw new Error("Could not access microphone. Please check permissions.");
    }
  }
  /**
   * Stop recording and resolve with the captured audio Blob.
   * Rejects if the recorder was never started or is already inactive.
   */
  stop() {
    return new Promise((resolve, reject) => {
      if (!this.mediaRecorder || this.mediaRecorder.state === "inactive") {
        return reject(new Error("Recorder is not active"));
      }
      this.mediaRecorder.onstop = () => {
        // FIX: label the Blob with the actual recording MIME type instead of
        // unconditionally assuming webm.
        const type = this.mediaRecorder && this.mediaRecorder.mimeType || "audio/webm";
        const audioBlob = new Blob(this.audioChunks, { type });
        this.cleanup();
        resolve(audioBlob);
      };
      this.mediaRecorder.stop();
    });
  }
  /**
   * Release the microphone tracks and reset all internal state.
   */
  cleanup() {
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
      this.stream = null;
    }
    this.mediaRecorder = null;
    this.audioChunks = [];
  }
  /**
   * @returns {boolean} true while a recording is in progress
   */
  isRecording() {
    return !!this.mediaRecorder && this.mediaRecorder.state === "recording";
  }
};
|
|
225
|
+
// Annotate the CommonJS export names for ESM import in node:
// (dead code: the `0 &&` guard means it never executes; it exists only so
// Node's cjs-module-lexer can statically discover the named exports)
0 && (module.exports = {
  ChatModule,
  NeuralBase,
  NeuralBaseError,
  VoiceModule,
  VoiceRecorder,
  request
});
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
1
|
+
// src/core/request.ts
|
|
2
|
+
var NeuralBaseError = class extends Error {
|
|
3
|
+
constructor(message, status, data) {
|
|
4
|
+
super(message);
|
|
5
|
+
this.status = status;
|
|
6
|
+
this.data = data;
|
|
7
|
+
this.name = "NeuralBaseError";
|
|
8
|
+
}
|
|
9
|
+
};
|
|
10
|
+
async function request(url, options = {}) {
|
|
11
|
+
const { method = "GET", headers = {}, body, timeout = 3e4 } = options;
|
|
12
|
+
const controller = new AbortController();
|
|
13
|
+
const id = setTimeout(() => controller.abort(), timeout);
|
|
14
|
+
let requestBody = body;
|
|
15
|
+
if (body && typeof body === "object" && !(body instanceof FormData) && !(body instanceof Blob) && !(body instanceof ArrayBuffer) && !ArrayBuffer.isView(body)) {
|
|
16
|
+
requestBody = JSON.stringify(body);
|
|
17
|
+
}
|
|
18
|
+
try {
|
|
19
|
+
const response = await fetch(url, {
|
|
20
|
+
method,
|
|
21
|
+
headers,
|
|
22
|
+
body: requestBody,
|
|
23
|
+
signal: options.signal || controller.signal
|
|
24
|
+
});
|
|
25
|
+
clearTimeout(id);
|
|
26
|
+
if (!response.ok) {
|
|
27
|
+
let errorData;
|
|
28
|
+
try {
|
|
29
|
+
errorData = await response.json();
|
|
30
|
+
} catch {
|
|
31
|
+
errorData = { error: response.statusText };
|
|
32
|
+
}
|
|
33
|
+
const message = errorData.error || errorData.message || "Request failed";
|
|
34
|
+
throw new NeuralBaseError(message, response.status, errorData);
|
|
35
|
+
}
|
|
36
|
+
const contentType = response.headers.get("Content-Type") || "";
|
|
37
|
+
if (contentType.includes("audio/") || contentType.includes("application/octet-stream")) {
|
|
38
|
+
return await response.arrayBuffer();
|
|
39
|
+
}
|
|
40
|
+
return await response.json();
|
|
41
|
+
} catch (error) {
|
|
42
|
+
clearTimeout(id);
|
|
43
|
+
if (error instanceof NeuralBaseError) throw error;
|
|
44
|
+
if (error instanceof Error && error.name === "AbortError") {
|
|
45
|
+
throw new NeuralBaseError("Request timeout after " + timeout + "ms", 408);
|
|
46
|
+
}
|
|
47
|
+
throw new NeuralBaseError(error instanceof Error ? error.message : "Unknown error", 500);
|
|
48
|
+
}
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
// src/modules/chat.ts
|
|
52
|
+
var ChatModule = class {
|
|
53
|
+
constructor(client) {
|
|
54
|
+
this.client = client;
|
|
55
|
+
}
|
|
56
|
+
/**
|
|
57
|
+
* Neural Chat (RAG)
|
|
58
|
+
* Answers questions based on a specific knowledge context.
|
|
59
|
+
*/
|
|
60
|
+
get neural() {
|
|
61
|
+
return {
|
|
62
|
+
ask: (options, contextIdOverride) => {
|
|
63
|
+
const question = typeof options === "string" ? options : options.question;
|
|
64
|
+
const contextId = (typeof options === "object" ? options.contextId : contextIdOverride) || this.client.contextId;
|
|
65
|
+
return this.client.post("/chat/context", { question, contextId });
|
|
66
|
+
}
|
|
67
|
+
};
|
|
68
|
+
}
|
|
69
|
+
};
|
|
70
|
+
|
|
71
|
+
// src/modules/voice.ts
|
|
72
|
+
var VoiceModule = class {
|
|
73
|
+
constructor(client) {
|
|
74
|
+
this.client = client;
|
|
75
|
+
}
|
|
76
|
+
/**
|
|
77
|
+
* Speech-to-Text
|
|
78
|
+
*/
|
|
79
|
+
get stt() {
|
|
80
|
+
return {
|
|
81
|
+
/**
|
|
82
|
+
* Transcribe raw PCM audio data/Blob
|
|
83
|
+
*/
|
|
84
|
+
transcribe: (audioData) => {
|
|
85
|
+
return this.client.post("/stt", audioData, {
|
|
86
|
+
"Content-Type": "application/octet-stream"
|
|
87
|
+
});
|
|
88
|
+
}
|
|
89
|
+
};
|
|
90
|
+
}
|
|
91
|
+
/**
|
|
92
|
+
* Text-to-Speech
|
|
93
|
+
*/
|
|
94
|
+
get tts() {
|
|
95
|
+
return {
|
|
96
|
+
/**
|
|
97
|
+
* Generate speech audio from text
|
|
98
|
+
* Returns an ArrayBuffer of the WAV file
|
|
99
|
+
*/
|
|
100
|
+
generate: (text, options = {}) => {
|
|
101
|
+
return this.client.post("/tts", { text, ...options });
|
|
102
|
+
}
|
|
103
|
+
};
|
|
104
|
+
}
|
|
105
|
+
};
|
|
106
|
+
|
|
107
|
+
// src/client.ts
|
|
108
|
+
// src/client.ts
var NeuralBase = class {
  /**
   * NeuralBase API client.
   * @param {object} [config] - { baseURL = "/api", timeout = 30000, contextId }
   */
  constructor(config = {}) {
    // Strip a single trailing slash so path concatenation stays predictable.
    this.baseURL = (config.baseURL || "/api").replace(/\/$/, "");
    // FIX: use ?? instead of || so an explicit timeout of 0 is honored rather
    // than silently replaced by the 30s default.
    this.timeout = config.timeout ?? 3e4;
    this.contextId = config.contextId;
    this.chat = new ChatModule(this);
    this.voice = new VoiceModule(this);
  }
  /**
   * Helper for GET requests
   * @param {string} path - appended verbatim to baseURL
   * @param {object} [options] - extra request options merged over the defaults
   */
  async get(path, options = {}) {
    return request(`${this.baseURL}${path}`, {
      method: "GET",
      headers: this.getHeaders(),
      timeout: this.timeout,
      ...options
    });
  }
  /**
   * Helper for POST requests
   * NOTE: `options` is merged into the request HEADERS (existing callers pass
   * header maps such as { "Content-Type": "application/octet-stream" }).
   * @param {string} path - appended verbatim to baseURL
   * @param {*} body - plain objects are JSON-encoded by request()
   * @param {object} [options] - extra header entries
   */
  async post(path, body, options = {}) {
    // FIX: plain-object bodies are JSON.stringify'd by request(), but no
    // Content-Type was ever sent (getHeaders() returns {}), so servers could
    // not reliably parse the JSON. Default it here; getHeaders() and the
    // options spread come last so callers can still override it.
    const isJsonBody = body && typeof body === "object" && !(body instanceof FormData) && !(body instanceof Blob) && !(body instanceof ArrayBuffer) && !ArrayBuffer.isView(body);
    return request(`${this.baseURL}${path}`, {
      method: "POST",
      headers: {
        ...(isJsonBody ? { "Content-Type": "application/json" } : {}),
        ...this.getHeaders(),
        ...options
      },
      body,
      timeout: this.timeout
    });
  }
  // Hook point for subclasses/integrations to inject auth or custom headers.
  getHeaders() {
    return {};
  }
};
|
|
143
|
+
|
|
144
|
+
// src/modules/recorder.ts
|
|
145
|
+
// src/modules/recorder.ts
var VoiceRecorder = class {
  /**
   * Browser microphone recorder built on the MediaRecorder API.
   * Buffers encoded chunks and returns them as one Blob on stop().
   */
  constructor() {
    this.mediaRecorder = null;
    this.audioChunks = [];
    this.stream = null;
  }
  /**
   * Request microphone access and begin recording.
   * @throws {Error} when permission is denied or no device is available
   */
  async start() {
    try {
      this.stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      // FIX: the hard-coded "audio/webm;codecs=opus" makes the MediaRecorder
      // constructor throw on browsers without webm support (notably Safari).
      // Probe for a supported type and fall back to the browser default.
      const preferred = ["audio/webm;codecs=opus", "audio/webm", "audio/mp4"];
      const mimeType = typeof MediaRecorder.isTypeSupported === "function" ? preferred.find((t) => MediaRecorder.isTypeSupported(t)) : preferred[0];
      this.mediaRecorder = new MediaRecorder(this.stream, mimeType ? { mimeType } : void 0);
      this.audioChunks = [];
      this.mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          this.audioChunks.push(event.data);
        }
      };
      this.mediaRecorder.start();
    } catch (error) {
      console.error("Error accessing microphone:", error);
      throw new Error("Could not access microphone. Please check permissions.");
    }
  }
  /**
   * Stop recording and resolve with the captured audio Blob.
   * Rejects if the recorder was never started or is already inactive.
   */
  stop() {
    return new Promise((resolve, reject) => {
      if (!this.mediaRecorder || this.mediaRecorder.state === "inactive") {
        return reject(new Error("Recorder is not active"));
      }
      this.mediaRecorder.onstop = () => {
        // FIX: label the Blob with the actual recording MIME type instead of
        // unconditionally assuming webm.
        const type = this.mediaRecorder && this.mediaRecorder.mimeType || "audio/webm";
        const audioBlob = new Blob(this.audioChunks, { type });
        this.cleanup();
        resolve(audioBlob);
      };
      this.mediaRecorder.stop();
    });
  }
  /**
   * Release the microphone tracks and reset all internal state.
   */
  cleanup() {
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
      this.stream = null;
    }
    this.mediaRecorder = null;
    this.audioChunks = [];
  }
  /**
   * @returns {boolean} true while a recording is in progress
   */
  isRecording() {
    return !!this.mediaRecorder && this.mediaRecorder.state === "recording";
  }
};
|
|
194
|
+
export {
|
|
195
|
+
ChatModule,
|
|
196
|
+
NeuralBase,
|
|
197
|
+
NeuralBaseError,
|
|
198
|
+
VoiceModule,
|
|
199
|
+
VoiceRecorder,
|
|
200
|
+
request
|
|
201
|
+
};
|
package/package.json
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@neuralbase/client",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Simple, secure frontend chat SDK for NeuralBase - just send questions, get answers (no API keys required)",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"module": "dist/index.mjs",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"import": "./dist/index.mjs",
|
|
12
|
+
"require": "./dist/index.js"
|
|
13
|
+
}
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"dist",
|
|
17
|
+
"README.md"
|
|
18
|
+
],
|
|
19
|
+
"scripts": {
|
|
20
|
+
"build": "tsup src/index.ts --format cjs,esm --dts --clean",
|
|
21
|
+
"dev": "tsup src/index.ts --format cjs,esm --dts --watch"
|
|
22
|
+
},
|
|
23
|
+
"keywords": [
|
|
24
|
+
"neuralbase",
|
|
25
|
+
"chatbot",
|
|
26
|
+
"frontend",
|
|
27
|
+
"client",
|
|
28
|
+
"browser",
|
|
29
|
+
"react",
|
|
30
|
+
"vue",
|
|
31
|
+
"typescript"
|
|
32
|
+
],
|
|
33
|
+
"author": "NeuralBase",
|
|
34
|
+
"license": "MIT",
|
|
35
|
+
"devDependencies": {
|
|
36
|
+
"@types/node": "^20.11.0",
|
|
37
|
+
"tsup": "^8.0.1",
|
|
38
|
+
"typescript": "^5.3.3"
|
|
39
|
+
}
|
|
40
|
+
}
|