@voquill/voice-ai 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/aldea.utils.d.ts +16 -0
- package/dist/aldea.utils.d.ts.map +1 -0
- package/dist/aldea.utils.js +42 -0
- package/dist/assemblyai.utils.d.ts +6 -0
- package/dist/assemblyai.utils.d.ts.map +1 -0
- package/dist/assemblyai.utils.js +24 -0
- package/dist/azure-openai.utils.d.ts +29 -0
- package/dist/azure-openai.utils.d.ts.map +1 -0
- package/dist/azure-openai.utils.js +67 -0
- package/dist/azure.utils.d.ts +30 -0
- package/dist/azure.utils.d.ts.map +1 -0
- package/dist/azure.utils.js +253 -0
- package/dist/claude.utils.d.ts +26 -0
- package/dist/claude.utils.d.ts.map +1 -0
- package/dist/claude.utils.js +229 -0
- package/dist/deepgram.utils.d.ts +5 -0
- package/dist/deepgram.utils.d.ts.map +1 -0
- package/dist/deepgram.utils.js +25 -0
- package/dist/deepseek.utils.d.ts +26 -0
- package/dist/deepseek.utils.d.ts.map +1 -0
- package/dist/deepseek.utils.js +102 -0
- package/dist/elevenlabs.utils.d.ts +6 -0
- package/dist/elevenlabs.utils.d.ts.map +1 -0
- package/dist/elevenlabs.utils.js +29 -0
- package/dist/gemini.utils.d.ts +41 -0
- package/dist/gemini.utils.d.ts.map +1 -0
- package/dist/gemini.utils.js +271 -0
- package/dist/groq.utils.d.ts +42 -0
- package/dist/groq.utils.d.ts.map +1 -0
- package/dist/groq.utils.js +141 -0
- package/dist/index.d.ts +14 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +13 -0
- package/dist/openai.utils.d.ts +55 -0
- package/dist/openai.utils.d.ts.map +1 -0
- package/dist/openai.utils.js +275 -0
- package/dist/openrouter.utils.d.ts +69 -0
- package/dist/openrouter.utils.d.ts.map +1 -0
- package/dist/openrouter.utils.js +148 -0
- package/dist/speaches.utils.d.ts +18 -0
- package/dist/speaches.utils.d.ts.map +1 -0
- package/dist/speaches.utils.js +38 -0
- package/package.json +35 -0
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
import { countWords, retry } from "@voquill/utilities";
|
|
2
|
+
import OpenAI, { toFile } from "openai";
|
|
3
|
+
// Known OpenAI chat-completion model ids offered for text generation.
export const OPENAI_GENERATE_TEXT_MODELS = [
    "gpt-4o",
    "gpt-4o-mini",
    "gpt-4-turbo",
    "gpt-3.5-turbo",
    "gpt-5.2",
    "gpt-5.3",
    "gpt-5.4",
];
// Model ids accepted by the audio transcription endpoint.
export const OPENAI_TRANSCRIPTION_MODELS = ["whisper-1"];
|
|
13
|
+
/**
 * Normalize an OpenAI message `content` value into a plain string.
 * Accepts a nullish/empty value (-> ""), a bare string (returned as-is),
 * or an array of content parts, from which only `text` parts are kept.
 * The concatenated result is trimmed.
 */
const contentToString = (content) => {
    if (!content) {
        return "";
    }
    if (typeof content === "string") {
        return content;
    }
    // Multi-part content: collect the text of every "text" part,
    // ignoring image/audio parts entirely.
    const pieces = [];
    for (const part of content) {
        if (part.type === "text") {
            pieces.push(part.text ?? "");
        }
    }
    return pieces.join("").trim();
};
|
|
30
|
+
/**
 * Build an OpenAI SDK client.
 *
 * `dangerouslyAllowBrowser` is required because this runs inside a
 * desktop Tauri app rather than a real web browser; the app encrypts
 * API keys locally, so enabling it here is safe.
 *
 * @param apiKey      API key; surrounding whitespace is stripped.
 * @param baseUrl     Optional base URL for OpenAI-compatible servers.
 * @param customFetch Optional fetch implementation for the SDK.
 */
const createClient = (apiKey, baseUrl, customFetch) =>
    new OpenAI({
        apiKey: apiKey.trim(),
        baseURL: baseUrl,
        dangerouslyAllowBrowser: true,
        fetch: customFetch,
    });
|
|
41
|
+
/**
 * Transcribe an audio blob via OpenAI's transcription API, retrying
 * up to 3 times on failure.
 *
 * @param apiKey   OpenAI API key.
 * @param model    Transcription model id; defaults to "whisper-1".
 * @param blob     Raw audio data.
 * @param ext      Audio file extension, used to name the upload.
 * @param prompt   Optional transcription prompt/hint.
 * @param language Optional BCP-47/ISO language code; defaults to "en".
 * @returns {Promise<{text: string, wordsUsed: number}>}
 * @throws Error when the API returns no transcript text.
 */
export const openaiTranscribeAudio = async ({ apiKey, model = "whisper-1", blob, ext, prompt, language, }) => {
    const transcribeOnce = async () => {
        const client = createClient(apiKey);
        const file = await toFile(blob, `audio.${ext}`);
        const { text } = await client.audio.transcriptions.create({
            file,
            model,
            prompt,
            language: language ?? "en", // default to English when unspecified
        });
        if (!text) {
            throw new Error("Transcription failed");
        }
        return { text, wordsUsed: countWords(text) };
    };
    return retry({ retries: 3, fn: transcribeOnce });
};
|
|
60
|
+
/**
 * Generate a single (non-streaming) text completion via OpenAI's chat
 * completions API, retrying up to 3 times on failure.
 *
 * @param apiKey       OpenAI API key.
 * @param baseUrl      Optional base URL override (OpenAI-compatible servers).
 * @param model        Chat model id; defaults to "gpt-4o-mini".
 * @param system       Optional system prompt, sent as a system message.
 * @param prompt       User prompt text.
 * @param imageUrls    Optional image URLs, attached as image_url parts.
 * @param jsonResponse Optional JSON-schema spec; when set, forces a strict
 *                     `json_schema` response format.
 * @param customFetch  Optional fetch implementation for the SDK.
 * @returns {Promise<{text: string, tokensUsed: number}>}
 * @throws Error when no choices are returned or the content is empty.
 */
export const openaiGenerateTextResponse = async ({ apiKey, baseUrl, model = "gpt-4o-mini", system, prompt, imageUrls = [], jsonResponse, customFetch, }) => {
    return retry({
        retries: 3,
        fn: async () => {
            const client = createClient(apiKey, baseUrl, customFetch);
            const messages = [];
            if (system) {
                messages.push({ role: "system", content: system });
            }
            // The user message is a multi-part content array: images first,
            // then the text prompt.
            const userParts = [];
            for (const url of imageUrls) {
                userParts.push({
                    type: "image_url",
                    image_url: { url },
                });
            }
            userParts.push({ type: "text", text: prompt });
            messages.push({ role: "user", content: userParts });
            const response = await client.chat.completions.create({
                messages,
                model,
                temperature: 1,
                max_completion_tokens: 1024,
                top_p: 1,
                // Strict JSON-schema output is requested only when the caller
                // supplied a schema; otherwise the format field is omitted.
                response_format: jsonResponse
                    ? {
                        type: "json_schema",
                        json_schema: {
                            name: jsonResponse.name,
                            description: jsonResponse.description,
                            schema: jsonResponse.schema,
                            strict: true,
                        },
                    }
                    : undefined,
            });
            console.log("openai llm usage:", response.usage);
            if (!response.choices || response.choices.length === 0) {
                throw new Error("No response from OpenAI");
            }
            const result = response.choices[0].message.content;
            if (!result) {
                throw new Error("Content is empty");
            }
            // Content may be a string or an array of parts; normalize it.
            const content = contentToString(result);
            return {
                text: content,
                // Fall back to a word count when the API omits usage data.
                tokensUsed: response.usage?.total_tokens ?? countWords(content),
            };
        },
    });
};
|
|
112
|
+
/**
 * Smoke-test an OpenAI-compatible endpoint by listing its models.
 * Resolves to true on success; rejects if the server is unreachable
 * or the request is refused.
 */
export const openaiCompatibleTestIntegration = async ({ baseUrl, apiKey, }) => {
    // An empty/undefined key is replaced with a placeholder; many
    // OpenAI-compatible servers don't require authentication.
    const key = apiKey || "dummy";
    const client = createClient(key, baseUrl);
    // Listing models checks both connectivity and (when enforced) the key.
    await client.models.list();
    return true;
};
|
|
119
|
+
/**
 * Verify an OpenAI API key by requesting a trivial completion and
 * checking that the model echoes back "Hello".
 * Resolves to true when "hello" appears in the reply (case-insensitive);
 * rejects when the API errors or returns no/empty content.
 */
export const openaiTestIntegration = async ({ apiKey, }) => {
    const client = createClient(apiKey);
    const request = {
        messages: [
            {
                role: "user",
                content: [
                    {
                        type: "text",
                        text: `Reply with the single word "Hello."`,
                    },
                ],
            },
        ],
        model: "gpt-4o-mini",
        temperature: 0,
        max_completion_tokens: 32,
        top_p: 1,
    };
    const response = await client.chat.completions.create(request);
    if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenAI");
    }
    const content = contentToString(response.choices[0]?.message?.content);
    if (!content) {
        throw new Error("Response content is empty");
    }
    return content.toLowerCase().includes("hello");
};
|
|
148
|
+
// ============================================================================
|
|
149
|
+
// Streaming Chat (shared utility for all OpenAI-compatible providers)
|
|
150
|
+
// ============================================================================
|
|
151
|
+
/**
 * Convert provider-agnostic LLM chat messages into the OpenAI
 * chat-completions message format.
 *
 * @param messages Array of { role, content, toolCalls?, toolCallId? }.
 * @returns Array of OpenAI-shaped message objects.
 * @throws Error on an unrecognized role. (Previously an unknown role
 *         fell through the switch and produced `undefined`, which the
 *         API would later reject with a confusing error; failing fast
 *         here surfaces the bad input at its source.)
 */
export function llmMessagesToOpenAI(messages) {
    return messages.map((msg) => {
        switch (msg.role) {
            case "system":
                return { role: "system", content: msg.content };
            case "user":
                return { role: "user", content: msg.content };
            case "assistant":
                return {
                    role: "assistant",
                    // OpenAI requires explicit null (not undefined) when an
                    // assistant turn carries only tool calls.
                    content: msg.content ?? null,
                    tool_calls: msg.toolCalls?.map((tc) => ({
                        id: tc.id,
                        type: "function",
                        function: { name: tc.name, arguments: tc.arguments },
                    })),
                };
            case "tool":
                return {
                    role: "tool",
                    tool_call_id: msg.toolCallId,
                    content: msg.content,
                };
            default:
                throw new Error(`Unsupported message role: ${msg.role}`);
        }
    });
}
|
|
177
|
+
/**
 * Map provider-agnostic tool definitions onto OpenAI's `tools` array.
 * Returns undefined when there is nothing to send, so the field is
 * omitted from the request instead of being an empty array.
 */
function llmToolsToOpenAI(tools) {
    if (!tools?.length) {
        return undefined;
    }
    const converted = [];
    for (const { name, description, parameters } of tools) {
        converted.push({
            type: "function",
            function: { name, description, parameters },
        });
    }
    return converted;
}
|
|
189
|
+
/**
 * Translate a tool-choice setting into OpenAI's `tool_choice` value.
 * Falsy -> undefined (omit the field); a string mode ("auto"/"none"/...)
 * passes through; an object names a specific function to force.
 */
function llmToolChoiceToOpenAI(choice) {
    if (!choice) {
        return undefined;
    }
    return typeof choice === "string"
        ? choice
        : { type: "function", function: { name: choice.name } };
}
|
|
196
|
+
/**
 * Map OpenAI's `finish_reason` strings onto the internal stream-event
 * vocabulary. Anything unrecognized collapses to "other".
 */
function toFinishReason(raw) {
    const mapping = {
        stop: "stop",
        length: "length",
        content_filter: "content-filter",
        tool_calls: "tool-calls",
    };
    // Object.hasOwn guards against prototype keys (e.g. "constructor")
    // accidentally matching.
    return Object.hasOwn(mapping, raw) ? mapping[raw] : "other";
}
|
|
210
|
+
/**
 * Stream a chat completion from any OpenAI-compatible endpoint,
 * translating SDK chunks into provider-agnostic stream events.
 *
 * Emits, in order:
 *  - "text-delta" events as content arrives;
 *  - one "tool-call" event per fully accumulated tool call, emitted
 *    after the stream ends (arguments arrive in fragments);
 *  - a final "finish" event carrying the finish reason, token usage
 *    (when the server reported it) and the resolved model id.
 *
 * @param client An OpenAI SDK (or compatible) client instance.
 * @param model  Model id to request.
 * @param input  Chat input: messages plus optional tools/sampling params.
 */
export async function* openaiCompatibleStreamChat(client, model, input) {
    const stream = await client.chat.completions.create({
        model,
        messages: llmMessagesToOpenAI(input.messages),
        stream: true,
        // Ask the server to append a final usage chunk to the stream.
        stream_options: { include_usage: true },
        tools: llmToolsToOpenAI(input.tools),
        tool_choice: llmToolChoiceToOpenAI(input.toolChoice),
        max_tokens: input.maxTokens,
        temperature: input.temperature,
        stop: input.stopSequences,
        top_p: input.topP,
        frequency_penalty: input.frequencyPenalty,
        presence_penalty: input.presencePenalty,
        seed: input.seed,
    });
    // Tool-call fragments are keyed by their stream index and stitched
    // together (id, name, argument chunks) as they arrive.
    const toolCalls = new Map();
    let finishReason = "other";
    let promptTokens;
    let completionTokens;
    let modelId;
    for await (const chunk of stream) {
        if (chunk.model) {
            modelId = chunk.model;
        }
        if (chunk.usage) {
            // Usually present only on the final chunk (include_usage above).
            promptTokens = chunk.usage.prompt_tokens ?? undefined;
            completionTokens = chunk.usage.completion_tokens ?? undefined;
        }
        const choice = chunk.choices[0];
        if (!choice)
            continue;
        if (choice.delta?.content) {
            yield { type: "text-delta", text: choice.delta.content };
        }
        for (const tc of choice.delta?.tool_calls ?? []) {
            // Fall back to map size if the server omits an index.
            const index = tc.index ?? toolCalls.size;
            const current = toolCalls.get(index) ?? { id: "", name: "", arguments: "" };
            if (tc.id)
                current.id = tc.id;
            if (tc.function?.name)
                current.name = tc.function.name;
            if (tc.function?.arguments)
                current.arguments += tc.function.arguments;
            toolCalls.set(index, current);
        }
        if (choice.finish_reason) {
            finishReason = toFinishReason(choice.finish_reason);
        }
    }
    // Emit the completed tool calls in ascending index order.
    for (const [, tc] of [...toolCalls.entries()].sort(([a], [b]) => a - b)) {
        yield { type: "tool-call", id: tc.id, name: tc.name, arguments: tc.arguments };
    }
    yield {
        type: "finish",
        finishReason,
        // Usage is only attached when the server reported at least one count.
        usage: promptTokens != null || completionTokens != null
            ? { promptTokens, completionTokens }
            : undefined,
        modelId,
    };
}
|
|
272
|
+
/**
 * Stream a chat completion from OpenAI proper. Thin wrapper that builds
 * a client and delegates to the shared OpenAI-compatible streaming
 * implementation.
 */
export async function* openaiStreamChat({ apiKey, baseUrl, model, input, customFetch, }) {
    yield* openaiCompatibleStreamChat(createClient(apiKey, baseUrl, customFetch), model, input);
}
|
|
@@ -0,0 +1,69 @@
|
|
|
1
|
+
import type { JsonResponse, LlmChatInput, LlmStreamEvent, OpenRouterModel, OpenRouterProvider, OpenRouterProviderRouting } from "@voquill/types";
|
|
2
|
+
export declare const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
|
|
3
|
+
export declare const OPENROUTER_APP_NAME = "Voquill";
|
|
4
|
+
export declare const OPENROUTER_APP_URL = "https://voquill.com";
|
|
5
|
+
/**
|
|
6
|
+
* Pre-set favorite models for quick access.
|
|
7
|
+
* These are shown at the top of the model picker.
|
|
8
|
+
*/
|
|
9
|
+
export declare const OPENROUTER_FAVORITE_MODELS: readonly ["openai/gpt-oss-120b", "openai/gpt-oss-20b"];
|
|
10
|
+
/**
|
|
11
|
+
* Default model for testing and fallback
|
|
12
|
+
*/
|
|
13
|
+
export declare const OPENROUTER_DEFAULT_MODEL = "openai/gpt-4o-mini";
|
|
14
|
+
export type OpenRouterFetchModelsArgs = {
|
|
15
|
+
apiKey: string;
|
|
16
|
+
customFetch?: typeof globalThis.fetch;
|
|
17
|
+
};
|
|
18
|
+
export type OpenRouterFetchModelsOutput = {
|
|
19
|
+
models: OpenRouterModel[];
|
|
20
|
+
};
|
|
21
|
+
/**
|
|
22
|
+
* Fetch all available models from OpenRouter.
|
|
23
|
+
*/
|
|
24
|
+
export declare const openrouterFetchModels: ({ apiKey, customFetch, }: OpenRouterFetchModelsArgs) => Promise<OpenRouterFetchModelsOutput>;
|
|
25
|
+
export type OpenRouterFetchProvidersArgs = {
|
|
26
|
+
customFetch?: typeof globalThis.fetch;
|
|
27
|
+
};
|
|
28
|
+
export type OpenRouterFetchProvidersOutput = {
|
|
29
|
+
providers: OpenRouterProvider[];
|
|
30
|
+
};
|
|
31
|
+
/**
|
|
32
|
+
* Fetch all available providers from OpenRouter.
|
|
33
|
+
* No API key required for this endpoint.
|
|
34
|
+
*/
|
|
35
|
+
export declare const openrouterFetchProviders: ({ customFetch, }?: OpenRouterFetchProvidersArgs) => Promise<OpenRouterFetchProvidersOutput>;
|
|
36
|
+
export type OpenRouterGenerateTextArgs = {
|
|
37
|
+
apiKey: string;
|
|
38
|
+
model?: string;
|
|
39
|
+
system?: string;
|
|
40
|
+
prompt: string;
|
|
41
|
+
jsonResponse?: JsonResponse;
|
|
42
|
+
providerRouting?: OpenRouterProviderRouting;
|
|
43
|
+
customFetch?: typeof globalThis.fetch;
|
|
44
|
+
};
|
|
45
|
+
export type OpenRouterGenerateTextOutput = {
|
|
46
|
+
text: string;
|
|
47
|
+
tokensUsed: number;
|
|
48
|
+
};
|
|
49
|
+
/**
|
|
50
|
+
* Generate text using OpenRouter's chat completions API.
|
|
51
|
+
* Uses the OpenAI SDK with custom baseURL since OpenRouter is OpenAI-compatible.
|
|
52
|
+
*/
|
|
53
|
+
export declare const openrouterGenerateTextResponse: ({ apiKey, model, system, prompt, jsonResponse, providerRouting, customFetch, }: OpenRouterGenerateTextArgs) => Promise<OpenRouterGenerateTextOutput>;
|
|
54
|
+
export type OpenRouterTestIntegrationArgs = {
|
|
55
|
+
apiKey: string;
|
|
56
|
+
customFetch?: typeof globalThis.fetch;
|
|
57
|
+
};
|
|
58
|
+
/**
|
|
59
|
+
* Test if an OpenRouter API key is valid by making a simple chat completion.
|
|
60
|
+
*/
|
|
61
|
+
export declare const openrouterTestIntegration: ({ apiKey, customFetch, }: OpenRouterTestIntegrationArgs) => Promise<boolean>;
|
|
62
|
+
export type OpenRouterStreamChatArgs = {
|
|
63
|
+
apiKey: string;
|
|
64
|
+
model: string;
|
|
65
|
+
input: LlmChatInput;
|
|
66
|
+
customFetch?: typeof globalThis.fetch;
|
|
67
|
+
};
|
|
68
|
+
export declare function openrouterStreamChat({ apiKey, model, input, customFetch, }: OpenRouterStreamChatArgs): AsyncGenerator<LlmStreamEvent>;
|
|
69
|
+
//# sourceMappingURL=openrouter.utils.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"openrouter.utils.d.ts","sourceRoot":"","sources":["../src/openrouter.utils.ts"],"names":[],"mappings":"AAEA,OAAO,KAAK,EACV,YAAY,EACZ,YAAY,EACZ,cAAc,EACd,eAAe,EACf,kBAAkB,EAClB,yBAAyB,EAC1B,MAAM,gBAAgB,CAAC;AAGxB,eAAO,MAAM,mBAAmB,iCAAiC,CAAC;AAClE,eAAO,MAAM,mBAAmB,YAAY,CAAC;AAC7C,eAAO,MAAM,kBAAkB,wBAAwB,CAAC;AAExD;;;GAGG;AACH,eAAO,MAAM,0BAA0B,wDAG7B,CAAC;AAEX;;GAEG;AACH,eAAO,MAAM,wBAAwB,uBAAuB,CAAC;AAyB7D,MAAM,MAAM,yBAAyB,GAAG;IACtC,MAAM,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACvC,CAAC;AAEF,MAAM,MAAM,2BAA2B,GAAG;IACxC,MAAM,EAAE,eAAe,EAAE,CAAC;CAC3B,CAAC;AAEF;;GAEG;AACH,eAAO,MAAM,qBAAqB,GAAU,0BAGzC,yBAAyB,KAAG,OAAO,CAAC,2BAA2B,CAmBjE,CAAC;AAMF,MAAM,MAAM,4BAA4B,GAAG;IACzC,WAAW,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACvC,CAAC;AAEF,MAAM,MAAM,8BAA8B,GAAG;IAC3C,SAAS,EAAE,kBAAkB,EAAE,CAAC;CACjC,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,wBAAwB,GAAU,mBAE5C,4BAAiC,KAAG,OAAO,CAAC,8BAA8B,CAkB5E,CAAC;AAMF,MAAM,MAAM,0BAA0B,GAAG;IACvC,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,YAAY,CAAC,EAAE,YAAY,CAAC;IAC5B,eAAe,CAAC,EAAE,yBAAyB,CAAC;IAC5C,WAAW,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACvC,CAAC;AAEF,MAAM,MAAM,4BAA4B,GAAG;IACzC,IAAI,EAAE,MAAM,CAAC;IACb,UAAU,EAAE,MAAM,CAAC;CACpB,CAAC;AAEF;;;GAGG;AACH,eAAO,MAAM,8BAA8B,GAAU,gFAQlD,0BAA0B,KAAG,OAAO,CAAC,4BAA4B,CAyDnE,CAAC;AAMF,MAAM,MAAM,6BAA6B,GAAG;IAC1C,MAAM,EAAE,MAAM,CAAC;IACf,WAAW,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACvC,CAAC;AAEF;;GAEG;AACH,eAAO,MAAM,yBAAyB,GAAU,0BAG7C,6BAA6B,KAAG,OAAO,CAAC,OAAO,CAqBjD,CAAC;AAMF,MAAM,MAAM,wBAAwB,GAAG;IACrC,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,KAAK,EAAE,YAAY,CAAC;IACpB,WAAW,CAAC,EAAE,OAAO,UAAU,CAAC,KAAK,CAAC;CACvC,CAAC;AAEF,wBAAuB,oBAAoB,CAAC,EAC1C,MAAM,EACN,KAAK,EACL,KAAK,EACL,WAAW,GACZ,EAAE,wBAAwB,GAAG,cAAc,CAAC,cAAc,CAAC,CAG3D"}
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import { retry, countWords } from "@voquill/utilities";
|
|
3
|
+
import { openaiCompatibleStreamChat } from "./openai.utils";
|
|
4
|
+
// Root endpoint of OpenRouter's OpenAI-compatible REST API.
export const OPENROUTER_BASE_URL = "https://openrouter.ai/api/v1";
// App attribution values sent via HTTP-Referer / X-Title headers.
export const OPENROUTER_APP_NAME = "Voquill";
export const OPENROUTER_APP_URL = "https://voquill.com";
/**
 * Pre-set favorite models for quick access.
 * These are shown at the top of the model picker.
 */
export const OPENROUTER_FAVORITE_MODELS = [
    "openai/gpt-oss-120b",
    "openai/gpt-oss-20b",
];
/**
 * Default model for testing and fallback
 */
export const OPENROUTER_DEFAULT_MODEL = "openai/gpt-4o-mini";
|
|
19
|
+
/**
 * Create an OpenAI SDK client pointed at OpenRouter.
 * Attaches the attribution headers OpenRouter expects and allows the
 * browser-like Tauri environment (keys are encrypted locally).
 */
const createClient = (apiKey, customFetch) =>
    new OpenAI({
        apiKey: apiKey.trim(),
        baseURL: OPENROUTER_BASE_URL,
        dangerouslyAllowBrowser: true,
        fetch: customFetch,
        defaultHeaders: {
            "HTTP-Referer": OPENROUTER_APP_URL,
            "X-Title": OPENROUTER_APP_NAME,
        },
    });
|
|
34
|
+
/**
 * Fetch the catalog of available models from OpenRouter.
 *
 * @param apiKey      OpenRouter API key (trimmed before use).
 * @param customFetch Optional fetch implementation; defaults to global fetch.
 * @returns {Promise<{models: Array}>} Empty array when the API returns none.
 * @throws Error on a non-OK HTTP response.
 */
export const openrouterFetchModels = async ({ apiKey, customFetch, }) => {
    const doFetch = customFetch ?? globalThis.fetch;
    const headers = {
        Authorization: `Bearer ${apiKey.trim()}`,
        "HTTP-Referer": OPENROUTER_APP_URL,
        "X-Title": OPENROUTER_APP_NAME,
    };
    const response = await doFetch(`${OPENROUTER_BASE_URL}/models`, { headers });
    if (!response.ok) {
        throw new Error(`Failed to fetch OpenRouter models: ${response.status} ${response.statusText}`);
    }
    const payload = await response.json();
    return { models: payload.data ?? [] };
};
|
|
52
|
+
/**
 * Fetch all available providers from OpenRouter.
 * No API key required for this endpoint.
 *
 * @param customFetch Optional fetch implementation; defaults to global fetch.
 * @returns {Promise<{providers: Array}>} Empty array when none are returned.
 * @throws Error on a non-OK HTTP response.
 */
export const openrouterFetchProviders = async ({ customFetch, } = {}) => {
    const doFetch = customFetch ?? globalThis.fetch;
    const response = await doFetch(`${OPENROUTER_BASE_URL}/providers`, {
        headers: {
            "HTTP-Referer": OPENROUTER_APP_URL,
            "X-Title": OPENROUTER_APP_NAME,
        },
    });
    if (!response.ok) {
        throw new Error(`Failed to fetch OpenRouter providers: ${response.status} ${response.statusText}`);
    }
    const payload = await response.json();
    return { providers: payload.data ?? [] };
};
|
|
70
|
+
/**
 * Generate text using OpenRouter's chat completions API.
 * Uses the OpenAI SDK with custom baseURL since OpenRouter is OpenAI-compatible.
 * Retries up to 3 times on failure.
 *
 * @param apiKey          OpenRouter API key.
 * @param model           Model id; defaults to OPENROUTER_DEFAULT_MODEL.
 * @param system          Optional system prompt.
 * @param prompt          User prompt text.
 * @param jsonResponse    Optional JSON-schema spec for strict JSON output.
 * @param providerRouting Optional OpenRouter provider-routing preferences.
 * @param customFetch     Optional fetch implementation for the SDK.
 * @returns {Promise<{text: string, tokensUsed: number}>}
 * @throws Error when no choices are returned or the content is empty.
 */
export const openrouterGenerateTextResponse = async ({ apiKey, model = OPENROUTER_DEFAULT_MODEL, system, prompt, jsonResponse, providerRouting, customFetch, }) => {
    return retry({
        retries: 3,
        fn: async () => {
            const client = createClient(apiKey, customFetch);
            const messages = [];
            if (system) {
                messages.push({ role: "system", content: system });
            }
            messages.push({ role: "user", content: prompt });
            // Build the request with optional provider routing
            const requestParams = {
                messages,
                model,
                temperature: 1,
                max_tokens: 1024,
                top_p: 1,
                response_format: jsonResponse
                    ? {
                        type: "json_schema",
                        json_schema: {
                            name: jsonResponse.name,
                            description: jsonResponse.description,
                            schema: jsonResponse.schema,
                            strict: true,
                        },
                    }
                    : undefined,
            };
            // Add provider routing if specified. `provider` is an
            // OpenRouter extension, not a standard OpenAI parameter.
            if (providerRouting) {
                requestParams.provider = providerRouting;
            }
            const response = await client.chat.completions.create(requestParams);
            console.log("openrouter llm usage:", response.usage);
            if (!response.choices || response.choices.length === 0) {
                throw new Error("No response from OpenRouter");
            }
            const result = response.choices[0].message.content;
            if (!result) {
                throw new Error("Content is empty");
            }
            return {
                text: result,
                // Fall back to a word count when usage is not reported.
                tokensUsed: response.usage?.total_tokens ?? countWords(result),
            };
        },
    });
};
|
|
123
|
+
/**
 * Test if an OpenRouter API key is valid by making a simple chat completion.
 * Resolves to true when the model echoes "hello" (case-insensitive);
 * rejects when the API errors or returns no choices.
 */
export const openrouterTestIntegration = async ({ apiKey, customFetch, }) => {
    const client = createClient(apiKey, customFetch);
    const response = await client.chat.completions.create({
        model: OPENROUTER_DEFAULT_MODEL,
        temperature: 0,
        max_tokens: 32,
        messages: [
            {
                role: "user",
                content: 'Reply with the single word "Hello."',
            },
        ],
    });
    if (!response.choices || response.choices.length === 0) {
        throw new Error("No response from OpenRouter");
    }
    const content = response.choices[0]?.message?.content ?? "";
    return content.toLowerCase().includes("hello");
};
|
|
145
|
+
/**
 * Stream a chat completion through OpenRouter by delegating to the
 * shared OpenAI-compatible streaming helper.
 */
export async function* openrouterStreamChat({ apiKey, model, input, customFetch, }) {
    yield* openaiCompatibleStreamChat(createClient(apiKey, customFetch), model, input);
}
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
export type SpeachesTestIntegrationArgs = {
|
|
2
|
+
baseUrl: string;
|
|
3
|
+
};
|
|
4
|
+
export declare const speachesTestIntegration: ({ baseUrl, }: SpeachesTestIntegrationArgs) => Promise<boolean>;
|
|
5
|
+
export type SpeachesTranscriptionArgs = {
|
|
6
|
+
baseUrl: string;
|
|
7
|
+
model: string;
|
|
8
|
+
blob: ArrayBuffer | Buffer;
|
|
9
|
+
ext: string;
|
|
10
|
+
prompt?: string;
|
|
11
|
+
language?: string;
|
|
12
|
+
};
|
|
13
|
+
export type SpeachesTranscribeAudioOutput = {
|
|
14
|
+
text: string;
|
|
15
|
+
wordsUsed: number;
|
|
16
|
+
};
|
|
17
|
+
export declare const speachesTranscribeAudio: ({ baseUrl, model, blob, ext, prompt, language, }: SpeachesTranscriptionArgs) => Promise<SpeachesTranscribeAudioOutput>;
|
|
18
|
+
//# sourceMappingURL=speaches.utils.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"speaches.utils.d.ts","sourceRoot":"","sources":["../src/speaches.utils.ts"],"names":[],"mappings":"AAEA,MAAM,MAAM,2BAA2B,GAAG;IACxC,OAAO,EAAE,MAAM,CAAC;CACjB,CAAC;AAEF,eAAO,MAAM,uBAAuB,GAAU,cAE3C,2BAA2B,KAAG,OAAO,CAAC,OAAO,CAI/C,CAAC;AAEF,MAAM,MAAM,yBAAyB,GAAG;IACtC,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,WAAW,GAAG,MAAM,CAAC;IAC3B,GAAG,EAAE,MAAM,CAAC;IACZ,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,MAAM,MAAM,6BAA6B,GAAG;IAC1C,IAAI,EAAE,MAAM,CAAC;IACb,SAAS,EAAE,MAAM,CAAC;CACnB,CAAC;AAEF,eAAO,MAAM,uBAAuB,GAAU,kDAO3C,yBAAyB,KAAG,OAAO,CAAC,6BAA6B,CAuCnE,CAAC"}
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
import { retry, countWords } from "@voquill/utilities";
|
|
2
|
+
/**
 * Check whether a Speaches server is reachable via its /health endpoint.
 * Resolves to true on a 2xx response, false otherwise; network-level
 * failures (server down, DNS) reject instead.
 */
export const speachesTestIntegration = async ({ baseUrl, }) => {
    // Strip one trailing slash so we don't build "…//health".
    const root = baseUrl.replace(/\/$/, "");
    const res = await fetch(`${root}/health`);
    return res.ok;
};
|
|
7
|
+
/**
 * Transcribe audio against a self-hosted Speaches server (its
 * OpenAI-compatible /v1/audio/transcriptions endpoint), retrying up
 * to 3 times on failure.
 *
 * @param baseUrl  Server root URL (trailing slash tolerated).
 * @param model    Transcription model id.
 * @param blob     Raw audio as ArrayBuffer or Buffer.
 * @param ext      Audio file extension; used for filename and MIME type.
 * @param prompt   Optional transcription hint.
 * @param language Optional language code.
 * @returns {Promise<{text: string, wordsUsed: number}>}
 * @throws Error on a non-OK response or a response without text.
 */
export const speachesTranscribeAudio = async ({ baseUrl, model, blob, ext, prompt, language, }) => {
    const transcribeOnce = async () => {
        // Normalize Buffer input to an ArrayBuffer so it can back a Blob.
        const arrayBuffer = blob instanceof ArrayBuffer ? blob : new Uint8Array(blob).buffer;
        const file = new Blob([arrayBuffer], { type: `audio/${ext}` });
        const formData = new FormData();
        formData.append("file", file, `audio.${ext}`);
        formData.append("model", model);
        if (prompt) {
            formData.append("prompt", prompt);
        }
        if (language) {
            formData.append("language", language);
        }
        const root = baseUrl.replace(/\/$/, "");
        const response = await fetch(`${root}/v1/audio/transcriptions`, {
            method: "POST",
            body: formData,
        });
        if (!response.ok) {
            // Best-effort read of the error body for a useful message.
            const errorText = await response.text().catch(() => "Unknown error");
            throw new Error(`Speaches transcription failed: ${response.status} - ${errorText}`);
        }
        const data = await response.json();
        if (!data.text) {
            throw new Error("Transcription failed: no text in response");
        }
        return { text: data.text, wordsUsed: countWords(data.text) };
    };
    return retry({ retries: 3, fn: transcribeOnce });
};
|
package/package.json
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@voquill/voice-ai",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "Shared voice AI provider helpers (transcription and text generation) for OpenAI, Groq, Claude, Gemini, Azure, Deepgram, ElevenLabs, OpenRouter, and more",
|
|
5
|
+
"main": "dist/index.js",
|
|
6
|
+
"types": "dist/index.d.ts",
|
|
7
|
+
"scripts": {
|
|
8
|
+
"build": "tsc",
|
|
9
|
+
"dev": "tsc --watch",
|
|
10
|
+
"clean": "rm -rf dist"
|
|
11
|
+
},
|
|
12
|
+
"files": [
|
|
13
|
+
"dist"
|
|
14
|
+
],
|
|
15
|
+
"dependencies": {
|
|
16
|
+
"@anthropic-ai/sdk": "^0.71.2",
|
|
17
|
+
"@azure/openai": "^2.0.0",
|
|
18
|
+
"@google/genai": "^1.37.0",
|
|
19
|
+
"@voquill/types": "workspace:*",
|
|
20
|
+
"@voquill/utilities": "workspace:*",
|
|
21
|
+
"groq-sdk": "^0.37.0",
|
|
22
|
+
"microsoft-cognitiveservices-speech-sdk": "^1.47.0",
|
|
23
|
+
"openai": "^4.73.0",
|
|
24
|
+
"wavefile": "^11.0.0",
|
|
25
|
+
"zod": "^3.25.76",
|
|
26
|
+
"zod-to-json-schema": "^3.24.6"
|
|
27
|
+
},
|
|
28
|
+
"devDependencies": {
|
|
29
|
+
"@voquill/typescript-config": "workspace:*",
|
|
30
|
+
"typescript": "5.9.2"
|
|
31
|
+
},
|
|
32
|
+
"publishConfig": {
|
|
33
|
+
"access": "public"
|
|
34
|
+
}
|
|
35
|
+
}
|