@plasius/ai 1.1.4 → 1.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +22 -2
- package/README.md +101 -2
- package/dist/components/pixelverse/balance.d.ts +6 -2
- package/dist/components/pixelverse/balance.d.ts.map +1 -1
- package/dist/components/pixelverse/balance.js +13 -23
- package/dist/components/pixelverse/index.d.ts +1 -1
- package/dist/components/pixelverse/index.d.ts.map +1 -1
- package/dist/components/pixelverse/index.js +1 -1
- package/dist/components/pixelverse/video-generation-editor.d.ts +10 -0
- package/dist/components/pixelverse/video-generation-editor.d.ts.map +1 -0
- package/dist/components/pixelverse/video-generation-editor.js +79 -0
- package/dist/platform/adapter-platform.d.ts +60 -0
- package/dist/platform/adapter-platform.d.ts.map +1 -0
- package/dist/platform/adapter-platform.js +222 -0
- package/dist/platform/gemini-adapter.d.ts +15 -0
- package/dist/platform/gemini-adapter.d.ts.map +1 -0
- package/dist/platform/gemini-adapter.js +293 -0
- package/dist/platform/http-resilience.d.ts +19 -0
- package/dist/platform/http-resilience.d.ts.map +1 -0
- package/dist/platform/http-resilience.js +126 -0
- package/dist/platform/index.d.ts +22 -1
- package/dist/platform/index.d.ts.map +1 -1
- package/dist/platform/index.js +24 -0
- package/dist/platform/openai-adapter.d.ts +24 -0
- package/dist/platform/openai-adapter.d.ts.map +1 -0
- package/dist/platform/openai-adapter.js +398 -0
- package/dist/platform/video-provider-adapter.d.ts +54 -0
- package/dist/platform/video-provider-adapter.d.ts.map +1 -0
- package/dist/platform/video-provider-adapter.js +165 -0
- package/dist/platform/video-provider-platform.d.ts +13 -0
- package/dist/platform/video-provider-platform.d.ts.map +1 -0
- package/dist/platform/video-provider-platform.js +102 -0
- package/dist-cjs/components/pixelverse/balance.d.ts +6 -2
- package/dist-cjs/components/pixelverse/balance.d.ts.map +1 -1
- package/dist-cjs/components/pixelverse/balance.js +13 -23
- package/dist-cjs/components/pixelverse/index.d.ts +1 -1
- package/dist-cjs/components/pixelverse/index.d.ts.map +1 -1
- package/dist-cjs/components/pixelverse/index.js +1 -1
- package/dist-cjs/components/pixelverse/video-generation-editor.d.ts +10 -0
- package/dist-cjs/components/pixelverse/video-generation-editor.d.ts.map +1 -0
- package/dist-cjs/components/pixelverse/video-generation-editor.js +85 -0
- package/dist-cjs/platform/adapter-platform.d.ts +60 -0
- package/dist-cjs/platform/adapter-platform.d.ts.map +1 -0
- package/dist-cjs/platform/adapter-platform.js +225 -0
- package/dist-cjs/platform/gemini-adapter.d.ts +15 -0
- package/dist-cjs/platform/gemini-adapter.d.ts.map +1 -0
- package/dist-cjs/platform/gemini-adapter.js +296 -0
- package/dist-cjs/platform/http-resilience.d.ts +19 -0
- package/dist-cjs/platform/http-resilience.d.ts.map +1 -0
- package/dist-cjs/platform/http-resilience.js +129 -0
- package/dist-cjs/platform/index.d.ts +22 -1
- package/dist-cjs/platform/index.d.ts.map +1 -1
- package/dist-cjs/platform/index.js +30 -1
- package/dist-cjs/platform/openai-adapter.d.ts +24 -0
- package/dist-cjs/platform/openai-adapter.d.ts.map +1 -0
- package/dist-cjs/platform/openai-adapter.js +401 -0
- package/dist-cjs/platform/video-provider-adapter.d.ts +54 -0
- package/dist-cjs/platform/video-provider-adapter.d.ts.map +1 -0
- package/dist-cjs/platform/video-provider-adapter.js +168 -0
- package/dist-cjs/platform/video-provider-platform.d.ts +13 -0
- package/dist-cjs/platform/video-provider-platform.d.ts.map +1 -0
- package/dist-cjs/platform/video-provider-platform.js +105 -0
- package/docs/api-reference.md +59 -0
- package/docs/architecture.md +5 -1
- package/docs/providers.md +24 -6
- package/package.json +6 -6
- package/src/components/pixelverse/balance.tsx +22 -35
- package/src/components/pixelverse/index.ts +1 -1
- package/src/components/pixelverse/video-generation-editor.tsx +164 -0
- package/src/platform/adapter-platform.ts +440 -0
- package/src/platform/gemini-adapter.ts +391 -0
- package/src/platform/http-resilience.ts +198 -0
- package/src/platform/index.ts +68 -0
- package/src/platform/openai-adapter.ts +552 -0
- package/src/platform/video-provider-adapter.ts +303 -0
- package/src/platform/video-provider-platform.ts +208 -0
- package/dist/components/pixelverse/pixelverseeditor.d.ts +0 -16
- package/dist/components/pixelverse/pixelverseeditor.d.ts.map +0 -1
- package/dist/components/pixelverse/pixelverseeditor.js +0 -21
- package/dist/platform/openai.d.ts +0 -8
- package/dist/platform/openai.d.ts.map +0 -1
- package/dist/platform/openai.js +0 -61
- package/dist/platform/pixelverse.d.ts +0 -6
- package/dist/platform/pixelverse.d.ts.map +0 -1
- package/dist/platform/pixelverse.js +0 -196
- package/dist-cjs/components/pixelverse/pixelverseeditor.d.ts +0 -16
- package/dist-cjs/components/pixelverse/pixelverseeditor.d.ts.map +0 -1
- package/dist-cjs/components/pixelverse/pixelverseeditor.js +0 -27
- package/dist-cjs/platform/openai.d.ts +0 -8
- package/dist-cjs/platform/openai.d.ts.map +0 -1
- package/dist-cjs/platform/openai.js +0 -67
- package/dist-cjs/platform/pixelverse.d.ts +0 -6
- package/dist-cjs/platform/pixelverse.d.ts.map +0 -1
- package/dist-cjs/platform/pixelverse.js +0 -199
- package/src/components/pixelverse/pixelverseeditor.mocule.css +0 -0
- package/src/components/pixelverse/pixelverseeditor.tsx +0 -74
- package/src/platform/openai.ts +0 -123
- package/src/platform/pixelverse.ts +0 -309
|
@@ -0,0 +1,552 @@
|
|
|
1
|
+
import { performance } from "node:perf_hooks";
|
|
2
|
+
|
|
3
|
+
import {
|
|
4
|
+
AICapability,
|
|
5
|
+
type AICapabilityAdapter,
|
|
6
|
+
type AdapterBalanceRequest,
|
|
7
|
+
type AdapterChatRequest,
|
|
8
|
+
type AdapterGenerateImageRequest,
|
|
9
|
+
type AdapterGenerateModelRequest,
|
|
10
|
+
type AdapterSynthesizeSpeechRequest,
|
|
11
|
+
type AdapterTranscribeSpeechRequest,
|
|
12
|
+
type BalanceCompletion,
|
|
13
|
+
type ChatCompletion,
|
|
14
|
+
type Completion,
|
|
15
|
+
type ImageCompletion,
|
|
16
|
+
type ModelCompletion,
|
|
17
|
+
type SpeechCompletion,
|
|
18
|
+
type TextCompletion,
|
|
19
|
+
} from "./index.js";
|
|
20
|
+
import { fetchWithPolicy, type HttpClientPolicy } from "./http-resilience.js";
|
|
21
|
+
|
|
22
|
+
/**
 * Token-usage fields OpenAI may attach to a response payload.
 * Chat-completions payloads use `prompt_tokens`/`completion_tokens`/
 * `total_tokens`; responses-style payloads use `input_tokens`/
 * `output_tokens`. All fields are optional because the exact shape of the
 * usage object is not guaranteed (see usageFromPayload, which guards every
 * field with a numeric check).
 */
interface OpenAIUsagePayload {
  prompt_tokens?: number;
  completion_tokens?: number;
  total_tokens?: number;
  input_tokens?: number;
  output_tokens?: number;
}
|
|
29
|
+
|
|
30
|
+
/** Configuration accepted by createOpenAIAdapter. All fields are optional. */
export interface OpenAIAdapterOptions {
  /** Provider identifier; a blank/whitespace value falls back to "openai". */
  id?: string;
  /** API base URL; defaults to "https://api.openai.com/v1". A trailing slash is stripped. */
  baseUrl?: string;
  /** Custom fetch implementation (e.g. for tests); defaults to the global fetch. */
  fetchFn?: typeof fetch;
  /** Retry/timeout policy forwarded to fetchWithPolicy on every request. */
  httpPolicy?: HttpClientPolicy;
  /** Per-capability default model names, used when a request omits `model`. */
  defaultModels?: {
    chat?: string;
    speech?: string;
    transcription?: string;
    image?: string;
    model?: string;
  };
  /** Speech-synthesis defaults. */
  speech?: {
    /** Voice name; defaults to "alloy". */
    voice?: string;
    /** Output audio format; defaults to "mp3". */
    format?: "mp3" | "wav" | "opus" | "aac" | "flac" | "pcm";
  };
  /** Image-generation defaults. */
  image?: {
    /** Image size string such as "1024x1024" (the default). */
    size?: string;
  };
}
|
|
50
|
+
|
|
51
|
+
function normalizeBaseUrl(baseUrl: string | undefined): string {
|
|
52
|
+
const normalized = (baseUrl ?? "https://api.openai.com/v1").trim();
|
|
53
|
+
return normalized.endsWith("/") ? normalized.slice(0, -1) : normalized;
|
|
54
|
+
}
|
|
55
|
+
|
|
56
|
+
function resolveFetch(fetchFn?: typeof fetch): typeof fetch {
|
|
57
|
+
const resolved = fetchFn ?? (typeof fetch !== "undefined" ? fetch : undefined);
|
|
58
|
+
if (!resolved) {
|
|
59
|
+
throw new Error("No fetch implementation available for OpenAI adapter.");
|
|
60
|
+
}
|
|
61
|
+
return resolved;
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
function asRecord(value: unknown): Record<string, unknown> {
|
|
65
|
+
return value && typeof value === "object" ? (value as Record<string, unknown>) : {};
|
|
66
|
+
}
|
|
67
|
+
|
|
68
|
+
function asNumber(value: unknown): number | undefined {
|
|
69
|
+
return typeof value === "number" && Number.isFinite(value) ? value : undefined;
|
|
70
|
+
}
|
|
71
|
+
|
|
72
|
+
function asString(value: unknown): string | undefined {
|
|
73
|
+
return typeof value === "string" && value.length > 0 ? value : undefined;
|
|
74
|
+
}
|
|
75
|
+
|
|
76
|
+
async function parseResponseBody(response: Response): Promise<unknown> {
|
|
77
|
+
const contentType = response.headers.get("content-type") ?? "";
|
|
78
|
+
if (contentType.includes("application/json")) {
|
|
79
|
+
return await response.json();
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
const text = await response.text();
|
|
83
|
+
if (!text) {
|
|
84
|
+
return undefined;
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
try {
|
|
88
|
+
return JSON.parse(text) as unknown;
|
|
89
|
+
} catch {
|
|
90
|
+
return text;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
|
|
94
|
+
function resolveErrorMessage(body: unknown, fallback: string): string {
|
|
95
|
+
const payload = asRecord(body);
|
|
96
|
+
const nested = asRecord(payload.error);
|
|
97
|
+
return (
|
|
98
|
+
asString(nested.message) ??
|
|
99
|
+
asString(payload.message) ??
|
|
100
|
+
asString(payload.error) ??
|
|
101
|
+
fallback
|
|
102
|
+
);
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
function requireApiKey(apiKey: string): string {
|
|
106
|
+
const trimmed = apiKey.trim();
|
|
107
|
+
if (!trimmed) {
|
|
108
|
+
throw new Error("OpenAI API key is required.");
|
|
109
|
+
}
|
|
110
|
+
return trimmed;
|
|
111
|
+
}
|
|
112
|
+
|
|
113
|
+
function usageFromPayload(payload: unknown): Record<string, number> | undefined {
|
|
114
|
+
const usagePayload = asRecord(payload) as OpenAIUsagePayload;
|
|
115
|
+
const inputTokens = usagePayload.prompt_tokens ?? usagePayload.input_tokens;
|
|
116
|
+
const outputTokens = usagePayload.completion_tokens ?? usagePayload.output_tokens;
|
|
117
|
+
const totalTokens = usagePayload.total_tokens;
|
|
118
|
+
|
|
119
|
+
const usage: Record<string, number> = {};
|
|
120
|
+
if (asNumber(inputTokens) !== undefined) {
|
|
121
|
+
usage.inputTokens = inputTokens as number;
|
|
122
|
+
}
|
|
123
|
+
if (asNumber(outputTokens) !== undefined) {
|
|
124
|
+
usage.outputTokens = outputTokens as number;
|
|
125
|
+
}
|
|
126
|
+
if (asNumber(totalTokens) !== undefined) {
|
|
127
|
+
usage.totalTokens = totalTokens as number;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
return Object.keys(usage).length > 0 ? usage : undefined;
|
|
131
|
+
}
|
|
132
|
+
|
|
133
|
+
function createBaseCompletion(
|
|
134
|
+
type: string,
|
|
135
|
+
model: string,
|
|
136
|
+
requestor: string,
|
|
137
|
+
durationMs: number,
|
|
138
|
+
usage?: Record<string, number>
|
|
139
|
+
): Completion {
|
|
140
|
+
return {
|
|
141
|
+
partitionKey: requestor,
|
|
142
|
+
id: crypto.randomUUID(),
|
|
143
|
+
type,
|
|
144
|
+
model,
|
|
145
|
+
createdAt: new Date().toISOString(),
|
|
146
|
+
durationMs,
|
|
147
|
+
usage: usage ?? {},
|
|
148
|
+
};
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
function extractChatMessage(body: unknown): string {
|
|
152
|
+
const root = asRecord(body);
|
|
153
|
+
const choices = Array.isArray(root.choices) ? root.choices : [];
|
|
154
|
+
const firstChoice = choices[0] && typeof choices[0] === "object" ? asRecord(choices[0]) : {};
|
|
155
|
+
const message = asRecord(firstChoice.message);
|
|
156
|
+
const content = message.content;
|
|
157
|
+
|
|
158
|
+
if (typeof content === "string" && content.length > 0) {
|
|
159
|
+
return content;
|
|
160
|
+
}
|
|
161
|
+
|
|
162
|
+
if (Array.isArray(content)) {
|
|
163
|
+
const text = content
|
|
164
|
+
.map((part) => {
|
|
165
|
+
const maybePart = asRecord(part);
|
|
166
|
+
return asString(maybePart.text) ?? "";
|
|
167
|
+
})
|
|
168
|
+
.join("")
|
|
169
|
+
.trim();
|
|
170
|
+
if (text.length > 0) {
|
|
171
|
+
return text;
|
|
172
|
+
}
|
|
173
|
+
}
|
|
174
|
+
|
|
175
|
+
throw new Error("OpenAI chat response did not contain assistant content.");
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
function extractImageUrl(body: unknown): URL {
|
|
179
|
+
const root = asRecord(body);
|
|
180
|
+
const data = Array.isArray(root.data) ? root.data : [];
|
|
181
|
+
const first = data[0] && typeof data[0] === "object" ? asRecord(data[0]) : {};
|
|
182
|
+
const url = asString(first.url);
|
|
183
|
+
const b64 = asString(first.b64_json);
|
|
184
|
+
|
|
185
|
+
if (url) {
|
|
186
|
+
return new URL(url);
|
|
187
|
+
}
|
|
188
|
+
if (b64) {
|
|
189
|
+
return new URL(`data:image/png;base64,${b64}`);
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
throw new Error("OpenAI image generation response did not contain image data.");
|
|
193
|
+
}
|
|
194
|
+
|
|
195
|
+
function extractTranscriptionText(body: unknown): string {
|
|
196
|
+
const root = asRecord(body);
|
|
197
|
+
const text = asString(root.text) ?? asString(root.output_text);
|
|
198
|
+
if (!text) {
|
|
199
|
+
throw new Error("OpenAI transcription response did not contain text.");
|
|
200
|
+
}
|
|
201
|
+
return text;
|
|
202
|
+
}
|
|
203
|
+
|
|
204
|
+
function parseGeneratedModel(content: string): {
|
|
205
|
+
modelId: string;
|
|
206
|
+
artifactUrl?: URL;
|
|
207
|
+
} {
|
|
208
|
+
const trimmed = content.trim();
|
|
209
|
+
if (!trimmed) {
|
|
210
|
+
return {
|
|
211
|
+
modelId: crypto.randomUUID(),
|
|
212
|
+
};
|
|
213
|
+
}
|
|
214
|
+
|
|
215
|
+
try {
|
|
216
|
+
const parsed = JSON.parse(trimmed) as unknown;
|
|
217
|
+
const root = asRecord(parsed);
|
|
218
|
+
const modelId = asString(root.modelId);
|
|
219
|
+
const artifactUrl = asString(root.artifactUrl);
|
|
220
|
+
if (modelId) {
|
|
221
|
+
return {
|
|
222
|
+
modelId,
|
|
223
|
+
artifactUrl: artifactUrl ? new URL(artifactUrl) : undefined,
|
|
224
|
+
};
|
|
225
|
+
}
|
|
226
|
+
} catch {
|
|
227
|
+
// Fallback to plain text handling below.
|
|
228
|
+
}
|
|
229
|
+
|
|
230
|
+
return {
|
|
231
|
+
modelId: trimmed,
|
|
232
|
+
};
|
|
233
|
+
}
|
|
234
|
+
|
|
235
|
+
/**
 * Creates an AICapabilityAdapter backed by the OpenAI REST API.
 *
 * Supports chat, speech synthesis, transcription, image generation and
 * "model" generation (a JSON-constrained chat call), plus a local balance
 * stub. Every network call goes through fetchWithPolicy with the
 * configured `options.httpPolicy`, and forwards `request.traceId` as both
 * the `Idempotency-Key` and `X-Request-Id` headers. The API key is taken
 * from each request rather than stored on the adapter.
 *
 * @param options - Provider id, base URL, fetch override, HTTP policy and
 *   per-capability defaults; see OpenAIAdapterOptions.
 * @returns The adapter together with its advertised capability list.
 */
export function createOpenAIAdapter(
  options: OpenAIAdapterOptions = {}
): AICapabilityAdapter {
  // A blank/whitespace-only custom id still falls back to "openai".
  const providerId = (options.id ?? "openai").trim() || "openai";
  const baseUrl = normalizeBaseUrl(options.baseUrl);
  const fetcher = resolveFetch(options.fetchFn);

  // POST /chat/completions. request.context, when present, becomes a
  // leading system message.
  const chatWithAI = async (request: AdapterChatRequest): Promise<ChatCompletion> => {
    const startedAt = performance.now();
    const apiKey = requireApiKey(request.apiKey);
    // Precedence: per-request model, then configured default, then fallback.
    const resolvedModel = request.model || options.defaultModels?.chat || "gpt-4.1-mini";

    const response = await fetchWithPolicy({
      url: `${baseUrl}/chat/completions`,
      operation: "OpenAI chat request",
      fetchFn: fetcher,
      policy: options.httpPolicy,
      createRequestInit: () => ({
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json",
          "Idempotency-Key": request.traceId,
          "X-Request-Id": request.traceId,
          "X-Plasius-Client": "@plasius/ai/openai-adapter",
        },
        body: JSON.stringify({
          model: resolvedModel,
          messages: [
            ...(request.context
              ? [{ role: "system", content: request.context }]
              : []),
            { role: "user", content: request.input },
          ],
        }),
      }),
    });

    // Parse before checking status so error payloads can be surfaced with
    // the provider's own message.
    const body = await parseResponseBody(response);
    if (!response.ok) {
      throw new Error(
        resolveErrorMessage(
          body,
          `OpenAI chat request failed (${response.status} ${response.statusText}).`
        )
      );
    }

    const message = extractChatMessage(body);
    const usage = usageFromPayload(asRecord(body).usage);
    const durationMs = performance.now() - startedAt;
    const base = createBaseCompletion("chat", resolvedModel, request.userId, durationMs, usage);

    return {
      ...base,
      message,
      outputUser: "assistant",
    };
  };

  // POST /audio/speech. The resulting audio bytes are inlined into a
  // base64 data: URL rather than persisted anywhere.
  const synthesizeSpeech = async (
    request: AdapterSynthesizeSpeechRequest
  ): Promise<SpeechCompletion> => {
    const startedAt = performance.now();
    const apiKey = requireApiKey(request.apiKey);
    const resolvedModel = request.model || options.defaultModels?.speech || "gpt-4o-mini-tts";
    const resolvedVoice = request.voice || options.speech?.voice || "alloy";
    const resolvedFormat = options.speech?.format ?? "mp3";

    const response = await fetchWithPolicy({
      url: `${baseUrl}/audio/speech`,
      operation: "OpenAI speech synthesis",
      fetchFn: fetcher,
      policy: options.httpPolicy,
      createRequestInit: () => ({
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json",
          "Idempotency-Key": request.traceId,
          "X-Request-Id": request.traceId,
          "X-Plasius-Client": "@plasius/ai/openai-adapter",
        },
        body: JSON.stringify({
          model: resolvedModel,
          input: request.input,
          voice: resolvedVoice,
          // NOTE(review): OpenAI's /v1/audio/speech documents this
          // parameter as "response_format", not "format" — verify the API
          // accepts "format" before relying on non-default output formats.
          format: resolvedFormat,
        }),
      }),
    });

    if (!response.ok) {
      const errorBody = await parseResponseBody(response);
      throw new Error(
        resolveErrorMessage(
          errorBody,
          `OpenAI speech synthesis failed (${response.status} ${response.statusText}).`
        )
      );
    }

    // Mime type comes from the response header (minus any charset
    // parameters), defaulting to audio/mpeg.
    const mimeTypeHeader = response.headers.get("content-type");
    const mimeType = (mimeTypeHeader?.split(";")[0]?.trim() || "audio/mpeg").toLowerCase();
    const bytes = Buffer.from(await response.arrayBuffer()).toString("base64");
    const durationMs = performance.now() - startedAt;
    // No usage payload is read for audio responses.
    const base = createBaseCompletion("speech", resolvedModel, request.userId, durationMs);

    return {
      ...base,
      url: new URL(`data:${mimeType};base64,${bytes}`),
    };
  };

  // POST /audio/transcriptions (multipart form upload).
  const transcribeSpeech = async (
    request: AdapterTranscribeSpeechRequest
  ): Promise<TextCompletion> => {
    const startedAt = performance.now();
    const apiKey = requireApiKey(request.apiKey);
    const resolvedModel = request.model || options.defaultModels?.transcription || "gpt-4o-mini-transcribe";

    const response = await fetchWithPolicy({
      url: `${baseUrl}/audio/transcriptions`,
      operation: "OpenAI transcription",
      fetchFn: fetcher,
      policy: options.httpPolicy,
      // Built per attempt: a FormData body cannot be reused across retries.
      createRequestInit: () => {
        const formData = new FormData();
        formData.append("model", resolvedModel);
        // assumes request.input is an array-like of byte values — TODO confirm
        const fileBytes = Uint8Array.from(request.input);
        formData.append(
          "file",
          new Blob([fileBytes], { type: "application/octet-stream" }),
          "audio.webm"
        );

        return {
          method: "POST",
          // Content-Type is intentionally omitted so fetch can set the
          // multipart boundary itself.
          headers: {
            Authorization: `Bearer ${apiKey}`,
            "Idempotency-Key": request.traceId,
            "X-Request-Id": request.traceId,
            "X-Plasius-Client": "@plasius/ai/openai-adapter",
          },
          body: formData,
        };
      },
    });

    const body = await parseResponseBody(response);
    if (!response.ok) {
      throw new Error(
        resolveErrorMessage(
          body,
          `OpenAI transcription failed (${response.status} ${response.statusText}).`
        )
      );
    }

    const message = extractTranscriptionText(body);
    const durationMs = performance.now() - startedAt;
    const base = createBaseCompletion("text", resolvedModel, request.userId, durationMs);

    return {
      ...base,
      message,
    };
  };

  // POST /images/generations. context (if any) and input are concatenated
  // into a single prompt.
  const generateImage = async (
    request: AdapterGenerateImageRequest
  ): Promise<ImageCompletion> => {
    const startedAt = performance.now();
    const apiKey = requireApiKey(request.apiKey);
    const resolvedModel = request.model || options.defaultModels?.image || "gpt-image-1";
    const combinedPrompt = request.context
      ? `${request.context}\n\n${request.input}`
      : request.input;

    const response = await fetchWithPolicy({
      url: `${baseUrl}/images/generations`,
      operation: "OpenAI image generation",
      fetchFn: fetcher,
      policy: options.httpPolicy,
      createRequestInit: () => ({
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json",
          "Idempotency-Key": request.traceId,
          "X-Request-Id": request.traceId,
          "X-Plasius-Client": "@plasius/ai/openai-adapter",
        },
        body: JSON.stringify({
          model: resolvedModel,
          prompt: combinedPrompt,
          size: options.image?.size ?? "1024x1024",
          // NOTE(review): DALL·E models accept response_format, but
          // gpt-image-1 (the default here) is documented to reject it and
          // always return base64 — confirm against the Images API.
          response_format: "b64_json",
        }),
      }),
    });

    const body = await parseResponseBody(response);
    if (!response.ok) {
      throw new Error(
        resolveErrorMessage(
          body,
          `OpenAI image generation failed (${response.status} ${response.statusText}).`
        )
      );
    }

    // extractImageUrl prefers a hosted URL and falls back to a base64
    // data: URL.
    const url = extractImageUrl(body);
    const usage = usageFromPayload(asRecord(body).usage);
    const durationMs = performance.now() - startedAt;
    const base = createBaseCompletion("image", resolvedModel, request.userId, durationMs, usage);

    return {
      ...base,
      url,
    };
  };

  // "Model" generation is implemented as a JSON-mode chat completion that
  // asks the assistant for { modelId, artifactUrl? }.
  const generateModel = async (
    request: AdapterGenerateModelRequest
  ): Promise<ModelCompletion> => {
    const startedAt = performance.now();
    const apiKey = requireApiKey(request.apiKey);
    const resolvedModel = request.model || options.defaultModels?.model || "gpt-4.1-mini";
    // The caller's context (if any) is prepended to the JSON-shape
    // instruction; filter(Boolean) drops an absent context.
    const systemInstruction = [
      request.context,
      "Return JSON only with fields modelId (string) and optional artifactUrl (string URL).",
    ]
      .filter(Boolean)
      .join("\n");

    const response = await fetchWithPolicy({
      url: `${baseUrl}/chat/completions`,
      operation: "OpenAI model generation",
      fetchFn: fetcher,
      policy: options.httpPolicy,
      createRequestInit: () => ({
        method: "POST",
        headers: {
          Authorization: `Bearer ${apiKey}`,
          "Content-Type": "application/json",
          "Idempotency-Key": request.traceId,
          "X-Request-Id": request.traceId,
          "X-Plasius-Client": "@plasius/ai/openai-adapter",
        },
        body: JSON.stringify({
          model: resolvedModel,
          // JSON mode: constrains the assistant output to a JSON object.
          response_format: { type: "json_object" },
          messages: [
            { role: "system", content: systemInstruction },
            { role: "user", content: request.input },
          ],
        }),
      }),
    });

    const body = await parseResponseBody(response);
    if (!response.ok) {
      throw new Error(
        resolveErrorMessage(
          body,
          `OpenAI model generation failed (${response.status} ${response.statusText}).`
        )
      );
    }

    const responseText = extractChatMessage(body);
    const parsed = parseGeneratedModel(responseText);
    const usage = usageFromPayload(asRecord(body).usage);
    const durationMs = performance.now() - startedAt;
    const base = createBaseCompletion("model", resolvedModel, request.userId, durationMs, usage);

    return {
      ...base,
      modelId: parsed.modelId,
      artifactUrl: parsed.artifactUrl,
    };
  };

  // Local stub: no network call is made and the balance is always 0 —
  // presumably because OpenAI exposes no balance endpoint; confirm before
  // surfacing this value to users.
  const checkBalance = async (request: AdapterBalanceRequest): Promise<BalanceCompletion> => {
    const startedAt = performance.now();
    const resolvedModel = options.defaultModels?.chat ?? "";
    const durationMs = performance.now() - startedAt;
    const base = createBaseCompletion(
      "balanceCompletion",
      resolvedModel,
      request.userId,
      durationMs
    );

    return {
      ...base,
      balance: 0,
    };
  };

  return {
    id: providerId,
    capabilities: [
      AICapability.Chat,
      AICapability.Speech,
      AICapability.Image,
      AICapability.Model,
      AICapability.Balance,
    ],
    chatWithAI,
    synthesizeSpeech,
    transcribeSpeech,
    generateImage,
    generateModel,
    checkBalance,
  };
}
|