@plasius/ai 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +72 -0
- package/CODE_OF_CONDUCT.md +79 -0
- package/CONTRIBUTORS.md +27 -0
- package/LICENSE +21 -0
- package/README.md +132 -0
- package/SECURITY.md +17 -0
- package/dist/components/pixelverse/balance.d.ts +4 -0
- package/dist/components/pixelverse/balance.d.ts.map +1 -0
- package/dist/components/pixelverse/balance.js +40 -0
- package/dist/components/pixelverse/index.d.ts +3 -0
- package/dist/components/pixelverse/index.d.ts.map +1 -0
- package/dist/components/pixelverse/index.js +2 -0
- package/dist/components/pixelverse/pixelverseeditor.d.ts +16 -0
- package/dist/components/pixelverse/pixelverseeditor.d.ts.map +1 -0
- package/dist/components/pixelverse/pixelverseeditor.js +21 -0
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +1 -0
- package/dist/lib/chatWithAI.d.ts +2 -0
- package/dist/lib/chatWithAI.d.ts.map +1 -0
- package/dist/lib/chatWithAI.js +1 -0
- package/dist/lib/generateImage.d.ts +2 -0
- package/dist/lib/generateImage.d.ts.map +1 -0
- package/dist/lib/generateImage.js +1 -0
- package/dist/lib/synthesizeSpeech.d.ts +2 -0
- package/dist/lib/synthesizeSpeech.d.ts.map +1 -0
- package/dist/lib/synthesizeSpeech.js +1 -0
- package/dist/lib/transcribeSpeech.d.ts +2 -0
- package/dist/lib/transcribeSpeech.d.ts.map +1 -0
- package/dist/lib/transcribeSpeech.js +1 -0
- package/dist/platform/index.d.ts +76 -0
- package/dist/platform/index.d.ts.map +1 -0
- package/dist/platform/index.js +125 -0
- package/dist/platform/openai.d.ts +8 -0
- package/dist/platform/openai.d.ts.map +1 -0
- package/dist/platform/openai.js +61 -0
- package/dist/platform/pixelverse.d.ts +6 -0
- package/dist/platform/pixelverse.d.ts.map +1 -0
- package/dist/platform/pixelverse.js +196 -0
- package/dist-cjs/components/pixelverse/balance.d.ts +4 -0
- package/dist-cjs/components/pixelverse/balance.d.ts.map +1 -0
- package/dist-cjs/components/pixelverse/balance.js +46 -0
- package/dist-cjs/components/pixelverse/index.d.ts +3 -0
- package/dist-cjs/components/pixelverse/index.d.ts.map +1 -0
- package/dist-cjs/components/pixelverse/index.js +18 -0
- package/dist-cjs/components/pixelverse/pixelverseeditor.d.ts +16 -0
- package/dist-cjs/components/pixelverse/pixelverseeditor.d.ts.map +1 -0
- package/dist-cjs/components/pixelverse/pixelverseeditor.js +27 -0
- package/dist-cjs/index.d.ts +2 -0
- package/dist-cjs/index.d.ts.map +1 -0
- package/dist-cjs/index.js +17 -0
- package/dist-cjs/lib/chatWithAI.d.ts +1 -0
- package/dist-cjs/lib/chatWithAI.d.ts.map +1 -0
- package/dist-cjs/lib/chatWithAI.js +1 -0
- package/dist-cjs/lib/generateImage.d.ts +1 -0
- package/dist-cjs/lib/generateImage.d.ts.map +1 -0
- package/dist-cjs/lib/generateImage.js +1 -0
- package/dist-cjs/lib/synthesizeSpeech.d.ts +1 -0
- package/dist-cjs/lib/synthesizeSpeech.d.ts.map +1 -0
- package/dist-cjs/lib/synthesizeSpeech.js +1 -0
- package/dist-cjs/lib/transcribeSpeech.d.ts +1 -0
- package/dist-cjs/lib/transcribeSpeech.d.ts.map +1 -0
- package/dist-cjs/lib/transcribeSpeech.js +1 -0
- package/dist-cjs/platform/index.d.ts +76 -0
- package/dist-cjs/platform/index.d.ts.map +1 -0
- package/dist-cjs/platform/index.js +128 -0
- package/dist-cjs/platform/openai.d.ts +8 -0
- package/dist-cjs/platform/openai.d.ts.map +1 -0
- package/dist-cjs/platform/openai.js +67 -0
- package/dist-cjs/platform/pixelverse.d.ts +6 -0
- package/dist-cjs/platform/pixelverse.d.ts.map +1 -0
- package/dist-cjs/platform/pixelverse.js +199 -0
- package/docs/adrs/adr-0001-ai-package-scope.md +21 -0
- package/docs/adrs/adr-0002-public-repo-governance.md +24 -0
- package/docs/adrs/adr-0003-contracts-first-documentation.md +25 -0
- package/docs/adrs/adr-template.md +35 -0
- package/docs/api-reference.md +64 -0
- package/docs/architecture.md +21 -0
- package/docs/providers.md +26 -0
- package/legal/CLA-REGISTRY.csv +1 -0
- package/legal/CLA.md +22 -0
- package/legal/CORPORATE_CLA.md +57 -0
- package/legal/INDIVIDUAL_CLA.md +91 -0
- package/package.json +117 -0
- package/src/components/pixelverse/balance.module.css +6 -0
- package/src/components/pixelverse/balance.tsx +65 -0
- package/src/components/pixelverse/index.ts +2 -0
- package/src/components/pixelverse/pixelverseeditor.mocule.css +0 -0
- package/src/components/pixelverse/pixelverseeditor.tsx +74 -0
- package/src/global.d.ts +9 -0
- package/src/index.ts +1 -0
- package/src/lib/chatWithAI.ts +0 -0
- package/src/lib/generateImage.ts +0 -0
- package/src/lib/synthesizeSpeech.ts +0 -0
- package/src/lib/transcribeSpeech.ts +0 -0
- package/src/platform/index.ts +237 -0
- package/src/platform/openai.ts +123 -0
- package/src/platform/pixelverse.ts +309 -0
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
import { createSchema, field } from "@plasius/schema";
|
|
2
|
+
import type { FieldBuilder, SchemaShape } from "@plasius/schema";
|
|
3
|
+
|
|
4
|
+
// Base schema shared by every AI completion record (chat, text, speech,
// image, video, balance). Rows are persisted to the "completions" table.
// NOTE(review): schemaValidator always returns true — validation appears to
// be stubbed; confirm whether @plasius/schema requires a real validator here.
export const completionSchema = createSchema(
  {
    // Unique identifier for this completion record.
    id: field
      .string()
      .description("A unique ID for this completion")
      .version("1.0"),
    // Discriminator naming the completion variant.
    type: field
      .string()
      .description("The type of completion (e.g. 'chat', 'text', 'speech')")
      .version("1.0"),
    model: field
      .string()
      .description("The model used to generate this completion")
      .version("1.0"),
    durationMs: field
      .number()
      .description("How long the AI task took in milliseconds")
      .version("1.0"),
    createdAt: field
      .string()
      .description("ISO timestamp when the completion was created")
      .version("1.0"),
    // Storage partition key — the requesting user/system identifier.
    partitionKey: field
      .string()
      .description("User or system identifier that made the request")
      .version("1.0"),
    // Open-ended numeric metrics (token counts, cost, …).
    // NOTE(review): the `{} as SchemaShape` cast bypasses shape checking —
    // confirm @plasius/schema supports open object fields directly.
    usage: field
      .object<Record<string, FieldBuilder<number>>>({} as SchemaShape)
      .description("Optional usage metrics like token count or cost")
      .version("1.0")
      .optional()
      .as<Record<string, number>>(),
  },
  "completion",
  {
    version: "1.0",
    piiEnforcement: "none",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);
|
|
47
|
+
|
|
48
|
+
/**
 * Runtime shape of a persisted completion record; mirrors
 * `completionSchema` field-for-field.
 */
export interface Completion {
  /** Unique ID for this completion. */
  id: string;
  /** User or system identifier that made the request. */
  partitionKey: string;
  /** Completion variant (e.g. 'chat', 'text', 'speech'). */
  type: string;
  /** Model used to generate this completion. */
  model: string;
  /** Duration of the AI task in milliseconds. */
  durationMs: number;
  /** ISO timestamp when the completion was created. */
  createdAt: string;
  /** Optional usage metrics like token count or cost. */
  usage?: Record<string, number>;
}
|
|
57
|
+
|
|
58
|
+
// Chat-specific payload layered on top of the base completion metadata.
// NOTE(review): unlike completionSchema, no piiEnforcement is set here —
// confirm whether chat messages should be PII-checked.
export const chatCompletionSchema = createSchema(
  {
    message: field
      .string()
      .description("The response from the AI")
      .version("1.0"),
    outputUser: field
      .string()
      .description("The 'actor' who is chatting")
      .version("1.0"),
  },
  "chatCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/** Chat completion record: base metadata plus the chat payload. */
export interface ChatCompletion extends Completion {
  /** The response text from the AI. */
  message: string;
  /** The 'actor' who is chatting. */
  outputUser: string;
}
|
|
83
|
+
|
|
84
|
+
// Text-completion payload: just the generated message text.
export const textCompletionSchema = createSchema(
  {
    message: field
      .string()
      .description("The response from the AI")
      .version("1.0"),
  },
  "textCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/**
 * Text completion record. Also the result type of speech transcription
 * (see AIPlatform.transcribeSpeech).
 */
export interface TextCompletion extends Completion {
  /** The response text from the AI. */
  message: string;
}
|
|
104
|
+
|
|
105
|
+
// Image-completion payload. The url field is declared as a string but
// surfaced to callers as a URL via `.as<URL>()`.
export const imageCompletionSchema = createSchema(
  {
    url: field
      .string()
      .description("The response from the AI")
      .version("1.0")
      .as<URL>(),
  },
  "imageCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/** Image completion record: base metadata plus the generated image URL. */
export interface ImageCompletion extends Completion {
  /** Location of the generated image. */
  url: URL;
}
|
|
126
|
+
|
|
127
|
+
// Speech-completion payload. The url field is declared as a string but
// surfaced to callers as a URL via `.as<URL>()`.
export const speechCompletionSchema = createSchema(
  {
    url: field
      .string()
      .description("The response from the AI")
      .version("1.0")
      .as<URL>(),
  },
  "speechCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/** Speech completion record: base metadata plus the generated audio URL. */
export interface SpeechCompletion extends Completion {
  /** Location of the synthesized audio. */
  url: URL;
}
|
|
148
|
+
|
|
149
|
+
// Video-completion payload. The url field is declared as a string but
// surfaced to callers as a URL via `.as<URL>()`.
export const videoCompletionSchema = createSchema(
  {
    url: field
      .string()
      .description("The response from the AI")
      .version("1.0")
      .as<URL>(),
  },
  "videoCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/** Video completion record: base metadata plus the generated video URL. */
export interface VideoCompletion extends Completion {
  /** Location of the generated video. */
  url: URL;
}
|
|
170
|
+
|
|
171
|
+
// Balance-check payload: a single numeric balance value.
export const balanceCompletionSchema = createSchema(
  {
    balance: field.number().description("Current balance").version("1.0"),
  },
  "balanceCompletion",
  {
    version: "1.0",
    table: "completions",
    schemaValidator: () => {
      return true;
    },
  }
);

/** Balance completion record: base metadata plus the current balance. */
export interface BalanceCompletion extends Completion {
  /** Current balance. NOTE(review): units/currency are provider-defined — confirm. */
  balance: number;
}
|
|
188
|
+
|
|
189
|
+
// Capabilities a platform may advertise; used with AIPlatform.canHandle.
// NOTE(review): numeric enum — members serialize as 0..5. A string enum or
// literal union would be more robust, but changing it now would alter any
// persisted/compared values, so the numeric form is kept.
export enum AICapability {
  Chat,
  Text,
  Speech,
  Image,
  Video,
  Balance,
}
|
|
197
|
+
|
|
198
|
+
export interface AIPlatform {
|
|
199
|
+
canHandle?: (
|
|
200
|
+
userId: string,
|
|
201
|
+
capabilities: AICapability[]
|
|
202
|
+
) => Promise<boolean>;
|
|
203
|
+
chatWithAI: (
|
|
204
|
+
userId: string,
|
|
205
|
+
input: string,
|
|
206
|
+
context: string,
|
|
207
|
+
model: string
|
|
208
|
+
) => Promise<ChatCompletion>;
|
|
209
|
+
synthesizeSpeech: (
|
|
210
|
+
userId: string,
|
|
211
|
+
input: string,
|
|
212
|
+
voice: string,
|
|
213
|
+
context: string,
|
|
214
|
+
model: string
|
|
215
|
+
) => Promise<SpeechCompletion>;
|
|
216
|
+
transcribeSpeech: (
|
|
217
|
+
userId: string,
|
|
218
|
+
input: Buffer,
|
|
219
|
+
context: string,
|
|
220
|
+
model: string
|
|
221
|
+
) => Promise<TextCompletion>;
|
|
222
|
+
generateImage: (
|
|
223
|
+
userId: string,
|
|
224
|
+
input: string,
|
|
225
|
+
context: string,
|
|
226
|
+
model: string
|
|
227
|
+
) => Promise<ImageCompletion>;
|
|
228
|
+
produceVideo: (
|
|
229
|
+
userId: string,
|
|
230
|
+
imput: string,
|
|
231
|
+
image: URL,
|
|
232
|
+
context: string,
|
|
233
|
+
model: string
|
|
234
|
+
) => Promise<VideoCompletion>;
|
|
235
|
+
checkBalance: (userId: string) => Promise<BalanceCompletion>;
|
|
236
|
+
currentBalance: number;
|
|
237
|
+
}
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
import OpenAI from "openai";
|
|
2
|
+
import type {
|
|
3
|
+
AIPlatform,
|
|
4
|
+
BalanceCompletion,
|
|
5
|
+
ChatCompletion,
|
|
6
|
+
Completion,
|
|
7
|
+
ImageCompletion,
|
|
8
|
+
SpeechCompletion,
|
|
9
|
+
TextCompletion,
|
|
10
|
+
VideoCompletion,
|
|
11
|
+
} from "./index.js";
|
|
12
|
+
import { useState } from "react";
|
|
13
|
+
|
|
14
|
+
/** Credentials used to construct the OpenAI client. */
export interface OpenAIPlatformProps {
  /** OpenAI API key. */
  openaiAPIKey: string;
  /** OpenAI project identifier. */
  openaiProjectKey: string;
  /** OpenAI organization ID. */
  openaiOrgID: string;
}
|
|
19
|
+
|
|
20
|
+
export async function OpenAIPlatform(
|
|
21
|
+
userId: string,
|
|
22
|
+
props: OpenAIPlatformProps
|
|
23
|
+
): Promise<AIPlatform> {
|
|
24
|
+
const openai = new OpenAI({
|
|
25
|
+
apiKey: props.openaiAPIKey,
|
|
26
|
+
project: props.openaiProjectKey,
|
|
27
|
+
organization: props.openaiOrgID,
|
|
28
|
+
dangerouslyAllowBrowser: false,
|
|
29
|
+
});
|
|
30
|
+
|
|
31
|
+
void openai;
|
|
32
|
+
|
|
33
|
+
function baseCompletionData(
|
|
34
|
+
type: string,
|
|
35
|
+
model: string,
|
|
36
|
+
requestor: string,
|
|
37
|
+
duration: number
|
|
38
|
+
): Completion {
|
|
39
|
+
return {
|
|
40
|
+
partitionKey: requestor,
|
|
41
|
+
id: crypto.randomUUID(),
|
|
42
|
+
type,
|
|
43
|
+
model,
|
|
44
|
+
createdAt: new Date().toISOString(),
|
|
45
|
+
durationMs: duration,
|
|
46
|
+
usage: {},
|
|
47
|
+
};
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
const chatWithAI = (
|
|
51
|
+
userId: string,
|
|
52
|
+
input: string,
|
|
53
|
+
context: string,
|
|
54
|
+
model: string
|
|
55
|
+
): Promise<ChatCompletion> => {
|
|
56
|
+
void [input, context, model];
|
|
57
|
+
const base = baseCompletionData("chat", "model", userId, 0);
|
|
58
|
+
return Promise.resolve({ ...base, message: "Something", outputUser: "" });
|
|
59
|
+
};
|
|
60
|
+
|
|
61
|
+
const synthesizeSpeech = (
|
|
62
|
+
userId: string,
|
|
63
|
+
input: string,
|
|
64
|
+
voice: string,
|
|
65
|
+
context: string,
|
|
66
|
+
model: string
|
|
67
|
+
): Promise<SpeechCompletion> => {
|
|
68
|
+
void [input, voice, context, model];
|
|
69
|
+
const base = baseCompletionData("chat", "model", userId, 0);
|
|
70
|
+
return Promise.resolve({ ...base, url: new URL("Something") });
|
|
71
|
+
};
|
|
72
|
+
|
|
73
|
+
const transcribeSpeech = (
|
|
74
|
+
userId: string,
|
|
75
|
+
input: Buffer,
|
|
76
|
+
context: string,
|
|
77
|
+
model: string
|
|
78
|
+
): Promise<TextCompletion> => {
|
|
79
|
+
void [input, context, model];
|
|
80
|
+
const base = baseCompletionData("chat", "model", userId, 0);
|
|
81
|
+
return Promise.resolve({ ...base, message: "Something" });
|
|
82
|
+
};
|
|
83
|
+
|
|
84
|
+
const generateImage = (
|
|
85
|
+
userId: string,
|
|
86
|
+
input: string,
|
|
87
|
+
context: string,
|
|
88
|
+
model: string
|
|
89
|
+
): Promise<ImageCompletion> => {
|
|
90
|
+
void [input, context, model];
|
|
91
|
+
const base = baseCompletionData("chat", "model", userId, 0);
|
|
92
|
+
return Promise.resolve({ ...base, url: new URL("Something") });
|
|
93
|
+
};
|
|
94
|
+
|
|
95
|
+
const produceVideo = (
|
|
96
|
+
userId: string,
|
|
97
|
+
imput: string,
|
|
98
|
+
image: URL,
|
|
99
|
+
context: string,
|
|
100
|
+
model: string
|
|
101
|
+
): Promise<VideoCompletion> => {
|
|
102
|
+
void [imput, image, context, model];
|
|
103
|
+
const base = baseCompletionData("chat", "model", userId, 0);
|
|
104
|
+
return Promise.resolve({ ...base, url: new URL("Something") });
|
|
105
|
+
};
|
|
106
|
+
|
|
107
|
+
const checkBalance = (userId: string): Promise<BalanceCompletion> => {
|
|
108
|
+
const base = baseCompletionData("balanceCompletion", "", userId, 0);
|
|
109
|
+
return Promise.resolve({ ...base, balance: 0.0 });
|
|
110
|
+
};
|
|
111
|
+
|
|
112
|
+
const [currentBalance] = useState<number>((await checkBalance(userId)).balance! as number);
|
|
113
|
+
|
|
114
|
+
return {
|
|
115
|
+
chatWithAI,
|
|
116
|
+
synthesizeSpeech,
|
|
117
|
+
transcribeSpeech,
|
|
118
|
+
generateImage,
|
|
119
|
+
produceVideo,
|
|
120
|
+
checkBalance,
|
|
121
|
+
currentBalance,
|
|
122
|
+
};
|
|
123
|
+
}
|
|
@@ -0,0 +1,309 @@
|
|
|
1
|
+
import { v4 as uuidv4 } from "uuid";
|
|
2
|
+
import { performance } from "perf_hooks";
|
|
3
|
+
|
|
4
|
+
import type {
|
|
5
|
+
AIPlatform,
|
|
6
|
+
BalanceCompletion,
|
|
7
|
+
ChatCompletion,
|
|
8
|
+
Completion,
|
|
9
|
+
ImageCompletion,
|
|
10
|
+
SpeechCompletion,
|
|
11
|
+
TextCompletion,
|
|
12
|
+
VideoCompletion,
|
|
13
|
+
} from "./index.js";
|
|
14
|
+
import { useState } from "react";
|
|
15
|
+
|
|
16
|
+
// Thin DTOs for PixelVerse API responses. Only the fields this module
// actually reads are modelled; `Resp` is the API's envelope object.

/** Response of POST image/upload — `id` identifies the uploaded image. */
interface UploadImageResponse {
  Resp?: { id?: number };
}

/** Response of POST video/img/generate — `id` identifies the video job. */
interface GenerateVideoResponse {
  Resp?: { id?: number };
}

/** Response of GET video/result/:id — this module treats status === 1 as complete. */
interface VideoStatusResponse {
  Resp?: { status?: number; url?: string };
}

/** Response of GET account/balance — credits split into monthly and package. */
interface BalanceResponse {
  Resp?: { credit_monthly?: number; credit_package?: number };
}
|
|
31
|
+
|
|
32
|
+
/** Configuration for PixelVersePlatform. */
export interface PixelVersePlatformProps {
  /** API key sent in the `API-KEY` header on every request. */
  pixelVerseAPIKey: string;
}
|
|
35
|
+
|
|
36
|
+
export async function PixelVersePlatform(
|
|
37
|
+
userId: string,
|
|
38
|
+
props: PixelVersePlatformProps
|
|
39
|
+
): Promise<AIPlatform> {
|
|
40
|
+
async function uploadImage(image: File | URL, apiKey: string): Promise<UploadImageResponse> {
|
|
41
|
+
const headers = new Headers();
|
|
42
|
+
headers.append("API-KEY", apiKey);
|
|
43
|
+
headers.append("Ai-trace-id", uuidv4());
|
|
44
|
+
headers.append("Access-Control-Allow-Origin", "*");
|
|
45
|
+
|
|
46
|
+
const formData = new FormData();
|
|
47
|
+
if (image instanceof File) {
|
|
48
|
+
formData.append("image", image, "");
|
|
49
|
+
} else {
|
|
50
|
+
const blob = await fetch(image.toString()).then((r) => r.blob());
|
|
51
|
+
formData.append("image", blob, "image-from-url");
|
|
52
|
+
}
|
|
53
|
+
|
|
54
|
+
// pixelapi is proxied through the vite.config.ts file
|
|
55
|
+
// to avoid CORS issues and to allow for local development
|
|
56
|
+
const response = await fetch("/pixelapi/openapi/v2/image/upload", {
|
|
57
|
+
method: "POST",
|
|
58
|
+
headers,
|
|
59
|
+
body: formData,
|
|
60
|
+
redirect: "follow",
|
|
61
|
+
});
|
|
62
|
+
const data = (await response.json()) as UploadImageResponse;
|
|
63
|
+
return data;
|
|
64
|
+
}
|
|
65
|
+
|
|
66
|
+
async function generateVideo(
|
|
67
|
+
imgId: number,
|
|
68
|
+
prompt: string,
|
|
69
|
+
apiKey: string,
|
|
70
|
+
seed?: number,
|
|
71
|
+
template_id?: string,
|
|
72
|
+
negative_prompt?: string
|
|
73
|
+
): Promise<GenerateVideoResponse> {
|
|
74
|
+
const headers = new Headers();
|
|
75
|
+
headers.append("API-KEY", apiKey);
|
|
76
|
+
headers.append("Ai-trace-id", uuidv4());
|
|
77
|
+
headers.append("Content-Type", "application/json");
|
|
78
|
+
headers.append("Access-Control-Allow-Origin", "*");
|
|
79
|
+
headers.append("Accept", "application/json");
|
|
80
|
+
const values: {
|
|
81
|
+
duration: number;
|
|
82
|
+
img_id: number;
|
|
83
|
+
model: string;
|
|
84
|
+
motion_mode: string;
|
|
85
|
+
prompt: string;
|
|
86
|
+
quality: string;
|
|
87
|
+
water_mark: boolean;
|
|
88
|
+
seed?: number;
|
|
89
|
+
template_id?: string;
|
|
90
|
+
negative_prompt?: string;
|
|
91
|
+
} = {
|
|
92
|
+
duration: 5,
|
|
93
|
+
img_id: imgId,
|
|
94
|
+
model: "v3.5",
|
|
95
|
+
motion_mode: "normal",
|
|
96
|
+
prompt: prompt,
|
|
97
|
+
quality: "720p",
|
|
98
|
+
water_mark: false,
|
|
99
|
+
};
|
|
100
|
+
|
|
101
|
+
if (seed) {
|
|
102
|
+
values.seed = seed;
|
|
103
|
+
}
|
|
104
|
+
if (template_id) {
|
|
105
|
+
values.template_id = template_id;
|
|
106
|
+
}
|
|
107
|
+
if (negative_prompt) {
|
|
108
|
+
values.negative_prompt = negative_prompt;
|
|
109
|
+
}
|
|
110
|
+
|
|
111
|
+
const body = JSON.stringify(values);
|
|
112
|
+
|
|
113
|
+
// pixelapi is proxied through the vite.config.ts file
|
|
114
|
+
// to avoid CORS issues and to allow for local development
|
|
115
|
+
const response = await fetch("/pixelapi/openapi/v2/video/img/generate", {
|
|
116
|
+
method: "POST",
|
|
117
|
+
headers: headers,
|
|
118
|
+
referrerPolicy: "no-referrer",
|
|
119
|
+
body,
|
|
120
|
+
});
|
|
121
|
+
const data = (await response.json()) as GenerateVideoResponse;
|
|
122
|
+
return data;
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
async function checkVideoStatus(id: number, apiKey: string): Promise<VideoStatusResponse> {
|
|
126
|
+
const headers = new Headers();
|
|
127
|
+
headers.append("API-KEY", apiKey);
|
|
128
|
+
headers.append("Ai-trace-id", uuidv4());
|
|
129
|
+
headers.append("Access-Control-Allow-Origin", "*");
|
|
130
|
+
headers.append("Accept", "application/json");
|
|
131
|
+
|
|
132
|
+
// pixelapi is proxied through the vite.config.ts file
|
|
133
|
+
// to avoid CORS issues and to allow for local development
|
|
134
|
+
const response = await fetch(`/pixelapi/openapi/v2/video/result/${id}`, {
|
|
135
|
+
method: "GET",
|
|
136
|
+
headers,
|
|
137
|
+
referrerPolicy: "no-referrer",
|
|
138
|
+
});
|
|
139
|
+
const data = (await response.json()) as VideoStatusResponse;
|
|
140
|
+
return data;
|
|
141
|
+
}
|
|
142
|
+
|
|
143
|
+
function baseCompletionData(
|
|
144
|
+
type: string,
|
|
145
|
+
model: string,
|
|
146
|
+
requestor: string,
|
|
147
|
+
duration: number
|
|
148
|
+
): Completion {
|
|
149
|
+
return {
|
|
150
|
+
partitionKey: requestor,
|
|
151
|
+
id: crypto.randomUUID(),
|
|
152
|
+
type,
|
|
153
|
+
model,
|
|
154
|
+
createdAt: new Date().toISOString(),
|
|
155
|
+
durationMs: duration,
|
|
156
|
+
usage: {},
|
|
157
|
+
};
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
const chatWithAI = (
|
|
161
|
+
_userId: string,
|
|
162
|
+
_input: string,
|
|
163
|
+
_context: string,
|
|
164
|
+
_model: string
|
|
165
|
+
): Promise<ChatCompletion> => {
|
|
166
|
+
void [_userId, _input, _context, _model];
|
|
167
|
+
return Promise.reject(new Error("Not implemented"));
|
|
168
|
+
};
|
|
169
|
+
|
|
170
|
+
const synthesizeSpeech = (
|
|
171
|
+
_userId: string,
|
|
172
|
+
_input: string,
|
|
173
|
+
_voice: string,
|
|
174
|
+
_context: string,
|
|
175
|
+
_model: string
|
|
176
|
+
): Promise<SpeechCompletion> => {
|
|
177
|
+
void [_userId, _input, _voice, _context, _model];
|
|
178
|
+
return Promise.reject(new Error("Not implemented"));
|
|
179
|
+
};
|
|
180
|
+
|
|
181
|
+
const transcribeSpeech = (
|
|
182
|
+
_userId: string,
|
|
183
|
+
_input: Buffer,
|
|
184
|
+
_context: string,
|
|
185
|
+
_model: string
|
|
186
|
+
): Promise<TextCompletion> => {
|
|
187
|
+
void [_userId, _input, _context, _model];
|
|
188
|
+
return Promise.reject(new Error("Not implemented"));
|
|
189
|
+
};
|
|
190
|
+
|
|
191
|
+
const generateImage = (
|
|
192
|
+
_userId: string,
|
|
193
|
+
_input: string,
|
|
194
|
+
_context: string,
|
|
195
|
+
_model: string
|
|
196
|
+
): Promise<ImageCompletion> => {
|
|
197
|
+
void [_userId, _input, _context, _model];
|
|
198
|
+
return Promise.reject(new Error("Not implemented"));
|
|
199
|
+
};
|
|
200
|
+
|
|
201
|
+
const produceVideo = (
|
|
202
|
+
userId: string,
|
|
203
|
+
input: string,
|
|
204
|
+
image: File | URL,
|
|
205
|
+
context: string,
|
|
206
|
+
model: string
|
|
207
|
+
): Promise<VideoCompletion> => {
|
|
208
|
+
const start = performance.now();
|
|
209
|
+
return uploadImage(image, props.pixelVerseAPIKey)
|
|
210
|
+
.then((uploadResult: UploadImageResponse) => {
|
|
211
|
+
const imageId = uploadResult?.Resp?.id;
|
|
212
|
+
if (!imageId) throw new Error("Invalid image upload response.");
|
|
213
|
+
return generateVideo(imageId, input, props.pixelVerseAPIKey);
|
|
214
|
+
})
|
|
215
|
+
.then((generated: GenerateVideoResponse) => {
|
|
216
|
+
const videoId = generated?.Resp?.id;
|
|
217
|
+
if (!videoId)
|
|
218
|
+
throw new Error("Video generation did not return a valid ID.");
|
|
219
|
+
return waitForVideoCompletion(videoId, props.pixelVerseAPIKey);
|
|
220
|
+
})
|
|
221
|
+
.then((videoUrl) => {
|
|
222
|
+
const duration = performance.now() - start;
|
|
223
|
+
const base = baseCompletionData("video", model, userId, duration);
|
|
224
|
+
return {
|
|
225
|
+
...base,
|
|
226
|
+
url: new URL(videoUrl),
|
|
227
|
+
};
|
|
228
|
+
})
|
|
229
|
+
.catch((err) => {
|
|
230
|
+
// Optional: log or re-throw error for upstream handling
|
|
231
|
+
throw new Error(`produceVideo failed: ${(err as Error).message}`);
|
|
232
|
+
});
|
|
233
|
+
};
|
|
234
|
+
|
|
235
|
+
async function waitForVideoCompletion(
|
|
236
|
+
videoId: number,
|
|
237
|
+
apiKey: string,
|
|
238
|
+
maxRetries = 20,
|
|
239
|
+
delayMs = 3000
|
|
240
|
+
): Promise<string> {
|
|
241
|
+
for (let attempt = 0; attempt < maxRetries; attempt++) {
|
|
242
|
+
await new Promise((res) => setTimeout(res, delayMs));
|
|
243
|
+
try {
|
|
244
|
+
const videoCheck: VideoStatusResponse = await checkVideoStatus(videoId, apiKey);
|
|
245
|
+
if (videoCheck?.Resp?.status === 1) {
|
|
246
|
+
const url = videoCheck?.Resp?.url;
|
|
247
|
+
if (!url)
|
|
248
|
+
throw new Error("Video marked complete but no URL returned.");
|
|
249
|
+
return url;
|
|
250
|
+
}
|
|
251
|
+
} catch (err) {
|
|
252
|
+
console.warn(
|
|
253
|
+
`Attempt ${attempt + 1} failed: ${(err as Error).message}`
|
|
254
|
+
);
|
|
255
|
+
}
|
|
256
|
+
}
|
|
257
|
+
throw new Error("Timed out waiting for video to complete.");
|
|
258
|
+
}
|
|
259
|
+
|
|
260
|
+
const checkBalance = (userId: string): Promise<BalanceCompletion> => {
|
|
261
|
+
const start = performance.now();
|
|
262
|
+
const headers = new Headers();
|
|
263
|
+
headers.append("API-KEY", props.pixelVerseAPIKey);
|
|
264
|
+
headers.append("AI-trace-ID", uuidv4());
|
|
265
|
+
headers.append("Access-Control-Allow-Origin", "*");
|
|
266
|
+
headers.append("Accept", "application/json");
|
|
267
|
+
headers.append("Content-Type", "application/json");
|
|
268
|
+
|
|
269
|
+
return fetch("/pixelapi/openapi/v2/account/balance", {
|
|
270
|
+
method: "GET",
|
|
271
|
+
headers,
|
|
272
|
+
referrerPolicy: "no-referrer",
|
|
273
|
+
})
|
|
274
|
+
.then(async (res): Promise<BalanceResponse> => (await res.json()) as BalanceResponse)
|
|
275
|
+
.then((data) => {
|
|
276
|
+
if (!data?.Resp) {
|
|
277
|
+
throw new Error("Invalid balance response");
|
|
278
|
+
}
|
|
279
|
+
const duration = performance.now() - start;
|
|
280
|
+
const base = baseCompletionData(
|
|
281
|
+
"balanceCompletion",
|
|
282
|
+
"",
|
|
283
|
+
userId,
|
|
284
|
+
duration
|
|
285
|
+
);
|
|
286
|
+
const monthly = data.Resp.credit_monthly ?? 0;
|
|
287
|
+
const pkg = data.Resp.credit_package ?? 0;
|
|
288
|
+
return {
|
|
289
|
+
...base,
|
|
290
|
+
balance: monthly + pkg,
|
|
291
|
+
};
|
|
292
|
+
})
|
|
293
|
+
.catch((err) => {
|
|
294
|
+
throw new Error(`checkBalance failed: ${(err as Error).message}`);
|
|
295
|
+
});
|
|
296
|
+
};
|
|
297
|
+
|
|
298
|
+
const [currentBalance] = useState<number>((await checkBalance(userId)).balance as number ?? 0);
|
|
299
|
+
|
|
300
|
+
return {
|
|
301
|
+
chatWithAI,
|
|
302
|
+
synthesizeSpeech,
|
|
303
|
+
transcribeSpeech,
|
|
304
|
+
generateImage,
|
|
305
|
+
produceVideo,
|
|
306
|
+
checkBalance,
|
|
307
|
+
currentBalance,
|
|
308
|
+
};
|
|
309
|
+
}
|