prompt-api-polyfill 1.0.0 → 1.1.0
This diff shows the published contents of the two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/LICENSE +201 -0
- package/README.md +27 -7
- package/dist/backends/firebase.js +1808 -0
- package/dist/backends/gemini.js +55 -0
- package/dist/backends/openai.js +199 -0
- package/dist/backends/transformers.js +254 -0
- package/{backends/base.js → dist/chunks/defaults-CNQngzSd.js} +29 -24
- package/dist/prompt-api-polyfill.js +1031 -0
- package/dot_env.json +3 -0
- package/package.json +21 -16
- package/async-iterator-polyfill.js +0 -16
- package/backends/defaults.js +0 -13
- package/backends/firebase.js +0 -49
- package/backends/gemini.js +0 -52
- package/backends/openai.js +0 -337
- package/backends/transformers.js +0 -451
- package/json-schema-converter.js +0 -88
- package/multimodal-converter.js +0 -383
- package/prompt-api-polyfill.js +0 -1467
@@ -0,0 +1,55 @@ package/dist/backends/gemini.js (new file)

Minified build output: `u` is the Gemini backend class; `a` and `i` are the shared base class (`P`) and backend defaults (`D`) from the new chunk module.

```js
import { GoogleGenAI as s } from "@google/genai";
import { P as a, D as i } from "../chunks/defaults-CNQngzSd.js";
class u extends a {
  #n;
  #e;
  #t;
  constructor(t) {
    super(t.modelName || i.gemini.modelName), this.#n = new s({ apiKey: t.apiKey });
  }
  createSession(t, e) {
    return this.#t = e, this.#e = t.modelName || this.modelName, { model: this.#e, params: e };
  }
  async generateContent(t) {
    const e = {
      systemInstruction: this.#t.systemInstruction,
      temperature: this.#t.generationConfig?.temperature,
      topK: this.#t.generationConfig?.topK
    }, n = await this.#n.models.generateContent({
      model: this.#e,
      contents: t,
      config: e
    }), o = n.usageMetadata?.promptTokenCount || 0;
    return { text: n.text, usage: o };
  }
  async generateContentStream(t) {
    const e = {
      systemInstruction: this.#t.systemInstruction,
      temperature: this.#t.generationConfig?.temperature,
      topK: this.#t.generationConfig?.topK
    }, n = await this.#n.models.generateContentStream({
      model: this.#e,
      contents: t,
      config: e
    });
    return (async function* () {
      for await (const o of n)
        yield {
          text: () => o.text,
          usageMetadata: {
            totalTokenCount: o.usageMetadata?.totalTokenCount || 0
          }
        };
    })();
  }
  async countTokens(t) {
    const { totalTokens: e } = await this.#n.models.countTokens({
      model: this.#e,
      contents: t
    });
    return e;
  }
}
export {
  u as default
};
```
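Taken together, each backend exposes the same four-method contract (`createSession`, `generateContent`, `generateContentStream`, `countTokens`) over Gemini-style `{ role, parts }` content. A minimal sketch of driving this backend directly; the import path, the `GeminiBackend` name, and the key handling are illustrative assumptions, since applications would normally go through the polyfill's `LanguageModel` entry point:

```js
// Hypothetical direct use of the backend above; path and names are assumed.
import GeminiBackend from "prompt-api-polyfill/dist/backends/gemini.js";

const backend = new GeminiBackend({ apiKey: "YOUR_GEMINI_API_KEY" });
backend.createSession({}, { systemInstruction: "Answer in one sentence." });

// Content entries use Gemini-style { role, parts } objects.
const { text, usage } = await backend.generateContent([
  { role: "user", parts: [{ text: "What is the Prompt API?" }] }
]);
console.log(text, "| prompt tokens:", usage);
```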
@@ -0,0 +1,199 @@ package/dist/backends/openai.js (new file)

Minified build output: `y` is the OpenAI backend. It maps Gemini-style content to Chat Completions messages, switches to the `-audio-preview` model variant when audio parts are present, and normalizes response schemas for Structured Outputs.

```js
import u from "openai";
import { P as m, D as l } from "../chunks/defaults-CNQngzSd.js";
class y extends m {
  #e;
  constructor(t) {
    super(t.modelName || l.openai.modelName), this.config = t, this.openai = new u({
      apiKey: t.apiKey,
      dangerouslyAllowBrowser: !0
      // Required for client-side usage
    });
  }
  static availability(t = {}) {
    if (t.expectedInputs) {
      const s = t.expectedInputs.some(
        (e) => e.type === "audio"
      ), r = t.expectedInputs.some(
        (e) => e.type === "image"
      );
      if (s && r)
        return "unavailable";
    }
    return "available";
  }
  createSession(t, s) {
    this.#e = {
      model: t.modelName || this.modelName,
      temperature: s.generationConfig?.temperature,
      top_p: 1,
      // Default to 1.0 as topK is not directly supported the same way
      systemInstruction: s.systemInstruction
    };
    const r = s.generationConfig || {};
    if (r.responseSchema) {
      const { schema: e, wrapped: o } = this.#o(
        r.responseSchema
      );
      this.#e.response_format = {
        type: "json_schema",
        json_schema: {
          name: "response",
          strict: !0,
          schema: e
        }
      }, this.#e.response_wrapped = o;
    } else r.responseMimeType === "application/json" && (this.#e.response_format = { type: "json_object" });
    return this.#e;
  }
  /**
   * OpenAI Structured Outputs require:
   * 1. All fields in objects to be marked as 'required'.
   * 2. Objects to have 'additionalProperties: false'.
   * 3. The root must be an 'object'.
   */
  #o(t) {
    if (typeof t != "object" || t === null)
      return { schema: t, wrapped: !1 };
    const s = (e) => {
      if (e.type === "object")
        if (e.properties) {
          e.additionalProperties = !1, e.required = Object.keys(e.properties);
          for (const o in e.properties)
            s(e.properties[o]);
        } else
          e.additionalProperties = !1, e.required = [];
      else e.type === "array" && e.items && s(e.items);
      return e;
    }, r = JSON.parse(JSON.stringify(t));
    return r.type !== "object" ? {
      wrapped: !0,
      schema: {
        type: "object",
        properties: { value: r },
        required: ["value"],
        additionalProperties: !1
      }
    } : {
      wrapped: !1,
      schema: s(r)
    };
  }
  #t(t) {
    let s = !1, r = !1;
    for (const e of t)
      if (Array.isArray(e.content))
        for (const o of e.content)
          o.type === "image_url" && (s = !0), o.type === "input_audio" && (r = !0);
    if (s && r)
      throw new Error(
        "OpenAI backend does not support mixing images and audio in the same session. Please start a new session."
      );
    return { hasImage: s, hasAudio: r };
  }
  #s(t) {
    return this.#e.model !== this.modelName ? this.#e.model : t ? `${this.modelName}-audio-preview` : this.modelName;
  }
  async generateContent(t) {
    const { messages: s } = this.#r(
      t,
      this.#e.systemInstruction
    ), { hasAudio: r } = this.#t(s), e = this.#s(r);
    if (e === `${this.modelName}-audio-preview` && this.#e.response_format)
      throw new DOMException(
        `OpenAI audio model ('${e}') does not support structured outputs (responseConstraint).`,
        "NotSupportedError"
      );
    const o = {
      model: e,
      messages: s
    };
    this.#e.temperature > 0 && (o.temperature = this.#e.temperature), this.#e.response_format && (o.response_format = this.#e.response_format);
    try {
      const a = await this.openai.chat.completions.create(o);
      let i = a.choices[0].message.content;
      if (this.#e.response_wrapped && i)
        try {
          const c = JSON.parse(i);
          c && typeof c == "object" && "value" in c && (i = JSON.stringify(c.value));
        } catch {
        }
      const n = a.usage?.prompt_tokens || 0;
      return { text: i, usage: n };
    } catch (a) {
      throw console.error("OpenAI Generate Content Error:", a), a;
    }
  }
  async generateContentStream(t) {
    const { messages: s } = this.#r(
      t,
      this.#e.systemInstruction
    ), { hasAudio: r } = this.#t(s), e = this.#s(r);
    if (e === `${this.modelName}-audio-preview` && this.#e.response_format)
      throw new DOMException(
        `OpenAI audio model ('${e}') does not support structured outputs (responseConstraint).`,
        "NotSupportedError"
      );
    const o = {
      model: e,
      messages: s,
      stream: !0
    };
    this.#e.temperature > 0 && (o.temperature = this.#e.temperature), this.#e.response_format && (o.response_format = this.#e.response_format);
    try {
      const a = await this.openai.chat.completions.create(o);
      return (async function* () {
        let p = !0;
        for await (const i of a) {
          let n = i.choices[0]?.delta?.content;
          n && (yield {
            text: () => n,
            usageMetadata: { totalTokenCount: 0 }
          });
        }
      })();
    } catch (a) {
      throw console.error("OpenAI Generate Content Stream Error:", a), a;
    }
  }
  async countTokens(t) {
    let s = "";
    if (Array.isArray(t)) {
      for (const r of t)
        if (r.parts)
          for (const e of r.parts)
            e.text ? s += e.text : e.inlineData && (s += " ".repeat(1e3));
    }
    return Math.ceil(s.length / 4);
  }
  #r(t, s) {
    const r = [];
    s && r.push({
      role: "system",
      content: s
    });
    for (const e of t) {
      const o = e.role === "model" ? "assistant" : "user", a = [];
      for (const p of e.parts)
        if (p.text)
          a.push({ type: "text", text: p.text });
        else if (p.inlineData) {
          const { data: i, mimeType: n } = p.inlineData;
          n.startsWith("image/") ? a.push({
            type: "image_url",
            image_url: { url: `data:${n};base64,${i}` }
          }) : n.startsWith("audio/") && a.push({
            type: "input_audio",
            input_audio: {
              data: i,
              format: n.split("/")[1] === "mpeg" ? "mp3" : "wav"
            }
          });
        }
      r.push({ role: o, content: a });
    }
    return { messages: r };
  }
}
export {
  y as default
};
```
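The private `#o` normalizer above is what lets arbitrary `responseSchema` values pass OpenAI's Structured Outputs validation: it deep-clones the schema, marks every object node as closed (`additionalProperties: false` with every property required), and wraps non-object roots in a `{ value: ... }` object, with the `response_wrapped` flag telling `generateContent` to unwrap the model's reply. The same transformation with descriptive names (a sketch, not part of the package's public API):

```js
// Standalone restatement of the schema normalization performed by #o above.
function toOpenAIStrictSchema(schema) {
  if (typeof schema !== "object" || schema === null)
    return { schema, wrapped: false };

  const close = (node) => {
    if (node.type === "object") {
      // Structured Outputs require closed objects with all fields required.
      node.additionalProperties = false;
      node.required = Object.keys(node.properties ?? {});
      for (const key in node.properties ?? {}) close(node.properties[key]);
    } else if (node.type === "array" && node.items) {
      close(node.items);
    }
    return node;
  };

  const clone = JSON.parse(JSON.stringify(schema));
  return clone.type !== "object"
    ? // Non-object roots are wrapped so the root is always an object.
      { wrapped: true, schema: close({ type: "object", properties: { value: clone } }) }
    : { wrapped: false, schema: close(clone) };
}

// Example: a bare array root gets wrapped under a required "value" key.
toOpenAIStrictSchema({ type: "array", items: { type: "string" } });
// → { wrapped: true, schema: { type: "object",
//     properties: { value: { type: "array", items: { type: "string" } } },
//     required: ["value"], additionalProperties: false } }
```

Note that only the non-streaming path unwraps the reply; streamed deltas are yielded as-is.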
@@ -0,0 +1,254 @@ package/dist/backends/transformers.js (new file)

Minified build output: `D` is the local Transformers.js backend. The `$` helper resolves the model's file manifest from the Hugging Face Hub (cached in localStorage for 24 hours) so download progress can be reported as a single 0–1 fraction.

```js
import { pipeline as z, TextStreamer as C } from "@huggingface/transformers";
import { P as j, D as y } from "../chunks/defaults-CNQngzSd.js";
class D extends j {
  #e;
  #t;
  #a;
  #n;
  #o;
  constructor(n = {}) {
    super(n.modelName || y.transformers.modelName), this.#a = n.device || y.transformers.device, this.#n = n.dtype || y.transformers.dtype;
  }
  /**
   * Loaded models can be large, so we initialize them lazily.
   * @param {EventTarget} [monitorTarget] - The event target to dispatch download progress events to.
   * @returns {Promise<Object>} The generator.
   */
  async #s(n) {
    if (!this.#e) {
      const t = /* @__PURE__ */ new Map(), a = await $(this.modelName, {
        dtype: this.#n
      });
      for (const { path: e, size: o } of a)
        t.set(e, { loaded: 0, total: o });
      const i = (e) => {
        if (!n)
          return;
        const o = 1 / 65536, c = Math.floor(e / o) * o;
        c <= n.__lastProgressLoaded || (n.dispatchEvent(
          new ProgressEvent("downloadprogress", {
            loaded: c,
            total: 1,
            lengthComputable: !0
          })
        ), n.__lastProgressLoaded = c);
      }, l = (e) => {
        if (e.status === "initiate")
          if (t.has(e.file)) {
            const o = t.get(e.file);
            e.total && (o.total = e.total);
          } else
            t.set(e.file, { loaded: 0, total: e.total || 0 });
        else if (e.status === "progress")
          t.has(e.file) && (t.get(e.file).loaded = e.loaded);
        else if (e.status === "done") {
          if (t.has(e.file)) {
            const o = t.get(e.file);
            o.loaded = o.total;
          }
        } else if (e.status === "ready") {
          i(1);
          return;
        }
        if (e.status === "progress" || e.status === "done") {
          let o = 0, c = 0;
          for (const { loaded: f, total: d } of t.values())
            o += f, c += d;
          if (c > 0) {
            const f = o / c;
            i(Math.min(f, 0.9999));
          }
        }
      };
      i(0), this.#e = await z("text-generation", this.modelName, {
        device: this.#a,
        dtype: this.#n,
        progress_callback: l
      }), this.#t = this.#e.tokenizer;
    }
    return this.#e;
  }
  /**
   * Checks if the backend is available given the options.
   * @param {Object} options - LanguageModel options.
   * @returns {string} 'available' or 'unavailable'.
   */
  static availability(n) {
    if (n?.expectedInputs && Array.isArray(n.expectedInputs)) {
      for (const t of n.expectedInputs)
        if (t.type === "audio" || t.type === "image")
          return "unavailable";
    }
    return "available";
  }
  /**
   * Creates a new session.
   * @param {Object} options - LanguageModel options.
   * @param {Object} sessionParams - Session parameters.
   * @param {EventTarget} [monitorTarget] - The event target to dispatch download progress events to.
   * @returns {Promise<Object>} The generator.
   */
  async createSession(n, t, a) {
    return n.responseConstraint && console.warn(
      "The `responseConstraint` flag isn't supported by the Transformers.js backend and was ignored."
    ), await this.#s(a), this.generationConfig = {
      max_new_tokens: 512,
      // Default limit
      temperature: t.generationConfig?.temperature ?? 1,
      top_p: 1,
      do_sample: t.generationConfig?.temperature !== 0,
      return_full_text: !1
    }, this.#o = t.systemInstruction, this.#e;
  }
  async generateContent(n) {
    const t = await this.#s(), a = this.#r(n), i = this.#t.apply_chat_template(a, {
      tokenize: !1,
      add_generation_prompt: !0
    }), e = (await t(i, {
      ...this.generationConfig,
      add_special_tokens: !1
    }))[0].generated_text, o = await this.countTokens(n);
    return { text: e, usage: o };
  }
  async generateContentStream(n) {
    const t = await this.#s(), a = this.#r(n), i = this.#t.apply_chat_template(a, {
      tokenize: !1,
      add_generation_prompt: !0
    }), l = [];
    let e, o = new Promise((r) => e = r), c = !1;
    const f = (r) => {
      l.push(r), e && (e(), e = null);
    }, d = new C(this.#t, {
      skip_prompt: !0,
      skip_special_tokens: !0,
      callback_function: f
    });
    return t(i, {
      ...this.generationConfig,
      add_special_tokens: !1,
      streamer: d
    }).then(() => {
      c = !0, e && (e(), e = null);
    }).catch((r) => {
      console.error("[Transformers.js] Generation error:", r), c = !0, e && (e(), e = null);
    }), (async function* () {
      for (; ; ) {
        for (l.length === 0 && !c && (e || (o = new Promise((r) => e = r)), await o); l.length > 0; ) {
          const r = l.shift();
          yield {
            text: () => r,
            usageMetadata: { totalTokenCount: 0 }
          };
        }
        if (c)
          break;
      }
    })();
  }
  async countTokens(n) {
    await this.#s();
    const t = this.#r(n);
    return this.#t.apply_chat_template(t, {
      tokenize: !0,
      add_generation_prompt: !1,
      return_tensor: !1
    }).length;
  }
  #r(n) {
    const t = n.map((a) => {
      let i = a.role === "model" ? "assistant" : a.role === "system" ? "system" : "user";
      const l = a.parts.map((e) => e.text).join("");
      return { role: i, content: l };
    });
    if (this.#o && !t.some((a) => a.role === "system") && t.unshift({ role: "system", content: this.#o }), this.modelName.toLowerCase().includes("gemma")) {
      const a = t.findIndex((i) => i.role === "system");
      if (a !== -1) {
        const i = t[a], l = t.findIndex(
          (e, o) => e.role === "user" && o > a
        );
        l !== -1 ? (t[l].content = i.content + "\n\n" + t[l].content, t.splice(a, 1)) : (i.content += "\n\n", i.role = "user");
      }
    }
    return t;
  }
}
async function $(g, n = {}) {
  const { dtype: t = "q8", branch: a = "main" } = n;
  let i = null;
  const l = `transformers_model_files_${g}_${t}_${a}`;
  try {
    const s = localStorage.getItem(l);
    if (s) {
      i = JSON.parse(s);
      const { timestamp: p, files: u } = i, v = 1440 * 60 * 1e3;
      if (Date.now() - p < v)
        return u;
    }
  } catch (s) {
    console.warn("Failed to read from localStorage cache:", s);
  }
  const e = `https://huggingface.co/api/models/${g}/tree/${a}?recursive=true`;
  let o;
  try {
    if (o = await fetch(e), !o.ok)
      throw new Error(`Manifest fetch failed: ${o.status}`);
  } catch (s) {
    if (i)
      return console.warn(
        "Failed to fetch manifest from network, falling back to cached data (expired):",
        s
      ), i.files;
    throw s;
  }
  const c = await o.json(), f = new Map(c.map((s) => [s.path, s.size])), d = [], h = (s) => f.has(s), r = (s) => h(s) ? (d.push({ path: s, size: f.get(s) }), !0) : !1;
  r("config.json"), r("generation_config.json"), r("preprocessor_config.json"), h("tokenizer.json") ? (r("tokenizer.json"), r("tokenizer_config.json")) : (r("tokenizer_config.json"), r("special_tokens_map.json"), r("vocab.json"), r("merges.txt"), r("vocab.txt"));
  const w = "onnx";
  let m = [];
  t === "fp32" ? m = [""] : t === "quantized" ? m = ["_quantized"] : (m = [`_${t}`], t === "q8" && m.push(""));
  let k = [
    "model",
    "encoder_model",
    "decoder_model",
    "decoder_model_merged"
  ];
  const _ = [];
  for (const s of k)
    for (const p of m) {
      const u = `${w}/${s}${p}.onnx`;
      if (h(u)) {
        _.push(u);
        break;
      }
    }
  const x = _.some(
    (s) => s.includes("decoder_model_merged")
  ), b = _.filter((s) => !(x && s.includes("decoder_model") && !s.includes("merged")));
  for (const s of b) {
    r(s);
    const p = `${s}_data`;
    if (r(p)) {
      let u = 1;
      for (; r(`${p}_${u}`); )
        u++;
    }
  }
  try {
    localStorage.setItem(
      l,
      JSON.stringify({
        timestamp: Date.now(),
        files: d
      })
    );
  } catch (s) {
    console.warn("Failed to write to localStorage cache:", s);
  }
  return d;
}
export {
  D as default
};
```
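`generateContentStream` above has to turn Transformers.js's callback-based `TextStreamer` into the async iterator the polyfill's API surface expects. It does so with a small pull queue: the streamer callback pushes chunks and resolves a pending promise, while the generator drains the queue and then parks on a fresh promise until more data or completion arrives. The same pattern with descriptive names (a sketch under assumed names, not the package's exports):

```js
// Generic callback-to-async-iterator bridge, as used by generateContentStream.
function bridgeCallbackStream(start) {
  const queue = [];
  let wake = null;   // resolver for the promise the consumer is parked on
  let done = false;

  const push = (chunk) => { queue.push(chunk); wake?.(); wake = null; };
  const finish = () => { done = true; wake?.(); wake = null; };

  start(push, finish); // kicks off generation; push() is the streamer callback

  return (async function* () {
    while (true) {
      if (queue.length === 0 && !done)
        await new Promise((resolve) => (wake = resolve)); // park until data
      while (queue.length > 0) yield queue.shift();
      if (done) break;
    }
  })();
}
```

One subtlety the package's version also handles: a generation error is logged and treated as completion, so the iterator terminates instead of hanging.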
@@ -1,60 +1,65 @@ package/{backends/base.js → dist/chunks/defaults-CNQngzSd.js}

The hand-written base class is replaced by its minified equivalent (`r`, exported as `P`), and a new per-backend defaults object (`a`, exported as `D`) is appended. Removed lines appear truncated here exactly as in the upstream diff view.

```diff
-
-
- */
-export default class PolyfillBackend {
-  #model;
-
+class r {
+  #e;
   /**
    * @param {string} modelName - The name of the model.
    */
-  constructor(
-    this.modelName =
+  constructor(e) {
+    this.modelName = e;
   }
-
   /**
    * Checks if the backend is available given the options.
    * @param {Object} options - LanguageModel options.
    * @returns {string} 'available', 'unavailable', 'downloadable', or 'downloading'.
    */
-  static availability(
-    return
+  static availability(e) {
+    return "available";
   }
-
   /**
    * Creates a model session and stores it.
    * @param {Object} options - LanguageModel options.
    * @param {Object} sessionParams - Parameters for the cloud or local model.
-   * @param {
+   * @param {EventTarget} [monitorTarget] - The event target to dispatch download progress events to.
    * @returns {any} The created session object.
    */
-  createSession(
-    throw new Error(
+  createSession(e, o, n) {
+    throw new Error("Not implemented");
   }
-
   /**
    * Generates content (non-streaming).
    * @param {Array} content - The history + new message content.
    * @returns {Promise<{text: string, usage: number}>}
    */
-  async generateContent(
-    throw new Error(
+  async generateContent(e) {
+    throw new Error("Not implemented");
   }
-
   /**
    * Generates content stream.
    * @param {Array} content - The history + new content.
    * @returns {Promise<AsyncIterable>} Stream of chunks.
    */
-  async generateContentStream(
-    throw new Error(
+  async generateContentStream(e) {
+    throw new Error("Not implemented");
   }
-
   /**
    * Counts tokens.
    * @param {Array} content - The content to count.
    * @returns {Promise<number>} Total tokens.
    */
-  async countTokens(
-    throw new Error(
+  async countTokens(e) {
+    throw new Error("Not implemented");
   }
 }
+const a = {
+  firebase: { modelName: "gemini-2.5-flash-lite" },
+  gemini: { modelName: "gemini-2.0-flash-lite-preview-02-05" },
+  openai: { modelName: "gpt-4o" },
+  transformers: {
+    modelName: "onnx-community/gemma-3-1b-it-ONNX-GQA",
+    device: "webgpu",
+    dtype: "q4f16"
+  }
+};
+export {
+  a as D,
+  r as P
+};
```
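The two exports here, `P` (the abstract backend base class) and `D` (the per-backend defaults), are what every bundled backend imports. A sketch of a custom backend on the same base; the chunk filename is hash-stamped per build, so the import path and everything else below is purely illustrative:

```js
// Illustrative only: a trivial custom backend on the shared base class.
import { P as PolyfillBackend } from "./dist/chunks/defaults-CNQngzSd.js";

class EchoBackend extends PolyfillBackend {
  static availability() { return "available"; }

  createSession(options, sessionParams) {
    this.params = sessionParams;
    return this;
  }

  async generateContent(content) {
    // Real backends call a model here; this one echoes the last user turn.
    const last = content[content.length - 1];
    const text = last.parts.map((p) => p.text ?? "").join("");
    return { text, usage: await this.countTokens(content) };
  }

  async generateContentStream(content) {
    const { text } = await this.generateContent(content);
    return (async function* () {
      yield { text: () => text, usageMetadata: { totalTokenCount: 0 } };
    })();
  }

  async countTokens(content) {
    return Math.ceil(JSON.stringify(content).length / 4); // rough heuristic
  }
}

new EchoBackend("echo-model"); // the base constructor stores the model name
```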