@blank-utils/llm 0.5.0 → 0.5.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{chunk-A7TUIC5G.js → chunk-QUWLNQIN.js} +10 -23
- package/dist/index.css +3 -0
- package/dist/index.js +1 -1
- package/dist/react/index.css +3 -0
- package/dist/react/index.js +1 -1
- package/package.json +2 -2
package/dist/chunk-A7TUIC5G.js → package/dist/chunk-QUWLNQIN.js (CHANGED)

@@ -1067,7 +1067,8 @@ var DEFAULT_SYSTEM_PROMPT = `You are a helpful AI assistant.
   B -- Yes --> C[Continue]
   B -- No --> D[Error]
 \`\`\`
-- You can use LaTeX math ($$ ... $$)
+- You can use LaTeX math ($$ ... $$).
+- IF you are provided an uploaded image, your primary task is strictly to analyze the contents of that specific image in natural language. Do not hallucinate that the user desires a Mermaid diagram unless they specifically ask for one.`;
 var ALL_MODELS = { ...WEBLLM_MODELS };
 function isVisionModel(modelId) {
   if (!modelId) return false;
@@ -1215,33 +1216,19 @@ ${systemPrompt}` : systemPrompt;
   currentMessages.forEach((m) => {
     let content = m.content;
     if (m.role === "user" && m.images && m.images.length > 0 && isVisionModel(modelId || "")) {
-
-      content
-
-
-      ];
-    } else {
-      content = [
-        ...m.images.map((img) => ({ type: "image", image: img.dataUrl })),
-        { type: "text", text: m.content }
-      ];
-    }
+      content = [
+        { type: "text", text: m.content },
+        ...m.images.map((img) => ({ type: "image_url", image_url: { url: img.dataUrl } }))
+      ];
     }
     apiMessages.push({ role: m.role, content });
   });
   let finalUserContent = userContent;
   if (attachedImages.length > 0 && isVisionModel(modelId || "")) {
-
-
-
-
-      ];
-    } else {
-      finalUserContent = [
-        ...attachedImages.map((img) => ({ type: "image", image: img.dataUrl })),
-        { type: "text", text: userContent }
-      ];
-    }
+    finalUserContent = [
+      { type: "text", text: userContent },
+      ...attachedImages.map((img) => ({ type: "image_url", image_url: { url: img.dataUrl } }))
+    ];
   }
   apiMessages.push({ role: "user", content: finalUserContent });
   try {
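Taken together, the two hunks above drop the old two-branch image handling and build a single flat, OpenAI-style content array: the text part now comes first, and each attached image becomes `{ type: "image_url", image_url: { url: ... } }` rather than `{ type: "image", image: ... }`. A minimal sketch of the message shape the new code produces, with hypothetical inputs (only `userContent`, `attachedImages`, `apiMessages`, and the part shapes are taken from the diff):

```js
// Hypothetical inputs; in the package these come from the chat state.
const userContent = "What is in this picture?";
const attachedImages = [{ dataUrl: "data:image/png;base64,iVBORw0KGgo..." }];

const apiMessages = [];

// New shape: one flat content array, text part first,
// then an OpenAI-style image_url part wrapping each data URL.
apiMessages.push({
  role: "user",
  content: [
    { type: "text", text: userContent },
    ...attachedImages.map((img) => ({
      type: "image_url",
      image_url: { url: img.dataUrl },
    })),
  ],
});
```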
package/dist/index.css (CHANGED)
package/dist/index.js (CHANGED)
package/dist/react/index.css (CHANGED)
package/dist/react/index.js (CHANGED)
package/package.json (CHANGED)
@@ -1,6 +1,6 @@
 {
   "name": "@blank-utils/llm",
-  "version": "0.5.0",
+  "version": "0.5.2",
   "description": "Run LLMs directly in your browser with WebGPU acceleration. Supports React hooks and eager background loading.",
   "type": "module",
   "main": "./dist/index.js",
@@ -101,7 +101,7 @@
   },
   "dependencies": {
     "@huggingface/transformers": "^3.8.1",
-    "@mlc-ai/web-llm": "^0.2.
+    "@mlc-ai/web-llm": "^0.2.81",
     "@streamdown/code": "^1.0.3",
     "@streamdown/math": "^1.0.2",
     "@streamdown/mermaid": "^1.0.2",