mindcraft 0.1.4-0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/FAQ.md +38 -0
- package/LICENSE +21 -0
- package/README.md +255 -0
- package/andy.json +6 -0
- package/bin/mindcraft.js +80 -0
- package/keys.example.json +19 -0
- package/main.js +80 -0
- package/package.json +78 -0
- package/patches/minecraft-data+3.97.0.patch +13 -0
- package/patches/mineflayer+4.33.0.patch +54 -0
- package/patches/mineflayer-pathfinder+2.4.5.patch +265 -0
- package/patches/mineflayer-pvp+1.3.2.patch +13 -0
- package/patches/prismarine-viewer+1.33.0.patch +13 -0
- package/patches/protodef+1.19.0.patch +15 -0
- package/profiles/andy-4-reasoning.json +14 -0
- package/profiles/andy-4.json +7 -0
- package/profiles/azure.json +19 -0
- package/profiles/claude.json +7 -0
- package/profiles/claude_thinker.json +15 -0
- package/profiles/deepseek.json +7 -0
- package/profiles/defaults/_default.json +256 -0
- package/profiles/defaults/assistant.json +14 -0
- package/profiles/defaults/creative.json +14 -0
- package/profiles/defaults/god_mode.json +14 -0
- package/profiles/defaults/survival.json +14 -0
- package/profiles/freeguy.json +7 -0
- package/profiles/gemini.json +9 -0
- package/profiles/gpt.json +12 -0
- package/profiles/grok.json +7 -0
- package/profiles/llama.json +10 -0
- package/profiles/mercury.json +9 -0
- package/profiles/mistral.json +5 -0
- package/profiles/qwen.json +17 -0
- package/profiles/tasks/construction_profile.json +42 -0
- package/profiles/tasks/cooking_profile.json +11 -0
- package/profiles/tasks/crafting_profile.json +71 -0
- package/profiles/vllm.json +10 -0
- package/settings.js +64 -0
- package/src/agent/action_manager.js +177 -0
- package/src/agent/agent.js +561 -0
- package/src/agent/coder.js +229 -0
- package/src/agent/commands/actions.js +504 -0
- package/src/agent/commands/index.js +259 -0
- package/src/agent/commands/queries.js +347 -0
- package/src/agent/connection_handler.js +96 -0
- package/src/agent/conversation.js +353 -0
- package/src/agent/history.js +122 -0
- package/src/agent/library/full_state.js +89 -0
- package/src/agent/library/index.js +23 -0
- package/src/agent/library/lockdown.js +32 -0
- package/src/agent/library/skill_library.js +93 -0
- package/src/agent/library/skills.js +2093 -0
- package/src/agent/library/world.js +431 -0
- package/src/agent/memory_bank.js +25 -0
- package/src/agent/mindserver_proxy.js +136 -0
- package/src/agent/modes.js +446 -0
- package/src/agent/npc/build_goal.js +80 -0
- package/src/agent/npc/construction/dirt_shelter.json +38 -0
- package/src/agent/npc/construction/large_house.json +230 -0
- package/src/agent/npc/construction/small_stone_house.json +42 -0
- package/src/agent/npc/construction/small_wood_house.json +42 -0
- package/src/agent/npc/controller.js +261 -0
- package/src/agent/npc/data.js +50 -0
- package/src/agent/npc/item_goal.js +355 -0
- package/src/agent/npc/utils.js +126 -0
- package/src/agent/self_prompter.js +146 -0
- package/src/agent/settings.js +7 -0
- package/src/agent/speak.js +150 -0
- package/src/agent/tasks/construction_tasks.js +1104 -0
- package/src/agent/tasks/cooking_tasks.js +358 -0
- package/src/agent/tasks/tasks.js +594 -0
- package/src/agent/templates/execTemplate.js +6 -0
- package/src/agent/templates/lintTemplate.js +10 -0
- package/src/agent/vision/browser_viewer.js +8 -0
- package/src/agent/vision/camera.js +78 -0
- package/src/agent/vision/vision_interpreter.js +82 -0
- package/src/mindcraft/index.js +28 -0
- package/src/mindcraft/mcserver.js +154 -0
- package/src/mindcraft/mindcraft.js +111 -0
- package/src/mindcraft/mindserver.js +328 -0
- package/src/mindcraft/public/index.html +1253 -0
- package/src/mindcraft/public/settings_spec.json +145 -0
- package/src/mindcraft/userconfig.js +72 -0
- package/src/mindcraft-py/example.py +27 -0
- package/src/mindcraft-py/init-mindcraft.js +24 -0
- package/src/mindcraft-py/mindcraft.py +99 -0
- package/src/models/_model_map.js +89 -0
- package/src/models/azure.js +32 -0
- package/src/models/cerebras.js +61 -0
- package/src/models/claude.js +87 -0
- package/src/models/deepseek.js +59 -0
- package/src/models/gemini.js +176 -0
- package/src/models/glhf.js +71 -0
- package/src/models/gpt.js +147 -0
- package/src/models/grok.js +82 -0
- package/src/models/groq.js +95 -0
- package/src/models/huggingface.js +86 -0
- package/src/models/hyperbolic.js +114 -0
- package/src/models/lmstudio.js +74 -0
- package/src/models/mercury.js +95 -0
- package/src/models/mistral.js +94 -0
- package/src/models/novita.js +71 -0
- package/src/models/ollama.js +115 -0
- package/src/models/openrouter.js +77 -0
- package/src/models/prompter.js +366 -0
- package/src/models/qwen.js +80 -0
- package/src/models/replicate.js +60 -0
- package/src/models/vllm.js +81 -0
- package/src/process/agent_process.js +84 -0
- package/src/process/init_agent.js +54 -0
- package/src/utils/examples.js +83 -0
- package/src/utils/keys.js +34 -0
- package/src/utils/math.js +13 -0
- package/src/utils/mcdata.js +572 -0
- package/src/utils/text.js +78 -0
- package/src/utils/translator.js +30 -0
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
import { toSinglePrompt } from '../utils/text.js';
|
|
2
|
+
import { getKey } from '../utils/keys.js';
|
|
3
|
+
import { HfInference } from "@huggingface/inference";
|
|
4
|
+
|
|
5
|
+
export class HuggingFace {
    static prefix = 'huggingface';

    /**
     * @param {string} model_name - Model id, optionally prefixed with 'huggingface/'.
     * @param {string} [url] - Ignored; Hugging Face inference does not support custom URLs.
     * @param {object} [params] - Extra parameters spread into each chat completion request.
     */
    constructor(model_name, url, params) {
        // Remove 'huggingface/' prefix if present
        this.model_name = model_name.replace('huggingface/', '');
        this.url = url;
        this.params = params;

        if (this.url) {
            console.warn("Hugging Face doesn't support custom urls!");
        }

        this.huggingface = new HfInference(getKey('HUGGINGFACE_API_KEY'));
    }

    /**
     * Sends a chat request, retrying when a partial <think> block is detected
     * (relevant for reasoning models such as DeepSeek-R1).
     * @param {Array} turns - Conversation turns ({role, content} objects).
     * @param {string} systemMessage - System prompt prepended to the flattened conversation.
     * @returns {Promise<string>} The model reply with complete <think> blocks stripped,
     *   or a fallback message on failure.
     */
    async sendRequest(turns, systemMessage) {
        const stop_seq = '***';
        // Build a single prompt from the conversation turns
        const prompt = toSinglePrompt(turns, null, stop_seq);
        // Fallback model if none was provided
        const model_name = this.model_name || 'meta-llama/Meta-Llama-3-8B';
        // Combine system message with the prompt
        const input = systemMessage + "\n" + prompt;

        // We'll try up to 5 times in case of partial <think> blocks for DeepSeek-R1 models.
        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;

        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting Hugging Face API response... (model: ${model_name}, attempt: ${attempt})`);
            let res = '';
            try {
                // Consume the streaming response chunk by chunk
                for await (const chunk of this.huggingface.chatCompletionStream({
                    model: model_name,
                    messages: [{ role: "user", content: input }],
                    ...(this.params || {})
                })) {
                    res += (chunk.choices[0]?.delta?.content || "");
                }
            } catch (err) {
                console.log(err);
                res = 'My brain disconnected, try again.';
                // BUG FIX: `break` used to skip the `finalRes = res` assignment
                // below, so the post-loop fallback silently replaced this error
                // message with 'I thought too hard...'. Record the result before
                // breaking so the caller sees the disconnect message.
                finalRes = res;
                break;
            }

            // If the model is DeepSeek-R1, check for mismatched <think> blocks.
            const hasOpenTag = res.includes("<think>");
            const hasCloseTag = res.includes("</think>");

            // If there's a partial mismatch, warn and retry the entire request.
            if (hasOpenTag && !hasCloseTag) {
                console.warn("Partial <think> block detected. Re-generating...");
                continue;
            }

            // If both tags are present, remove the <think> block entirely.
            if (hasOpenTag && hasCloseTag) {
                res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }

            finalRes = res;
            break; // Exit loop if we got a valid response.
        }

        // If no valid response was obtained after max attempts, assign a fallback.
        if (finalRes == null) {
            console.warn("Could not get a valid <think> block or normal response after max attempts.");
            finalRes = 'I thought too hard, sorry, try again.';
        }
        console.log('Received.');
        console.log(finalRes);
        return finalRes;
    }

    /**
     * Embeddings are unsupported for this provider.
     * @throws {Error} always.
     */
    async embed(text) {
        throw new Error('Embeddings are not supported by HuggingFace.');
    }
}
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
import { getKey } from '../utils/keys.js';
|
|
2
|
+
|
|
3
|
+
export class Hyperbolic {
    static prefix = 'hyperbolic';

    /**
     * @param {string} modelName - Model id; defaults to DeepSeek-V3.
     * @param {string} [apiUrl] - Chat completions endpoint; defaults to the public Hyperbolic API.
     * @param {object} [params] - Extra request parameters spread into the payload
     *   (added for consistency with the other model adapters; previously ignored).
     * @throws {Error} when HYPERBOLIC_API_KEY is missing.
     */
    constructor(modelName, apiUrl, params) {
        this.modelName = modelName || "deepseek-ai/DeepSeek-V3";
        this.apiUrl = apiUrl || "https://api.hyperbolic.xyz/v1/chat/completions";
        this.params = params;

        // Retrieve the Hyperbolic API key from keys.js
        this.apiKey = getKey('HYPERBOLIC_API_KEY');
        if (!this.apiKey) {
            throw new Error('HYPERBOLIC_API_KEY not found. Check your keys.js file.');
        }
    }

    /**
     * Sends a chat completion request to the Hyperbolic endpoint, retrying when
     * a partial <think> block is detected, and retrying with a shorter history
     * when the context length is exceeded.
     *
     * @param {Array} turns - An array of message objects, e.g. [{role: 'user', content: 'Hi'}].
     * @param {string} systemMessage - The system prompt or instruction.
     * @param {string} stopSeq - A stopping sequence, default '***'.
     * @returns {Promise<string>} - The model's reply.
     */
    async sendRequest(turns, systemMessage, stopSeq = '***') {
        // Prepare the messages with a system prompt at the beginning
        const messages = [{ role: 'system', content: systemMessage }, ...turns];

        // Build the request payload; user-supplied params may override the defaults.
        const payload = {
            model: this.modelName,
            messages: messages,
            max_tokens: 8192,
            temperature: 0.7,
            top_p: 0.9,
            stream: false,
            ...(this.params || {})
        };

        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;

        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting Hyperbolic API response... (attempt: ${attempt})`);
            console.log('Messages:', messages);

            let completionContent = null;

            try {
                const response = await fetch(this.apiUrl, {
                    method: 'POST',
                    headers: {
                        'Content-Type': 'application/json',
                        'Authorization': `Bearer ${this.apiKey}`
                    },
                    body: JSON.stringify(payload)
                });

                if (!response.ok) {
                    throw new Error(`HTTP error! status: ${response.status}`);
                }

                const data = await response.json();
                if (data?.choices?.[0]?.finish_reason === 'length') {
                    throw new Error('Context length exceeded');
                }

                completionContent = data?.choices?.[0]?.message?.content || '';
                console.log('Received response from Hyperbolic.');
            } catch (err) {
                if (
                    (err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') &&
                    turns.length > 1
                ) {
                    console.log('Context length exceeded, trying again with a shorter context...');
                    return await this.sendRequest(turns.slice(1), systemMessage, stopSeq);
                } else {
                    console.error(err);
                    completionContent = 'My brain disconnected, try again.';
                }
            }

            // Check for <think> blocks
            const hasOpenTag = completionContent.includes("<think>");
            const hasCloseTag = completionContent.includes("</think>");

            if ((hasOpenTag && !hasCloseTag)) {
                console.warn("Partial <think> block detected. Re-generating...");
                continue; // Retry the request
            }

            // Normalize a reply that only has the closing tag...
            if (hasCloseTag && !hasOpenTag) {
                completionContent = '<think>' + completionContent;
            }

            // ...then strip any complete <think> block.
            // BUG FIX: this previously re-checked the stale `hasOpenTag` flag, so
            // a close-tag-only reply got '<think>' prepended but the block was
            // never removed, leaking reasoning text to the caller.
            if (completionContent.includes("<think>") && completionContent.includes("</think>")) {
                completionContent = completionContent.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }

            finalRes = completionContent.replace(/<\|separator\|>/g, '*no response*');
            break; // Valid response obtained—exit loop
        }

        if (finalRes == null) {
            console.warn("Could not get a valid <think> block or normal response after max attempts.");
            finalRes = 'I thought too hard, sorry, try again.';
        }
        return finalRes;
    }

    /**
     * Embeddings are unsupported for this provider.
     * @throws {Error} always.
     */
    async embed(text) {
        throw new Error('Embeddings are not supported by Hyperbolic.');
    }
}
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
import OpenAIApi from 'openai';
|
|
2
|
+
import { strictFormat } from '../utils/text.js';
|
|
3
|
+
|
|
4
|
+
export class LMStudio {
    static prefix = 'lmstudio';

    /**
     * @param {string} model_name - Model served by the local LM Studio instance.
     * @param {string} [url] - Server base URL; defaults to LM Studio's standard local port.
     * @param {object} [params] - Extra options merged into each chat completion request.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;
        this.openai = new OpenAIApi({
            baseURL: url || 'http://localhost:1234/v1',
            apiKey: 'lm-studio', // LM Studio ignores this but the client requires a non-empty value
        });
    }

    /**
     * Requests a chat completion. Retries with a truncated history when the
     * context window is exceeded; strips complete <think>...</think> blocks
     * from the reply.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        const chatModel = this.model_name || 'andy-4.1';
        const chatMessages = [{ role: 'system', content: systemMessage }, ...strictFormat(turns)];
        let reply = null;

        try {
            console.log('Awaiting LM Studio response from model', chatModel);
            const completion = await this.openai.chat.completions.create({
                model: chatModel,
                messages: chatMessages,
                stop: stop_seq,
                ...(this.params || {}),
            });
            if (completion.choices[0].finish_reason === 'length') {
                throw new Error('Context length exceeded');
            }
            console.log('Received.');
            reply = completion.choices[0].message.content;
            if (reply.includes('</think>')) {
                // Normalize a missing opening tag, then remove the whole block.
                if (!reply.includes('<think>')) {
                    reply = '<think>' + reply;
                }
                reply = reply.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }
        } catch (err) {
            const contextExceeded =
                err.message === 'Context length exceeded' || err.code === 'context_length_exceeded';
            if (contextExceeded && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
            }
            console.log(err);
            reply = 'My brain disconnected, try again.';
        }
        return reply;
    }

    /**
     * Appends the image (as a base64 data URL) plus the system message as a
     * final user turn, then delegates to sendRequest.
     */
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const withImage = [
            ...messages,
            {
                role: 'user',
                content: [
                    { type: 'text', text: systemMessage },
                    {
                        type: 'image_url',
                        image_url: { url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}` }
                    }
                ]
            },
        ];
        return this.sendRequest(withImage, systemMessage);
    }

    /**
     * Embeds text via the local server's OpenAI-compatible embeddings endpoint.
     * Input is truncated to 8191 characters.
     */
    async embed(text) {
        const truncated = text.length > 8191 ? text.slice(0, 8191) : text;
        const response = await this.openai.embeddings.create({
            model: this.model_name || 'text-embedding-nomic-embed-text-v1.5',
            input: truncated,
            encoding_format: 'float',
        });
        return response.data[0].embedding;
    }
}
|
|
@@ -0,0 +1,95 @@
|
|
|
1
|
+
import OpenAIApi from 'openai';
|
|
2
|
+
import { getKey, hasKey } from '../utils/keys.js';
|
|
3
|
+
import { strictFormat } from '../utils/text.js';
|
|
4
|
+
|
|
5
|
+
export class Mercury {
    static prefix = 'mercury';

    /**
     * @param {string} model_name - Mercury model id.
     * @param {string} [url] - API base URL; defaults to the Inception Labs endpoint.
     * @param {object} [params] - Extra options merged into each chat completion request.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;
        this.openai = new OpenAIApi({
            baseURL: url ? url : "https://api.inceptionlabs.ai/v1",
            apiKey: getKey('MERCURY_API_KEY'),
        });
    }

    /**
     * Requests a chat completion. Retries with a truncated history when the
     * context window is exceeded; returns a fallback string on other errors.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        // Normalize the stop parameter to the array form the API expects.
        let stops;
        if (typeof stop_seq === 'string') {
            stops = [stop_seq];
        } else if (Array.isArray(stop_seq)) {
            stops = stop_seq;
        } else {
            stops = [];
        }

        const request = {
            model: this.model_name || "mercury-coder-small",
            messages: strictFormat([{ role: 'system', content: systemMessage }, ...turns]),
            stop: stops,
            ...(this.params || {}),
        };

        let reply = null;
        try {
            console.log('Awaiting mercury api response from model', this.model_name)
            const completion = await this.openai.chat.completions.create(request);
            if (completion.choices[0].finish_reason === 'length') {
                throw new Error('Context length exceeded');
            }
            console.log('Received.')
            reply = completion.choices[0].message.content;
        } catch (err) {
            const contextExceeded =
                err.message === 'Context length exceeded' || err.code === 'context_length_exceeded';
            if (contextExceeded && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                return await this.sendRequest(turns.slice(1), systemMessage, stops);
            } else if (err.message.includes('image_url')) {
                console.log(err);
                reply = 'Vision is only supported by certain models.';
            } else {
                console.log(err);
                reply = 'My brain disconnected, try again.';
            }
        }
        return reply;
    }

    /**
     * Appends the image (as a base64 data URL) plus the system message as a
     * final user turn, then delegates to sendRequest.
     */
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const withImage = [
            ...messages,
            {
                role: "user",
                content: [
                    { type: "text", text: systemMessage },
                    {
                        type: "image_url",
                        image_url: {
                            url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
                        }
                    }
                ]
            },
        ];
        return this.sendRequest(withImage, systemMessage);
    }

    /**
     * Embeds text via the OpenAI-compatible embeddings endpoint.
     * Input is truncated to 8191 characters.
     */
    async embed(text) {
        const truncated = text.length > 8191 ? text.slice(0, 8191) : text;
        const response = await this.openai.embeddings.create({
            model: this.model_name || "text-embedding-3-small",
            input: truncated,
            encoding_format: "float",
        });
        return response.data[0].embedding;
    }
}
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
|
|
@@ -0,0 +1,94 @@
|
|
|
1
|
+
import { Mistral as MistralClient } from '@mistralai/mistralai';
|
|
2
|
+
import { getKey } from '../utils/keys.js';
|
|
3
|
+
import { strictFormat } from '../utils/text.js';
|
|
4
|
+
|
|
5
|
+
export class Mistral {
    static prefix = 'mistral';
    #client;

    /**
     * @param {string} model_name - Model id, optionally prefixed ('mistral/...' or 'mistralai/...').
     * @param {string} [url] - Ignored; the Mistral SDK does not support custom URLs.
     * @param {object} [params] - Extra options merged into each chat request.
     * @throws {Error} when MISTRAL_API_KEY is missing.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;

        if (typeof url === "string") {
            console.warn("Mistral does not support custom URL's, ignoring!");
        }

        if (!getKey("MISTRAL_API_KEY")) {
            throw new Error("Mistral API Key missing, make sure to set MISTRAL_API_KEY in settings.json")
        }

        this.#client = new MistralClient({ apiKey: getKey("MISTRAL_API_KEY") });

        // Prevents the following code from running when model not specified
        if (typeof this.model_name === "undefined") return;

        // Drop the "mistral"/"mistralai" prefix, e.g.
        // "mistral/mistral-large-latest" -> "mistral-large-latest"
        const afterSlash = model_name.split("/")[1];
        if (typeof afterSlash !== "undefined") {
            this.model_name = afterSlash;
        }
    }

    /**
     * Requests a chat completion from the Mistral API.
     * Returns a fallback string on error.
     */
    async sendRequest(turns, systemMessage) {
        let result;

        try {
            const chatMessages = [
                { role: "system", content: systemMessage },
                ...strictFormat(turns),
            ];

            console.log('Awaiting mistral api response...')
            const response = await this.#client.chat.complete({
                model: this.model_name || "mistral-large-latest",
                messages: chatMessages,
                ...(this.params || {})
            });

            result = response.choices[0].message.content;
        } catch (err) {
            const noVision = err.message.includes("A request containing images has been given to a model which does not have the 'vision' capability.");
            result = noVision
                ? "Vision is only supported by certain models."
                : "My brain disconnected, try again.";
            console.log(err);
        }

        return result;
    }

    /**
     * Appends the image (as a base64 data URL) plus the system message as a
     * final user turn, then delegates to sendRequest.
     */
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const withImage = [
            ...messages,
            {
                role: "user",
                content: [
                    { type: "text", text: systemMessage },
                    {
                        type: "image_url",
                        imageUrl: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
                    }
                ]
            },
        ];
        return this.sendRequest(withImage, systemMessage);
    }

    /**
     * Embeds text with the dedicated "mistral-embed" model.
     */
    async embed(text) {
        const response = await this.#client.embeddings.create({
            model: "mistral-embed",
            inputs: text
        });
        return response.data[0].embedding;
    }
}
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
import OpenAIApi from 'openai';
|
|
2
|
+
import { getKey } from '../utils/keys.js';
|
|
3
|
+
import { strictFormat } from '../utils/text.js';
|
|
4
|
+
|
|
5
|
+
// llama, mistral
|
|
6
|
+
// llama, mistral
export class Novita {
    static prefix = 'novita';

    /**
     * @param {string} model_name - Model id on Novita.
     * @param {string} [url] - OpenAI-compatible base URL; defaults to the Novita endpoint.
     * @param {object} [params] - Extra options merged into each chat completion request.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.url = url || 'https://api.novita.ai/v3/openai';
        this.params = params;

        this.openai = new OpenAIApi({
            baseURL: this.url,
            apiKey: getKey('NOVITA_API_KEY'),
        });
    }

    /**
     * Requests a chat completion. Retries with a truncated history when the
     * context window is exceeded; strips <think> reasoning blocks from the reply.
     */
    async sendRequest(turns, systemMessage, stop_seq = '***') {
        let messages = [{ 'role': 'system', 'content': systemMessage }].concat(turns);
        messages = strictFormat(messages);

        const pack = {
            model: this.model_name || "meta-llama/llama-4-scout-17b-16e-instruct",
            messages,
            stop: [stop_seq],
            ...(this.params || {})
        };

        let res = null;
        try {
            console.log('Awaiting novita api response...')
            const completion = await this.openai.chat.completions.create(pack);
            if (completion.choices[0].finish_reason === 'length')
                throw new Error('Context length exceeded');
            console.log('Received.')
            res = completion.choices[0].message.content;
        }
        catch (err) {
            if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
                console.log('Context length exceeded, trying again with shorter context.');
                // BUG FIX: was a bare `sendRequest(...)` call, which is a
                // ReferenceError at runtime; the retry must go through `this`.
                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
            } else {
                console.log(err);
                res = 'My brain disconnected, try again.';
            }
        }

        // Remove a <think> reasoning block from the reply.
        const start = res.indexOf('<think>');
        if (start !== -1) {
            const close = res.indexOf('</think>');
            // BUG FIX: the old code computed `end = indexOf('</think>') + 8` and
            // compared it to -1 — a test that can never succeed — so replies
            // with an unterminated <think> block were mangled instead of cleaned.
            if (close !== -1) {
                res = res.substring(0, start) + res.substring(close + '</think>'.length);
            } else {
                // Unterminated block: drop everything from <think> onward.
                res = res.substring(0, start);
            }
            res = res.trim();
        }
        return res;
    }

    /**
     * Embeddings are unsupported for this provider.
     * @throws {Error} always.
     */
    async embed(text) {
        throw new Error('Embeddings are not supported by Novita AI.');
    }
}
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
import { strictFormat } from '../utils/text.js';
|
|
2
|
+
|
|
3
|
+
export class Ollama {
    static prefix = 'ollama';

    /**
     * @param {string} model_name - Local model tag.
     * @param {string} [url] - Ollama server base URL; defaults to the local daemon.
     * @param {object} [params] - Extra options merged into each chat request body.
     */
    constructor(model_name, url, params) {
        this.model_name = model_name;
        this.params = params;
        this.url = url || 'http://127.0.0.1:11434';
        this.chat_endpoint = '/api/chat';
        this.embedding_endpoint = '/api/embeddings';
    }

    /**
     * Requests a chat completion from the local Ollama server, retrying when a
     * partial <think> block is detected, and retrying with a shorter history
     * when the context length is exceeded.
     * @returns {Promise<string>} Reply with complete <think> blocks stripped,
     *   or a fallback message on failure.
     */
    async sendRequest(turns, systemMessage) {
        let model = this.model_name || 'sweaterdog/andy-4:micro-q8_0';
        let messages = strictFormat(turns);
        messages.unshift({ role: 'system', content: systemMessage });
        const maxAttempts = 5;
        let attempt = 0;
        let finalRes = null;

        while (attempt < maxAttempts) {
            attempt++;
            console.log(`Awaiting local response... (model: ${model}, attempt: ${attempt})`);
            let res = null;
            try {
                let apiResponse = await this.send(this.chat_endpoint, {
                    model: model,
                    messages: messages,
                    stream: false,
                    ...(this.params || {})
                });
                if (apiResponse) {
                    res = apiResponse['message']['content'];
                } else {
                    res = 'No response data.';
                }
            } catch (err) {
                if (err.message.toLowerCase().includes('context length') && turns.length > 1) {
                    console.log('Context length exceeded, trying again with shorter context.');
                    return await this.sendRequest(turns.slice(1), systemMessage);
                } else {
                    console.log(err);
                    res = 'My brain disconnected, try again.';
                }
            }

            const hasOpenTag = res.includes("<think>");
            const hasCloseTag = res.includes("</think>");

            if ((hasOpenTag && !hasCloseTag)) {
                console.warn("Partial <think> block detected. Re-generating...");
                // On the final attempt fall through and return what we have.
                if (attempt < maxAttempts) continue;
            }
            // Normalize a reply that only has the closing tag...
            if (hasCloseTag && !hasOpenTag) {
                res = '<think>' + res;
            }
            // ...then strip any complete <think> block.
            // BUG FIX: this previously re-checked the stale `hasOpenTag` flag, so
            // a close-tag-only reply got '<think>' prepended but the block was
            // never removed, leaking reasoning text to the caller.
            if (res.includes('<think>') && res.includes('</think>')) {
                res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
            }
            finalRes = res;
            break;
        }

        if (finalRes == null) {
            console.warn("Could not get a valid response after max attempts.");
            finalRes = 'I thought too hard, sorry, try again.';
        }
        return finalRes;
    }

    /**
     * Embeds text via the Ollama embeddings endpoint.
     * NOTE(review): the legacy /api/embeddings endpoint documents a `prompt`
     * field (with `input` belonging to the newer /api/embed endpoint) — verify
     * this body shape against the Ollama version being targeted.
     */
    async embed(text) {
        let model = this.model_name || 'embeddinggemma';
        let body = { model: model, input: text };
        let res = await this.send(this.embedding_endpoint, body);
        return res['embedding'];
    }

    /**
     * POSTs a JSON body to the given endpoint on the configured server.
     * Returns the parsed JSON response, or null if the request failed
     * (failures are logged, not thrown).
     */
    async send(endpoint, body) {
        const url = new URL(endpoint, this.url);
        let method = 'POST';
        let headers = new Headers();
        const request = new Request(url, { method, headers, body: JSON.stringify(body) });
        let data = null;
        try {
            const res = await fetch(request);
            if (res.ok) {
                data = await res.json();
            } else {
                throw new Error(`Ollama Status: ${res.status}`);
            }
        } catch (err) {
            console.error('Failed to send Ollama request.');
            console.error(err);
        }
        return data;
    }

    /**
     * Appends the image (as a base64 data URL) plus the system message as a
     * final user turn, then delegates to sendRequest.
     */
    async sendVisionRequest(messages, systemMessage, imageBuffer) {
        const imageMessages = [...messages];
        imageMessages.push({
            role: "user",
            content: [
                { type: "text", text: systemMessage },
                {
                    type: "image_url",
                    image_url: {
                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
                    }
                }
            ]
        });

        return this.sendRequest(imageMessages, systemMessage);
    }
}
|