mindcraft 0.1.4-0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. package/FAQ.md +38 -0
  2. package/LICENSE +21 -0
  3. package/README.md +255 -0
  4. package/andy.json +6 -0
  5. package/bin/mindcraft.js +80 -0
  6. package/keys.example.json +19 -0
  7. package/main.js +80 -0
  8. package/package.json +78 -0
  9. package/patches/minecraft-data+3.97.0.patch +13 -0
  10. package/patches/mineflayer+4.33.0.patch +54 -0
  11. package/patches/mineflayer-pathfinder+2.4.5.patch +265 -0
  12. package/patches/mineflayer-pvp+1.3.2.patch +13 -0
  13. package/patches/prismarine-viewer+1.33.0.patch +13 -0
  14. package/patches/protodef+1.19.0.patch +15 -0
  15. package/profiles/andy-4-reasoning.json +14 -0
  16. package/profiles/andy-4.json +7 -0
  17. package/profiles/azure.json +19 -0
  18. package/profiles/claude.json +7 -0
  19. package/profiles/claude_thinker.json +15 -0
  20. package/profiles/deepseek.json +7 -0
  21. package/profiles/defaults/_default.json +256 -0
  22. package/profiles/defaults/assistant.json +14 -0
  23. package/profiles/defaults/creative.json +14 -0
  24. package/profiles/defaults/god_mode.json +14 -0
  25. package/profiles/defaults/survival.json +14 -0
  26. package/profiles/freeguy.json +7 -0
  27. package/profiles/gemini.json +9 -0
  28. package/profiles/gpt.json +12 -0
  29. package/profiles/grok.json +7 -0
  30. package/profiles/llama.json +10 -0
  31. package/profiles/mercury.json +9 -0
  32. package/profiles/mistral.json +5 -0
  33. package/profiles/qwen.json +17 -0
  34. package/profiles/tasks/construction_profile.json +42 -0
  35. package/profiles/tasks/cooking_profile.json +11 -0
  36. package/profiles/tasks/crafting_profile.json +71 -0
  37. package/profiles/vllm.json +10 -0
  38. package/settings.js +64 -0
  39. package/src/agent/action_manager.js +177 -0
  40. package/src/agent/agent.js +561 -0
  41. package/src/agent/coder.js +229 -0
  42. package/src/agent/commands/actions.js +504 -0
  43. package/src/agent/commands/index.js +259 -0
  44. package/src/agent/commands/queries.js +347 -0
  45. package/src/agent/connection_handler.js +96 -0
  46. package/src/agent/conversation.js +353 -0
  47. package/src/agent/history.js +122 -0
  48. package/src/agent/library/full_state.js +89 -0
  49. package/src/agent/library/index.js +23 -0
  50. package/src/agent/library/lockdown.js +32 -0
  51. package/src/agent/library/skill_library.js +93 -0
  52. package/src/agent/library/skills.js +2093 -0
  53. package/src/agent/library/world.js +431 -0
  54. package/src/agent/memory_bank.js +25 -0
  55. package/src/agent/mindserver_proxy.js +136 -0
  56. package/src/agent/modes.js +446 -0
  57. package/src/agent/npc/build_goal.js +80 -0
  58. package/src/agent/npc/construction/dirt_shelter.json +38 -0
  59. package/src/agent/npc/construction/large_house.json +230 -0
  60. package/src/agent/npc/construction/small_stone_house.json +42 -0
  61. package/src/agent/npc/construction/small_wood_house.json +42 -0
  62. package/src/agent/npc/controller.js +261 -0
  63. package/src/agent/npc/data.js +50 -0
  64. package/src/agent/npc/item_goal.js +355 -0
  65. package/src/agent/npc/utils.js +126 -0
  66. package/src/agent/self_prompter.js +146 -0
  67. package/src/agent/settings.js +7 -0
  68. package/src/agent/speak.js +150 -0
  69. package/src/agent/tasks/construction_tasks.js +1104 -0
  70. package/src/agent/tasks/cooking_tasks.js +358 -0
  71. package/src/agent/tasks/tasks.js +594 -0
  72. package/src/agent/templates/execTemplate.js +6 -0
  73. package/src/agent/templates/lintTemplate.js +10 -0
  74. package/src/agent/vision/browser_viewer.js +8 -0
  75. package/src/agent/vision/camera.js +78 -0
  76. package/src/agent/vision/vision_interpreter.js +82 -0
  77. package/src/mindcraft/index.js +28 -0
  78. package/src/mindcraft/mcserver.js +154 -0
  79. package/src/mindcraft/mindcraft.js +111 -0
  80. package/src/mindcraft/mindserver.js +328 -0
  81. package/src/mindcraft/public/index.html +1253 -0
  82. package/src/mindcraft/public/settings_spec.json +145 -0
  83. package/src/mindcraft/userconfig.js +72 -0
  84. package/src/mindcraft-py/example.py +27 -0
  85. package/src/mindcraft-py/init-mindcraft.js +24 -0
  86. package/src/mindcraft-py/mindcraft.py +99 -0
  87. package/src/models/_model_map.js +89 -0
  88. package/src/models/azure.js +32 -0
  89. package/src/models/cerebras.js +61 -0
  90. package/src/models/claude.js +87 -0
  91. package/src/models/deepseek.js +59 -0
  92. package/src/models/gemini.js +176 -0
  93. package/src/models/glhf.js +71 -0
  94. package/src/models/gpt.js +147 -0
  95. package/src/models/grok.js +82 -0
  96. package/src/models/groq.js +95 -0
  97. package/src/models/huggingface.js +86 -0
  98. package/src/models/hyperbolic.js +114 -0
  99. package/src/models/lmstudio.js +74 -0
  100. package/src/models/mercury.js +95 -0
  101. package/src/models/mistral.js +94 -0
  102. package/src/models/novita.js +71 -0
  103. package/src/models/ollama.js +115 -0
  104. package/src/models/openrouter.js +77 -0
  105. package/src/models/prompter.js +366 -0
  106. package/src/models/qwen.js +80 -0
  107. package/src/models/replicate.js +60 -0
  108. package/src/models/vllm.js +81 -0
  109. package/src/process/agent_process.js +84 -0
  110. package/src/process/init_agent.js +54 -0
  111. package/src/utils/examples.js +83 -0
  112. package/src/utils/keys.js +34 -0
  113. package/src/utils/math.js +13 -0
  114. package/src/utils/mcdata.js +572 -0
  115. package/src/utils/text.js +78 -0
  116. package/src/utils/translator.js +30 -0

package/src/models/deepseek.js
@@ -0,0 +1,59 @@
+import OpenAIApi from 'openai';
+import { getKey, hasKey } from '../utils/keys.js';
+import { strictFormat } from '../utils/text.js';
+
+export class DeepSeek {
+    static prefix = 'deepseek';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
+
+        let config = {};
+
+        config.baseURL = url || 'https://api.deepseek.com';
+        config.apiKey = getKey('DEEPSEEK_API_KEY');
+
+        this.openai = new OpenAIApi(config);
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq='***') {
+        let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
+
+        messages = strictFormat(messages);
+
+        const pack = {
+            model: this.model_name || "deepseek-chat",
+            messages,
+            stop: stop_seq,
+            ...(this.params || {})
+        };
+
+        let res = null;
+        try {
+            console.log('Awaiting deepseek api response...')
+            // console.log('Messages:', messages);
+            let completion = await this.openai.chat.completions.create(pack);
+            if (completion.choices[0].finish_reason == 'length')
+                throw new Error('Context length exceeded');
+            console.log('Received.')
+            res = completion.choices[0].message.content;
+        }
+        catch (err) {
+            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
+                console.log('Context length exceeded, trying again with shorter context.');
+                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+            } else {
+                console.log(err);
+                res = 'My brain disconnected, try again.';
+            }
+        }
+        return res;
+    }
+
+    async embed(text) {
+        throw new Error('Embeddings are not supported by Deepseek.');
+    }
+}
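
A minimal usage sketch (not part of the package) of the wrapper above; the import path and sampling params are illustrative, and it assumes keys.json supplies DEEPSEEK_API_KEY:

    import { DeepSeek } from './src/models/deepseek.js';

    // turns use the same {role, content} shape the agent passes in.
    const model = new DeepSeek('deepseek-chat', null, { temperature: 0.7 });
    const reply = await model.sendRequest(
        [{ role: 'user', content: 'Say hello in one word.' }],
        'You are a concise assistant.');
    console.log(reply);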

package/src/models/gemini.js
@@ -0,0 +1,176 @@
+import { GoogleGenAI } from '@google/genai';
+import { strictFormat } from '../utils/text.js';
+import { getKey } from '../utils/keys.js';
+
+
+export class Gemini {
+    static prefix = 'google';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
+        this.safetySettings = [
+            {
+                "category": "HARM_CATEGORY_DANGEROUS",
+                "threshold": "BLOCK_NONE",
+            },
+            {
+                "category": "HARM_CATEGORY_HARASSMENT",
+                "threshold": "BLOCK_NONE",
+            },
+            {
+                "category": "HARM_CATEGORY_HATE_SPEECH",
+                "threshold": "BLOCK_NONE",
+            },
+            {
+                "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+                "threshold": "BLOCK_NONE",
+            },
+            {
+                "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
+                "threshold": "BLOCK_NONE",
+            },
+        ];
+
+        this.genAI = new GoogleGenAI({apiKey: getKey('GEMINI_API_KEY')});
+    }
+
+    async sendRequest(turns, systemMessage) {
+        console.log('Awaiting Google API response...');
+
+        turns = strictFormat(turns);
+        let contents = [];
+        for (let turn of turns) {
+            contents.push({
+                role: turn.role === 'assistant' ? 'model' : 'user',
+                parts: [{ text: turn.content }]
+            });
+        }
+
+        const result = await this.genAI.models.generateContent({
+            model: this.model_name || "gemini-2.5-flash",
+            contents: contents,
+            safetySettings: this.safetySettings,
+            config: {
+                systemInstruction: systemMessage,
+                ...(this.params || {})
+            }
+        });
+        const response = await result.text;
+
+        console.log('Received.');
+
+        return response;
+    }
+
+    async sendVisionRequest(turns, systemMessage, imageBuffer) {
+        const imagePart = {
+            inlineData: {
+                data: imageBuffer.toString('base64'),
+                mimeType: 'image/jpeg'
+            }
+        };
+
+        turns = strictFormat(turns);
+        let contents = [];
+        for (let turn of turns) {
+            contents.push({
+                role: turn.role === 'assistant' ? 'model' : 'user',
+                parts: [{ text: turn.content }]
+            });
+        }
+        contents.push({
+            role: 'user',
+            parts: [{ text: 'SYSTEM: Vision response' }, imagePart]
+        })
+
+        let res = null;
+        try {
+            console.log('Awaiting Google API vision response...');
+            const result = await this.genAI.models.generateContent({
+                model: this.model_name,
+                contents: contents,
+                safetySettings: this.safetySettings,
+                generationConfig: {
+                    ...(this.params || {})
+                },
+                systemInstruction: systemMessage
+            });
+            res = await result.text;
+            console.log('Received.');
+        } catch (err) {
+            console.log(err);
+            if (err.message.includes("Image input modality is not enabled for models/")) {
+                res = "Vision is only supported by certain models.";
+            } else {
+                res = "An unexpected error occurred, please try again.";
+            }
+        }
+        return res;
+    }
+
+    async embed(text) {
+        const result = await this.genAI.models.embedContent({
+            model: this.model_name || "gemini-embedding-001",
+            contents: text,
+        })
+
+        return result.embeddings;
+    }
+}
+
+const sendAudioRequest = async (text, model, voice, url) => {
+    const ai = new GoogleGenAI({apiKey: getKey('GEMINI_API_KEY')});
+
+    const response = await ai.models.generateContent({
+        model: model,
+        contents: [{ parts: [{text: text}] }],
+        config: {
+            responseModalities: ['AUDIO'],
+            speechConfig: {
+                voiceConfig: {
+                    prebuiltVoiceConfig: { voiceName: voice },
+                },
+            },
+        },
+    })
+
+    const pcmBase64 = response.candidates?.[0]?.content?.parts?.[0]?.inlineData?.data;
+    if (!pcmBase64) {
+        console.warn('Gemini TTS: no audio data returned');
+        return null;
+    }
+
+    // Wrap PCM in a minimal WAV container so ffplay can decode it.
+    const pcmBuffer = Buffer.from(pcmBase64, 'base64');
+    const wavHeader = createWavHeader(pcmBuffer.length, 24000, 1, 16);
+    const wavBuffer = Buffer.concat([wavHeader, pcmBuffer]);
+
+    const wavBase64 = wavBuffer.toString('base64');
+    return wavBase64;
+}
+
+// helper: create PCM WAV header
+function createWavHeader(dataLength, sampleRate, channels, bitsPerSample) {
+    const header = Buffer.alloc(44);
+    const byteRate = sampleRate * channels * bitsPerSample / 8;
+    const blockAlign = channels * bitsPerSample / 8;
+
+    header.write('RIFF', 0);
+    header.writeUInt32LE(36 + dataLength, 4);
+    header.write('WAVE', 8);
+    header.write('fmt ', 12);
+    header.writeUInt32LE(16, 16); // PCM
+    header.writeUInt16LE(1, 20); // Audio format = PCM
+    header.writeUInt16LE(channels, 22);
+    header.writeUInt32LE(sampleRate, 24);
+    header.writeUInt32LE(byteRate, 28);
+    header.writeUInt16LE(blockAlign, 32);
+    header.writeUInt16LE(bitsPerSample, 34);
+    header.write('data', 36);
+    header.writeUInt32LE(dataLength, 40);
+    return header;
+}
+
+export const TTSConfig = {
+    sendAudioRequest: sendAudioRequest,
+}
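
A hedged usage sketch for the TTS helper above (model and voice names are examples, not values fixed by this file). The helper returns base64-encoded WAV — 24 kHz mono 16-bit PCM wrapped by createWavHeader — so it can be written straight to disk:

    import fs from 'node:fs';
    import { TTSConfig } from './src/models/gemini.js';

    // Request speech, then decode the base64 WAV the helper returns.
    const wavBase64 = await TTSConfig.sendAudioRequest(
        'Hello from Mindcraft', 'gemini-2.5-flash-preview-tts', 'Kore');
    if (wavBase64)
        fs.writeFileSync('speech.wav', Buffer.from(wavBase64, 'base64'));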

package/src/models/glhf.js
@@ -0,0 +1,71 @@
+import OpenAIApi from 'openai';
+import { getKey } from '../utils/keys.js';
+
+export class GLHF {
+    static prefix = 'glhf';
+    constructor(model_name, url) {
+        this.model_name = model_name;
+        const apiKey = getKey('GHLF_API_KEY');
+        if (!apiKey) {
+            throw new Error('API key not found. Please check keys.json and ensure GHLF_API_KEY is defined.');
+        }
+        this.openai = new OpenAIApi({
+            apiKey,
+            baseURL: url || "https://glhf.chat/api/openai/v1"
+        });
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq = '***') {
+        // Construct the message array for the API request.
+        let messages = [{ role: 'system', content: systemMessage }].concat(turns);
+        const pack = {
+            model: this.model_name || "hf:meta-llama/Llama-3.1-405B-Instruct",
+            messages,
+            stop: [stop_seq]
+        };
+
+        const maxAttempts = 5;
+        let attempt = 0;
+        let finalRes = null;
+
+        while (attempt < maxAttempts) {
+            attempt++;
+            console.log(`Awaiting glhf.chat API response... (attempt: ${attempt})`);
+            try {
+                let completion = await this.openai.chat.completions.create(pack);
+                if (completion.choices[0].finish_reason === 'length') {
+                    throw new Error('Context length exceeded');
+                }
+                let res = completion.choices[0].message.content;
+                // If there's an open <think> tag without a corresponding </think>, retry.
+                if (res.includes("<think>") && !res.includes("</think>")) {
+                    console.warn("Partial <think> block detected. Re-generating...");
+                    continue;
+                }
+                // If there's a closing </think> tag but no opening <think>, prepend one.
+                if (res.includes("</think>") && !res.includes("<think>")) {
+                    res = "<think>" + res;
+                }
+                finalRes = res.replace(/<\|separator\|>/g, '*no response*');
+                break; // Valid response obtained.
+            } catch (err) {
+                if ((err.message === 'Context length exceeded' || err.code === 'context_length_exceeded') && turns.length > 1) {
+                    console.log('Context length exceeded, trying again with shorter context.');
+                    return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+                } else {
+                    console.error(err);
+                    finalRes = 'My brain disconnected, try again.';
+                    break;
+                }
+            }
+        }
+        if (finalRes === null) {
+            finalRes = "I thought too hard, sorry, try again";
+        }
+        return finalRes;
+    }
+
+    async embed(text) {
+        throw new Error('Embeddings are not supported by glhf.');
+    }
+}
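
To illustrate the retry loop above: sendRequest regenerates when a <think> block is opened but never closed, and repairs the opposite case by prepending the missing tag. A standalone sketch of that repair step:

    // Mirrors the tag-repair branch in sendRequest (illustration only).
    let res = 'internal reasoning...</think>Final answer';
    if (res.includes('</think>') && !res.includes('<think>'))
        res = '<think>' + res;
    // res: '<think>internal reasoning...</think>Final answer'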

package/src/models/gpt.js
@@ -0,0 +1,147 @@
+import OpenAIApi from 'openai';
+import { getKey, hasKey } from '../utils/keys.js';
+import { strictFormat } from '../utils/text.js';
+
+export class GPT {
+    static prefix = 'openai';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.params = params;
+        this.url = url; // store so that we know whether a custom URL has been set
+
+        let config = {};
+        if (url)
+            config.baseURL = url;
+
+        if (hasKey('OPENAI_ORG_ID'))
+            config.organization = getKey('OPENAI_ORG_ID');
+
+        config.apiKey = getKey('OPENAI_API_KEY');
+
+        this.openai = new OpenAIApi(config);
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq='***') {
+        let messages = strictFormat(turns);
+        messages = messages.map(message => {
+            message.content += stop_seq;
+            return message;
+        });
+        let model = this.model_name || "gpt-4o-mini";
+
+        let res = null;
+
+        try {
+            console.log('Awaiting openai api response from model', model);
+            // if a custom URL is set, use chat.completions
+            // because custom "OpenAI-compatible" endpoints likely do not have responses endpoint
+            if (this.url) {
+                let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
+                messages = strictFormat(messages);
+                const pack = {
+                    model: model,
+                    messages,
+                    stop: stop_seq,
+                    ...(this.params || {})
+                };
+                if (model.includes('o1') || model.includes('o3') || model.includes('5')) {
+                    delete pack.stop;
+                }
+                let completion = await this.openai.chat.completions.create(pack);
+                if (completion.choices[0].finish_reason == 'length')
+                    throw new Error('Context length exceeded');
+                console.log('Received.');
+                res = completion.choices[0].message.content;
+            }
+            // otherwise, use responses
+            else {
+                let messages = strictFormat(turns);
+                messages = messages.map(message => {
+                    message.content += stop_seq;
+                    return message;
+                });
+                const response = await this.openai.responses.create({
+                    model: model,
+                    instructions: systemMessage,
+                    input: messages,
+                    ...(this.params || {})
+                });
+                console.log('Received.');
+                res = response.output_text;
+                let stop_seq_index = res.indexOf(stop_seq);
+                res = stop_seq_index !== -1 ? res.slice(0, stop_seq_index) : res;
+            }
+        }
+        catch (err) {
+            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
+                console.log('Context length exceeded, trying again with shorter context.');
+                return await this.sendRequest(turns.slice(1), systemMessage, stop_seq);
+            } else if (err.message.includes('image_url')) {
+                console.log(err);
+                res = 'Vision is only supported by certain models.';
+            } else {
+                console.log(err);
+                res = 'My brain disconnected, try again.';
+            }
+        }
+        return res;
+    }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = [...messages];
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "input_text", text: systemMessage },
+                {
+                    type: "input_image",
+                    image_url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages, systemMessage);
+    }
+
+    async embed(text) {
+        if (text.length > 8191)
+            text = text.slice(0, 8191);
+        const embedding = await this.openai.embeddings.create({
+            model: this.model_name || "text-embedding-3-small",
+            input: text,
+            encoding_format: "float",
+        });
+        return embedding.data[0].embedding;
+    }
+
+}
+
+const sendAudioRequest = async (text, model, voice, url) => {
+    const payload = {
+        model: model,
+        voice: voice,
+        input: text
+    }
+
+    let config = {};
+
+    if (url)
+        config.baseURL = url;
+
+    if (hasKey('OPENAI_ORG_ID'))
+        config.organization = getKey('OPENAI_ORG_ID');
+
+    config.apiKey = getKey('OPENAI_API_KEY');
+
+    const openai = new OpenAIApi(config);
+
+    const mp3 = await openai.audio.speech.create(payload);
+    const buffer = Buffer.from(await mp3.arrayBuffer());
+    const base64 = buffer.toString("base64");
+    return base64;
+}
+
+export const TTSConfig = {
+    sendAudioRequest: sendAudioRequest,
+    baseUrl: 'https://api.openai.com/v1',
+}
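
A sketch of the URL-based branching above (endpoint and model names are placeholders): a custom base URL routes requests through chat.completions, while the default OpenAI endpoint uses the Responses API:

    import { GPT } from './src/models/gpt.js';

    const hosted = new GPT('gpt-4o-mini', null); // no url -> openai.responses.create
    const custom = new GPT('local-model', 'http://localhost:8000/v1'); // url -> chat.completions.create
    const reply = await hosted.sendRequest(
        [{ role: 'user', content: 'ping' }], 'Reply with pong.');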

package/src/models/grok.js
@@ -0,0 +1,82 @@
+import OpenAIApi from 'openai';
+import { getKey } from '../utils/keys.js';
+
+// xAI doesn't supply an SDK for their models, but fully supports OpenAI and Anthropic SDKs
+export class Grok {
+    static prefix = 'xai';
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.url = url;
+        this.params = params;
+
+        let config = {};
+        if (url)
+            config.baseURL = url;
+        else
+            config.baseURL = "https://api.x.ai/v1"
+
+        config.apiKey = getKey('XAI_API_KEY');
+
+        this.openai = new OpenAIApi(config);
+    }
+
+    async sendRequest(turns, systemMessage) {
+        let messages = [{'role': 'system', 'content': systemMessage}].concat(turns);
+
+        const pack = {
+            model: this.model_name || "grok-3-mini-latest",
+            messages,
+            ...(this.params || {})
+        };
+
+        let res = null;
+        try {
+            console.log('Awaiting xai api response...')
+            // console.log('Messages:', messages);
+            let completion = await this.openai.chat.completions.create(pack);
+            if (completion.choices[0].finish_reason == 'length')
+                throw new Error('Context length exceeded');
+            console.log('Received.')
+            res = completion.choices[0].message.content;
+        }
+        catch (err) {
+            if ((err.message == 'Context length exceeded' || err.code == 'context_length_exceeded') && turns.length > 1) {
+                console.log('Context length exceeded, trying again with shorter context.');
+                return await this.sendRequest(turns.slice(1), systemMessage);
+            } else if (err.message.includes('The model expects a single `text` element per message.')) {
+                console.log(err);
+                res = 'Vision is only supported by certain models.';
+            } else {
+                console.log(err);
+                res = 'My brain disconnected, try again.';
+            }
+        }
+        // sometimes outputs special token <|separator|>, just replace it
+        return res.replace(/<\|separator\|>/g, '*no response*');
+    }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = [...messages];
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "text", text: systemMessage },
+                {
+                    type: "image_url",
+                    image_url: {
+                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                    }
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages, systemMessage);
+    }
+
+    async embed(text) {
+        throw new Error('Embeddings are not supported by Grok.');
+    }
+}
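
A hedged example of the vision path above (image.jpg is a placeholder): the wrapper embeds the JPEG as a base64 data URL, and sendRequest falls back to a text error if the chosen model lacks image support:

    import fs from 'node:fs';
    import { Grok } from './src/models/grok.js';

    const grok = new Grok('grok-3-mini-latest', null, {});
    const image = fs.readFileSync('image.jpg'); // placeholder path
    const answer = await grok.sendVisionRequest([], 'Describe this image.', image);
    console.log(answer);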

package/src/models/groq.js
@@ -0,0 +1,95 @@
+import Groq from 'groq-sdk'
+import { getKey } from '../utils/keys.js';
+
+// THIS API IS NOT TO BE CONFUSED WITH GROK!
+// Go to grok.js for that. :)
+
+// Umbrella class for everything under the sun... That GroqCloud provides, that is.
+export class GroqCloudAPI {
+    static prefix = 'groq';
+
+    constructor(model_name, url, params) {
+        this.model_name = model_name;
+        this.url = url;
+        this.params = params || {};
+
+        // Remove any mention of "tools" from params:
+        if (this.params.tools)
+            delete this.params.tools;
+        // This is just a bit of future-proofing in case we drag Mindcraft in that direction.
+
+        // I'm going to do a sneaky ReplicateAPI theft for a lot of this, aren't I?
+        if (this.url)
+            console.warn("Groq Cloud has no implementation for custom URLs. Ignoring provided URL.");
+
+        this.groq = new Groq({ apiKey: getKey('GROQCLOUD_API_KEY') });
+    }
+
+    async sendRequest(turns, systemMessage, stop_seq = null) {
+        // Construct messages array
+        let messages = [{"role": "system", "content": systemMessage}].concat(turns);
+
+        let res = null;
+
+        try {
+            console.log("Awaiting Groq response...");
+
+            // Handle deprecated max_tokens parameter
+            if (this.params.max_tokens) {
+                console.warn("GROQCLOUD WARNING: A profile is using `max_tokens`. This is deprecated. Please move to `max_completion_tokens`.");
+                this.params.max_completion_tokens = this.params.max_tokens;
+                delete this.params.max_tokens;
+            }
+
+            if (!this.params.max_completion_tokens) {
+                this.params.max_completion_tokens = 4000;
+            }
+
+            let completion = await this.groq.chat.completions.create({
+                "messages": messages,
+                "model": this.model_name || "qwen/qwen3-32b",
+                "stream": false,
+                "stop": stop_seq,
+                ...(this.params || {})
+            });
+
+            res = completion.choices[0].message.content;
+
+            res = res.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
+        }
+        catch(err) {
+            if (err.message.includes("content must be a string")) {
+                res = "Vision is only supported by certain models.";
+            } else {
+                res = "My brain disconnected, try again.";
+            }
+            console.log(err);
+        }
+        return res;
+    }
+
+    async sendVisionRequest(messages, systemMessage, imageBuffer) {
+        const imageMessages = messages.filter(message => message.role !== 'system');
+        imageMessages.push({
+            role: "user",
+            content: [
+                { type: "text", text: systemMessage },
+                {
+                    type: "image_url",
+                    image_url: {
+                        url: `data:image/jpeg;base64,${imageBuffer.toString('base64')}`
+                    }
+                }
+            ]
+        });
+
+        return this.sendRequest(imageMessages);
+    }
+
+    async embed(_) {
+        throw new Error('Embeddings are not supported by Groq.');
+    }
+}
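
Finally, a sketch of the max_tokens shim in sendRequest above (the model name is an example): a profile that still passes the deprecated parameter is migrated in place on the first call:

    import { GroqCloudAPI } from './src/models/groq.js';

    const groq = new GroqCloudAPI('llama-3.3-70b-versatile', null, { max_tokens: 512 });
    await groq.sendRequest([{ role: 'user', content: 'hi' }], 'Be brief.');
    // groq.params is now { max_completion_tokens: 512 }; max_tokens was deleted.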