@elizaos/plugin-groq 1.0.0-beta.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Shaw Walters and elizaOS Contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,85 @@
1
+ # Groq Plugin
2
+
3
+ This plugin provides integration with Groq's models through the ElizaOS platform.
4
+
5
+ ## Usage
6
+
7
+ Add the plugin to your character configuration:
8
+
9
+ ```json
10
+ "plugins": ["@elizaos/plugin-groq"]
11
+ ```
12
+
13
+ ## Configuration
14
+
15
+ The plugin requires these environment variables (can be set in .env file or character settings):
16
+
17
+ ```json
18
+ "settings": {
+ "GROQ_API_KEY": "your_groq_api_key",
+ "GROQ_SMALL_MODEL": "llama-3.1-8b-instant",
+ "GROQ_LARGE_MODEL": "llama-3.2-90b-vision-preview"
+ }
24
+ ```
25
+
26
+ Or in `.env` file:
27
+
28
+ ```
29
+ GROQ_API_KEY=your_groq_api_key
+ # Optional overrides:
+ SMALL_GROQ_MODEL=llama-3.1-8b-instant
+ LARGE_GROQ_MODEL=llama-3.2-90b-vision-preview
34
+ ```
35
+
36
+ ### Configuration Options
37
+
38
+ - `GROQ_API_KEY` (required): Your Groq API credentials
+ - `GROQ_SMALL_MODEL`: Defaults to Llama 3.1 8B Instant ("llama-3.1-8b-instant")
+ - `GROQ_LARGE_MODEL`: Defaults to Llama 3.2 90B Vision ("llama-3.2-90b-vision-preview")
42
+
43
+ The plugin provides these model classes:
44
+
45
+ - `TEXT_SMALL`: Optimized for fast, cost-effective responses
46
+ - `TEXT_LARGE`: For complex tasks requiring deeper reasoning
47
+ - `TEXT_EMBEDDING`: Text embedding (currently a placeholder returning a fixed vector)
+ - `IMAGE`: Image generation
+ - `TRANSCRIPTION`: Whisper audio transcription
51
+ - `TEXT_TOKENIZER_ENCODE`: Text tokenization
52
+ - `TEXT_TOKENIZER_DECODE`: Token decoding
53
+
54
+ ## Additional Features
55
+
56
+ ### Image Generation
57
+
58
+ ```js
59
+ await runtime.useModel(ModelType.IMAGE, {
60
+ prompt: 'A sunset over mountains',
61
+ n: 1, // number of images
62
+ size: '1024x1024', // image resolution
63
+ });
64
+ ```
65
+
66
+ ### Audio Transcription
67
+
68
+ ```js
69
+ const transcription = await runtime.useModel(ModelType.TRANSCRIPTION, audioBuffer);
70
+ ```
71
+
72
+ ### Image Analysis
73
+
74
+ ```js
75
+ const { title, description } = await runtime.useModel(
76
+ ModelType.IMAGE_DESCRIPTION,
77
+ 'https://example.com/image.jpg'
78
+ );
79
+ ```
80
+
81
+ ### Text Embeddings
82
+
83
+ ```js
84
+ const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, 'text to embed');
85
+ ```
package/dist/index.js ADDED
@@ -0,0 +1,518 @@
1
+ // src/index.ts
2
+ import { createGroq } from "@ai-sdk/groq";
3
+ import {
4
+ ModelType,
5
+ logger
6
+ } from "@elizaos/core";
7
+ import { generateObject, generateText } from "ai";
8
+ import { encodingForModel } from "js-tiktoken";
9
/**
 * Resolve the base URL used for Groq API requests.
 *
 * When the Cloudflare AI Gateway is fully configured (enabled flag plus
 * account id and gateway id), requests are routed through the gateway;
 * in every other case the default Groq endpoint is returned.
 *
 * @param runtime  - runtime providing `getSetting(key)`
 * @param provider - provider segment appended (lowercased) to the gateway URL
 * @returns the base URL string (never undefined)
 */
function getCloudflareGatewayBaseURL(runtime, provider) {
  const defaultUrl = "https://api.groq.com/openai/v1";
  try {
    const gatewayEnabled = runtime.getSetting("CLOUDFLARE_GW_ENABLED") === "true";
    const accountId = runtime.getSetting("CLOUDFLARE_AI_ACCOUNT_ID");
    const gatewayId = runtime.getSetting("CLOUDFLARE_AI_GATEWAY_ID");
    logger.debug("Cloudflare Gateway Configuration:", {
      isEnabled: gatewayEnabled,
      hasAccountId: !!accountId,
      hasGatewayId: !!gatewayId,
      provider
    });
    if (!gatewayEnabled) {
      logger.debug("Cloudflare Gateway is not enabled");
      return defaultUrl;
    }
    if (!accountId) {
      logger.warn("Cloudflare Gateway is enabled but CLOUDFLARE_AI_ACCOUNT_ID is not set");
      return defaultUrl;
    }
    if (!gatewayId) {
      logger.warn("Cloudflare Gateway is enabled but CLOUDFLARE_AI_GATEWAY_ID is not set");
      return defaultUrl;
    }
    const baseURL = `https://gateway.ai.cloudflare.com/v1/${accountId}/${gatewayId}/${provider.toLowerCase()}`;
    logger.info("Using Cloudflare Gateway:", {
      provider,
      baseURL,
      accountId,
      gatewayId
    });
    return baseURL;
  } catch (error) {
    // Fall back to the public endpoint on any unexpected failure.
    logger.error("Error in getCloudflareGatewayBaseURL:", error);
    return "https://api.groq.com/openai/v1";
  }
}
46
/**
 * Map an Eliza model type to the Groq model name used for tokenizer lookup.
 * TEXT_SMALL maps to SMALL_GROQ_MODEL (default "llama-3.1-8b-instant");
 * everything else maps to LARGE_GROQ_MODEL (default
 * "llama-3.2-90b-vision-preview").
 */
function findModelName(model) {
  try {
    if (model === ModelType.TEXT_SMALL) {
      return process.env.SMALL_GROQ_MODEL ?? "llama-3.1-8b-instant";
    }
    return process.env.LARGE_GROQ_MODEL ?? "llama-3.2-90b-vision-preview";
  } catch (error) {
    logger.error("Error in findModelName:", error);
    return "llama-3.1-8b-instant";
  }
}
55
/**
 * Encode a prompt into tokens with the tiktoken encoding for the model
 * mapped from the given model type. Returns an empty array on failure.
 */
async function tokenizeText(model, prompt) {
  try {
    return encodingForModel(findModelName(model)).encode(prompt);
  } catch (error) {
    logger.error("Error in tokenizeText:", error);
    return [];
  }
}
65
/**
 * Decode a token sequence back into text with the tiktoken encoding for the
 * model mapped from the given model type. Returns "" on failure.
 */
async function detokenizeText(model, tokens) {
  try {
    return encodingForModel(findModelName(model)).decode(tokens);
  } catch (error) {
    logger.error("Error in detokenizeText:", error);
    return "";
  }
}
75
/**
 * Handle a Groq rate-limit error: wait for the delay suggested in the error
 * message (default 10s plus a 1s buffer), then retry once via `retryFn`.
 * Non-rate-limit errors are logged once and rethrown untouched.
 *
 * Fixes vs. original: non-rate-limit errors were rethrown inside the outer
 * try and re-caught, producing a second, misleading "Error during retry
 * handling" log; `error.message` was also dereferenced without a guard.
 *
 * @param error   - the error from the failed request
 * @param retryFn - zero-arg async function re-issuing the request
 * @returns the result of `retryFn` when a retry happens
 * @throws the original error (or the retry's error) otherwise
 */
async function handleRateLimitError(error, retryFn) {
  const message = error?.message ?? "";
  if (!message.includes("Rate limit reached")) {
    logger.error("Error with Groq API:", error);
    throw error;
  }
  logger.warn("Groq rate limit reached", { error: message });
  let retryDelay = 1e4; // default: 10 seconds
  const delayMatch = message.match(/try again in (\d+\.?\d*)s/i);
  if (delayMatch?.[1]) {
    // Convert the server-suggested delay to ms and add a 1s buffer.
    retryDelay = Math.ceil(Number.parseFloat(delayMatch[1]) * 1e3) + 1e3;
  }
  logger.info(`Will retry after ${retryDelay}ms delay`);
  await new Promise((resolve) => setTimeout(resolve, retryDelay));
  logger.info("Retrying request after rate limit delay");
  try {
    return await retryFn();
  } catch (retryError) {
    logger.error("Error during retry handling:", retryError);
    throw retryError;
  }
}
96
/**
 * Generate text via the Groq API, retrying once through
 * `handleRateLimitError` when the first attempt fails.
 *
 * Fix vs. original: the `generateText` parameter marshaling was duplicated
 * verbatim in the retry path; it is now a single shared closure.
 *
 * @param groq   - client from `createGroq`
 * @param model  - Groq model id
 * @param params - { prompt, system?, temperature, maxTokens,
 *                  frequencyPenalty, presencePenalty, stopSequences }
 * @returns generated text, or a fixed apology string on final failure
 */
async function generateGroqText(groq, model, params) {
  // Single request closure used for both the first attempt and the retry.
  const requestText = async () => {
    const { text } = await generateText({
      model: groq.languageModel(model),
      prompt: params.prompt,
      system: params.system,
      temperature: params.temperature,
      maxTokens: params.maxTokens,
      frequencyPenalty: params.frequencyPenalty,
      presencePenalty: params.presencePenalty,
      stopSequences: params.stopSequences
    });
    return text;
  };
  try {
    return await requestText();
  } catch (error) {
    try {
      return await handleRateLimitError(error, requestText);
    } catch (retryError) {
      logger.error("Final error in generateGroqText:", retryError);
      return "Error generating text. Please try again later.";
    }
  }
}
130
/**
 * Generate a schemaless JSON object via the Groq API.
 * Returns {} on any failure instead of throwing.
 */
async function generateGroqObject(groq, model, params) {
  try {
    const result = await generateObject({
      model: groq.languageModel(model),
      output: "no-schema",
      prompt: params.prompt,
      temperature: params.temperature
    });
    return result.object;
  } catch (error) {
    logger.error("Error generating object:", error);
    return {};
  }
}
144
/**
 * Groq plugin for ElizaOS.
 *
 * Registers text, object, image, transcription, embedding and tokenizer
 * model handlers backed by the Groq API (optionally routed through a
 * Cloudflare AI Gateway), plus a suite of runtime self-tests.
 *
 * Fix vs. original: the TEXT_LARGE fallback model was "llama-3.2-90b",
 * inconsistent with the "llama-3.2-90b-vision-preview" default used by
 * OBJECT_LARGE and findModelName; it now matches.
 */
var groqPlugin = {
  name: "groq",
  description: "Groq plugin",
  // NOTE(review): these env var names (SMALL_GROQ_MODEL / LARGE_GROQ_MODEL)
  // differ from the runtime settings the handlers read (GROQ_SMALL_MODEL /
  // GROQ_LARGE_MODEL) -- confirm which naming is canonical.
  config: {
    GROQ_API_KEY: process.env.GROQ_API_KEY,
    SMALL_GROQ_MODEL: process.env.SMALL_GROQ_MODEL,
    MEDIUM_GROQ_MODEL: process.env.MEDIUM_GROQ_MODEL,
    LARGE_GROQ_MODEL: process.env.LARGE_GROQ_MODEL
  },
  // Fail fast at startup: every model handler requires the API key.
  async init(config) {
    if (!process.env.GROQ_API_KEY) {
      throw Error("Missing GROQ_API_KEY in environment variables");
    }
  },
  models: {
    // Placeholder: returns a fixed 1536-dim vector (0.1 at index 0) rather
    // than calling a real embedding API.
    [ModelType.TEXT_EMBEDDING]: async (runtime, params) => {
      try {
        const testVector = Array(1536).fill(0);
        testVector[0] = 0.1;
        return testVector;
      } catch (error) {
        logger.error("Error in TEXT_EMBEDDING model:", error);
        // Fallback vector rather than crashing.
        return Array(1536).fill(0);
      }
    },
    // Tokenize a prompt; defaults to the large model's encoding.
    [ModelType.TEXT_TOKENIZER_ENCODE]: async (_runtime, { prompt, modelType = ModelType.TEXT_LARGE }) => {
      try {
        return await tokenizeText(modelType ?? ModelType.TEXT_LARGE, prompt);
      } catch (error) {
        logger.error("Error in TEXT_TOKENIZER_ENCODE model:", error);
        return [];
      }
    },
    // Decode tokens back into text; defaults to the large model's encoding.
    [ModelType.TEXT_TOKENIZER_DECODE]: async (_runtime, { tokens, modelType = ModelType.TEXT_LARGE }) => {
      try {
        return await detokenizeText(modelType ?? ModelType.TEXT_LARGE, tokens);
      } catch (error) {
        logger.error("Error in TEXT_TOKENIZER_DECODE model:", error);
        return "";
      }
    },
    // Fast, cost-effective text generation with fixed sampling parameters.
    [ModelType.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }) => {
      try {
        const temperature = 0.7;
        const frequency_penalty = 0.7;
        const presence_penalty = 0.7;
        const max_response_length = 8e3;
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const groq = createGroq({
          apiKey: runtime.getSetting("GROQ_API_KEY"),
          fetch: runtime.fetch,
          baseURL
        });
        const model = runtime.getSetting("GROQ_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "llama-3.1-8b-instant";
        logger.log("generating text");
        logger.log(prompt);
        return await generateGroqText(groq, model, {
          prompt,
          system: runtime.character.system ?? void 0,
          temperature,
          maxTokens: max_response_length,
          frequencyPenalty: frequency_penalty,
          presencePenalty: presence_penalty,
          stopSequences
        });
      } catch (error) {
        logger.error("Error in TEXT_SMALL model:", error);
        return "Error generating text. Please try again later.";
      }
    },
    // Larger model for complex tasks; sampling parameters are overridable.
    [ModelType.TEXT_LARGE]: async (runtime, {
      prompt,
      stopSequences = [],
      maxTokens = 8192,
      temperature = 0.7,
      frequencyPenalty = 0.7,
      presencePenalty = 0.7
    }) => {
      try {
        // Default aligned with OBJECT_LARGE and findModelName (was
        // "llama-3.2-90b", which matches no other default in this file).
        const model = runtime.getSetting("GROQ_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "llama-3.2-90b-vision-preview";
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const groq = createGroq({
          apiKey: runtime.getSetting("GROQ_API_KEY"),
          fetch: runtime.fetch,
          baseURL
        });
        return await generateGroqText(groq, model, {
          prompt,
          system: runtime.character.system ?? void 0,
          temperature,
          maxTokens,
          frequencyPenalty,
          presencePenalty,
          stopSequences
        });
      } catch (error) {
        logger.error("Error in TEXT_LARGE model:", error);
        return "Error generating text. Please try again later.";
      }
    },
    // Image generation via an OpenAI-compatible endpoint.
    // NOTE(review): Groq's public API does not document /images/generations;
    // verify this works against the configured endpoint.
    [ModelType.IMAGE]: async (runtime, params) => {
      try {
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const response = await fetch(`${baseURL}/images/generations`, {
          method: "POST",
          headers: {
            Authorization: `Bearer ${runtime.getSetting("GROQ_API_KEY")}`,
            "Content-Type": "application/json"
          },
          body: JSON.stringify({
            prompt: params.prompt,
            n: params.n || 1,
            size: params.size || "1024x1024"
          })
        });
        if (!response.ok) {
          logger.error(`Failed to generate image: ${response.statusText}`);
          return [{ url: "" }];
        }
        const data = await response.json();
        const typedData = data;
        return typedData.data;
      } catch (error) {
        logger.error("Error in IMAGE model:", error);
        return [{ url: "" }];
      }
    },
    // Transcribe an audio buffer via the /audio/transcriptions endpoint.
    [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer) => {
      try {
        logger.log("audioBuffer", audioBuffer);
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const formData = new FormData();
        const enhancedFormData = formData;
        enhancedFormData.append("file", new Blob([audioBuffer], { type: "audio/mp3" }));
        // NOTE(review): "whisper-1" is OpenAI's model id; Groq documents
        // "whisper-large-v3" -- confirm against the target endpoint.
        enhancedFormData.append("model", "whisper-1");
        const response = await fetch(`${baseURL}/audio/transcriptions`, {
          method: "POST",
          headers: {
            Authorization: `Bearer ${runtime.getSetting("GROQ_API_KEY")}`
          },
          body: formData
        });
        logger.log("response", response);
        if (!response.ok) {
          logger.error(`Failed to transcribe audio: ${response.statusText}`);
          return "Error transcribing audio. Please try again later.";
        }
        const data = await response.json();
        return data.text;
      } catch (error) {
        logger.error("Error in TRANSCRIPTION model:", error);
        return "Error transcribing audio. Please try again later.";
      }
    },
    // Schemaless JSON generation with the small model.
    [ModelType.OBJECT_SMALL]: async (runtime, params) => {
      try {
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const groq = createGroq({
          apiKey: runtime.getSetting("GROQ_API_KEY"),
          baseURL
        });
        const model = runtime.getSetting("GROQ_SMALL_MODEL") ?? runtime.getSetting("SMALL_MODEL") ?? "llama-3.1-8b-instant";
        if (params.schema) {
          // Schema is accepted but not enforced (output: "no-schema").
          logger.info("Using OBJECT_SMALL without schema validation");
        }
        return await generateGroqObject(groq, model, params);
      } catch (error) {
        logger.error("Error in OBJECT_SMALL model:", error);
        return {};
      }
    },
    // Schemaless JSON generation with the large model.
    [ModelType.OBJECT_LARGE]: async (runtime, params) => {
      try {
        const baseURL = getCloudflareGatewayBaseURL(runtime, "groq");
        const groq = createGroq({
          apiKey: runtime.getSetting("GROQ_API_KEY"),
          baseURL
        });
        const model = runtime.getSetting("GROQ_LARGE_MODEL") ?? runtime.getSetting("LARGE_MODEL") ?? "llama-3.2-90b-vision-preview";
        if (params.schema) {
          // Schema is accepted but not enforced (output: "no-schema").
          logger.info("Using OBJECT_LARGE without schema validation");
        }
        return await generateGroqObject(groq, model, params);
      } catch (error) {
        logger.error("Error in OBJECT_LARGE model:", error);
        return {};
      }
    }
  },
  // Runtime self-tests; each test logs failures rather than throwing.
  tests: [
    {
      name: "groq_plugin_tests",
      tests: [
        {
          name: "groq_test_url_and_api_key_validation",
          fn: async (runtime) => {
            try {
              const baseURL = getCloudflareGatewayBaseURL(runtime, "groq") ?? "https://api.groq.com/openai/v1";
              const response = await fetch(`${baseURL}/models`, {
                headers: {
                  Authorization: `Bearer ${runtime.getSetting("GROQ_API_KEY")}`
                }
              });
              const data = await response.json();
              logger.log("Models Available:", data?.data?.length);
              if (!response.ok) {
                logger.error(`Failed to validate Groq API key: ${response.statusText}`);
                return;
              }
            } catch (error) {
              logger.error("Error in groq_test_url_and_api_key_validation:", error);
            }
          }
        },
        {
          name: "groq_test_text_embedding",
          fn: async (runtime) => {
            try {
              const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {
                text: "Hello, world!"
              });
              logger.log("embedding", embedding);
            } catch (error) {
              logger.error("Error in test_text_embedding:", error);
            }
          }
        },
        {
          name: "groq_test_text_large",
          fn: async (runtime) => {
            try {
              const text = await runtime.useModel(ModelType.TEXT_LARGE, {
                prompt: "What is the nature of reality in 10 words?"
              });
              if (text.length === 0) {
                logger.error("Failed to generate text");
                return;
              }
              logger.log("generated with test_text_large:", text);
            } catch (error) {
              logger.error("Error in test_text_large:", error);
            }
          }
        },
        {
          name: "groq_test_text_small",
          fn: async (runtime) => {
            try {
              const text = await runtime.useModel(ModelType.TEXT_SMALL, {
                prompt: "What is the nature of reality in 10 words?"
              });
              if (text.length === 0) {
                logger.error("Failed to generate text");
                return;
              }
              logger.log("generated with test_text_small:", text);
            } catch (error) {
              logger.error("Error in test_text_small:", error);
            }
          }
        },
        {
          name: "groq_test_image_generation",
          fn: async (runtime) => {
            try {
              logger.log("groq_test_image_generation");
              const image = await runtime.useModel(ModelType.IMAGE, {
                prompt: "A beautiful sunset over a calm ocean",
                n: 1,
                size: "1024x1024"
              });
              logger.log("generated with test_image_generation:", image);
            } catch (error) {
              logger.error("Error in test_image_generation:", error);
            }
          }
        },
        {
          name: "groq_test_transcription",
          fn: async (runtime) => {
            try {
              logger.log("groq_test_transcription");
              const response = await fetch(
                "https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg"
              );
              if (!response.ok) {
                logger.error(`Failed to fetch audio sample: ${response.statusText}`);
                return;
              }
              const arrayBuffer = await response.arrayBuffer();
              const transcription = await runtime.useModel(
                ModelType.TRANSCRIPTION,
                Buffer.from(new Uint8Array(arrayBuffer))
              );
              logger.log("generated with test_transcription:", transcription);
            } catch (error) {
              logger.error("Error in test_transcription:", error);
            }
          }
        },
        {
          name: "groq_test_text_tokenizer_encode",
          fn: async (runtime) => {
            try {
              const prompt = "Hello tokenizer encode!";
              const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
              if (!Array.isArray(tokens) || tokens.length === 0) {
                logger.error("Failed to tokenize text: expected non-empty array of tokens");
                return;
              }
              logger.log("Tokenized output:", tokens);
            } catch (error) {
              logger.error("Error in test_text_tokenizer_encode:", error);
            }
          }
        },
        {
          name: "groq_test_text_tokenizer_decode",
          fn: async (runtime) => {
            try {
              const prompt = "Hello tokenizer decode!";
              const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
              const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, {
                tokens
              });
              if (decodedText !== prompt) {
                logger.error(
                  `Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`
                );
                return;
              }
              logger.log("Decoded text:", decodedText);
            } catch (error) {
              logger.error("Error in test_text_tokenizer_decode:", error);
            }
          }
        },
        {
          name: "groq_test_object_small",
          fn: async (runtime) => {
            try {
              const object = await runtime.useModel(ModelType.OBJECT_SMALL, {
                prompt: "Generate a JSON object representing a user profile with name, age, and hobbies",
                temperature: 0.7
              });
              logger.log("Generated object:", object);
            } catch (error) {
              logger.error("Error in test_object_small:", error);
            }
          }
        },
        {
          name: "groq_test_object_large",
          fn: async (runtime) => {
            try {
              const object = await runtime.useModel(ModelType.OBJECT_LARGE, {
                prompt: "Generate a detailed JSON object representing a restaurant with name, cuisine type, menu items with prices, and customer reviews",
                temperature: 0.7
              });
              logger.log("Generated object:", object);
            } catch (error) {
              logger.error("Error in test_object_large:", error);
            }
          }
        }
      ]
    }
  ]
};
var index_default = groqPlugin;
export {
  index_default as default,
  groqPlugin
};
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/index.ts"],"sourcesContent":["import { createGroq } from '@ai-sdk/groq';\nimport type {\n ImageDescriptionParams,\n ModelTypeName,\n ObjectGenerationParams,\n Plugin,\n TextEmbeddingParams,\n} from '@elizaos/core';\nimport {\n type DetokenizeTextParams,\n type GenerateTextParams,\n ModelType,\n type TokenizeTextParams,\n logger,\n} from '@elizaos/core';\nimport { generateObject, generateText } from 'ai';\nimport { type TiktokenModel, encodingForModel } from 'js-tiktoken';\nimport { z } from 'zod';\n\n/**\n * Runtime interface for the Groq plugin\n */\ninterface Runtime {\n getSetting(key: string): string | undefined;\n character: {\n system?: string;\n };\n fetch?: typeof fetch;\n}\n\n/**\n * Gets the Cloudflare Gateway base URL for a specific provider if enabled\n * @param runtime The runtime environment\n * @param provider The model provider name\n * @returns The Cloudflare Gateway base URL if enabled, undefined otherwise\n */\nfunction getCloudflareGatewayBaseURL(runtime: Runtime, provider: string): string | undefined {\n try {\n const isCloudflareEnabled = runtime.getSetting('CLOUDFLARE_GW_ENABLED') === 'true';\n const cloudflareAccountId = runtime.getSetting('CLOUDFLARE_AI_ACCOUNT_ID');\n const cloudflareGatewayId = runtime.getSetting('CLOUDFLARE_AI_GATEWAY_ID');\n\n const defaultUrl = 'https://api.groq.com/openai/v1';\n logger.debug('Cloudflare Gateway Configuration:', {\n isEnabled: isCloudflareEnabled,\n hasAccountId: !!cloudflareAccountId,\n hasGatewayId: !!cloudflareGatewayId,\n provider: provider,\n });\n\n if (!isCloudflareEnabled) {\n logger.debug('Cloudflare Gateway is not enabled');\n return defaultUrl;\n }\n\n if (!cloudflareAccountId) {\n logger.warn('Cloudflare Gateway is enabled but CLOUDFLARE_AI_ACCOUNT_ID is not set');\n return defaultUrl;\n }\n\n if (!cloudflareGatewayId) {\n logger.warn('Cloudflare Gateway is enabled but CLOUDFLARE_AI_GATEWAY_ID is not set');\n return defaultUrl;\n }\n\n const baseURL = 
`https://gateway.ai.cloudflare.com/v1/${cloudflareAccountId}/${cloudflareGatewayId}/${provider.toLowerCase()}`;\n logger.info('Using Cloudflare Gateway:', {\n provider,\n baseURL,\n accountId: cloudflareAccountId,\n gatewayId: cloudflareGatewayId,\n });\n\n return baseURL;\n } catch (error) {\n logger.error('Error in getCloudflareGatewayBaseURL:', error);\n return 'https://api.groq.com/openai/v1';\n }\n}\n\nfunction findModelName(model: ModelTypeName): TiktokenModel {\n try {\n const name =\n model === ModelType.TEXT_SMALL\n ? (process.env.SMALL_GROQ_MODEL ?? 'llama-3.1-8b-instant')\n : (process.env.LARGE_GROQ_MODEL ?? 'llama-3.2-90b-vision-preview');\n return name as TiktokenModel;\n } catch (error) {\n logger.error('Error in findModelName:', error);\n return 'llama-3.1-8b-instant' as TiktokenModel;\n }\n}\n\nasync function tokenizeText(model: ModelTypeName, prompt: string) {\n try {\n const encoding = encodingForModel(findModelName(model));\n const tokens = encoding.encode(prompt);\n return tokens;\n } catch (error) {\n logger.error('Error in tokenizeText:', error);\n return [];\n }\n}\n\n/**\n * Detokenize a sequence of tokens back into text using the specified model.\n *\n * @param {ModelTypeName} model - The type of model to use for detokenization.\n * @param {number[]} tokens - The sequence of tokens to detokenize.\n * @returns {string} The detokenized text.\n */\nasync function detokenizeText(model: ModelTypeName, tokens: number[]) {\n try {\n const modelName = findModelName(model);\n const encoding = encodingForModel(modelName);\n return encoding.decode(tokens);\n } catch (error) {\n logger.error('Error in detokenizeText:', error);\n return '';\n }\n}\n\n/**\n * Handles rate limit errors, waits for the appropriate delay, and retries the operation\n * @param error The error object from the failed request\n * @param retryFn The function to retry after waiting\n * @returns Result from the retry function\n */\nasync function handleRateLimitError(error: Error, 
retryFn: () => Promise<unknown>) {\n try {\n if (error.message.includes('Rate limit reached')) {\n logger.warn('Groq rate limit reached', { error: error.message });\n\n // Extract retry delay from error message if possible\n let retryDelay = 10000; // Default to 10 seconds\n const delayMatch = error.message.match(/try again in (\\d+\\.?\\d*)s/i);\n if (delayMatch?.[1]) {\n // Convert to milliseconds and add a small buffer\n retryDelay = Math.ceil(Number.parseFloat(delayMatch[1]) * 1000) + 1000;\n }\n\n logger.info(`Will retry after ${retryDelay}ms delay`);\n\n // Wait for the suggested delay plus a small buffer\n await new Promise((resolve) => setTimeout(resolve, retryDelay));\n\n // Retry the request\n logger.info('Retrying request after rate limit delay');\n return await retryFn();\n }\n\n // For other errors, log and rethrow\n logger.error('Error with Groq API:', error);\n throw error;\n } catch (retryError) {\n logger.error('Error during retry handling:', retryError);\n throw retryError;\n }\n}\n\n/**\n * Generate text using Groq API with retry handling for rate limits\n */\nasync function generateGroqText(\n groq: ReturnType<typeof createGroq>,\n model: string,\n params: {\n prompt: string;\n system?: string;\n temperature: number;\n maxTokens: number;\n frequencyPenalty: number;\n presencePenalty: number;\n stopSequences: string[];\n }\n) {\n try {\n const { text: groqResponse } = await generateText({\n model: groq.languageModel(model),\n prompt: params.prompt,\n system: params.system,\n temperature: params.temperature,\n maxTokens: params.maxTokens,\n frequencyPenalty: params.frequencyPenalty,\n presencePenalty: params.presencePenalty,\n stopSequences: params.stopSequences,\n });\n return groqResponse;\n } catch (error: unknown) {\n try {\n return await handleRateLimitError(error as Error, async () => {\n const { text: groqRetryResponse } = await generateText({\n model: groq.languageModel(model),\n prompt: params.prompt,\n system: params.system,\n 
temperature: params.temperature,\n maxTokens: params.maxTokens,\n frequencyPenalty: params.frequencyPenalty,\n presencePenalty: params.presencePenalty,\n stopSequences: params.stopSequences,\n });\n return groqRetryResponse;\n });\n } catch (retryError) {\n logger.error('Final error in generateGroqText:', retryError);\n return 'Error generating text. Please try again later.';\n }\n }\n}\n\n/**\n * Generate object using Groq API with consistent error handling\n */\nasync function generateGroqObject(\n groq: ReturnType<typeof createGroq>,\n model: string,\n params: ObjectGenerationParams\n) {\n try {\n const { object } = await generateObject({\n model: groq.languageModel(model),\n output: 'no-schema',\n prompt: params.prompt,\n temperature: params.temperature,\n });\n return object;\n } catch (error: unknown) {\n logger.error('Error generating object:', error);\n return {};\n }\n}\n\nexport const groqPlugin: Plugin = {\n name: 'groq',\n description: 'Groq plugin',\n config: {\n GROQ_API_KEY: process.env.GROQ_API_KEY,\n SMALL_GROQ_MODEL: process.env.SMALL_GROQ_MODEL,\n MEDIUM_GROQ_MODEL: process.env.MEDIUM_GROQ_MODEL,\n LARGE_GROQ_MODEL: process.env.LARGE_GROQ_MODEL,\n },\n async init(config: Record<string, string>) {\n if (!process.env.GROQ_API_KEY) {\n throw Error('Missing GROQ_API_KEY in environment variables');\n }\n },\n models: {\n [ModelType.TEXT_EMBEDDING]: async (\n runtime,\n params: TextEmbeddingParams | string | null\n ): Promise<number[]> => {\n try {\n const testVector = Array(1536).fill(0);\n testVector[0] = 0.1;\n return testVector;\n } catch (error) {\n logger.error('Error in TEXT_EMBEDDING model:', error);\n // Return a fallback vector rather than crashing\n return Array(1536).fill(0);\n }\n },\n [ModelType.TEXT_TOKENIZER_ENCODE]: async (\n _runtime,\n { prompt, modelType = ModelType.TEXT_LARGE }: TokenizeTextParams\n ) => {\n try {\n return await tokenizeText(modelType ?? 
ModelType.TEXT_LARGE, prompt);\n } catch (error) {\n logger.error('Error in TEXT_TOKENIZER_ENCODE model:', error);\n // Return empty array instead of crashing\n return [];\n }\n },\n [ModelType.TEXT_TOKENIZER_DECODE]: async (\n _runtime,\n { tokens, modelType = ModelType.TEXT_LARGE }: DetokenizeTextParams\n ) => {\n try {\n return await detokenizeText(modelType ?? ModelType.TEXT_LARGE, tokens);\n } catch (error) {\n logger.error('Error in TEXT_TOKENIZER_DECODE model:', error);\n // Return empty string instead of crashing\n return '';\n }\n },\n [ModelType.TEXT_SMALL]: async (runtime, { prompt, stopSequences = [] }: GenerateTextParams) => {\n try {\n const temperature = 0.7;\n const frequency_penalty = 0.7;\n const presence_penalty = 0.7;\n const max_response_length = 8000;\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n const groq = createGroq({\n apiKey: runtime.getSetting('GROQ_API_KEY'),\n fetch: runtime.fetch,\n baseURL,\n });\n\n const model =\n runtime.getSetting('GROQ_SMALL_MODEL') ??\n runtime.getSetting('SMALL_MODEL') ??\n 'llama-3.1-8b-instant';\n\n logger.log('generating text');\n logger.log(prompt);\n\n return await generateGroqText(groq, model, {\n prompt,\n system: runtime.character.system ?? undefined,\n temperature,\n maxTokens: max_response_length,\n frequencyPenalty: frequency_penalty,\n presencePenalty: presence_penalty,\n stopSequences,\n });\n } catch (error) {\n logger.error('Error in TEXT_SMALL model:', error);\n return 'Error generating text. 
Please try again later.';\n }\n },\n [ModelType.TEXT_LARGE]: async (\n runtime,\n {\n prompt,\n stopSequences = [],\n maxTokens = 8192,\n temperature = 0.7,\n frequencyPenalty = 0.7,\n presencePenalty = 0.7,\n }: GenerateTextParams\n ) => {\n try {\n const model =\n runtime.getSetting('GROQ_LARGE_MODEL') ??\n runtime.getSetting('LARGE_MODEL') ??\n 'llama-3.2-90b';\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n const groq = createGroq({\n apiKey: runtime.getSetting('GROQ_API_KEY'),\n fetch: runtime.fetch,\n baseURL,\n });\n\n return await generateGroqText(groq, model, {\n prompt,\n system: runtime.character.system ?? undefined,\n temperature,\n maxTokens,\n frequencyPenalty,\n presencePenalty,\n stopSequences,\n });\n } catch (error) {\n logger.error('Error in TEXT_LARGE model:', error);\n return 'Error generating text. Please try again later.';\n }\n },\n [ModelType.IMAGE]: async (\n runtime,\n params: {\n prompt: string;\n n?: number;\n size?: string;\n }\n ) => {\n try {\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n const response = await fetch(`${baseURL}/images/generations`, {\n method: 'POST',\n headers: {\n Authorization: `Bearer ${runtime.getSetting('GROQ_API_KEY')}`,\n 'Content-Type': 'application/json',\n },\n body: JSON.stringify({\n prompt: params.prompt,\n n: params.n || 1,\n size: params.size || '1024x1024',\n }),\n });\n if (!response.ok) {\n logger.error(`Failed to generate image: ${response.statusText}`);\n return [{ url: '' }];\n }\n const data = await response.json();\n const typedData = data as { data: { url: string }[] };\n return typedData.data;\n } catch (error) {\n logger.error('Error in IMAGE model:', error);\n return [{ url: '' }];\n }\n },\n [ModelType.TRANSCRIPTION]: async (runtime, audioBuffer: Buffer) => {\n try {\n logger.log('audioBuffer', audioBuffer);\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n\n // Create a FormData instance\n const formData = new FormData();\n\n // 
Create a proper interface for FormData to avoid type errors\n interface EnhancedFormData extends FormData {\n append(name: string, value: string | Blob, fileName?: string): void;\n }\n\n // Cast to our enhanced interface\n const enhancedFormData = formData as EnhancedFormData;\n enhancedFormData.append('file', new Blob([audioBuffer], { type: 'audio/mp3' }));\n enhancedFormData.append('model', 'whisper-1');\n\n const response = await fetch(`${baseURL}/audio/transcriptions`, {\n method: 'POST',\n headers: {\n Authorization: `Bearer ${runtime.getSetting('GROQ_API_KEY')}`,\n },\n body: formData,\n });\n\n logger.log('response', response);\n if (!response.ok) {\n logger.error(`Failed to transcribe audio: ${response.statusText}`);\n return 'Error transcribing audio. Please try again later.';\n }\n const data = (await response.json()) as { text: string };\n return data.text;\n } catch (error) {\n logger.error('Error in TRANSCRIPTION model:', error);\n return 'Error transcribing audio. Please try again later.';\n }\n },\n [ModelType.OBJECT_SMALL]: async (runtime, params: ObjectGenerationParams) => {\n try {\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n const groq = createGroq({\n apiKey: runtime.getSetting('GROQ_API_KEY'),\n baseURL,\n });\n const model =\n runtime.getSetting('GROQ_SMALL_MODEL') ??\n runtime.getSetting('SMALL_MODEL') ??\n 'llama-3.1-8b-instant';\n\n if (params.schema) {\n logger.info('Using OBJECT_SMALL without schema validation');\n }\n\n return await generateGroqObject(groq, model, params);\n } catch (error) {\n logger.error('Error in OBJECT_SMALL model:', error);\n // Return empty object instead of crashing\n return {};\n }\n },\n [ModelType.OBJECT_LARGE]: async (runtime, params: ObjectGenerationParams) => {\n try {\n const baseURL = getCloudflareGatewayBaseURL(runtime, 'groq');\n const groq = createGroq({\n apiKey: runtime.getSetting('GROQ_API_KEY'),\n baseURL,\n });\n const model =\n runtime.getSetting('GROQ_LARGE_MODEL') ??\n 
runtime.getSetting('LARGE_MODEL') ??\n 'llama-3.2-90b-vision-preview';\n\n if (params.schema) {\n logger.info('Using OBJECT_LARGE without schema validation');\n }\n\n return await generateGroqObject(groq, model, params);\n } catch (error) {\n logger.error('Error in OBJECT_LARGE model:', error);\n // Return empty object instead of crashing\n return {};\n }\n },\n },\n tests: [\n {\n name: 'groq_plugin_tests',\n tests: [\n {\n name: 'groq_test_url_and_api_key_validation',\n fn: async (runtime) => {\n try {\n const baseURL =\n getCloudflareGatewayBaseURL(runtime, 'groq') ?? 'https://api.groq.com/openai/v1';\n const response = await fetch(`${baseURL}/models`, {\n headers: {\n Authorization: `Bearer ${runtime.getSetting('GROQ_API_KEY')}`,\n },\n });\n const data = await response.json();\n logger.log('Models Available:', (data as { data: unknown[] })?.data?.length);\n if (!response.ok) {\n logger.error(`Failed to validate Groq API key: ${response.statusText}`);\n return;\n }\n } catch (error) {\n logger.error('Error in groq_test_url_and_api_key_validation:', error);\n }\n },\n },\n {\n name: 'groq_test_text_embedding',\n fn: async (runtime) => {\n try {\n const embedding = await runtime.useModel(ModelType.TEXT_EMBEDDING, {\n text: 'Hello, world!',\n });\n logger.log('embedding', embedding);\n } catch (error) {\n logger.error('Error in test_text_embedding:', error);\n }\n },\n },\n {\n name: 'groq_test_text_large',\n fn: async (runtime) => {\n try {\n const text = await runtime.useModel(ModelType.TEXT_LARGE, {\n prompt: 'What is the nature of reality in 10 words?',\n });\n if (text.length === 0) {\n logger.error('Failed to generate text');\n return;\n }\n logger.log('generated with test_text_large:', text);\n } catch (error) {\n logger.error('Error in test_text_large:', error);\n }\n },\n },\n {\n name: 'groq_test_text_small',\n fn: async (runtime) => {\n try {\n const text = await runtime.useModel(ModelType.TEXT_SMALL, {\n prompt: 'What is the nature of reality in 10 
words?',\n });\n if (text.length === 0) {\n logger.error('Failed to generate text');\n return;\n }\n logger.log('generated with test_text_small:', text);\n } catch (error) {\n logger.error('Error in test_text_small:', error);\n }\n },\n },\n {\n name: 'groq_test_image_generation',\n fn: async (runtime) => {\n try {\n logger.log('groq_test_image_generation');\n const image = await runtime.useModel(ModelType.IMAGE, {\n prompt: 'A beautiful sunset over a calm ocean',\n n: 1,\n size: '1024x1024',\n });\n logger.log('generated with test_image_generation:', image);\n } catch (error) {\n logger.error('Error in test_image_generation:', error);\n }\n },\n },\n {\n name: 'groq_test_transcription',\n fn: async (runtime) => {\n try {\n logger.log('groq_test_transcription');\n const response = await fetch(\n 'https://upload.wikimedia.org/wikipedia/en/4/40/Chris_Benoit_Voice_Message.ogg'\n );\n if (!response.ok) {\n logger.error(`Failed to fetch audio sample: ${response.statusText}`);\n return;\n }\n const arrayBuffer = await response.arrayBuffer();\n const transcription = await runtime.useModel(\n ModelType.TRANSCRIPTION,\n Buffer.from(new Uint8Array(arrayBuffer))\n );\n logger.log('generated with test_transcription:', transcription);\n } catch (error) {\n logger.error('Error in test_transcription:', error);\n }\n },\n },\n {\n name: 'groq_test_text_tokenizer_encode',\n fn: async (runtime) => {\n try {\n const prompt = 'Hello tokenizer encode!';\n const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });\n if (!Array.isArray(tokens) || tokens.length === 0) {\n logger.error('Failed to tokenize text: expected non-empty array of tokens');\n return;\n }\n logger.log('Tokenized output:', tokens);\n } catch (error) {\n logger.error('Error in test_text_tokenizer_encode:', error);\n }\n },\n },\n {\n name: 'groq_test_text_tokenizer_decode',\n fn: async (runtime) => {\n try {\n const prompt = 'Hello tokenizer decode!';\n // Encode the string into tokens first\n 
const tokens = await runtime.useModel(ModelType.TEXT_TOKENIZER_ENCODE, { prompt });\n // Now decode tokens back into text\n const decodedText = await runtime.useModel(ModelType.TEXT_TOKENIZER_DECODE, {\n tokens,\n });\n if (decodedText !== prompt) {\n logger.error(\n `Decoded text does not match original. Expected \"${prompt}\", got \"${decodedText}\"`\n );\n return;\n }\n logger.log('Decoded text:', decodedText);\n } catch (error) {\n logger.error('Error in test_text_tokenizer_decode:', error);\n }\n },\n },\n {\n name: 'groq_test_object_small',\n fn: async (runtime) => {\n try {\n const object = await runtime.useModel(ModelType.OBJECT_SMALL, {\n prompt:\n 'Generate a JSON object representing a user profile with name, age, and hobbies',\n temperature: 0.7,\n });\n logger.log('Generated object:', object);\n } catch (error) {\n logger.error('Error in test_object_small:', error);\n }\n },\n },\n {\n name: 'groq_test_object_large',\n fn: async (runtime) => {\n try {\n const object = await runtime.useModel(ModelType.OBJECT_LARGE, {\n prompt:\n 'Generate a detailed JSON object representing a restaurant with name, cuisine type, menu items with prices, and customer reviews',\n temperature: 0.7,\n });\n logger.log('Generated object:', object);\n } catch (error) {\n logger.error('Error in test_object_large:', error);\n }\n },\n },\n ],\n },\n ],\n};\nexport default 
groqPlugin;\n"],"mappings":";AAAA,SAAS,kBAAkB;AAQ3B;AAAA,EAGE;AAAA,EAEA;AAAA,OACK;AACP,SAAS,gBAAgB,oBAAoB;AAC7C,SAA6B,wBAAwB;AAoBrD,SAAS,4BAA4B,SAAkB,UAAsC;AAC3F,MAAI;AACF,UAAM,sBAAsB,QAAQ,WAAW,uBAAuB,MAAM;AAC5E,UAAM,sBAAsB,QAAQ,WAAW,0BAA0B;AACzE,UAAM,sBAAsB,QAAQ,WAAW,0BAA0B;AAEzE,UAAM,aAAa;AACnB,WAAO,MAAM,qCAAqC;AAAA,MAChD,WAAW;AAAA,MACX,cAAc,CAAC,CAAC;AAAA,MAChB,cAAc,CAAC,CAAC;AAAA,MAChB;AAAA,IACF,CAAC;AAED,QAAI,CAAC,qBAAqB;AACxB,aAAO,MAAM,mCAAmC;AAChD,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,qBAAqB;AACxB,aAAO,KAAK,uEAAuE;AACnF,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,qBAAqB;AACxB,aAAO,KAAK,uEAAuE;AACnF,aAAO;AAAA,IACT;AAEA,UAAM,UAAU,wCAAwC,mBAAmB,IAAI,mBAAmB,IAAI,SAAS,YAAY,CAAC;AAC5H,WAAO,KAAK,6BAA6B;AAAA,MACvC;AAAA,MACA;AAAA,MACA,WAAW;AAAA,MACX,WAAW;AAAA,IACb,CAAC;AAED,WAAO;AAAA,EACT,SAAS,OAAO;AACd,WAAO,MAAM,yCAAyC,KAAK;AAC3D,WAAO;AAAA,EACT;AACF;AAEA,SAAS,cAAc,OAAqC;AAC1D,MAAI;AACF,UAAM,OACJ,UAAU,UAAU,aACf,QAAQ,IAAI,oBAAoB,yBAChC,QAAQ,IAAI,oBAAoB;AACvC,WAAO;AAAA,EACT,SAAS,OAAO;AACd,WAAO,MAAM,2BAA2B,KAAK;AAC7C,WAAO;AAAA,EACT;AACF;AAEA,eAAe,aAAa,OAAsB,QAAgB;AAChE,MAAI;AACF,UAAM,WAAW,iBAAiB,cAAc,KAAK,CAAC;AACtD,UAAM,SAAS,SAAS,OAAO,MAAM;AACrC,WAAO;AAAA,EACT,SAAS,OAAO;AACd,WAAO,MAAM,0BAA0B,KAAK;AAC5C,WAAO,CAAC;AAAA,EACV;AACF;AASA,eAAe,eAAe,OAAsB,QAAkB;AACpE,MAAI;AACF,UAAM,YAAY,cAAc,KAAK;AACrC,UAAM,WAAW,iBAAiB,SAAS;AAC3C,WAAO,SAAS,OAAO,MAAM;AAAA,EAC/B,SAAS,OAAO;AACd,WAAO,MAAM,4BAA4B,KAAK;AAC9C,WAAO;AAAA,EACT;AACF;AAQA,eAAe,qBAAqB,OAAc,SAAiC;AACjF,MAAI;AACF,QAAI,MAAM,QAAQ,SAAS,oBAAoB,GAAG;AAChD,aAAO,KAAK,2BAA2B,EAAE,OAAO,MAAM,QAAQ,CAAC;AAG/D,UAAI,aAAa;AACjB,YAAM,aAAa,MAAM,QAAQ,MAAM,4BAA4B;AACnE,UAAI,aAAa,CAAC,GAAG;AAEnB,qBAAa,KAAK,KAAK,OAAO,WAAW,WAAW,CAAC,CAAC,IAAI,GAAI,IAAI;AAAA,MACpE;AAEA,aAAO,KAAK,oBAAoB,UAAU,UAAU;AAGpD,YAAM,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,UAAU,CAAC;AAG9D,aAAO,KAAK,yCAAyC;AACrD,aAAO,MAAM,QAAQ;AAAA,IACvB;AAGA,WAAO,MAAM,wBAAwB,KAAK;AAC1C,UAAM;AAAA,EACR,SAAS,YAAY;AACnB,WAAO,MAAM,gCAAgC,UAAU;AACvD,UAAM;AAAA,EACR;AACF;AAKA,eAAe,iBACb,MACA,OACA,QASA;AACA,MAAI;AACF,UAAM,EAAE,MAAM,
aAAa,IAAI,MAAM,aAAa;AAAA,MAChD,OAAO,KAAK,cAAc,KAAK;AAAA,MAC/B,QAAQ,OAAO;AAAA,MACf,QAAQ,OAAO;AAAA,MACf,aAAa,OAAO;AAAA,MACpB,WAAW,OAAO;AAAA,MAClB,kBAAkB,OAAO;AAAA,MACzB,iBAAiB,OAAO;AAAA,MACxB,eAAe,OAAO;AAAA,IACxB,CAAC;AACD,WAAO;AAAA,EACT,SAAS,OAAgB;AACvB,QAAI;AACF,aAAO,MAAM,qBAAqB,OAAgB,YAAY;AAC5D,cAAM,EAAE,MAAM,kBAAkB,IAAI,MAAM,aAAa;AAAA,UACrD,OAAO,KAAK,cAAc,KAAK;AAAA,UAC/B,QAAQ,OAAO;AAAA,UACf,QAAQ,OAAO;AAAA,UACf,aAAa,OAAO;AAAA,UACpB,WAAW,OAAO;AAAA,UAClB,kBAAkB,OAAO;AAAA,UACzB,iBAAiB,OAAO;AAAA,UACxB,eAAe,OAAO;AAAA,QACxB,CAAC;AACD,eAAO;AAAA,MACT,CAAC;AAAA,IACH,SAAS,YAAY;AACnB,aAAO,MAAM,oCAAoC,UAAU;AAC3D,aAAO;AAAA,IACT;AAAA,EACF;AACF;AAKA,eAAe,mBACb,MACA,OACA,QACA;AACA,MAAI;AACF,UAAM,EAAE,OAAO,IAAI,MAAM,eAAe;AAAA,MACtC,OAAO,KAAK,cAAc,KAAK;AAAA,MAC/B,QAAQ;AAAA,MACR,QAAQ,OAAO;AAAA,MACf,aAAa,OAAO;AAAA,IACtB,CAAC;AACD,WAAO;AAAA,EACT,SAAS,OAAgB;AACvB,WAAO,MAAM,4BAA4B,KAAK;AAC9C,WAAO,CAAC;AAAA,EACV;AACF;AAEO,IAAM,aAAqB;AAAA,EAChC,MAAM;AAAA,EACN,aAAa;AAAA,EACb,QAAQ;AAAA,IACN,cAAc,QAAQ,IAAI;AAAA,IAC1B,kBAAkB,QAAQ,IAAI;AAAA,IAC9B,mBAAmB,QAAQ,IAAI;AAAA,IAC/B,kBAAkB,QAAQ,IAAI;AAAA,EAChC;AAAA,EACA,MAAM,KAAK,QAAgC;AACzC,QAAI,CAAC,QAAQ,IAAI,cAAc;AAC7B,YAAM,MAAM,+CAA+C;AAAA,IAC7D;AAAA,EACF;AAAA,EACA,QAAQ;AAAA,IACN,CAAC,UAAU,cAAc,GAAG,OAC1B,SACA,WACsB;AACtB,UAAI;AACF,cAAM,aAAa,MAAM,IAAI,EAAE,KAAK,CAAC;AACrC,mBAAW,CAAC,IAAI;AAChB,eAAO;AAAA,MACT,SAAS,OAAO;AACd,eAAO,MAAM,kCAAkC,KAAK;AAEpD,eAAO,MAAM,IAAI,EAAE,KAAK,CAAC;AAAA,MAC3B;AAAA,IACF;AAAA,IACA,CAAC,UAAU,qBAAqB,GAAG,OACjC,UACA,EAAE,QAAQ,YAAY,UAAU,WAAW,MACxC;AACH,UAAI;AACF,eAAO,MAAM,aAAa,aAAa,UAAU,YAAY,MAAM;AAAA,MACrE,SAAS,OAAO;AACd,eAAO,MAAM,yCAAyC,KAAK;AAE3D,eAAO,CAAC;AAAA,MACV;AAAA,IACF;AAAA,IACA,CAAC,UAAU,qBAAqB,GAAG,OACjC,UACA,EAAE,QAAQ,YAAY,UAAU,WAAW,MACxC;AACH,UAAI;AACF,eAAO,MAAM,eAAe,aAAa,UAAU,YAAY,MAAM;AAAA,MACvE,SAAS,OAAO;AACd,eAAO,MAAM,yCAAyC,KAAK;AAE3D,eAAO;AAAA,MACT;AAAA,IACF;AAAA,IACA,CAAC,UAAU,UAAU,GAAG,OAAO,SAAS,EAAE,QAAQ,gBAAgB,CAAC,EAAE,MAA0B;AAC7F,UAAI;AACF,cAAM,cAAc;AACpB,cAAM,oBAAoB;AAC1B,cAAM,mBAAmB;AACzB,cAAM,sB
AAsB;AAC5B,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAC3D,cAAM,OAAO,WAAW;AAAA,UACtB,QAAQ,QAAQ,WAAW,cAAc;AAAA,UACzC,OAAO,QAAQ;AAAA,UACf;AAAA,QACF,CAAC;AAED,cAAM,QACJ,QAAQ,WAAW,kBAAkB,KACrC,QAAQ,WAAW,aAAa,KAChC;AAEF,eAAO,IAAI,iBAAiB;AAC5B,eAAO,IAAI,MAAM;AAEjB,eAAO,MAAM,iBAAiB,MAAM,OAAO;AAAA,UACzC;AAAA,UACA,QAAQ,QAAQ,UAAU,UAAU;AAAA,UACpC;AAAA,UACA,WAAW;AAAA,UACX,kBAAkB;AAAA,UAClB,iBAAiB;AAAA,UACjB;AAAA,QACF,CAAC;AAAA,MACH,SAAS,OAAO;AACd,eAAO,MAAM,8BAA8B,KAAK;AAChD,eAAO;AAAA,MACT;AAAA,IACF;AAAA,IACA,CAAC,UAAU,UAAU,GAAG,OACtB,SACA;AAAA,MACE;AAAA,MACA,gBAAgB,CAAC;AAAA,MACjB,YAAY;AAAA,MACZ,cAAc;AAAA,MACd,mBAAmB;AAAA,MACnB,kBAAkB;AAAA,IACpB,MACG;AACH,UAAI;AACF,cAAM,QACJ,QAAQ,WAAW,kBAAkB,KACrC,QAAQ,WAAW,aAAa,KAChC;AACF,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAC3D,cAAM,OAAO,WAAW;AAAA,UACtB,QAAQ,QAAQ,WAAW,cAAc;AAAA,UACzC,OAAO,QAAQ;AAAA,UACf;AAAA,QACF,CAAC;AAED,eAAO,MAAM,iBAAiB,MAAM,OAAO;AAAA,UACzC;AAAA,UACA,QAAQ,QAAQ,UAAU,UAAU;AAAA,UACpC;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH,SAAS,OAAO;AACd,eAAO,MAAM,8BAA8B,KAAK;AAChD,eAAO;AAAA,MACT;AAAA,IACF;AAAA,IACA,CAAC,UAAU,KAAK,GAAG,OACjB,SACA,WAKG;AACH,UAAI;AACF,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAC3D,cAAM,WAAW,MAAM,MAAM,GAAG,OAAO,uBAAuB;AAAA,UAC5D,QAAQ;AAAA,UACR,SAAS;AAAA,YACP,eAAe,UAAU,QAAQ,WAAW,cAAc,CAAC;AAAA,YAC3D,gBAAgB;AAAA,UAClB;AAAA,UACA,MAAM,KAAK,UAAU;AAAA,YACnB,QAAQ,OAAO;AAAA,YACf,GAAG,OAAO,KAAK;AAAA,YACf,MAAM,OAAO,QAAQ;AAAA,UACvB,CAAC;AAAA,QACH,CAAC;AACD,YAAI,CAAC,SAAS,IAAI;AAChB,iBAAO,MAAM,6BAA6B,SAAS,UAAU,EAAE;AAC/D,iBAAO,CAAC,EAAE,KAAK,GAAG,CAAC;AAAA,QACrB;AACA,cAAM,OAAO,MAAM,SAAS,KAAK;AACjC,cAAM,YAAY;AAClB,eAAO,UAAU;AAAA,MACnB,SAAS,OAAO;AACd,eAAO,MAAM,yBAAyB,KAAK;AAC3C,eAAO,CAAC,EAAE,KAAK,GAAG,CAAC;AAAA,MACrB;AAAA,IACF;AAAA,IACA,CAAC,UAAU,aAAa,GAAG,OAAO,SAAS,gBAAwB;AACjE,UAAI;AACF,eAAO,IAAI,eAAe,WAAW;AACrC,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAG3D,cAAM,WAAW,IAAI,SAAS;AAQ9B,cAAM,mBAAmB;AACzB,yBAAiB,OAAO,QAAQ,IAAI,KAAK,CAAC,WAAW,GAAG,EAAE,MAAM,YAAY,CAAC,CAAC;AAC9E,yBAAiB,OAAO,SAAS,WAAW;AAE5C,cAAM,WAAW,MAAM,MAAM,GAAG,OAAO,yBAAyB;AAAA,UAC9D,
QAAQ;AAAA,UACR,SAAS;AAAA,YACP,eAAe,UAAU,QAAQ,WAAW,cAAc,CAAC;AAAA,UAC7D;AAAA,UACA,MAAM;AAAA,QACR,CAAC;AAED,eAAO,IAAI,YAAY,QAAQ;AAC/B,YAAI,CAAC,SAAS,IAAI;AAChB,iBAAO,MAAM,+BAA+B,SAAS,UAAU,EAAE;AACjE,iBAAO;AAAA,QACT;AACA,cAAM,OAAQ,MAAM,SAAS,KAAK;AAClC,eAAO,KAAK;AAAA,MACd,SAAS,OAAO;AACd,eAAO,MAAM,iCAAiC,KAAK;AACnD,eAAO;AAAA,MACT;AAAA,IACF;AAAA,IACA,CAAC,UAAU,YAAY,GAAG,OAAO,SAAS,WAAmC;AAC3E,UAAI;AACF,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAC3D,cAAM,OAAO,WAAW;AAAA,UACtB,QAAQ,QAAQ,WAAW,cAAc;AAAA,UACzC;AAAA,QACF,CAAC;AACD,cAAM,QACJ,QAAQ,WAAW,kBAAkB,KACrC,QAAQ,WAAW,aAAa,KAChC;AAEF,YAAI,OAAO,QAAQ;AACjB,iBAAO,KAAK,8CAA8C;AAAA,QAC5D;AAEA,eAAO,MAAM,mBAAmB,MAAM,OAAO,MAAM;AAAA,MACrD,SAAS,OAAO;AACd,eAAO,MAAM,gCAAgC,KAAK;AAElD,eAAO,CAAC;AAAA,MACV;AAAA,IACF;AAAA,IACA,CAAC,UAAU,YAAY,GAAG,OAAO,SAAS,WAAmC;AAC3E,UAAI;AACF,cAAM,UAAU,4BAA4B,SAAS,MAAM;AAC3D,cAAM,OAAO,WAAW;AAAA,UACtB,QAAQ,QAAQ,WAAW,cAAc;AAAA,UACzC;AAAA,QACF,CAAC;AACD,cAAM,QACJ,QAAQ,WAAW,kBAAkB,KACrC,QAAQ,WAAW,aAAa,KAChC;AAEF,YAAI,OAAO,QAAQ;AACjB,iBAAO,KAAK,8CAA8C;AAAA,QAC5D;AAEA,eAAO,MAAM,mBAAmB,MAAM,OAAO,MAAM;AAAA,MACrD,SAAS,OAAO;AACd,eAAO,MAAM,gCAAgC,KAAK;AAElD,eAAO,CAAC;AAAA,MACV;AAAA,IACF;AAAA,EACF;AAAA,EACA,OAAO;AAAA,IACL;AAAA,MACE,MAAM;AAAA,MACN,OAAO;AAAA,QACL;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,UACJ,4BAA4B,SAAS,MAAM,KAAK;AAClD,oBAAM,WAAW,MAAM,MAAM,GAAG,OAAO,WAAW;AAAA,gBAChD,SAAS;AAAA,kBACP,eAAe,UAAU,QAAQ,WAAW,cAAc,CAAC;AAAA,gBAC7D;AAAA,cACF,CAAC;AACD,oBAAM,OAAO,MAAM,SAAS,KAAK;AACjC,qBAAO,IAAI,qBAAsB,MAA8B,MAAM,MAAM;AAC3E,kBAAI,CAAC,SAAS,IAAI;AAChB,uBAAO,MAAM,oCAAoC,SAAS,UAAU,EAAE;AACtE;AAAA,cACF;AAAA,YACF,SAAS,OAAO;AACd,qBAAO,MAAM,kDAAkD,KAAK;AAAA,YACtE;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,YAAY,MAAM,QAAQ,SAAS,UAAU,gBAAgB;AAAA,gBACjE,MAAM;AAAA,cACR,CAAC;AACD,qBAAO,IAAI,aAAa,SAAS;AAAA,YACnC,SAAS,OAAO;AACd,qBAAO,MAAM,iCAAiC,KAAK;AAAA,YACrD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,OAAO,MAAM,QAAQ,SAAS
,UAAU,YAAY;AAAA,gBACxD,QAAQ;AAAA,cACV,CAAC;AACD,kBAAI,KAAK,WAAW,GAAG;AACrB,uBAAO,MAAM,yBAAyB;AACtC;AAAA,cACF;AACA,qBAAO,IAAI,mCAAmC,IAAI;AAAA,YACpD,SAAS,OAAO;AACd,qBAAO,MAAM,6BAA6B,KAAK;AAAA,YACjD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,OAAO,MAAM,QAAQ,SAAS,UAAU,YAAY;AAAA,gBACxD,QAAQ;AAAA,cACV,CAAC;AACD,kBAAI,KAAK,WAAW,GAAG;AACrB,uBAAO,MAAM,yBAAyB;AACtC;AAAA,cACF;AACA,qBAAO,IAAI,mCAAmC,IAAI;AAAA,YACpD,SAAS,OAAO;AACd,qBAAO,MAAM,6BAA6B,KAAK;AAAA,YACjD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,qBAAO,IAAI,4BAA4B;AACvC,oBAAM,QAAQ,MAAM,QAAQ,SAAS,UAAU,OAAO;AAAA,gBACpD,QAAQ;AAAA,gBACR,GAAG;AAAA,gBACH,MAAM;AAAA,cACR,CAAC;AACD,qBAAO,IAAI,yCAAyC,KAAK;AAAA,YAC3D,SAAS,OAAO;AACd,qBAAO,MAAM,mCAAmC,KAAK;AAAA,YACvD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,qBAAO,IAAI,yBAAyB;AACpC,oBAAM,WAAW,MAAM;AAAA,gBACrB;AAAA,cACF;AACA,kBAAI,CAAC,SAAS,IAAI;AAChB,uBAAO,MAAM,iCAAiC,SAAS,UAAU,EAAE;AACnE;AAAA,cACF;AACA,oBAAM,cAAc,MAAM,SAAS,YAAY;AAC/C,oBAAM,gBAAgB,MAAM,QAAQ;AAAA,gBAClC,UAAU;AAAA,gBACV,OAAO,KAAK,IAAI,WAAW,WAAW,CAAC;AAAA,cACzC;AACA,qBAAO,IAAI,sCAAsC,aAAa;AAAA,YAChE,SAAS,OAAO;AACd,qBAAO,MAAM,gCAAgC,KAAK;AAAA,YACpD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,SAAS;AACf,oBAAM,SAAS,MAAM,QAAQ,SAAS,UAAU,uBAAuB,EAAE,OAAO,CAAC;AACjF,kBAAI,CAAC,MAAM,QAAQ,MAAM,KAAK,OAAO,WAAW,GAAG;AACjD,uBAAO,MAAM,6DAA6D;AAC1E;AAAA,cACF;AACA,qBAAO,IAAI,qBAAqB,MAAM;AAAA,YACxC,SAAS,OAAO;AACd,qBAAO,MAAM,wCAAwC,KAAK;AAAA,YAC5D;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,SAAS;AAEf,oBAAM,SAAS,MAAM,QAAQ,SAAS,UAAU,uBAAuB,EAAE,OAAO,CAAC;AAEjF,oBAAM,cAAc,MAAM,QAAQ,SAAS,UAAU,uBAAuB;AAAA,gBAC1E;AAAA,cACF,CAAC;AACD,kBAAI,gBAAgB,QAAQ;AAC1B,uBAAO;AAAA,kBACL,mDAAmD,MAAM,WAAW,WAAW;AAAA,gBACjF;AACA;AAAA,cACF;AACA,qBAAO,IAAI,iBAAiB,WAAW;AAAA,YACzC,SAAS,OAAO;AACd,qBAAO,MAAM,wCAAwC,KAAK;AAAA,YAC5D;AA
AA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,SAAS,MAAM,QAAQ,SAAS,UAAU,cAAc;AAAA,gBAC5D,QACE;AAAA,gBACF,aAAa;AAAA,cACf,CAAC;AACD,qBAAO,IAAI,qBAAqB,MAAM;AAAA,YACxC,SAAS,OAAO;AACd,qBAAO,MAAM,+BAA+B,KAAK;AAAA,YACnD;AAAA,UACF;AAAA,QACF;AAAA,QACA;AAAA,UACE,MAAM;AAAA,UACN,IAAI,OAAO,YAAY;AACrB,gBAAI;AACF,oBAAM,SAAS,MAAM,QAAQ,SAAS,UAAU,cAAc;AAAA,gBAC5D,QACE;AAAA,gBACF,aAAa;AAAA,cACf,CAAC;AACD,qBAAO,IAAI,qBAAqB,MAAM;AAAA,YACxC,SAAS,OAAO;AACd,qBAAO,MAAM,+BAA+B,KAAK;AAAA,YACnD;AAAA,UACF;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AACF;AACA,IAAO,gBAAQ;","names":[]}
package/package.json ADDED
@@ -0,0 +1,56 @@
1
+ {
2
+ "name": "@elizaos/plugin-groq",
3
+ "version": "1.0.0-beta.12",
4
+ "type": "module",
5
+ "main": "dist/index.js",
6
+ "module": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "repository": {
9
+ "type": "git",
10
+ "url": "https://github.com/elizaos-plugins/plugin-groq"
11
+ },
12
+ "exports": {
13
+ "./package.json": "./package.json",
14
+ ".": {
15
+ "import": {
16
+ "types": "./dist/index.d.ts",
17
+ "default": "./dist/index.js"
18
+ }
19
+ }
20
+ },
21
+ "files": [
22
+ "dist"
23
+ ],
24
+ "dependencies": {
25
+ "@ai-sdk/groq": "^1.1.9",
26
+ "@ai-sdk/ui-utils": "1.1.9",
27
+ "@elizaos/core": "^1.0.0-beta.12",
28
+ "ai": "^4.1.25",
29
+ "js-tiktoken": "^1.0.18",
30
+ "tsup": "8.4.0"
31
+ },
32
+ "scripts": {
33
+ "build": "tsup",
34
+ "dev": "tsup --watch",
35
+ "lint": "prettier --write ./src",
36
+ "clean": "rm -rf dist .turbo node_modules .turbo-tsconfig.json tsconfig.tsbuildinfo",
37
+ "format": "prettier --write ./src",
38
+ "format:check": "prettier --check ./src"
39
+ },
40
+ "publishConfig": {
41
+ "access": "public"
42
+ },
43
+ "agentConfig": {
44
+ "pluginType": "elizaos:plugin:1.0.0",
45
+ "pluginParameters": {
46
+ "GROQ_API_KEY": {
47
+ "type": "string",
48
+ "description": "API key for the service"
49
+ }
50
+ }
51
+ },
52
+ "gitHead": "7b01ea21f51671371e738134c80c958483b7b709",
53
+ "devDependencies": {
54
+ "prettier": "3.5.3"
55
+ }
56
+ }