@elizaos/plugin-openai 1.5.18 → 1.6.0

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,20 +1,7 @@
- var __create = Object.create;
- var __getProtoOf = Object.getPrototypeOf;
  var __defProp = Object.defineProperty;
  var __getOwnPropNames = Object.getOwnPropertyNames;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
  var __hasOwnProp = Object.prototype.hasOwnProperty;
- var __toESM = (mod, isNodeMode, target) => {
-   target = mod != null ? __create(__getProtoOf(mod)) : {};
-   const to = isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target;
-   for (let key of __getOwnPropNames(mod))
-     if (!__hasOwnProp.call(to, key))
-       __defProp(to, key, {
-         get: () => mod[key],
-         enumerable: true
-       });
-   return to;
- };
  var __moduleCache = /* @__PURE__ */ new WeakMap;
  var __toCommonJS = (from) => {
    var entry = __moduleCache.get(from), desc;
@@ -56,7 +43,11 @@ var import_core2 = require("@elizaos/core");
  // src/utils/config.ts
  var import_core = require("@elizaos/core");
  function getSetting(runtime, key, defaultValue) {
-   return runtime.getSetting(key) ?? process.env[key] ?? defaultValue;
+   const value = runtime.getSetting(key);
+   if (value !== undefined && value !== null) {
+     return String(value);
+   }
+   return process.env[key] ?? defaultValue;
  }
  function isBrowser() {
    return typeof globalThis !== "undefined" && "document" in globalThis && typeof globalThis.document !== "undefined";
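
Note: getSetting now returns the runtime-provided value whenever it is not null/undefined, coerced with String(), before consulting process.env and the default, so callers always receive a string. A minimal sketch of the resulting precedence; the runtime stub and key values below are illustrative, not from the package:

    // Hypothetical runtime stub for illustration only.
    const runtime = { getSetting: (key) => (key === "OPENAI_IMAGE_MODEL" ? "dall-e-3" : undefined) };
    process.env.OPENAI_TTS_MODEL = "tts-1";
    getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");   // "dall-e-3": runtime setting wins, coerced via String()
    getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts"); // "tts-1": falls back to process.env
    getSetting(runtime, "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small"); // default: neither source set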
@@ -163,10 +154,13 @@ function emitModelUsageEvent(runtime, type, prompt, usage) {
    const promptTokens = ("promptTokens" in usage ? usage.promptTokens : undefined) ?? ("inputTokens" in usage ? usage.inputTokens : undefined) ?? 0;
    const completionTokens = ("completionTokens" in usage ? usage.completionTokens : undefined) ?? ("outputTokens" in usage ? usage.outputTokens : undefined) ?? 0;
    const totalTokens = ("totalTokens" in usage ? usage.totalTokens : undefined) ?? promptTokens + completionTokens;
+   const truncatedPrompt = typeof prompt === "string" ? prompt.length > 200 ? `${prompt.slice(0, 200)}…` : prompt : "";
    runtime.emitEvent(import_core3.EventType.MODEL_USED, {
+     runtime,
+     source: "openai",
      provider: "openai",
      type,
-     prompt,
+     prompt: truncatedPrompt,
      tokens: {
        prompt: promptTokens,
        completion: completionTokens,
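
Note: MODEL_USED events now carry at most 200 characters of the prompt. The nested conditional above is equivalent to this more explicit helper (a restatement for readability, not code from the bundle):

    // Equivalent to the truncatedPrompt expression above.
    function truncatePrompt(prompt, limit = 200) {
      if (typeof prompt !== "string") return "";
      return prompt.length > limit ? `${prompt.slice(0, limit)}…` : prompt;
    }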
@@ -179,34 +173,35 @@ function emitModelUsageEvent(runtime, type, prompt, usage) {
  async function generateTextByModelType(runtime, params, modelType, getModelFn) {
    const openai = createOpenAIClient(runtime);
    const modelName = getModelFn(runtime);
-   const experimentalTelemetry = getExperimentalTelemetry(runtime);
-   import_core4.logger.log(`[OpenAI] Using ${modelType} model: ${modelName}`);
-   import_core4.logger.log(params.prompt);
-   const {
-     prompt,
-     stopSequences = [],
-     maxTokens = 8192,
-     temperature = 0.7,
-     frequencyPenalty = 0.7,
-     presencePenalty = 0.7
-   } = params;
-   const { text: openaiResponse, usage } = await import_ai.generateText({
+   import_core4.logger.debug(`[OpenAI] ${modelType} model: ${modelName}`);
+   const generateParams = {
      model: openai.languageModel(modelName),
-     prompt,
+     prompt: params.prompt,
      system: runtime.character.system ?? undefined,
-     temperature,
-     maxOutputTokens: maxTokens,
-     frequencyPenalty,
-     presencePenalty,
-     stopSequences,
-     experimental_telemetry: {
-       isEnabled: experimentalTelemetry
-     }
-   });
-   if (usage) {
-     emitModelUsageEvent(runtime, modelType, prompt, usage);
+     temperature: params.temperature ?? 0.7,
+     maxOutputTokens: params.maxTokens ?? 8192,
+     frequencyPenalty: params.frequencyPenalty ?? 0.7,
+     presencePenalty: params.presencePenalty ?? 0.7,
+     stopSequences: params.stopSequences ?? [],
+     experimental_telemetry: { isEnabled: getExperimentalTelemetry(runtime) }
+   };
+   if (params.stream) {
+     const result = import_ai.streamText(generateParams);
+     return {
+       textStream: result.textStream,
+       text: result.text,
+       usage: result.usage.then((u) => u ? {
+         promptTokens: u.inputTokens ?? 0,
+         completionTokens: u.outputTokens ?? 0,
+         totalTokens: (u.inputTokens ?? 0) + (u.outputTokens ?? 0)
+       } : undefined),
+       finishReason: result.finishReason
+     };
    }
-   return openaiResponse;
+   const { text, usage } = await import_ai.generateText(generateParams);
+   if (usage)
+     emitModelUsageEvent(runtime, modelType, params.prompt, usage);
+   return text;
  }
  async function handleTextSmall(runtime, params) {
    return generateTextByModelType(runtime, params, import_core4.ModelType.TEXT_SMALL, getSmallModel);
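
Note: when params.stream is set, generateTextByModelType now resolves to a handle wrapping the AI SDK's streamText result rather than a string: textStream is an async iterable of text chunks, while text, usage (normalized token counts), and finishReason are promises. A consumption sketch; calling the function directly is illustrative, since callers normally reach it through runtime.useModel:

    const result = await generateTextByModelType(runtime, { prompt: "Hi", stream: true }, modelType, getModelFn);
    if (typeof result !== "string") {
      for await (const chunk of result.textStream) {
        process.stdout.write(chunk); // incremental text
      }
      const usage = await result.usage;               // { promptTokens, completionTokens, totalTokens } or undefined
      const finishReason = await result.finishReason; // e.g. "stop"
    }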
@@ -298,7 +293,7 @@ async function handleTextEmbedding(runtime, params) {
  // src/models/image.ts
  var import_core6 = require("@elizaos/core");
  async function handleImageGeneration(runtime, params) {
-   const n = params.n || 1;
+   const n = params.count || 1;
    const size = params.size || "1024x1024";
    const prompt = params.prompt;
    const modelName = getSetting(runtime, "OPENAI_IMAGE_MODEL", "gpt-image-1");
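
Note: the image-count parameter is renamed from n to count (the handler still maps it to a local n). Updated call shape, mirroring the revised self-test later in this diff:

    import { ModelType } from "@elizaos/core";

    const images = await runtime.useModel(ModelType.IMAGE, {
      prompt: "A beautiful sunset over a calm ocean",
      count: 1, // formerly `n`
      size: "1024x1024"
    });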
@@ -323,7 +318,7 @@ async function handleImageGeneration(runtime, params) {
      }
      const data = await response.json();
      const typedData = data;
-     return typedData;
+     return typedData.data;
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      throw error;
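
Note: handleImageGeneration now returns the data array from the parsed response rather than the whole envelope. Assuming the standard OpenAI Images API response shape ({ created, data: [...] }), a consumer would adjust as follows (sketch):

    // 1.5.x returned the full envelope; 1.6.0 returns the array itself.
    const images = await runtime.useModel(ModelType.IMAGE, { prompt: "...", count: 1, size: "1024x1024" });
    const first = images[0]; // { url?: string, b64_json?: string }, depending on model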
@@ -386,10 +381,6 @@ async function handleImageDescription(runtime, params) {
        description: "No response from API"
      };
    }
-   const isCustomPrompt = typeof params === "object" && Boolean(params.prompt) && params.prompt !== DEFAULT_PROMPT;
-   if (isCustomPrompt) {
-     return content;
-   }
    const titleMatch = content.match(/title[:\s]+(.+?)(?:\n|$)/i);
    const title = titleMatch?.[1]?.trim();
    if (!title) {
@@ -456,33 +447,6 @@ function detectAudioMimeType(buffer) {
    import_core7.logger.warn("Could not detect audio format from buffer, using generic binary type");
    return "application/octet-stream";
  }
- async function webStreamToNodeStream(webStream) {
-   try {
-     const { Readable } = await import("node:stream");
-     const reader = webStream.getReader();
-     return new Readable({
-       async read() {
-         try {
-           const { done, value } = await reader.read();
-           if (done) {
-             this.push(null);
-           } else {
-             this.push(value);
-           }
-         } catch (error) {
-           this.destroy(error);
-         }
-       },
-       destroy(error, callback) {
-         reader.cancel().finally(() => callback(error));
-       }
-     });
-   } catch (error) {
-     const message = error instanceof Error ? error.message : String(error);
-     import_core7.logger.error(`Failed to load node:stream module: ${message}`);
-     throw new Error(`Cannot convert stream: node:stream module unavailable. This feature requires a Node.js environment.`);
-   }
- }

  // src/models/audio.ts
  async function fetchTextToSpeech(runtime, options) {
@@ -514,13 +478,7 @@ async function fetchTextToSpeech(runtime, options) {
        const err = await res.text();
        throw new Error(`OpenAI TTS error ${res.status}: ${err}`);
      }
-     if (!res.body) {
-       throw new Error("OpenAI TTS response body is null");
-     }
-     if (!isBrowser()) {
-       return await webStreamToNodeStream(res.body);
-     }
-     return res.body;
+     return await res.arrayBuffer();
    } catch (err) {
      const message = err instanceof Error ? err.message : String(err);
      throw new Error(`Failed to fetch speech from OpenAI TTS: ${message}`);
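
Note: fetchTextToSpeech now resolves to an ArrayBuffer in every environment instead of a Node Readable or web stream, which is why webStreamToNodeStream was removed above. A Node-side consumer sketch; the string input form and output filename are illustrative:

    import { writeFile } from "node:fs/promises";
    import { ModelType } from "@elizaos/core";

    const audio = await runtime.useModel(ModelType.TEXT_TO_SPEECH, "Hello from ElizaOS");
    // `audio` is an ArrayBuffer; wrap it for Node APIs that expect a Buffer.
    await writeFile("speech.mp3", Buffer.from(audio));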
@@ -612,8 +570,7 @@ async function handleTextToSpeech(runtime, input) {
    const resolvedModel = options.model || getSetting(runtime, "OPENAI_TTS_MODEL", "gpt-4o-mini-tts");
    import_core8.logger.log(`[OpenAI] Using TEXT_TO_SPEECH model: ${resolvedModel}`);
    try {
-     const speechStream = await fetchTextToSpeech(runtime, options);
-     return speechStream;
+     return await fetchTextToSpeech(runtime, options);
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      import_core8.logger.error(`Error in TEXT_TO_SPEECH: ${message}`);
@@ -843,7 +800,7 @@ var openaiPlugin = {
      try {
        const image = await runtime.useModel(import_core13.ModelType.IMAGE, {
          prompt: "A beautiful sunset over a calm ocean",
-         n: 1,
+         count: 1,
          size: "1024x1024"
        });
        import_core13.logger.log({ image }, "generated with test_image_generation");
@@ -896,7 +853,7 @@ var openaiPlugin = {
      name: "openai_test_text_tokenizer_encode",
      fn: async (runtime) => {
        const prompt = "Hello tokenizer encode!";
-       const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+       const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt, modelType: import_core13.ModelType.TEXT_SMALL });
        if (!Array.isArray(tokens) || tokens.length === 0) {
          throw new Error("Failed to tokenize text: expected non-empty array of tokens");
        }
@@ -907,9 +864,10 @@ var openaiPlugin = {
      name: "openai_test_text_tokenizer_decode",
      fn: async (runtime) => {
        const prompt = "Hello tokenizer decode!";
-       const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt });
+       const tokens = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_ENCODE, { prompt, modelType: import_core13.ModelType.TEXT_SMALL });
        const decodedText = await runtime.useModel(import_core13.ModelType.TEXT_TOKENIZER_DECODE, {
-         tokens
+         tokens,
+         modelType: import_core13.ModelType.TEXT_SMALL
        });
        if (decodedText !== prompt) {
          throw new Error(`Decoded text does not match original. Expected "${prompt}", got "${decodedText}"`);
@@ -934,6 +892,49 @@ var openaiPlugin = {
          throw error;
        }
      }
+   },
+   {
+     name: "openai_test_text_generation_large",
+     fn: async (runtime) => {
+       try {
+         const result = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
+           prompt: "Say hello in 5 words."
+         });
+         if (!result || result.length === 0) {
+           throw new Error("Text generation returned empty result");
+         }
+         import_core13.logger.log({ result }, "Text generation test completed");
+       } catch (error) {
+         const message = error instanceof Error ? error.message : String(error);
+         import_core13.logger.error(`Error in openai_test_text_generation_large: ${message}`);
+         throw error;
+       }
+     }
+   },
+   {
+     name: "openai_test_streaming",
+     fn: async (runtime) => {
+       try {
+         const chunks = [];
+         const result = await runtime.useModel(import_core13.ModelType.TEXT_LARGE, {
+           prompt: "Count from 1 to 5.",
+           onStreamChunk: (chunk) => {
+             chunks.push(chunk);
+           }
+         });
+         if (!result || result.length === 0) {
+           throw new Error("Streaming returned empty result");
+         }
+         if (chunks.length === 0) {
+           throw new Error("No streaming chunks received");
+         }
+         import_core13.logger.log({ chunks: chunks.length, result: result.substring(0, 50) }, "Streaming test completed");
+       } catch (error) {
+         const message = error instanceof Error ? error.message : String(error);
+         import_core13.logger.error(`Error in openai_test_streaming: ${message}`);
+         throw error;
+       }
+     }
    }
  ]
  }
@@ -941,4 +942,4 @@ var openaiPlugin = {
  };
  var src_default = openaiPlugin;

- //# debugId=9B5BF59394995A3864756E2164756E21
+ //# debugId=F03D6434951BC08A64756E2164756E21