@elizaos/plugin-openai 1.6.0 → 2.0.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/LICENSE +1 -1
  2. package/dist/browser/index.browser.js +2 -2
  3. package/dist/browser/index.browser.js.map +18 -17
  4. package/dist/build.d.ts +13 -0
  5. package/dist/build.d.ts.map +1 -0
  6. package/dist/cjs/index.node.cjs +998 -658
  7. package/dist/cjs/index.node.js.map +18 -17
  8. package/dist/generated/specs/specs.d.ts +55 -0
  9. package/dist/generated/specs/specs.d.ts.map +1 -0
  10. package/dist/index.browser.d.ts +1 -0
  11. package/dist/index.browser.d.ts.map +1 -0
  12. package/dist/index.d.ts +1 -5
  13. package/dist/index.d.ts.map +1 -0
  14. package/dist/index.node.d.ts +1 -0
  15. package/dist/index.node.d.ts.map +1 -0
  16. package/dist/init.d.ts +4 -5
  17. package/dist/init.d.ts.map +1 -0
  18. package/dist/models/audio.d.ts +9 -10
  19. package/dist/models/audio.d.ts.map +1 -0
  20. package/dist/models/embedding.d.ts +1 -3
  21. package/dist/models/embedding.d.ts.map +1 -0
  22. package/dist/models/image.d.ts +4 -13
  23. package/dist/models/image.d.ts.map +1 -0
  24. package/dist/models/index.d.ts +7 -5
  25. package/dist/models/index.d.ts.map +1 -0
  26. package/dist/models/object.d.ts +4 -9
  27. package/dist/models/object.d.ts.map +1 -0
  28. package/dist/models/research.d.ts +34 -0
  29. package/dist/models/research.d.ts.map +1 -0
  30. package/dist/models/text.d.ts +22 -3
  31. package/dist/models/text.d.ts.map +1 -0
  32. package/dist/models/tokenizer.d.ts +4 -9
  33. package/dist/models/tokenizer.d.ts.map +1 -0
  34. package/dist/node/index.node.js +987 -644
  35. package/dist/node/index.node.js.map +18 -17
  36. package/dist/providers/index.d.ts +2 -1
  37. package/dist/providers/index.d.ts.map +1 -0
  38. package/dist/providers/openai.d.ts +3 -7
  39. package/dist/providers/openai.d.ts.map +1 -0
  40. package/dist/types/index.d.ts +313 -10
  41. package/dist/types/index.d.ts.map +1 -0
  42. package/dist/utils/audio.d.ts +6 -12
  43. package/dist/utils/audio.d.ts.map +1 -0
  44. package/dist/utils/config.d.ts +16 -59
  45. package/dist/utils/config.d.ts.map +1 -0
  46. package/dist/utils/events.d.ts +14 -9
  47. package/dist/utils/events.d.ts.map +1 -0
  48. package/dist/utils/index.d.ts +2 -1
  49. package/dist/utils/index.d.ts.map +1 -0
  50. package/dist/utils/json.d.ts +9 -6
  51. package/dist/utils/json.d.ts.map +1 -0
  52. package/dist/utils/tokenization.d.ts +5 -16
  53. package/dist/utils/tokenization.d.ts.map +1 -0
  54. package/package.json +24 -28
  55. package/README.md +0 -160
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@elizaos/plugin-openai",
3
- "version": "1.6.0",
3
+ "version": "2.0.0-alpha.2",
4
4
  "type": "module",
5
5
  "main": "dist/cjs/index.node.cjs",
6
6
  "module": "dist/node/index.node.js",
@@ -27,44 +27,40 @@
27
27
  "default": "./dist/node/index.node.js"
28
28
  },
29
29
  "default": "./dist/node/index.node.js"
30
- },
31
- "./node": {
32
- "types": "./dist/node/index.d.ts",
33
- "import": "./dist/node/index.node.js",
34
- "default": "./dist/node/index.node.js"
35
- },
36
- "./browser": {
37
- "types": "./dist/browser/index.d.ts",
38
- "import": "./dist/browser/index.browser.js",
39
- "default": "./dist/browser/index.browser.js"
40
30
  }
41
31
  },
42
32
  "files": [
43
- "dist"
33
+ "dist",
34
+ "rust"
44
35
  ],
45
36
  "dependencies": {
46
- "@ai-sdk/openai": "^2.0.32",
47
- "@elizaos/core": "^1.7.0",
48
- "ai": "^5.0.47",
37
+ "@ai-sdk/openai": "^3.0.9",
38
+ "@elizaos/core": "2.0.0-alpha.2",
39
+ "ai": "^6.0.30",
49
40
  "js-tiktoken": "^1.0.21",
50
41
  "undici": "^7.16.0"
51
42
  },
52
43
  "devDependencies": {
44
+ "@biomejs/biome": "^2.3.11",
53
45
  "@types/json-schema": "^7.0.15",
54
- "@types/node": "^24.5.2",
55
- "prettier": "3.6.2",
56
- "typescript": "^5.9.2"
46
+ "@types/node": "^25.0.3",
47
+ "typescript": "^5.9.3"
57
48
  },
58
49
  "peerDependencies": {
59
50
  "zod": "^3.25.76 || ^4.1.8"
60
51
  },
61
52
  "scripts": {
62
- "build": "bun run build.ts",
53
+ "build:ts": "bun run build.ts",
63
54
  "dev": "bun --hot build.ts",
64
- "lint": "prettier --write ./src",
65
- "clean": "rm -rf dist .turbo node_modules .turbo-tsconfig.json tsconfig.tsbuildinfo",
66
- "format": "prettier --write ./src",
67
- "format:check": "prettier --check ./src"
55
+ "lint": "bunx @biomejs/biome check --write --unsafe .",
56
+ "lint:check": "bunx @biomejs/biome check .",
57
+ "clean": "rm -rf dist .turbo node_modules",
58
+ "format": "bunx @biomejs/biome format --write .",
59
+ "format:check": "bunx @biomejs/biome format .",
60
+ "typecheck": "tsc --noEmit",
61
+ "test": "vitest run",
62
+ "test:ts": "vitest run",
63
+ "build": "bun run build.ts"
68
64
  },
69
65
  "publishConfig": {
70
66
  "access": "public"
@@ -95,7 +91,7 @@
95
91
  "type": "string",
96
92
  "description": "Fallback identifier for the small language model if OPENAI_SMALL_MODEL is not set.",
97
93
  "required": false,
98
- "default": "gpt-5-nano",
94
+ "default": "gpt-5-mini",
99
95
  "sensitive": false
100
96
  },
101
97
  "OPENAI_LARGE_MODEL": {
@@ -108,7 +104,7 @@
108
104
  "type": "string",
109
105
  "description": "Fallback identifier for the large language model if OPENAI_LARGE_MODEL is not set.",
110
106
  "required": false,
111
- "default": "gpt-5-mini",
107
+ "default": "gpt-5",
112
108
  "sensitive": false
113
109
  },
114
110
  "OPENAI_EMBEDDING_MODEL": {
@@ -141,7 +137,7 @@
141
137
  "type": "string",
142
138
  "description": "Identifier of the model used for describing images.",
143
139
  "required": false,
144
- "default": "gpt-5-nano",
140
+ "default": "gpt-5-mini",
145
141
  "sensitive": false
146
142
  },
147
143
  "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS": {
@@ -155,7 +151,7 @@
155
151
  "type": "string",
156
152
  "description": "Identifier of the model used for text-to-speech generation.",
157
153
  "required": false,
158
- "default": "gpt-4o-mini-tts",
154
+ "default": "gpt-5-mini-tts",
159
155
  "sensitive": false
160
156
  },
161
157
  "OPENAI_TTS_VOICE": {
@@ -192,5 +188,5 @@
192
188
  }
193
189
  }
194
190
  },
195
- "gitHead": "646c632924826e2b75c2304a75ee56959fe4a460"
191
+ "gitHead": "bc6cac8d36845d7cbde51a64307c6a57c16378ad"
196
192
  }
package/README.md DELETED
@@ -1,160 +0,0 @@
1
- # OpenAI Plugin
2
-
3
- This plugin provides integration with OpenAI's models through the ElizaOS platform.
4
-
5
- ## Usage
6
-
7
- Add the plugin to your character configuration:
8
-
9
- ```json
10
- "plugins": ["@elizaos-plugins/plugin-openai"]
11
- ```
12
-
13
- ## Configuration
14
-
15
- The plugin requires these environment variables (can be set in .env file or character settings):
16
-
17
- ```json
18
- "settings": {
19
- "OPENAI_API_KEY": "your_openai_api_key",
20
- "OPENAI_BASE_URL": "optional_custom_endpoint",
21
- "OPENAI_SMALL_MODEL": "gpt-4o-mini",
22
- "OPENAI_LARGE_MODEL": "gpt-4o",
23
- "OPENAI_EMBEDDING_MODEL": "text-embedding-3-small",
24
- "OPENAI_EMBEDDING_API_KEY": "your_openai_api_key_for_embedding",
25
- "OPENAI_EMBEDDING_URL": "optional_custom_endpoint",
26
- "OPENAI_EMBEDDING_DIMENSIONS": "1536",
27
- "OPENAI_IMAGE_DESCRIPTION_MODEL": "gpt-4o-mini",
28
- "OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS": "8192",
29
- "OPENAI_EXPERIMENTAL_TELEMETRY": "false",
30
- "OPENAI_BROWSER_BASE_URL": "https://your-proxy.example.com/openai",
31
- "OPENAI_BROWSER_EMBEDDING_URL": "https://your-proxy.example.com/openai"
32
- }
33
- ```
34
-
35
- Or in `.env` file:
36
-
37
- ```
38
- OPENAI_API_KEY=your_openai_api_key
39
- # Optional overrides:
40
- OPENAI_BASE_URL=optional_custom_endpoint
41
- OPENAI_SMALL_MODEL=gpt-4o-mini
42
- OPENAI_LARGE_MODEL=gpt-4o
43
- OPENAI_EMBEDDING_MODEL=text-embedding-3-small
44
- OPENAI_EMBEDDING_API_KEY=your_openai_api_key_for_embedding
45
- OPENAI_EMBEDDING_URL=optional_custom_endpoint
46
- OPENAI_EMBEDDING_DIMENSIONS=1536
47
- OPENAI_IMAGE_DESCRIPTION_MODEL=gpt-4o-mini
48
- OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS=8192
49
- OPENAI_EXPERIMENTAL_TELEMETRY=false
50
- # Browser proxy (frontend builds only)
51
- OPENAI_BROWSER_BASE_URL=https://your-proxy.example.com/openai
52
- OPENAI_BROWSER_EMBEDDING_URL=https://your-proxy.example.com/openai
53
- ```
54
-
55
- ### Configuration Options
56
-
57
- - `OPENAI_API_KEY` (required): Your OpenAI API credentials
58
- - `OPENAI_BASE_URL`: Custom API endpoint (default: https://api.openai.com/v1)
59
- - `OPENAI_SMALL_MODEL`: Defaults to GPT-4o Mini ("gpt-4o-mini")
60
- - `OPENAI_LARGE_MODEL`: Defaults to GPT-4o ("gpt-4o")
61
- - `OPENAI_EMBEDDING_MODEL`: Defaults to text-embedding-3-small ("text-embedding-3-small")
62
- - `OPENAI_EMBEDDING_API_KEY`: Custom embedding api key (defaults to `OPENAI_API_KEY`)
63
- - `OPENAI_EMBEDDING_URL`: Custom embedding endpoint (defaults to `OPENAI_BASE_URL`)
64
- - `OPENAI_EMBEDDING_DIMENSIONS`: Defaults to 1536 (1536)
65
- - `OPENAI_IMAGE_DESCRIPTION_MODEL`: Model used for image description (default: "gpt-4o-mini")
66
- - `OPENAI_IMAGE_DESCRIPTION_MAX_TOKENS`: Maximum tokens for image descriptions (default: 8192)
67
- - `OPENAI_EXPERIMENTAL_TELEMETRY`: Enable experimental telemetry features for enhanced debugging and usage analytics (default: false)
68
- - `OPENAI_BROWSER_BASE_URL`: Browser-only base URL to a proxy endpoint that forwards requests to OpenAI without exposing keys
69
- - `OPENAI_BROWSER_EMBEDDING_URL`: Browser-only embeddings endpoint base URL
70
-
71
- ### Browser mode and proxying
72
-
73
- When bundled for the browser, this plugin avoids sending Authorization headers. Set `OPENAI_BROWSER_BASE_URL` (and optionally `OPENAI_BROWSER_EMBEDDING_URL`) to a server-side proxy you control that injects the OpenAI API key. This prevents exposing secrets in frontend builds.
74
-
75
- Example minimal proxy (Express):
76
-
77
- ```ts
78
- import express from 'express';
79
- import fetch from 'node-fetch';
80
-
81
- const app = express();
82
- app.use(express.json());
83
-
84
- app.post('/openai/*', async (req, res) => {
85
- const url = `https://api.openai.com/v1/${req.params[0]}`;
86
- const r = await fetch(url, {
87
- method: 'POST',
88
- headers: {
89
- 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`,
90
- 'Content-Type': 'application/json',
91
- },
92
- body: JSON.stringify(req.body),
93
- });
94
- res.status(r.status).set(Object.fromEntries(r.headers)).send(await r.text());
95
- });
96
-
97
- app.listen(3000);
98
- ```
99
-
100
- ### Experimental Telemetry
101
-
102
- When `OPENAI_EXPERIMENTAL_TELEMETRY` is set to `true`, the plugin enables advanced telemetry features that provide:
103
-
104
- - Enhanced debugging capabilities for model performance issues
105
- - Detailed usage analytics for optimization
106
- - Better observability into OpenAI API interactions
107
- - Foundation for future monitoring and analytics features through Sentry or other frameworks
108
-
109
- **Note**: This feature is opt-in due to privacy considerations, as telemetry data may contain information about model usage patterns. Enable only when you need enhanced debugging or analytics capabilities.
110
-
111
- The plugin provides these model classes:
112
-
113
- - `TEXT_SMALL`: Optimized for fast, cost-effective responses
114
- - `TEXT_LARGE`: For complex tasks requiring deeper reasoning
115
- - `TEXT_EMBEDDING`: Text embedding model (text-embedding-3-small by default)
116
- - `IMAGE`: DALL-E image generation
117
- - `IMAGE_DESCRIPTION`: GPT-4o image analysis
118
- - `TRANSCRIPTION`: Whisper audio transcription
119
- - `TEXT_TOKENIZER_ENCODE`: Text tokenization
120
- - `TEXT_TOKENIZER_DECODE`: Token decoding
121
-
122
- ## Additional Features
123
-
124
- ### Image Generation
125
-
126
- ```js
127
- await runtime.useModel(ModelType.IMAGE, {
128
- prompt: "A sunset over mountains",
129
- n: 1, // number of images
130
- size: "1024x1024", // image resolution
131
- });
132
- ```
133
-
134
- ### Audio Transcription
135
-
136
- ```js
137
- const transcription = await runtime.useModel(
138
- ModelType.TRANSCRIPTION,
139
- audioBuffer
140
- );
141
- ```
142
-
143
- ### Image Analysis
144
-
145
- ```js
146
- const { title, description } = await runtime.useModel(
147
- ModelType.IMAGE_DESCRIPTION,
148
- "https://example.com/image.jpg"
149
- );
150
- ```
151
-
152
- ### Text Embeddings
153
-
154
- ```js
155
- await runtime.useModel(ModelType.TEXT_EMBEDDING, "text to embed");
156
- ```
157
-
158
- ### Tokenizer in browser
159
-
160
- js-tiktoken is WASM and browser-safe; this plugin uses `encodingForModel` directly in both Node and browser builds.