@aigne/openai 0.16.16-beta.2 → 0.16.16-beta.3

This diff shows the changes between two publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,21 @@
 # Changelog
 
+## [0.16.16-beta.3](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.16-beta.2...openai-v0.16.16-beta.3) (2025-12-19)
+
+
+### Features
+
+* add prompt caching for OpenAI/Gemini/Anthropic and cache token display ([#838](https://github.com/AIGNE-io/aigne-framework/issues/838)) ([46c628f](https://github.com/AIGNE-io/aigne-framework/commit/46c628f180572ea1b955d1a9888aad6145204842))
+
+
+### Dependencies
+
+* The following workspace dependencies were updated
+  * dependencies
+    * @aigne/core bumped to 1.72.0-beta.3
+  * devDependencies
+    * @aigne/test-utils bumped to 0.5.69-beta.3
+
 ## [0.16.16-beta.2](https://github.com/AIGNE-io/aigne-framework/compare/openai-v0.16.16-beta.1...openai-v0.16.16-beta.2) (2025-12-19)
 
 
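For context on the prompt-caching entry above: the cached-token count originates on the OpenAI side. The following is a minimal sketch, assuming the standard openai SDK streaming API; with usage reporting enabled, the final stream chunk carries a usage block whose prompt_tokens_details.cached_tokens field reports how many input tokens were served from the prompt cache. The hunks below map that field onto cacheReadInputTokens. Model, messages, and counts here are illustrative.

```ts
import OpenAI from "openai";

const client = new OpenAI(); // reads OPENAI_API_KEY from the environment

const stream = await client.chat.completions.create({
  model: "gpt-4o-mini", // illustrative model
  messages: [{ role: "user", content: "Hello!" }],
  stream: true,
  stream_options: { include_usage: true }, // final chunk includes a usage block
});

for await (const chunk of stream) {
  if (chunk.usage) {
    // e.g. { prompt_tokens: 2048, completion_tokens: 120,
    //        prompt_tokens_details: { cached_tokens: 1536 } }
    console.log("cached:", chunk.usage.prompt_tokens_details?.cached_tokens ?? 0);
  }
}
```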
@@ -288,13 +288,19 @@ class OpenAIChatModel extends core_1.ChatModel {
                 refusal += delta.refusal;
             }
             if (chunk.usage) {
+                const usage = {
+                    inputTokens: chunk.usage.prompt_tokens,
+                    outputTokens: chunk.usage.completion_tokens,
+                };
+                // Parse cache statistics if available
+                const inputDetails = chunk.usage.prompt_tokens_details;
+                if (inputDetails?.cached_tokens) {
+                    usage.cacheReadInputTokens = inputDetails.cached_tokens;
+                }
                 controller.enqueue({
                     delta: {
                         json: {
-                            usage: {
-                                inputTokens: chunk.usage.prompt_tokens,
-                                outputTokens: chunk.usage.completion_tokens,
-                            },
+                            usage,
                         },
                     },
                 });
@@ -283,13 +283,19 @@ export class OpenAIChatModel extends ChatModel {
                 refusal += delta.refusal;
             }
             if (chunk.usage) {
+                const usage = {
+                    inputTokens: chunk.usage.prompt_tokens,
+                    outputTokens: chunk.usage.completion_tokens,
+                };
+                // Parse cache statistics if available
+                const inputDetails = chunk.usage.prompt_tokens_details;
+                if (inputDetails?.cached_tokens) {
+                    usage.cacheReadInputTokens = inputDetails.cached_tokens;
+                }
                 controller.enqueue({
                     delta: {
                         json: {
-                            usage: {
-                                inputTokens: chunk.usage.prompt_tokens,
-                                outputTokens: chunk.usage.completion_tokens,
-                            },
+                            usage,
                         },
                     },
                 });
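The two hunks above apply the same change to what appear to be the package's CJS and ESM builds (note `core_1.ChatModel` versus the `export class ... extends ChatModel` form). As a self-contained sketch of the extraction logic, with illustrative types rather than the package's real ones:

```ts
// Illustrative shapes; the real package types live in @aigne/core.
interface StreamUsage {
  prompt_tokens: number;
  completion_tokens: number;
  prompt_tokens_details?: { cached_tokens?: number };
}

interface ModelUsage {
  inputTokens: number;
  outputTokens: number;
  cacheReadInputTokens?: number;
}

function toModelUsage(u: StreamUsage): ModelUsage {
  const usage: ModelUsage = {
    inputTokens: u.prompt_tokens,
    outputTokens: u.completion_tokens,
  };
  // Same truthiness guard as the diff: a cached_tokens of 0 or undefined
  // leaves cacheReadInputTokens unset.
  if (u.prompt_tokens_details?.cached_tokens) {
    usage.cacheReadInputTokens = u.prompt_tokens_details.cached_tokens;
  }
  return usage;
}

// 1536 of 2048 prompt tokens served from the prompt cache:
console.log(
  toModelUsage({
    prompt_tokens: 2048,
    completion_tokens: 120,
    prompt_tokens_details: { cached_tokens: 1536 },
  }),
); // -> { inputTokens: 2048, outputTokens: 120, cacheReadInputTokens: 1536 }
```

One consequence of the guard: a cache miss (cached_tokens: 0) yields the same output as a provider that reports no cache details at all, so a token display only shows a cache line on an actual hit.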
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@aigne/openai",
-  "version": "0.16.16-beta.2",
+  "version": "0.16.16-beta.3",
   "description": "AIGNE OpenAI SDK for integrating with OpenAI's GPT models and API services",
   "publishConfig": {
     "access": "public"
@@ -36,9 +36,9 @@
   },
   "dependencies": {
     "@aigne/uuid": "^13.0.1",
-    "openai": "^6.5.0",
+    "openai": "^6.14.0",
     "zod": "^3.25.67",
-    "@aigne/core": "^1.72.0-beta.2",
+    "@aigne/core": "^1.72.0-beta.3",
     "@aigne/platform-helpers": "^0.6.7-beta"
   },
   "devDependencies": {
@@ -47,7 +47,7 @@
     "npm-run-all": "^4.1.5",
     "rimraf": "^6.0.1",
     "typescript": "^5.9.2",
-    "@aigne/test-utils": "^0.5.69-beta.2"
+    "@aigne/test-utils": "^0.5.69-beta.3"
   },
   "scripts": {
     "lint": "tsc --noEmit",