@adminforth/completion-adapter-open-ai-chat-gpt 2.0.2 → 2.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3) hide show
  1. package/dist/index.js +10 -2
  2. package/index.ts +15 -2
  3. package/package.json +4 -1
package/dist/index.js CHANGED
@@ -7,9 +7,10 @@ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, ge
7
7
  step((generator = generator.apply(thisArg, _arguments || [])).next());
8
8
  });
9
9
  };
10
+ import { encoding_for_model } from "tiktoken";
10
11
  export default class CompletionAdapterOpenAIChatGPT {
11
12
  constructor(options) {
12
- this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, stop = ["."], maxTokens = 50) {
13
+ this.complete = (content_1, ...args_1) => __awaiter(this, [content_1, ...args_1], void 0, function* (content, stop = ["."], maxTokens = 50, outputSchema) {
13
14
  // stop parameter is already not supported
14
15
  // adapter users should explicitly ask model to stop at dot if needed (or "Complete only up to the end of sentence")
15
16
  const model = this.options.model || "gpt-5-nano";
@@ -24,7 +25,7 @@ export default class CompletionAdapterOpenAIChatGPT {
24
25
  role: "user",
25
26
  content, //param
26
27
  },
27
- ], max_completion_tokens: maxTokens }, this.options.extraRequestBodyParameters)),
28
+ ], max_completion_tokens: maxTokens, response_format: outputSchema ? Object.assign({ type: "json_schema" }, outputSchema) : undefined }, this.options.extraRequestBodyParameters)),
28
29
  });
29
30
  const data = yield resp.json();
30
31
  if (data.error) {
@@ -42,4 +43,11 @@ export default class CompletionAdapterOpenAIChatGPT {
42
43
  throw new Error("openAiApiKey is required");
43
44
  }
44
45
  }
46
+ measureTokensCount(content) {
47
+ // Implement token counting logic here
48
+ const model = this.options.model || "gpt-5-nano";
49
+ const encoding = encoding_for_model(model);
50
+ const tokens = encoding.encode(content);
51
+ return tokens.length;
52
+ }
45
53
  }
package/index.ts CHANGED
@@ -1,6 +1,6 @@
1
1
  import type { AdapterOptions } from "./types.js";
2
2
  import type { CompletionAdapter } from "adminforth";
3
-
3
+ import { encoding_for_model, type TiktokenModel } from "tiktoken";
4
4
  export default class CompletionAdapterOpenAIChatGPT
5
5
  implements CompletionAdapter
6
6
  {
@@ -16,7 +16,16 @@ export default class CompletionAdapterOpenAIChatGPT
16
16
  }
17
17
  }
18
18
 
19
- complete = async (content: string, stop = ["."], maxTokens = 50): Promise<{
19
+
20
+ measureTokensCount(content: string): number {
21
+ // Implement token counting logic here
22
+ const model = this.options.model || "gpt-5-nano";
23
+ const encoding = encoding_for_model(model as TiktokenModel);
24
+ const tokens = encoding.encode(content);
25
+ return tokens.length;
26
+ }
27
+
28
+ complete = async (content: string, stop = ["."], maxTokens = 50, outputSchema?: any): Promise<{
20
29
  content?: string;
21
30
  finishReason?: string;
22
31
  error?: string;
@@ -39,6 +48,10 @@ export default class CompletionAdapterOpenAIChatGPT
39
48
  },
40
49
  ],
41
50
  max_completion_tokens: maxTokens,
51
+ response_format: outputSchema ? {
52
+ type: "json_schema",
53
+ ...outputSchema,
54
+ } : undefined,
42
55
  ...this.options.extraRequestBodyParameters,
43
56
  }),
44
57
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@adminforth/completion-adapter-open-ai-chat-gpt",
3
- "version": "2.0.2",
3
+ "version": "2.0.6",
4
4
  "main": "dist/index.js",
5
5
  "types": "dist/index.d.ts",
6
6
  "type": "module",
@@ -15,5 +15,8 @@
15
15
  "description": "",
16
16
  "devDependencies": {
17
17
  "typescript": "^5.9.3"
18
+ },
19
+ "dependencies": {
20
+ "tiktoken": "^1.0.22"
18
21
  }
19
22
  }