manageprompt 0.1.0 → 0.2.0
- package/dist/index.cjs +11 -9
- package/dist/index.mjs +11 -9
- package/package.json +10 -2
package/dist/index.cjs
CHANGED
@@ -6,10 +6,12 @@ function extractText(content) {
     .map((part) => part.text)
     .join("");
 }
-function extractTokens(usage) {
+function extractUsage(usage) {
   return {
-
-
+    tokens_input: usage.inputTokens.total,
+    tokens_output: usage.outputTokens.total,
+    cache_read_tokens: usage.inputTokens.cacheRead,
+    cache_write_tokens: usage.inputTokens.cacheWrite,
   };
 }
 function manageprompt(options) {
@@ -20,14 +22,13 @@ function manageprompt(options) {
       const start = Date.now();
       const result = await doGenerate();
       const latency = Date.now() - start;
-      const tokens = extractTokens(result.usage);
       send(baseURL, {
         model: model.modelId,
         provider: model.provider,
         prompt: params.prompt,
         response_text: extractText(result.content),
-
-
+        ...extractUsage(result.usage),
+        raw_response: result,
         latency_ms: latency,
         is_streaming: false,
         finish_reason: result.finishReason.unified,
@@ -40,8 +41,10 @@ function manageprompt(options) {
       let text = "";
       let usage = null;
       let finishReason = null;
+      const chunks = [];
       const transform = new TransformStream({
         transform(chunk, controller) {
+          chunks.push(chunk);
           if (chunk.type === "text-delta") {
             text += chunk.delta;
           }
@@ -52,14 +55,13 @@ function manageprompt(options) {
           controller.enqueue(chunk);
         },
         flush() {
-          const tokens = usage ? extractTokens(usage) : {};
           send(baseURL, {
             model: model.modelId,
             provider: model.provider,
             prompt: params.prompt,
             response_text: text,
-
-
+            ...(usage ? extractUsage(usage) : {}),
+            raw_response: chunks,
             latency_ms: Date.now() - start,
             is_streaming: true,
             finish_reason: finishReason?.unified,
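The renamed helper is the core of this release: extractTokens becomes extractUsage, and it now reads a nested usage shape (token totals plus cache read/write counts) and also attaches the full result object as raw_response in the non-streaming path. A minimal TypeScript sketch of the mapping as it appears in the diff; the UsageLike interface is illustrative and not part of the package:

// Illustrative input shape, inferred from the property accesses in the diff.
interface UsageLike {
  inputTokens: { total: number; cacheRead?: number; cacheWrite?: number };
  outputTokens: { total: number };
}

// Flattens the usage object into the snake_case telemetry fields that
// manageprompt sends alongside each call.
function extractUsage(usage: UsageLike) {
  return {
    tokens_input: usage.inputTokens.total,
    tokens_output: usage.outputTokens.total,
    cache_read_tokens: usage.inputTokens.cacheRead,
    cache_write_tokens: usage.inputTokens.cacheWrite,
  };
}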
package/dist/index.mjs
CHANGED
@@ -4,10 +4,12 @@ function extractText(content) {
     .map((part) => part.text)
     .join("");
 }
-function extractTokens(usage) {
+function extractUsage(usage) {
   return {
-
-
+    tokens_input: usage.inputTokens.total,
+    tokens_output: usage.outputTokens.total,
+    cache_read_tokens: usage.inputTokens.cacheRead,
+    cache_write_tokens: usage.inputTokens.cacheWrite,
   };
 }
 function manageprompt(options) {
@@ -18,14 +20,13 @@ function manageprompt(options) {
       const start = Date.now();
       const result = await doGenerate();
       const latency = Date.now() - start;
-      const tokens = extractTokens(result.usage);
       send(baseURL, {
         model: model.modelId,
         provider: model.provider,
         prompt: params.prompt,
         response_text: extractText(result.content),
-
-
+        ...extractUsage(result.usage),
+        raw_response: result,
         latency_ms: latency,
         is_streaming: false,
         finish_reason: result.finishReason.unified,
@@ -38,8 +39,10 @@ function manageprompt(options) {
       let text = "";
       let usage = null;
       let finishReason = null;
+      const chunks = [];
       const transform = new TransformStream({
         transform(chunk, controller) {
+          chunks.push(chunk);
           if (chunk.type === "text-delta") {
             text += chunk.delta;
           }
@@ -50,14 +53,13 @@ function manageprompt(options) {
           controller.enqueue(chunk);
         },
         flush() {
-          const tokens = usage ? extractTokens(usage) : {};
           send(baseURL, {
             model: model.modelId,
             provider: model.provider,
             prompt: params.prompt,
             response_text: text,
-
-
+            ...(usage ? extractUsage(usage) : {}),
+            raw_response: chunks,
             latency_ms: Date.now() - start,
             is_streaming: true,
             finish_reason: finishReason?.unified,
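Both builds get the same streaming change: every part that passes through the TransformStream is now buffered and reported as raw_response when the stream flushes. A short sketch of the pattern, with the surrounding wrapStream plumbing omitted; the variable names mirror the diff, and the generic type arguments are assumptions:

// Buffer every stream part while still passing it through untouched.
const chunks: unknown[] = [];
let text = "";

const transform = new TransformStream<any, any>({
  transform(chunk, controller) {
    chunks.push(chunk);               // new in 0.2.0: keep the raw part
    if (chunk.type === "text-delta") {
      text += chunk.delta;            // accumulate visible text as before
    }
    controller.enqueue(chunk);        // forward the chunk unchanged
  },
  flush() {
    // At stream end the buffered parts go out as raw_response (see diff),
    // so the debugger sees the full stream, not just the joined text.
  },
});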
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "manageprompt",
-  "version": "0.1.0",
+  "version": "0.2.0",
   "description": "AI SDK middleware for ManagePrompt — local LLM call debugger",
   "type": "module",
   "main": "./dist/index.cjs",
@@ -18,17 +18,25 @@
     "README.md"
   ],
   "scripts": {
-    "build": "rollup -c"
+    "build": "rollup -c",
+    "test:openai": "tsx tests/test-openai.ts",
+    "test:anthropic": "tsx tests/test-anthropic.ts",
+    "test:openrouter": "tsx tests/test-openrouter.ts"
   },
   "peerDependencies": {
     "@ai-sdk/provider": ">=3.0.0"
   },
   "devDependencies": {
+    "@ai-sdk/anthropic": "^3.0.46",
+    "@ai-sdk/openai": "^3.0.30",
     "@ai-sdk/provider": "^3.0.8",
+    "@openrouter/ai-sdk-provider": "^2.2.3",
     "@rollup/plugin-typescript": "^12.1.0",
+    "ai": "^6.0.97",
     "rollup": "^4.30.0",
     "rollup-plugin-dts": "^6.2.0",
     "tslib": "^2.8.0",
+    "tsx": "^4.21.0",
     "typescript": "^5.7.0"
   },
   "keywords": [
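The new test:* scripts run per-provider smoke tests with tsx, and the matching providers (@ai-sdk/openai, @ai-sdk/anthropic, @openrouter/ai-sdk-provider) plus the ai package land in devDependencies. The test files themselves are not part of this diff; a hypothetical sketch of what one might look like, assuming manageprompt is exposed as a named export and accepts the baseURL option visible in dist/index.cjs:

// tests/test-openai.ts (hypothetical; run with `npm run test:openai`)
import { openai } from "@ai-sdk/openai";
import { generateText, wrapLanguageModel } from "ai";
import { manageprompt } from "manageprompt";

// Wrap any AI SDK model with the middleware so each call is reported
// to the local ManagePrompt debugger.
const model = wrapLanguageModel({
  model: openai("gpt-4o-mini"),                                   // model id is illustrative
  middleware: manageprompt({ baseURL: "http://localhost:3000" }), // assumed option
});

const { text } = await generateText({ model, prompt: "Say hello." });
console.log(text);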