manageprompt 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +50 -0
- package/dist/index.cjs +89 -0
- package/dist/index.d.ts +8 -0
- package/dist/index.mjs +87 -0
- package/package.json +48 -0
package/README.md
ADDED
@@ -0,0 +1,50 @@
+# manageprompt
+
+[Vercel AI SDK](https://ai-sdk.dev) middleware for [ManagePrompt](https://github.com/techulus/manage-prompt) — a local LLM call debugger.
+
+Captures every LLM call with full prompt, response, token usage, cost, and latency.
+
+## Install
+
+```bash
+pnpm add manageprompt
+```
+
+## Usage
+
+```ts
+import { generateText, wrapLanguageModel } from "ai";
+import { openai } from "@ai-sdk/openai";
+import { manageprompt } from "manageprompt";
+
+const model = wrapLanguageModel({
+  model: openai("gpt-4o"),
+  middleware: manageprompt(),
+});
+
+const { text } = await generateText({ model, prompt: "Hello" });
+```
+
+Works with any AI SDK provider — OpenAI, Anthropic, Google, Mistral, etc.
+
+## Options
+
+```ts
+manageprompt({
+  url: "http://localhost:54321", // default: http://localhost:54321
+});
+```
+
+## Prerequisites
+
+Start the ManagePrompt server before running your app:
+
+```bash
+manageprompt start
+```
+
+See the [ManagePrompt README](https://github.com/techulus/manage-prompt) for installation and full documentation.
+
+## License
+
+MIT
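The README's provider-agnostic claim follows from wrapping at the `wrapLanguageModel` level rather than at any single provider. As an illustration, the same setup with the Anthropic provider; this is a sketch assuming `@ai-sdk/anthropic` is installed, and the model ID is illustrative:

```ts
import { generateText, wrapLanguageModel } from "ai";
import { anthropic } from "@ai-sdk/anthropic";
import { manageprompt } from "manageprompt";

// Identical wiring to the OpenAI example above; only the provider changes.
const model = wrapLanguageModel({
  model: anthropic("claude-sonnet-4-5"), // illustrative model ID
  middleware: manageprompt(),
});

const { text } = await generateText({ model, prompt: "Hello" });
```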
package/dist/index.cjs
ADDED
@@ -0,0 +1,89 @@
+'use strict';
+
+function extractText(content) {
+    return content
+        .filter((part) => part.type === "text")
+        .map((part) => part.text)
+        .join("");
+}
+function extractTokens(usage) {
+    return {
+        input: usage.inputTokens.total,
+        output: usage.outputTokens.total,
+    };
+}
+function manageprompt(options) {
+    const baseURL = (options?.url ?? "http://localhost:54321").replace(/\/$/, "");
+    return {
+        specificationVersion: "v3",
+        wrapGenerate: async ({ doGenerate, params, model, }) => {
+            const start = Date.now();
+            const result = await doGenerate();
+            const latency = Date.now() - start;
+            const tokens = extractTokens(result.usage);
+            send(baseURL, {
+                model: model.modelId,
+                provider: model.provider,
+                prompt: params.prompt,
+                response_text: extractText(result.content),
+                tokens_input: tokens.input,
+                tokens_output: tokens.output,
+                latency_ms: latency,
+                is_streaming: false,
+                finish_reason: result.finishReason.unified,
+            });
+            return result;
+        },
+        wrapStream: async ({ doStream, params, model, }) => {
+            const start = Date.now();
+            const { stream, ...rest } = await doStream();
+            let text = "";
+            let usage = null;
+            let finishReason = null;
+            const transform = new TransformStream({
+                transform(chunk, controller) {
+                    if (chunk.type === "text-delta") {
+                        text += chunk.delta;
+                    }
+                    if (chunk.type === "finish") {
+                        usage = chunk.usage;
+                        finishReason = chunk.finishReason;
+                    }
+                    controller.enqueue(chunk);
+                },
+                flush() {
+                    const tokens = usage ? extractTokens(usage) : {};
+                    send(baseURL, {
+                        model: model.modelId,
+                        provider: model.provider,
+                        prompt: params.prompt,
+                        response_text: text,
+                        tokens_input: tokens.input,
+                        tokens_output: tokens.output,
+                        latency_ms: Date.now() - start,
+                        is_streaming: true,
+                        finish_reason: finishReason?.unified,
+                    });
+                },
+            });
+            return { stream: stream.pipeThrough(transform), ...rest };
+        },
+    };
+}
+function send(baseURL, data) {
+    fetch(`${baseURL}/api/ingest`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify(data),
+    })
+        .then((res) => {
+        if (!res.ok) {
+            console.error(`[manageprompt] Failed to send data: ${res.status} ${res.statusText}`);
+        }
+    })
+        .catch((err) => {
+        console.error(`[manageprompt] Failed to connect to ${baseURL}:`, err.message);
+    });
+}
+
+exports.manageprompt = manageprompt;
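Since the CommonJS build exposes the factory via `exports.manageprompt`, it can be smoke-tested without any model call. A minimal sketch, inspecting only the middleware object the code above returns:

```ts
// CommonJS consumption of the build above (illustrative check, no network calls)
const { manageprompt } = require("manageprompt");

const middleware = manageprompt({ url: "http://localhost:54321" });
console.log(middleware.specificationVersion); // "v3"
console.log(typeof middleware.wrapGenerate);  // "function"
console.log(typeof middleware.wrapStream);    // "function"
```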
package/dist/index.d.ts
ADDED
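The declaration file's 8 lines are not rendered in this diff. Based on the implementation and the `@ai-sdk/provider` peer dependency, it plausibly reads something like the following sketch; the `LanguageModelV3Middleware` name and the options interface are assumptions (the middleware type name assumes the provider package keeps its v2 naming convention), not the published file:

```ts
import { LanguageModelV3Middleware } from "@ai-sdk/provider";

// Hypothetical reconstruction of the unrendered dist/index.d.ts
interface ManagePromptOptions {
    url?: string;
}

declare function manageprompt(options?: ManagePromptOptions): LanguageModelV3Middleware;

export { type ManagePromptOptions, manageprompt };
```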
package/dist/index.mjs
ADDED
@@ -0,0 +1,87 @@
+function extractText(content) {
+    return content
+        .filter((part) => part.type === "text")
+        .map((part) => part.text)
+        .join("");
+}
+function extractTokens(usage) {
+    return {
+        input: usage.inputTokens.total,
+        output: usage.outputTokens.total,
+    };
+}
+function manageprompt(options) {
+    const baseURL = (options?.url ?? "http://localhost:54321").replace(/\/$/, "");
+    return {
+        specificationVersion: "v3",
+        wrapGenerate: async ({ doGenerate, params, model, }) => {
+            const start = Date.now();
+            const result = await doGenerate();
+            const latency = Date.now() - start;
+            const tokens = extractTokens(result.usage);
+            send(baseURL, {
+                model: model.modelId,
+                provider: model.provider,
+                prompt: params.prompt,
+                response_text: extractText(result.content),
+                tokens_input: tokens.input,
+                tokens_output: tokens.output,
+                latency_ms: latency,
+                is_streaming: false,
+                finish_reason: result.finishReason.unified,
+            });
+            return result;
+        },
+        wrapStream: async ({ doStream, params, model, }) => {
+            const start = Date.now();
+            const { stream, ...rest } = await doStream();
+            let text = "";
+            let usage = null;
+            let finishReason = null;
+            const transform = new TransformStream({
+                transform(chunk, controller) {
+                    if (chunk.type === "text-delta") {
+                        text += chunk.delta;
+                    }
+                    if (chunk.type === "finish") {
+                        usage = chunk.usage;
+                        finishReason = chunk.finishReason;
+                    }
+                    controller.enqueue(chunk);
+                },
+                flush() {
+                    const tokens = usage ? extractTokens(usage) : {};
+                    send(baseURL, {
+                        model: model.modelId,
+                        provider: model.provider,
+                        prompt: params.prompt,
+                        response_text: text,
+                        tokens_input: tokens.input,
+                        tokens_output: tokens.output,
+                        latency_ms: Date.now() - start,
+                        is_streaming: true,
+                        finish_reason: finishReason?.unified,
+                    });
+                },
+            });
+            return { stream: stream.pipeThrough(transform), ...rest };
+        },
+    };
+}
+function send(baseURL, data) {
+    fetch(`${baseURL}/api/ingest`, {
+        method: "POST",
+        headers: { "Content-Type": "application/json" },
+        body: JSON.stringify(data),
+    })
+        .then((res) => {
+        if (!res.ok) {
+            console.error(`[manageprompt] Failed to send data: ${res.status} ${res.statusText}`);
+        }
+    })
+        .catch((err) => {
+        console.error(`[manageprompt] Failed to connect to ${baseURL}:`, err.message);
+    });
+}
+
+export { manageprompt };
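Both builds POST the same JSON body to `${url}/api/ingest` via `send()`. For reference, the payload shape as read from the code above; the optionality markers reflect its code paths, not a published schema:

```ts
// Shape of the body serialized by send(), inferred from the dist code above
interface IngestPayload {
  model: string;           // model.modelId
  provider: string;        // model.provider
  prompt: unknown;         // the AI SDK prompt, passed through verbatim
  response_text: string;
  tokens_input?: number;   // undefined if a stream ends without a "finish" chunk
  tokens_output?: number;
  latency_ms: number;
  is_streaming: boolean;
  finish_reason?: string;  // finishReason.unified
}
```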
package/package.json
ADDED
@@ -0,0 +1,48 @@
+{
+  "name": "manageprompt",
+  "version": "0.1.0",
+  "description": "AI SDK middleware for ManagePrompt — local LLM call debugger",
+  "type": "module",
+  "main": "./dist/index.cjs",
+  "module": "./dist/index.mjs",
+  "types": "./dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "import": "./dist/index.mjs",
+      "require": "./dist/index.cjs"
+    }
+  },
+  "files": [
+    "dist",
+    "README.md"
+  ],
+  "scripts": {
+    "build": "rollup -c"
+  },
+  "peerDependencies": {
+    "@ai-sdk/provider": ">=3.0.0"
+  },
+  "devDependencies": {
+    "@ai-sdk/provider": "^3.0.8",
+    "@rollup/plugin-typescript": "^12.1.0",
+    "rollup": "^4.30.0",
+    "rollup-plugin-dts": "^6.2.0",
+    "tslib": "^2.8.0",
+    "typescript": "^5.7.0"
+  },
+  "keywords": [
+    "ai",
+    "llm",
+    "debugging",
+    "openai",
+    "anthropic",
+    "vercel-ai-sdk",
+    "middleware"
+  ],
+  "license": "MIT",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/techulus/manage-prompt"
+  }
+}
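The `build` script points at a Rollup config that is not part of the published `files`. A minimal `rollup.config.mjs` consistent with the listed devDependencies and the three emitted artifacts could look like this sketch; the entry path and option details are assumptions, not the package's actual config:

```ts
// rollup.config.mjs (hypothetical; the real config is not published)
import typescript from "@rollup/plugin-typescript";
import { dts } from "rollup-plugin-dts";

export default [
  {
    input: "src/index.ts", // assumed source entry point
    output: [
      { file: "dist/index.cjs", format: "cjs" },
      { file: "dist/index.mjs", format: "es" },
    ],
    plugins: [typescript()],
    external: ["@ai-sdk/provider"],
  },
  {
    // Bundle type declarations into the single dist/index.d.ts
    input: "src/index.ts",
    output: { file: "dist/index.d.ts", format: "es" },
    plugins: [dts()],
  },
];
```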