aixyz 0.12.0 → 0.13.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/model.test.ts +105 -0
- package/model.ts +77 -0
- package/package.json +4 -4
package/model.test.ts
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
import { describe, expect, test } from "bun:test";
|
|
2
|
+
import { fake, type Prompt } from "./model";
|
|
3
|
+
|
|
4
|
+
const makeUserPrompt = (text: string) => [{ role: "user", content: [{ type: "text", text }] }];
|
|
5
|
+
|
|
6
|
+
const identity = (input: string) => input;
|
|
7
|
+
|
|
8
|
+
describe("fake model", () => {
|
|
9
|
+
test("has correct metadata", () => {
|
|
10
|
+
const model = fake(identity);
|
|
11
|
+
expect(model.specificationVersion).toBe("v3");
|
|
12
|
+
expect(model.provider).toBe("aixyz/fake");
|
|
13
|
+
expect(model.modelId).toBe("aixyz/fake");
|
|
14
|
+
expect(model.supportedUrls).toEqual({});
|
|
15
|
+
});
|
|
16
|
+
|
|
17
|
+
describe("doGenerate", () => {
|
|
18
|
+
test("echoes back the last user text with identity transform", async () => {
|
|
19
|
+
const model = fake(identity);
|
|
20
|
+
const result = await model.doGenerate({ prompt: makeUserPrompt("hello world") });
|
|
21
|
+
expect(result.content).toEqual([{ type: "text", text: "hello world" }]);
|
|
22
|
+
expect(result.finishReason).toBe("stop");
|
|
23
|
+
expect(result.warnings).toEqual([]);
|
|
24
|
+
});
|
|
25
|
+
|
|
26
|
+
test("applies custom transform to the last user text", async () => {
|
|
27
|
+
const model = fake((input) => `hello, ${input}`);
|
|
28
|
+
const result = await model.doGenerate({ prompt: makeUserPrompt("world") });
|
|
29
|
+
expect(result.content[0].text).toBe("hello, world");
|
|
30
|
+
});
|
|
31
|
+
|
|
32
|
+
test("applies custom transform to the last user message when multiple messages exist", async () => {
|
|
33
|
+
const model = fake((input) => `hello, ${input}`);
|
|
34
|
+
const prompt = [
|
|
35
|
+
{ role: "user", content: [{ type: "text", text: "first" }] },
|
|
36
|
+
{ role: "assistant", content: [{ type: "text", text: "reply" }] },
|
|
37
|
+
{ role: "user", content: [{ type: "text", text: "you" }] },
|
|
38
|
+
];
|
|
39
|
+
const result = await model.doGenerate({ prompt });
|
|
40
|
+
expect(result.content[0].text).toBe("hello, you");
|
|
41
|
+
});
|
|
42
|
+
|
|
43
|
+
test("passes full prompt as second argument to transform", async () => {
|
|
44
|
+
const model = fake((_input: string, prompt: Prompt) => `${prompt.length} turns`);
|
|
45
|
+
const prompt = [
|
|
46
|
+
{ role: "user", content: [{ type: "text", text: "first" }] },
|
|
47
|
+
{ role: "assistant", content: [{ type: "text", text: "reply" }] },
|
|
48
|
+
{ role: "user", content: [{ type: "text", text: "second" }] },
|
|
49
|
+
];
|
|
50
|
+
const result = await model.doGenerate({ prompt });
|
|
51
|
+
expect(result.content[0].text).toBe("3 turns");
|
|
52
|
+
});
|
|
53
|
+
|
|
54
|
+
test("returns zero usage", async () => {
|
|
55
|
+
const model = fake(identity);
|
|
56
|
+
const result = await model.doGenerate({ prompt: makeUserPrompt("hi") });
|
|
57
|
+
expect(result.usage.inputTokens.total).toBe(0);
|
|
58
|
+
expect(result.usage.outputTokens.total).toBe(0);
|
|
59
|
+
});
|
|
60
|
+
|
|
61
|
+
test("returns empty string when no user message", async () => {
|
|
62
|
+
const model = fake(identity);
|
|
63
|
+
const result = await model.doGenerate({ prompt: [] });
|
|
64
|
+
expect(result.content[0].text).toBe("");
|
|
65
|
+
});
|
|
66
|
+
});
|
|
67
|
+
|
|
68
|
+
describe("doStream", () => {
|
|
69
|
+
test("streams back the last user text with identity transform", async () => {
|
|
70
|
+
const model = fake(identity);
|
|
71
|
+
const { stream } = await model.doStream({ prompt: makeUserPrompt("stream me") });
|
|
72
|
+
const chunks: unknown[] = [];
|
|
73
|
+
for await (const chunk of stream) {
|
|
74
|
+
chunks.push(chunk);
|
|
75
|
+
}
|
|
76
|
+
expect(chunks[0]).toEqual({ type: "stream-start", warnings: [] });
|
|
77
|
+
expect(chunks[1]).toEqual({ type: "text-start", id: "1" });
|
|
78
|
+
expect(chunks[2]).toEqual({ type: "text-delta", id: "1", delta: "stream me" });
|
|
79
|
+
expect(chunks[3]).toEqual({ type: "text-end", id: "1" });
|
|
80
|
+
expect((chunks[4] as { type: string; finishReason: string }).finishReason).toBe("stop");
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
test("applies custom transform when streaming", async () => {
|
|
84
|
+
const model = fake((input) => `hello, ${input}`);
|
|
85
|
+
const { stream } = await model.doStream({ prompt: makeUserPrompt("you") });
|
|
86
|
+
const chunks: unknown[] = [];
|
|
87
|
+
for await (const chunk of stream) {
|
|
88
|
+
chunks.push(chunk);
|
|
89
|
+
}
|
|
90
|
+
expect(chunks[2]).toEqual({ type: "text-delta", id: "1", delta: "hello, you" });
|
|
91
|
+
});
|
|
92
|
+
|
|
93
|
+
test("stream contains finish chunk with zero usage", async () => {
|
|
94
|
+
const model = fake(identity);
|
|
95
|
+
const { stream } = await model.doStream({ prompt: makeUserPrompt("hi") });
|
|
96
|
+
const chunks: unknown[] = [];
|
|
97
|
+
for await (const chunk of stream) {
|
|
98
|
+
chunks.push(chunk);
|
|
99
|
+
}
|
|
100
|
+
const finish = chunks[4] as { type: string; usage: { inputTokens: { total: number } } };
|
|
101
|
+
expect(finish.type).toBe("finish");
|
|
102
|
+
expect(finish.usage.inputTokens.total).toBe(0);
|
|
103
|
+
});
|
|
104
|
+
});
|
|
105
|
+
});
|
package/model.ts
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
export type Prompt = Array<{ role: string; content: Array<{ type: string; text?: string }> }>;
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Returns the text of the last user message in the prompt, or an empty string if none exists.
|
|
5
|
+
* Searches backward through the prompt array for the last user role entry and returns the first
|
|
6
|
+
* text part found within it.
|
|
7
|
+
*/
|
|
8
|
+
function lastUserText(prompt: Prompt): string {
|
|
9
|
+
for (let i = prompt.length - 1; i >= 0; i--) {
|
|
10
|
+
if (prompt[i].role === "user") {
|
|
11
|
+
for (const part of prompt[i].content) {
|
|
12
|
+
if (part.type === "text" && part.text) {
|
|
13
|
+
return part.text;
|
|
14
|
+
}
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
return "";
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
/**
|
|
22
|
+
* Creates a fake language model for testing and development.
|
|
23
|
+
* Conforms to the LanguageModelV2 v3 specification with zero token usage.
|
|
24
|
+
*
|
|
25
|
+
* The `transform` function receives the last user message text and the full prompt,
|
|
26
|
+
* and returns the model output string.
|
|
27
|
+
*
|
|
28
|
+
* @example
|
|
29
|
+
* import { fake } from "aixyz/model";
|
|
30
|
+
*
|
|
31
|
+
* // custom transform using last message only
|
|
32
|
+
* const helloModel = fake((input) => `hello, ${input}`);
|
|
33
|
+
*
|
|
34
|
+
* // custom transform using full prompt context
|
|
35
|
+
* const contextModel = fake((input, prompt) => `${prompt.length} turns: ${input}`);
|
|
36
|
+
*/
|
|
37
|
+
export function fake(transform: (lastMessage: string, prompt: Prompt) => string) {
|
|
38
|
+
return {
|
|
39
|
+
specificationVersion: "v3" as const,
|
|
40
|
+
provider: "aixyz/fake",
|
|
41
|
+
modelId: "aixyz/fake",
|
|
42
|
+
supportedUrls: {},
|
|
43
|
+
doGenerate(options: { prompt: Prompt }) {
|
|
44
|
+
const text = transform(lastUserText(options.prompt), options.prompt);
|
|
45
|
+
return Promise.resolve({
|
|
46
|
+
content: [{ type: "text" as const, text }],
|
|
47
|
+
finishReason: "stop" as const,
|
|
48
|
+
usage: {
|
|
49
|
+
inputTokens: { total: 0, noCache: 0, cacheRead: 0, cacheWrite: 0 },
|
|
50
|
+
outputTokens: { total: 0, text: 0, reasoning: 0 },
|
|
51
|
+
},
|
|
52
|
+
warnings: [],
|
|
53
|
+
});
|
|
54
|
+
},
|
|
55
|
+
doStream(options: { prompt: Prompt }) {
|
|
56
|
+
const text = transform(lastUserText(options.prompt), options.prompt);
|
|
57
|
+
const stream = new ReadableStream({
|
|
58
|
+
start(controller) {
|
|
59
|
+
controller.enqueue({ type: "stream-start" as const, warnings: [] });
|
|
60
|
+
controller.enqueue({ type: "text-start" as const, id: "1" });
|
|
61
|
+
controller.enqueue({ type: "text-delta" as const, id: "1", delta: text });
|
|
62
|
+
controller.enqueue({ type: "text-end" as const, id: "1" });
|
|
63
|
+
controller.enqueue({
|
|
64
|
+
type: "finish" as const,
|
|
65
|
+
finishReason: "stop" as const,
|
|
66
|
+
usage: {
|
|
67
|
+
inputTokens: { total: 0, noCache: 0, cacheRead: 0, cacheWrite: 0 },
|
|
68
|
+
outputTokens: { total: 0, text: 0, reasoning: 0 },
|
|
69
|
+
},
|
|
70
|
+
});
|
|
71
|
+
controller.close();
|
|
72
|
+
},
|
|
73
|
+
});
|
|
74
|
+
return Promise.resolve({ stream });
|
|
75
|
+
},
|
|
76
|
+
};
|
|
77
|
+
}
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "aixyz",
|
|
3
|
-
"version": "0.12.0",
|
|
3
|
+
"version": "0.13.0",
|
|
4
4
|
"description": "Payment-native SDK for AI Agent",
|
|
5
5
|
"keywords": [
|
|
6
6
|
"ai",
|
|
@@ -23,9 +23,9 @@
|
|
|
23
23
|
],
|
|
24
24
|
"dependencies": {
|
|
25
25
|
"@a2a-js/sdk": "^0.3.10",
|
|
26
|
-
"@aixyz/cli": "0.12.0",
|
|
27
|
-
"@aixyz/config": "0.12.0",
|
|
28
|
-
"@aixyz/erc-8004": "0.12.0",
|
|
26
|
+
"@aixyz/cli": "0.13.0",
|
|
27
|
+
"@aixyz/config": "0.13.0",
|
|
28
|
+
"@aixyz/erc-8004": "0.13.0",
|
|
29
29
|
"@modelcontextprotocol/sdk": "^1.26.0",
|
|
30
30
|
"@next/env": "^16.1.6",
|
|
31
31
|
"@x402/core": "^2.3.1",
|