aixyz 0.16.0 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/model.test.ts +27 -2
  2. package/model.ts +17 -12
  3. package/package.json +4 -4
package/model.test.ts CHANGED
@@ -6,12 +6,12 @@ const makeUserPrompt = (text: string): Prompt => [{ role: "user", content: [{ ty
6
6
  const identity = (input: string) => input;
7
7
 
8
8
  describe("fake model", () => {
9
- test("has correct metadata", () => {
9
+ test("has correct metadata", async () => {
10
10
  const model = fake(identity);
11
11
  expect(model.specificationVersion).toBe("v3");
12
12
  expect(model.provider).toBe("aixyz/fake");
13
13
  expect(model.modelId).toBe("aixyz/fake");
14
- expect(model.supportedUrls).toEqual({});
14
+ expect(await model.supportedUrls).toEqual({});
15
15
  });
16
16
 
17
17
  describe("doGenerate", () => {
@@ -107,4 +107,29 @@ describe("fake model", () => {
107
107
  expect(finish.usage.inputTokens.total).toBe(0);
108
108
  });
109
109
  });
110
+
111
+ describe("call recording", () => {
112
+ test("records doGenerate calls", async () => {
113
+ const model = fake(identity);
114
+ const prompt = makeUserPrompt("hi");
115
+ await model.doGenerate({ prompt });
116
+ expect(model.doGenerateCalls).toHaveLength(1);
117
+ expect(model.doGenerateCalls[0].prompt).toEqual(prompt);
118
+ });
119
+
120
+ test("records multiple doGenerate calls", async () => {
121
+ const model = fake(identity);
122
+ await model.doGenerate({ prompt: makeUserPrompt("first") });
123
+ await model.doGenerate({ prompt: makeUserPrompt("second") });
124
+ expect(model.doGenerateCalls).toHaveLength(2);
125
+ });
126
+
127
+ test("records doStream calls", async () => {
128
+ const model = fake(identity);
129
+ const prompt = makeUserPrompt("stream me");
130
+ await model.doStream({ prompt });
131
+ expect(model.doStreamCalls).toHaveLength(1);
132
+ expect(model.doStreamCalls[0].prompt).toEqual(prompt);
133
+ });
134
+ });
110
135
  });
package/model.ts CHANGED
@@ -1,4 +1,5 @@
1
- import type { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3Prompt } from "@ai-sdk/provider";
1
+ import { MockLanguageModelV3 } from "ai/test";
2
+ import type { LanguageModelV3CallOptions, LanguageModelV3Prompt } from "@ai-sdk/provider";
2
3
 
3
4
  export type Prompt = LanguageModelV3Prompt;
4
5
 
@@ -22,30 +23,34 @@ function lastUserText(prompt: Prompt): string {
22
23
  }
23
24
 
24
25
  /**
25
- * Creates a fake language model for testing and development.
26
+ * Creates a fake language model for testing and development, backed by `MockLanguageModelV3`.
26
27
  * Conforms to the LanguageModelV3 specification with zero token usage.
27
28
  *
28
29
  * The `transform` function receives the last user message text and the full prompt,
29
30
  * and returns the model output string.
30
31
  *
32
+ * All calls are recorded in `.doGenerateCalls` and `.doStreamCalls` for test assertions.
33
+ *
31
34
  * @example
32
35
  * import { fake } from "aixyz/model";
33
36
  *
34
37
  * // custom transform using last message only
35
38
  * const helloModel = fake((input) => `hello, ${input}`);
36
39
  *
40
+ * // inspect recorded calls after use
41
+ * await helloModel.doGenerate({ prompt });
42
+ * console.log(helloModel.doGenerateCalls.length); // 1
43
+ *
37
44
  * // custom transform using full prompt context
38
45
  * const contextModel = fake((input, prompt) => `${prompt.length} turns: ${input}`);
39
46
  */
40
- export function fake(transform: (lastMessage: string, prompt: Prompt) => string): LanguageModelV3 {
41
- return {
42
- specificationVersion: "v3" as const,
47
+ export function fake(transform: (lastMessage: string, prompt: Prompt) => string): MockLanguageModelV3 {
48
+ return new MockLanguageModelV3({
43
49
  provider: "aixyz/fake",
44
50
  modelId: "aixyz/fake",
45
- supportedUrls: {},
46
- doGenerate(options: LanguageModelV3CallOptions) {
51
+ doGenerate: async (options: LanguageModelV3CallOptions) => {
47
52
  const text = transform(lastUserText(options.prompt), options.prompt);
48
- return Promise.resolve({
53
+ return {
49
54
  content: [{ type: "text" as const, text }],
50
55
  finishReason: { unified: "stop" as const, raw: undefined },
51
56
  usage: {
@@ -53,9 +58,9 @@ export function fake(transform: (lastMessage: string, prompt: Prompt) => string)
53
58
  outputTokens: { total: 0, text: 0, reasoning: 0 },
54
59
  },
55
60
  warnings: [],
56
- });
61
+ };
57
62
  },
58
- doStream(options: LanguageModelV3CallOptions) {
63
+ doStream: async (options: LanguageModelV3CallOptions) => {
59
64
  const text = transform(lastUserText(options.prompt), options.prompt);
60
65
  const stream = new ReadableStream({
61
66
  start(controller) {
@@ -74,7 +79,7 @@ export function fake(transform: (lastMessage: string, prompt: Prompt) => string)
74
79
  controller.close();
75
80
  },
76
81
  });
77
- return Promise.resolve({ stream });
82
+ return { stream };
78
83
  },
79
- };
84
+ });
80
85
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "aixyz",
3
- "version": "0.16.0",
3
+ "version": "0.17.0",
4
4
  "description": "Payment-native SDK for AI Agent",
5
5
  "keywords": [
6
6
  "ai",
@@ -23,9 +23,9 @@
23
23
  ],
24
24
  "dependencies": {
25
25
  "@a2a-js/sdk": "^0.3.10",
26
- "@aixyz/cli": "0.16.0",
27
- "@aixyz/config": "0.16.0",
28
- "@aixyz/erc-8004": "0.16.0",
26
+ "@aixyz/cli": "0.17.0",
27
+ "@aixyz/config": "0.17.0",
28
+ "@aixyz/erc-8004": "0.17.0",
29
29
  "@modelcontextprotocol/sdk": "^1.26.0",
30
30
  "@next/env": "^16.1.6",
31
31
  "@x402/core": "^2.3.1",