aixyz 0.13.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/model.test.ts +26 -21
  2. package/model.ts +12 -9
  3. package/package.json +5 -4
package/model.test.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import { describe, expect, test } from "bun:test";
2
2
  import { fake, type Prompt } from "./model";
3
3
 
4
- const makeUserPrompt = (text: string) => [{ role: "user", content: [{ type: "text", text }] }];
4
+ const makeUserPrompt = (text: string): Prompt => [{ role: "user", content: [{ type: "text", text }] }];
5
5
 
6
6
  const identity = (input: string) => input;
7
7
 
@@ -19,36 +19,36 @@ describe("fake model", () => {
19
19
  const model = fake(identity);
20
20
  const result = await model.doGenerate({ prompt: makeUserPrompt("hello world") });
21
21
  expect(result.content).toEqual([{ type: "text", text: "hello world" }]);
22
- expect(result.finishReason).toBe("stop");
22
+ expect(result.finishReason).toEqual({ unified: "stop", raw: undefined });
23
23
  expect(result.warnings).toEqual([]);
24
24
  });
25
25
 
26
26
  test("applies custom transform to the last user text", async () => {
27
27
  const model = fake((input) => `hello, ${input}`);
28
28
  const result = await model.doGenerate({ prompt: makeUserPrompt("world") });
29
- expect(result.content[0].text).toBe("hello, world");
29
+ expect(result.content[0]).toEqual({ type: "text", text: "hello, world" });
30
30
  });
31
31
 
32
32
  test("applies custom transform to the last user message when multiple messages exist", async () => {
33
33
  const model = fake((input) => `hello, ${input}`);
34
- const prompt = [
34
+ const prompt: Prompt = [
35
35
  { role: "user", content: [{ type: "text", text: "first" }] },
36
36
  { role: "assistant", content: [{ type: "text", text: "reply" }] },
37
37
  { role: "user", content: [{ type: "text", text: "you" }] },
38
38
  ];
39
39
  const result = await model.doGenerate({ prompt });
40
- expect(result.content[0].text).toBe("hello, you");
40
+ expect(result.content[0]).toEqual({ type: "text", text: "hello, you" });
41
41
  });
42
42
 
43
43
  test("passes full prompt as second argument to transform", async () => {
44
44
  const model = fake((_input: string, prompt: Prompt) => `${prompt.length} turns`);
45
- const prompt = [
45
+ const prompt: Prompt = [
46
46
  { role: "user", content: [{ type: "text", text: "first" }] },
47
47
  { role: "assistant", content: [{ type: "text", text: "reply" }] },
48
48
  { role: "user", content: [{ type: "text", text: "second" }] },
49
49
  ];
50
50
  const result = await model.doGenerate({ prompt });
51
- expect(result.content[0].text).toBe("3 turns");
51
+ expect(result.content[0]).toEqual({ type: "text", text: "3 turns" });
52
52
  });
53
53
 
54
54
  test("returns zero usage", async () => {
@@ -61,42 +61,47 @@ describe("fake model", () => {
61
61
  test("returns empty string when no user message", async () => {
62
62
  const model = fake(identity);
63
63
  const result = await model.doGenerate({ prompt: [] });
64
- expect(result.content[0].text).toBe("");
64
+ expect(result.content[0]).toEqual({ type: "text", text: "" });
65
65
  });
66
66
  });
67
67
 
68
68
  describe("doStream", () => {
69
+ async function collectStream(stream: ReadableStream): Promise<unknown[]> {
70
+ const reader = stream.getReader();
71
+ const chunks: unknown[] = [];
72
+ while (true) {
73
+ const { done, value } = await reader.read();
74
+ if (done) break;
75
+ chunks.push(value);
76
+ }
77
+ return chunks;
78
+ }
79
+
69
80
  test("streams back the last user text with identity transform", async () => {
70
81
  const model = fake(identity);
71
82
  const { stream } = await model.doStream({ prompt: makeUserPrompt("stream me") });
72
- const chunks: unknown[] = [];
73
- for await (const chunk of stream) {
74
- chunks.push(chunk);
75
- }
83
+ const chunks = await collectStream(stream);
76
84
  expect(chunks[0]).toEqual({ type: "stream-start", warnings: [] });
77
85
  expect(chunks[1]).toEqual({ type: "text-start", id: "1" });
78
86
  expect(chunks[2]).toEqual({ type: "text-delta", id: "1", delta: "stream me" });
79
87
  expect(chunks[3]).toEqual({ type: "text-end", id: "1" });
80
- expect((chunks[4] as { type: string; finishReason: string }).finishReason).toBe("stop");
88
+ expect((chunks[4] as { type: string; finishReason: unknown }).finishReason).toEqual({
89
+ unified: "stop",
90
+ raw: undefined,
91
+ });
81
92
  });
82
93
 
83
94
  test("applies custom transform when streaming", async () => {
84
95
  const model = fake((input) => `hello, ${input}`);
85
96
  const { stream } = await model.doStream({ prompt: makeUserPrompt("you") });
86
- const chunks: unknown[] = [];
87
- for await (const chunk of stream) {
88
- chunks.push(chunk);
89
- }
97
+ const chunks = await collectStream(stream);
90
98
  expect(chunks[2]).toEqual({ type: "text-delta", id: "1", delta: "hello, you" });
91
99
  });
92
100
 
93
101
  test("stream contains finish chunk with zero usage", async () => {
94
102
  const model = fake(identity);
95
103
  const { stream } = await model.doStream({ prompt: makeUserPrompt("hi") });
96
- const chunks: unknown[] = [];
97
- for await (const chunk of stream) {
98
- chunks.push(chunk);
99
- }
104
+ const chunks = await collectStream(stream);
100
105
  const finish = chunks[4] as { type: string; usage: { inputTokens: { total: number } } };
101
106
  expect(finish.type).toBe("finish");
102
107
  expect(finish.usage.inputTokens.total).toBe(0);
package/model.ts CHANGED
@@ -1,4 +1,6 @@
1
- export type Prompt = Array<{ role: string; content: Array<{ type: string; text?: string }> }>;
1
+ import type { LanguageModelV3, LanguageModelV3CallOptions, LanguageModelV3Prompt } from "@ai-sdk/provider";
2
+
3
+ export type Prompt = LanguageModelV3Prompt;
2
4
 
3
5
  /**
4
6
  * Returns the text of the last user message in the prompt, or an empty string if none exists.
@@ -7,8 +9,9 @@ export type Prompt = Array<{ role: string; content: Array<{ type: string; text?:
7
9
  */
8
10
  function lastUserText(prompt: Prompt): string {
9
11
  for (let i = prompt.length - 1; i >= 0; i--) {
10
- if (prompt[i].role === "user") {
11
- for (const part of prompt[i].content) {
12
+ const message = prompt[i];
13
+ if (message.role === "user") {
14
+ for (const part of message.content) {
12
15
  if (part.type === "text" && part.text) {
13
16
  return part.text;
14
17
  }
@@ -20,7 +23,7 @@ function lastUserText(prompt: Prompt): string {
20
23
 
21
24
  /**
22
25
  * Creates a fake language model for testing and development.
23
- * Conforms to the LanguageModelV2 v3 specification with zero token usage.
26
+ * Conforms to the LanguageModelV3 specification with zero token usage.
24
27
  *
25
28
  * The `transform` function receives the last user message text and the full prompt,
26
29
  * and returns the model output string.
@@ -34,17 +37,17 @@ function lastUserText(prompt: Prompt): string {
34
37
  * // custom transform using full prompt context
35
38
  * const contextModel = fake((input, prompt) => `${prompt.length} turns: ${input}`);
36
39
  */
37
- export function fake(transform: (lastMessage: string, prompt: Prompt) => string) {
40
+ export function fake(transform: (lastMessage: string, prompt: Prompt) => string): LanguageModelV3 {
38
41
  return {
39
42
  specificationVersion: "v3" as const,
40
43
  provider: "aixyz/fake",
41
44
  modelId: "aixyz/fake",
42
45
  supportedUrls: {},
43
- doGenerate(options: { prompt: Prompt }) {
46
+ doGenerate(options: LanguageModelV3CallOptions) {
44
47
  const text = transform(lastUserText(options.prompt), options.prompt);
45
48
  return Promise.resolve({
46
49
  content: [{ type: "text" as const, text }],
47
- finishReason: "stop" as const,
50
+ finishReason: { unified: "stop" as const, raw: undefined },
48
51
  usage: {
49
52
  inputTokens: { total: 0, noCache: 0, cacheRead: 0, cacheWrite: 0 },
50
53
  outputTokens: { total: 0, text: 0, reasoning: 0 },
@@ -52,7 +55,7 @@ export function fake(transform: (lastMessage: string, prompt: Prompt) => string)
52
55
  warnings: [],
53
56
  });
54
57
  },
55
- doStream(options: { prompt: Prompt }) {
58
+ doStream(options: LanguageModelV3CallOptions) {
56
59
  const text = transform(lastUserText(options.prompt), options.prompt);
57
60
  const stream = new ReadableStream({
58
61
  start(controller) {
@@ -62,7 +65,7 @@ export function fake(transform: (lastMessage: string, prompt: Prompt) => string)
62
65
  controller.enqueue({ type: "text-end" as const, id: "1" });
63
66
  controller.enqueue({
64
67
  type: "finish" as const,
65
- finishReason: "stop" as const,
68
+ finishReason: { unified: "stop" as const, raw: undefined },
66
69
  usage: {
67
70
  inputTokens: { total: 0, noCache: 0, cacheRead: 0, cacheWrite: 0 },
68
71
  outputTokens: { total: 0, text: 0, reasoning: 0 },
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "aixyz",
3
- "version": "0.13.0",
3
+ "version": "0.15.0",
4
4
  "description": "Payment-native SDK for AI Agent",
5
5
  "keywords": [
6
6
  "ai",
@@ -23,9 +23,9 @@
23
23
  ],
24
24
  "dependencies": {
25
25
  "@a2a-js/sdk": "^0.3.10",
26
- "@aixyz/cli": "0.13.0",
27
- "@aixyz/config": "0.13.0",
28
- "@aixyz/erc-8004": "0.13.0",
26
+ "@aixyz/cli": "0.15.0",
27
+ "@aixyz/config": "0.15.0",
28
+ "@aixyz/erc-8004": "0.15.0",
29
29
  "@modelcontextprotocol/sdk": "^1.26.0",
30
30
  "@next/env": "^16.1.6",
31
31
  "@x402/core": "^2.3.1",
@@ -41,6 +41,7 @@
41
41
  "ai": "^6"
42
42
  },
43
43
  "peerDependencies": {
44
+ "@ai-sdk/provider": "^3",
44
45
  "ai": "^6"
45
46
  },
46
47
  "peerDependenciesMeta": {