@flink-app/test-utils 1.0.0 → 2.0.0-alpha.48
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +6 -0
- package/dist/ai/conversation.d.ts +27 -0
- package/dist/ai/conversation.js +30 -0
- package/dist/ai/index.d.ts +4 -0
- package/dist/ai/index.js +20 -0
- package/dist/ai/mockContext.d.ts +16 -0
- package/dist/ai/mockContext.js +39 -0
- package/dist/ai/mockLLMAdapter.d.ts +90 -0
- package/dist/ai/mockLLMAdapter.js +252 -0
- package/dist/ai/mockTool.d.ts +64 -0
- package/dist/ai/mockTool.js +133 -0
- package/dist/index.d.ts +1 -0
- package/dist/index.js +1 -0
- package/package.json +9 -5
- package/spec/ai/conversation.spec.ts +280 -0
- package/spec/ai/mockLLMAdapter.spec.ts +533 -0
- package/spec/ai/mockTool.spec.ts +313 -0
- package/spec/support/jasmine.json +7 -0
- package/src/ai/conversation.ts +54 -0
- package/src/ai/index.ts +4 -0
- package/src/ai/mockContext.ts +41 -0
- package/src/ai/mockLLMAdapter.ts +238 -0
- package/src/ai/mockTool.ts +135 -0
- package/src/index.ts +1 -0
- package/tsconfig.dist.json +4 -0
- package/tsconfig.json +6 -5
package/dist/ai/conversation.d.ts
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
// package/dist/ai/conversation.d.ts
import type { Message, ToolCall } from "@flink-app/flink/ai";
/**
 * Fluent builder for multi-turn conversation testing
 * Simplifies creating conversation histories
 *
 * @example
 * const conversation = createConversation()
 *     .user("Book a flight to Paris")
 *     .assistant("When would you like to travel?")
 *     .user("Next Monday")
 *     .assistant("Let me search", [
 *         { id: "1", name: "search_flights", input: { destination: "Paris" } }
 *     ])
 *     .tool("1", "search_flights", JSON.stringify({ flights: [...] }))
 *     .assistant("I found these flights...")
 *     .build();
 */
export interface ConversationBuilder {
    // Appends a user message and returns the builder for chaining.
    user(content: string): ConversationBuilder;
    // Appends an assistant message, optionally carrying tool calls.
    assistant(content: string, toolCalls?: ToolCall[]): ConversationBuilder;
    // Appends a tool-result message tied to a prior tool call id.
    tool(toolCallId: string, toolName: string, result: string): ConversationBuilder;
    // Returns the accumulated message history.
    build(): Message[];
}
/**
 * Creates a new conversation builder
 */
export declare function createConversation(): ConversationBuilder;
|
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.createConversation = createConversation;
|
|
4
|
+
var ConversationBuilderImpl = /** @class */ (function () {
|
|
5
|
+
function ConversationBuilderImpl() {
|
|
6
|
+
this.messages = [];
|
|
7
|
+
}
|
|
8
|
+
ConversationBuilderImpl.prototype.user = function (content) {
|
|
9
|
+
this.messages.push({ role: "user", content: content });
|
|
10
|
+
return this;
|
|
11
|
+
};
|
|
12
|
+
ConversationBuilderImpl.prototype.assistant = function (content, toolCalls) {
|
|
13
|
+
this.messages.push({ role: "assistant", content: content, toolCalls: toolCalls });
|
|
14
|
+
return this;
|
|
15
|
+
};
|
|
16
|
+
ConversationBuilderImpl.prototype.tool = function (toolCallId, toolName, result) {
|
|
17
|
+
this.messages.push({ role: "tool", toolCallId: toolCallId, toolName: toolName, result: result });
|
|
18
|
+
return this;
|
|
19
|
+
};
|
|
20
|
+
ConversationBuilderImpl.prototype.build = function () {
|
|
21
|
+
return this.messages;
|
|
22
|
+
};
|
|
23
|
+
return ConversationBuilderImpl;
|
|
24
|
+
}());
|
|
25
|
+
/**
|
|
26
|
+
* Creates a new conversation builder
|
|
27
|
+
*/
|
|
28
|
+
function createConversation() {
|
|
29
|
+
return new ConversationBuilderImpl();
|
|
30
|
+
}
|
package/dist/ai/index.js
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
// package/dist/ai/index.js
// Barrel module: re-exports everything from the ai test-helper modules.
"use strict";
// TypeScript-emitted helper: copies a single named binding from module m
// onto target o, as a live getter where property descriptors allow it,
// falling back to plain assignment on engines without Object.create.
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    var desc = Object.getOwnPropertyDescriptor(m, k);
    if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
        desc = { enumerable: true, get: function() { return m[k]; } };
    }
    Object.defineProperty(o, k2, desc);
}) : (function(o, m, k, k2) {
    if (k2 === undefined) k2 = k;
    o[k2] = m[k];
}));
// TypeScript-emitted helper: implements `export * from "..."` by re-binding
// every property of m (except "default" and already-present names) onto exports.
var __exportStar = (this && this.__exportStar) || function(m, exports) {
    for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
};
Object.defineProperty(exports, "__esModule", { value: true });
__exportStar(require("./mockLLMAdapter"), exports);
__exportStar(require("./mockTool"), exports);
__exportStar(require("./mockContext"), exports);
__exportStar(require("./conversation"), exports);
|
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
// package/dist/ai/mockContext.d.ts
import { FlinkContext } from "@flink-app/flink";
/**
 * Creates a mock FlinkContext for tool testing
 * Provides sensible defaults for all required properties
 *
 * @example
 * const ctx = mockToolContext({
 *     repos: { carRepo: mockCarRepo },
 *     plugins: { email: mockEmailPlugin }
 * });
 *
 * @example
 * // Minimal usage with defaults
 * const ctx = mockToolContext();
 */
export declare function mockToolContext<Ctx extends FlinkContext = FlinkContext>(overrides?: Partial<Ctx>): Ctx;
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __assign = (this && this.__assign) || function () {
|
|
3
|
+
__assign = Object.assign || function(t) {
|
|
4
|
+
for (var s, i = 1, n = arguments.length; i < n; i++) {
|
|
5
|
+
s = arguments[i];
|
|
6
|
+
for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p))
|
|
7
|
+
t[p] = s[p];
|
|
8
|
+
}
|
|
9
|
+
return t;
|
|
10
|
+
};
|
|
11
|
+
return __assign.apply(this, arguments);
|
|
12
|
+
};
|
|
13
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
14
|
+
exports.mockToolContext = mockToolContext;
|
|
15
|
+
/**
|
|
16
|
+
* Creates a mock FlinkContext for tool testing
|
|
17
|
+
* Provides sensible defaults for all required properties
|
|
18
|
+
*
|
|
19
|
+
* @example
|
|
20
|
+
* const ctx = mockToolContext({
|
|
21
|
+
* repos: { carRepo: mockCarRepo },
|
|
22
|
+
* plugins: { email: mockEmailPlugin }
|
|
23
|
+
* });
|
|
24
|
+
*
|
|
25
|
+
* @example
|
|
26
|
+
* // Minimal usage with defaults
|
|
27
|
+
* const ctx = mockToolContext();
|
|
28
|
+
*/
|
|
29
|
+
function mockToolContext(overrides) {
|
|
30
|
+
if (overrides === void 0) { overrides = {}; }
|
|
31
|
+
var defaultContext = {
|
|
32
|
+
repos: {},
|
|
33
|
+
plugins: {},
|
|
34
|
+
agents: {},
|
|
35
|
+
};
|
|
36
|
+
return __assign(__assign(__assign({}, defaultContext), overrides), {
|
|
37
|
+
// Deep merge repos, plugins, and agents
|
|
38
|
+
repos: __assign(__assign({}, defaultContext.repos), (overrides.repos || {})), plugins: __assign(__assign({}, defaultContext.plugins), (overrides.plugins || {})), agents: __assign(__assign({}, (defaultContext.agents || {})), (overrides.agents || {})) });
|
|
39
|
+
}
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
// package/dist/ai/mockLLMAdapter.d.ts
import { LLMAdapter, LLMMessage, LLMStreamChunk } from "@flink-app/flink/ai";
// Configuration for the mock adapter. Typically exactly one of `response`
// (single text turn) or `responses` (scripted multi-turn queue) is set.
export interface MockLLMConfig {
    response?: string;
    responses?: Array<{
        text?: string;
        toolCalls?: Array<{
            id: string;
            name: string;
            input: any;
        }>;
        stopReason?: "end_turn" | "tool_use" | "max_tokens";
        streamChunks?: LLMStreamChunk[];
    }>;
    error?: Error;
    usage?: {
        inputTokens: number;
        outputTokens: number;
    };
    defaultStreamChunks?: LLMStreamChunk[];
}
// Snapshot of the parameters a single stream() call was made with.
export interface MockLLMAdapterInvocation {
    instructions: string;
    messages: LLMMessage[];
    tools: any[];
    maxTokens: number;
    temperature: number;
}
// The adapter returned by mockLLMAdapter: a regular LLMAdapter plus
// invocation-tracking helpers for assertions in tests.
export interface MockLLMAdapter extends LLMAdapter {
    invocations: MockLLMAdapterInvocation[];
    reset(): void;
    getLastInvocation(): MockLLMAdapterInvocation | undefined;
    getInvocationCount(): number;
}
/**
 * Creates a mock LLM adapter for testing agents without API calls
 *
 * Features:
 * - Canned responses (single or multi-turn)
 * - Tool call simulation
 * - Error simulation
 * - Automatic invocation tracking
 * - Token usage simulation
 *
 * @example
 * // Simple response
 * const adapter = mockLLMAdapter({ response: "Hello!" });
 *
 * @example
 * // Multi-turn with tool calls
 * const adapter = mockLLMAdapter({
 *     responses: [
 *         {
 *             text: "Let me check",
 *             toolCalls: [{ id: "1", name: "get_weather", input: { city: "Stockholm" } }]
 *         },
 *         { text: "It's sunny and 22°C" }
 *     ]
 * });
 *
 * @example
 * // Error simulation
 * const adapter = mockLLMAdapter({
 *     error: new Error("Rate limit exceeded")
 * });
 */
export declare function mockLLMAdapter(config?: MockLLMConfig): MockLLMAdapter;
/**
 * Helper function to create streaming chunks for testing
 *
 * @example
 * const chunks = createStreamingChunks({
 *     text: "Hello world",
 *     chunkText: true,
 *     toolCalls: [{ id: "1", name: "search", input: { query: "test" } }]
 * });
 */
export declare function createStreamingChunks(config: {
    text?: string;
    toolCalls?: Array<{
        id: string;
        name: string;
        input: any;
    }>;
    chunkText?: boolean;
    usage?: {
        inputTokens: number;
        outputTokens: number;
    };
    stopReason?: "end_turn" | "tool_use" | "max_tokens";
}): LLMStreamChunk[];
|
|
@@ -0,0 +1,252 @@
|
|
|
1
|
+
// package/dist/ai/mockLLMAdapter.js
"use strict";
// TypeScript-emitted downlevel helper: drives a generator body expressed as
// a label/op state machine (the big switch below is tsc's compiled form of
// an async generator written with yield/await).
var __generator = (this && this.__generator) || function (thisArg, body) {
    var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === "function" ? Iterator : Object).prototype);
    return g.next = verb(0), g["throw"] = verb(1), g["return"] = verb(2), typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g;
    function verb(n) { return function (v) { return step([n, v]); }; }
    function step(op) {
        if (f) throw new TypeError("Generator is already executing.");
        while (g && (g = 0, op[0] && (_ = 0)), _) try {
            if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;
            if (y = 0, t) op = [op[0] & 2, t.value];
            switch (op[0]) {
                case 0: case 1: t = op; break;
                case 4: _.label++; return { value: op[1], done: false };
                case 5: _.label++; y = op[1]; op = [0]; continue;
                case 7: op = _.ops.pop(); _.trys.pop(); continue;
                default:
                    if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }
                    if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }
                    if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }
                    if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }
                    if (t[2]) _.ops.pop();
                    _.trys.pop(); continue;
            }
            op = body.call(thisArg, _);
        } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }
        if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };
    }
};
// TypeScript-emitted helper: wrapper marking a value to be awaited inside
// the async-generator driver below.
var __await = (this && this.__await) || function (v) { return this instanceof __await ? (this.v = v, this) : new __await(v); }
// TypeScript-emitted helper: adapts a __generator-driven state machine into
// a real async iterator (queues next/throw/return calls and settles them
// in order).
var __asyncGenerator = (this && this.__asyncGenerator) || function (thisArg, _arguments, generator) {
    if (!Symbol.asyncIterator) throw new TypeError("Symbol.asyncIterator is not defined.");
    var g = generator.apply(thisArg, _arguments || []), i, q = [];
    return i = Object.create((typeof AsyncIterator === "function" ? AsyncIterator : Object).prototype), verb("next"), verb("throw"), verb("return", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;
    function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }
    function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }
    function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }
    function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }
    function fulfill(value) { resume("next", value); }
    function reject(value) { resume("throw", value); }
    function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.mockLLMAdapter = mockLLMAdapter;
exports.createStreamingChunks = createStreamingChunks;
/**
 * Creates a mock LLM adapter for testing agents without API calls
 *
 * Features:
 * - Canned responses (single or multi-turn)
 * - Tool call simulation
 * - Error simulation
 * - Automatic invocation tracking
 * - Token usage simulation
 *
 * @example
 * // Simple response
 * const adapter = mockLLMAdapter({ response: "Hello!" });
 *
 * @example
 * // Multi-turn with tool calls
 * const adapter = mockLLMAdapter({
 *     responses: [
 *         {
 *             text: "Let me check",
 *             toolCalls: [{ id: "1", name: "get_weather", input: { city: "Stockholm" } }]
 *         },
 *         { text: "It's sunny and 22°C" }
 *     ]
 * });
 *
 * @example
 * // Error simulation
 * const adapter = mockLLMAdapter({
 *     error: new Error("Rate limit exceeded")
 * });
 */
function mockLLMAdapter(config) {
    if (config === void 0) { config = {}; }
    var invocations = [];
    var responseQueue = [];
    var currentResponseIndex = 0;
    // Initialize response queue
    if (config.response) {
        responseQueue = [{ text: config.response, stopReason: "end_turn" }];
    }
    else if (config.responses) {
        responseQueue = config.responses;
    }
    var defaultUsage = config.usage || { inputTokens: 10, outputTokens: 20 };
    // Compiled async generator. Source-level flow (reconstructed from the
    // state machine below): record the invocation; throw config.error if set;
    // pick the next queued response (repeating the last one once the queue is
    // exhausted); if that response carries explicit streamChunks, yield those
    // and stop; otherwise if config.defaultStreamChunks is set, yield those
    // and stop; otherwise yield a "text" chunk (if any), one "tool_call"
    // chunk per configured tool call, a "usage" chunk, and a final "done"
    // chunk whose stopReason defaults to "tool_use" when tool calls are
    // present and "end_turn" otherwise.
    var stream = function (params) {
        return __asyncGenerator(this, arguments, function () {
            var responseConfig, _i, _a, chunk, _b, _c, chunk, _d, _e, toolCall, stopReason;
            return __generator(this, function (_f) {
                switch (_f.label) {
                    case 0:
                        // Track invocation
                        invocations.push({
                            instructions: params.instructions,
                            messages: params.messages,
                            tools: params.tools,
                            maxTokens: params.maxTokens,
                            temperature: params.temperature,
                        });
                        // Simulate error if configured
                        if (config.error) {
                            throw config.error;
                        }
                        responseConfig = responseQueue[currentResponseIndex] || responseQueue[responseQueue.length - 1];
                        if (!responseConfig) {
                            throw new Error("No responses configured for mockLLMAdapter");
                        }
                        currentResponseIndex++;
                        if (!responseConfig.streamChunks) return [3 /*break*/, 7];
                        _i = 0, _a = responseConfig.streamChunks;
                        _f.label = 1;
                    case 1:
                        if (!(_i < _a.length)) return [3 /*break*/, 5];
                        chunk = _a[_i];
                        return [4 /*yield*/, __await(chunk)];
                    case 2: return [4 /*yield*/, _f.sent()];
                    case 3:
                        _f.sent();
                        _f.label = 4;
                    case 4:
                        _i++;
                        return [3 /*break*/, 1];
                    case 5: return [4 /*yield*/, __await(void 0)];
                    case 6: return [2 /*return*/, _f.sent()];
                    case 7:
                        if (!config.defaultStreamChunks) return [3 /*break*/, 14];
                        _b = 0, _c = config.defaultStreamChunks;
                        _f.label = 8;
                    case 8:
                        if (!(_b < _c.length)) return [3 /*break*/, 12];
                        chunk = _c[_b];
                        return [4 /*yield*/, __await(chunk)];
                    case 9: return [4 /*yield*/, _f.sent()];
                    case 10:
                        _f.sent();
                        _f.label = 11;
                    case 11:
                        _b++;
                        return [3 /*break*/, 8];
                    case 12: return [4 /*yield*/, __await(void 0)];
                    case 13: return [2 /*return*/, _f.sent()];
                    case 14:
                        if (!responseConfig.text) return [3 /*break*/, 17];
                        return [4 /*yield*/, __await({
                                type: "text",
                                delta: responseConfig.text
                            })];
                    case 15: return [4 /*yield*/, _f.sent()];
                    case 16:
                        _f.sent();
                        _f.label = 17;
                    case 17:
                        _d = 0, _e = responseConfig.toolCalls || [];
                        _f.label = 18;
                    case 18:
                        if (!(_d < _e.length)) return [3 /*break*/, 22];
                        toolCall = _e[_d];
                        return [4 /*yield*/, __await({ type: "tool_call", toolCall: toolCall })];
                    case 19: return [4 /*yield*/, _f.sent()];
                    case 20:
                        _f.sent();
                        _f.label = 21;
                    case 21:
                        _d++;
                        return [3 /*break*/, 18];
                    case 22: return [4 /*yield*/, __await({
                            type: "usage",
                            usage: defaultUsage,
                        })];
                    case 23: return [4 /*yield*/, _f.sent()];
                    case 24:
                        _f.sent();
                        stopReason = "end_turn";
                        if (responseConfig.stopReason) {
                            stopReason = responseConfig.stopReason;
                        }
                        else if (responseConfig.toolCalls && responseConfig.toolCalls.length > 0) {
                            stopReason = "tool_use";
                        }
                        return [4 /*yield*/, __await({
                                type: "done",
                                stopReason: stopReason,
                            })];
                    case 25: return [4 /*yield*/, _f.sent()];
                    case 26:
                        _f.sent();
                        return [2 /*return*/];
                }
            });
        });
    };
    // Clears recorded invocations and rewinds the scripted response queue.
    var reset = function () {
        invocations.length = 0;
        currentResponseIndex = 0;
    };
    var getLastInvocation = function () {
        return invocations[invocations.length - 1];
    };
    var getInvocationCount = function () {
        return invocations.length;
    };
    return {
        stream: stream,
        invocations: invocations,
        reset: reset,
        getLastInvocation: getLastInvocation,
        getInvocationCount: getInvocationCount,
    };
}
|
|
214
|
+
/**
 * Helper function to create streaming chunks for testing.
 *
 * Produces the chunk sequence a streaming LLM response would emit: zero or
 * more "text" deltas (the whole text at once, or word-by-word with a
 * trailing space when `chunkText` is set), one "tool_call" chunk per
 * configured tool call, a "usage" chunk (defaulting to 100 input / 50
 * output tokens), and a final "done" chunk whose stopReason defaults to
 * "tool_use" when tool calls are present and "end_turn" otherwise.
 *
 * @example
 * const chunks = createStreamingChunks({
 *     text: "Hello world",
 *     chunkText: true,
 *     toolCalls: [{ id: "1", name: "search", input: { query: "test" } }]
 * });
 */
function createStreamingChunks(config) {
    var chunks = [];
    if (config.text) {
        // Either split the text into word-sized deltas or emit it whole.
        var deltas = config.chunkText
            ? config.text.split(' ').map(function (word) { return word + ' '; })
            : [config.text];
        deltas.forEach(function (delta) {
            chunks.push({ type: "text", delta: delta });
        });
    }
    (config.toolCalls || []).forEach(function (toolCall) {
        chunks.push({ type: "tool_call", toolCall: toolCall });
    });
    chunks.push({
        type: "usage",
        usage: config.usage || { inputTokens: 100, outputTokens: 50 },
    });
    chunks.push({
        type: "done",
        stopReason: config.stopReason || (config.toolCalls && config.toolCalls.length > 0 ? "tool_use" : "end_turn"),
    });
    return chunks;
}
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
// package/dist/ai/mockTool.d.ts
import { z } from "zod";
import type { FlinkTool, FlinkToolProps } from "@flink-app/flink/ai";
// Configuration for a mock tool: either a canned `response`, a canned
// `error`, or a custom `fn` implementation.
export interface MockToolConfig<Input, Output> {
    name: string;
    description?: string;
    inputSchema: z.ZodType<Input>;
    outputSchema?: z.ZodType<Output>;
    response?: Output;
    error?: {
        error: string;
        code?: string;
    };
    fn?: FlinkTool<any, Input, Output>;
    permissions?: FlinkToolProps["permissions"];
}
// One recorded call to the mock tool.
export interface MockToolInvocation<Input> {
    input: Input;
    user?: any;
}
// The mock tool returned by mockTool: tool props + implementation plus
// invocation-tracking helpers for assertions in tests.
export interface MockToolResult<Input, Output> {
    props: FlinkToolProps;
    fn: FlinkTool<any, Input, Output>;
    invocations: MockToolInvocation<Input>[];
    getLastInvocation(): MockToolInvocation<Input> | undefined;
    reset(): void;
}
/**
 * Creates a mock tool with tracking and canned responses
 *
 * Features:
 * - Simple canned responses
 * - Error simulation
 * - Custom function support
 * - Automatic invocation tracking
 * - Validation helpers
 *
 * @example
 * // Simple canned response
 * const weatherTool = mockTool({
 *     name: "get_weather",
 *     inputSchema: z.object({ city: z.string() }),
 *     response: { temperature: 22, conditions: "sunny" }
 * });
 *
 * @example
 * // Custom function with tracking
 * const calculatorTool = mockTool({
 *     name: "calculate",
 *     inputSchema: z.object({ a: z.number(), b: z.number() }),
 *     fn: async ({ input }) => ({
 *         success: true,
 *         data: { result: input.a + input.b }
 *     })
 * });
 *
 * @example
 * // Error simulation
 * const failingTool = mockTool({
 *     name: "fail",
 *     inputSchema: z.object({}),
 *     error: { error: "Tool failed", code: "MOCK_ERROR" }
 * });
 */
export declare function mockTool<Input = any, Output = any>(config: MockToolConfig<Input, Output>): MockToolResult<Input, Output>;
|