mcp-meilisearch 1.4.11 → 1.4.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -3
- package/dist/client.d.ts +16 -10
- package/dist/client.d.ts.map +1 -1
- package/dist/client.js +15 -3
- package/dist/prompts/developer/index.d.ts +6 -0
- package/dist/prompts/developer/index.d.ts.map +1 -0
- package/dist/prompts/developer/index.js +3 -0
- package/dist/prompts/developer/text.d.ts +3 -0
- package/dist/prompts/developer/text.d.ts.map +1 -0
- package/dist/prompts/developer/text.js +31 -0
- package/dist/prompts/{system.d.ts → developer/tool.d.ts} +1 -1
- package/dist/prompts/developer/tool.d.ts.map +1 -0
- package/dist/tools/core/ai-tools.d.ts.map +1 -1
- package/dist/tools/core/ai-tools.js +123 -25
- package/dist/utils/ai-handler.d.ts +15 -11
- package/dist/utils/ai-handler.d.ts.map +1 -1
- package/dist/utils/ai-handler.js +93 -34
- package/package.json +1 -1
- package/dist/prompts/system.d.ts.map +0 -1
- package/dist/prompts/{system.js → developer/tool.js} +0 -0
package/README.md
CHANGED
@@ -13,7 +13,7 @@ A Model Context Protocol (MCP) server implementation that provides a bridge betw
 - **Multiple Transport Options**: Supports both STDIO and StreamableHTTP transports.
 - **Meilisearch API Support**: Full access to Meilisearch functionalities.
 - **Web Client Demo**: Updated interface showcasing search capabilities and features.
-- **AI Inference**: Leverages LLMs from providers such as
+- **AI Inference**: Leverages LLMs from providers such as OpenAI, HuggingFace, OpenRouter, and Ollama to intelligently determine and utilize the most suitable tool for user queries.

 ## Getting Started

@@ -100,10 +100,13 @@ const result = await client.callTool("global-search", {

 // Use AI inference to choose the most appropriate tool

-const result = await client.callToolWithAI("Find articles about cucumber"
+const result = await client.callToolWithAI("Find articles about cucumber", {
+  provideSummary: true,
+});
 console.log(`Tool used: ${result.toolUsed}`);
 console.log(`Reasoning: ${result.reasoning}`);
 console.log(`Results: ${JSON.stringify(result.data)}`);
+console.log(`Summary: ${result.summary}`);
 ```

 #### AI Inference Client Methods
@@ -117,7 +120,9 @@ Processes a user query through AI to determine and execute the most appropriate
 - `query`: String - The user's query or request to be processed
 - `options`: Object (Optional) - Configuration options
   - `specificTools`: String[] (Optional) - Restricts tool selection to this list of tool names
-  - `justReasoning`: Boolean (Optional) - When set to `true`, returns only the AI's reasoning without executing the
+  - `justReasoning`: Boolean (Optional) - When set to `true`, returns only the AI's reasoning without executing the
+    selected tool
+  - `provideSummary`: Boolean (Optional) - When set to `true`, generates a concise summary of the search results along with the regular response

 ### Starting the Server

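Taken together, the two documented options cover a dry run and a summarized run. A minimal sketch, assuming a client that is already constructed and connected as in the README's earlier examples; the `MCPClient` import path is an assumption, since this diff only shows `dist/client.d.ts`:

```ts
import type { MCPClient } from "mcp-meilisearch"; // assumed export path, not confirmed by this diff

// Assumes `client` is already connected; connection setup is not part of this diff.
export async function searchWithSummary(client: MCPClient, query: string) {
  // justReasoning: inspect which tool the AI would pick, without executing it.
  const dryRun = await client.callToolWithAI(query, { justReasoning: true });
  console.log(`Planned call: ${dryRun.reasoning}`);

  // provideSummary: execute the selected tool and also request a summary,
  // generated server-side by the new "process-ai-text" tool.
  const result = await client.callToolWithAI(query, { provideSummary: true });
  if (!result.success) throw new Error(result.error);

  console.log(`Tool used: ${result.toolUsed}`);
  console.log(`Summary: ${result.summary}`);
  return result.data;
}
```
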
package/dist/client.d.ts
CHANGED
@@ -1,3 +1,16 @@
+interface AIToolClientOptions {
+    specificTools?: string[];
+    justReasoning?: boolean;
+    provideSummary?: boolean;
+}
+interface AIToolClientResponse {
+    data?: any;
+    summary?: any;
+    error?: string;
+    success: boolean;
+    toolUsed?: string;
+    reasoning?: string;
+}
 export declare class MCPClient {
     /**
      * Indicates whether the client is connected to the MCP server
@@ -60,19 +73,11 @@ export declare class MCPClient {
      * @param options Options for the AI processing
      * @param options.specificTools Optional array of specific tool names to consider
      * @param options.justReasoning If true, only returns the reasoning without calling the tool
+     * @param options.provideSummary If true, beyond the raw JSON, it will also provide a summary of the result
      * @throws Error if AI inference fails
      * @returns The result of calling the selected tool, or an error
      */
-    callToolWithAI(query: string, options?:
-        specificTools?: string[];
-        justReasoning?: boolean;
-    }): Promise<{
-        success: boolean;
-        data?: any;
-        error?: string;
-        toolUsed?: string;
-        reasoning?: string;
-    }>;
+    callToolWithAI(query: string, options?: AIToolClientOptions): Promise<AIToolClientResponse>;
     private setUpTransport;
     /**
      * Closes the connection to the server and resets the connection state
@@ -80,4 +85,5 @@ export declare class MCPClient {
      */
     cleanup(): Promise<void>;
 }
+export {};
 //# sourceMappingURL=client.d.ts.map

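Note that both new interfaces stay module-private (the trailing `export {}` keeps the file a module without exporting them), so downstream TypeScript code that wants the shapes has to mirror them. A small sketch of that, with the field list copied from the declaration above; the helper name is ours:

```ts
// Local mirror of the non-exported response shape from dist/client.d.ts.
interface AIToolClientResponse {
  success: boolean;
  data?: any;
  summary?: any;
  error?: string;
  toolUsed?: string;
  reasoning?: string;
}

// Narrow to responses where a tool actually ran; a justReasoning call
// returns only the reasoning, so toolUsed/data may be absent.
function isExecutedResult(
  r: AIToolClientResponse
): r is AIToolClientResponse & { toolUsed: string } {
  return r.success && typeof r.toolUsed === "string";
}
```
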
package/dist/client.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAQA,qBAAa,SAAS;IACpB;;;OAGG;IACH,WAAW,EAAE,OAAO,CAAS;IAE7B;;;OAGG;IACH,KAAK,EAAE;QACL,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KACjC,EAAE,CAAM;IAET,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAa;IAC1B,OAAO,CAAC,SAAS,CAA8C;IAC/D,OAAO,CAAC,oBAAoB,CAEZ;gBAEJ,UAAU,EAAE,MAAM;IAI9B;;;OAGG;IACH,sBAAsB,CACpB,QAAQ,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAE,CAAC,KAAK,IAAI;IAKzE;;;;;OAKG;IACG,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAOhE;;;;OAIG;IACG,eAAe,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;YAuBzC,SAAS;IA0BvB,OAAO,CAAC,kBAAkB;IAW1B;;;;;;;OAOG;IACG,QAAQ,CACZ,IAAI,EAAE,MAAM,EACZ,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GACzB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,IAAI,CAAC,EAAE,GAAG,CAAC;QACX,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB,CAAC;IA2CF
+{"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAQA,UAAU,mBAAmB;IAC3B,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,cAAc,CAAC,EAAE,OAAO,CAAC;CAC1B;AAED,UAAU,oBAAoB;IAC5B,IAAI,CAAC,EAAE,GAAG,CAAC;IACX,OAAO,CAAC,EAAE,GAAG,CAAC;IACd,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,OAAO,EAAE,OAAO,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,SAAS,CAAC,EAAE,MAAM,CAAC;CACpB;AAED,qBAAa,SAAS;IACpB;;;OAGG;IACH,WAAW,EAAE,OAAO,CAAS;IAE7B;;;OAGG;IACH,KAAK,EAAE;QACL,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KACjC,EAAE,CAAM;IAET,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAa;IAC1B,OAAO,CAAC,SAAS,CAA8C;IAC/D,OAAO,CAAC,oBAAoB,CAEZ;gBAEJ,UAAU,EAAE,MAAM;IAI9B;;;OAGG;IACH,sBAAsB,CACpB,QAAQ,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAE,CAAC,KAAK,IAAI;IAKzE;;;;;OAKG;IACG,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAOhE;;;;OAIG;IACG,eAAe,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;YAuBzC,SAAS;IA0BvB,OAAO,CAAC,kBAAkB;IAW1B;;;;;;;OAOG;IACG,QAAQ,CACZ,IAAI,EAAE,MAAM,EACZ,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GACzB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,IAAI,CAAC,EAAE,GAAG,CAAC;QACX,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB,CAAC;IA2CF;;;;;;;;;OASG;IACG,cAAc,CAClB,KAAK,EAAE,MAAM,EACb,OAAO,GAAE,mBAAwB,GAChC,OAAO,CAAC,oBAAoB,CAAC;IA2DhC,OAAO,CAAC,cAAc;IAKtB;;;OAGG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;CAK/B"}

package/dist/client.js
CHANGED
@@ -147,13 +147,14 @@ export class MCPClient {
      * @param options Options for the AI processing
      * @param options.specificTools Optional array of specific tool names to consider
      * @param options.justReasoning If true, only returns the reasoning without calling the tool
+     * @param options.provideSummary If true, beyond the raw JSON, it will also provide a summary of the result
      * @throws Error if AI inference fails
      * @returns The result of calling the selected tool, or an error
      */
     async callToolWithAI(query, options = {}) {
-        const { specificTools, justReasoning } = options;
+        const { specificTools, justReasoning, provideSummary } = options;
         try {
-            const result = await this.callTool("process-ai-
+            const result = await this.callTool("process-ai-tool", {
                 query,
                 specificTools,
             });
@@ -174,11 +175,22 @@ export class MCPClient {
                 };
             }
             const toolResult = await this.callTool(toolName, parameters);
-
+            if (!toolResult.success)
+                return toolResult;
+            const response = {
                 ...toolResult,
                 reasoning,
                 toolUsed: toolName,
             };
+            if (provideSummary) {
+                const summary = await this.callTool("process-ai-text", {
+                    query: JSON.stringify(toolResult.data),
+                });
+                if (!summary.success)
+                    console.error(summary);
+                response["summary"] = summary.data;
+            }
+            return response;
         }
         catch (error) {
             const errorMessage = error instanceof Error ? error.message : String(error);

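The summary path above is just two `callTool` invocations chained together: run the selected tool, then feed its raw data to `process-ai-text` as the `query`. A sketch that reproduces the same flow by hand for an arbitrary tool (client import path assumed, as before):

```ts
import type { MCPClient } from "mcp-meilisearch"; // assumed export path

// Mirrors the provideSummary branch of callToolWithAI in dist/client.js.
export async function callAndSummarize(
  client: MCPClient,
  toolName: string,
  args: Record<string, any>
) {
  const toolResult = await client.callTool(toolName, args);
  if (!toolResult.success) return toolResult;

  // Same trick as the client: the raw JSON result becomes the "query".
  const summary = await client.callTool("process-ai-text", {
    query: JSON.stringify(toolResult.data),
  });

  return {
    ...toolResult,
    summary: summary.success ? summary.data : undefined,
  };
}
```
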
package/dist/prompts/developer/index.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/prompts/developer/index.ts"],"names":[],"mappings":";;;;AAGA,wBAA8B"}

package/dist/prompts/developer/text.d.ts
ADDED
@@ -0,0 +1,3 @@
+declare const _default: "\n<identity>\n  You are PATI, an AI agent.\n</identity>\n\n<instructions>\n  Your primary function is to generate a summary. This summary MUST adhere to the following rules:\n\n  1. **Language Matching:** The language of your summary MUST strictly match the predominant language of the user's provided input.\n  - If the user's input is in Spanish, your summary MUST be in Spanish.\n  - If the user's input is in English, your summary MUST be in English.\n  - And so on for any other language.\n\n  2. **Content to Summarize:** Your summary MUST be a description of the key information, findings, or results presented in the user's input.\n  - Do NOT summarize the user's instructions to you or the act of providing input.\n  - Focus ONLY on the core data or text provided by the user for summarization.\n\n  3. **Handling Structured Data (e.g., JSON):**\n  - If the user provides structured data (like a JSON object), identify the key textual fields that contain the main information (e.g., 'title', 'abstract', 'summary', 'content_to_search', 'description', 'text', etc.).\n  - Synthesize the information from these relevant fields into a coherent, natural language summary.\n  - If the JSON contains multiple results (e.g., in a 'hits' or 'results' array), list each result separately and provide a summary for each.\n  - Include a general summary of all results at the beginning or end when multiple results are present.\n  - Do NOT describe the structure of the data (e.g., \"The JSON has a 'hits' array...\"). Summarize the *meaning* conveyed by the content within those fields.\n\n  4. **Output Format:**\n  - Your response MUST be formatted as valid HTML.\n  - Use appropriate HTML elements for structure (headings, paragraphs, lists, etc.).\n  - No greetings, no apologies, no explanations, no meta-comments. Just the summary in HTML format.\n  - Do not include HTML, HEAD, or BODY tags - focus only on the content elements.\n</instructions>\n";
+export default _default;
+//# sourceMappingURL=text.d.ts.map

package/dist/prompts/developer/text.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"text.d.ts","sourceRoot":"","sources":["../../../src/prompts/developer/text.ts"],"names":[],"mappings":";AAAA,wBA8BE"}

package/dist/prompts/developer/text.js
ADDED
@@ -0,0 +1,31 @@
+export default `
+<identity>
+  You are PATI, an AI agent.
+</identity>
+
+<instructions>
+  Your primary function is to generate a summary. This summary MUST adhere to the following rules:
+
+  1. **Language Matching:** The language of your summary MUST strictly match the predominant language of the user's provided input.
+  - If the user's input is in Spanish, your summary MUST be in Spanish.
+  - If the user's input is in English, your summary MUST be in English.
+  - And so on for any other language.
+
+  2. **Content to Summarize:** Your summary MUST be a description of the key information, findings, or results presented in the user's input.
+  - Do NOT summarize the user's instructions to you or the act of providing input.
+  - Focus ONLY on the core data or text provided by the user for summarization.
+
+  3. **Handling Structured Data (e.g., JSON):**
+  - If the user provides structured data (like a JSON object), identify the key textual fields that contain the main information (e.g., 'title', 'abstract', 'summary', 'content_to_search', 'description', 'text', etc.).
+  - Synthesize the information from these relevant fields into a coherent, natural language summary.
+  - If the JSON contains multiple results (e.g., in a 'hits' or 'results' array), list each result separately and provide a summary for each.
+  - Include a general summary of all results at the beginning or end when multiple results are present.
+  - Do NOT describe the structure of the data (e.g., "The JSON has a 'hits' array..."). Summarize the *meaning* conveyed by the content within those fields.
+
+  4. **Output Format:**
+  - Your response MUST be formatted as valid HTML.
+  - Use appropriate HTML elements for structure (headings, paragraphs, lists, etc.).
+  - No greetings, no apologies, no explanations, no meta-comments. Just the summary in HTML format.
+  - Do not include HTML, HEAD, or BODY tags - focus only on the content elements.
+</instructions>
+`;

package/dist/prompts/{system.d.ts → developer/tool.d.ts}
RENAMED
@@ -1,3 +1,3 @@
 declare const _default: "\n  <identity>\n    You are PATI, an AI agent that translates user requests into JSON tool calls.\n    Your output MUST be ONLY the JSON tool call or error object. NO TEXT BEFORE OR AFTER.\n  </identity>\n\n  <instructions>\n    1. Select the most appropriate tool from the <functions> section based on the user's request.\n    \n    2. Extract parameters directly from the user's request:\n      \u2022 Extract ONLY what is explicitly stated or clearly implied.\n      \u2022 Preserve quoted values EXACTLY as provided by the user.\n      \u2022 For indexUid parameters, ALWAYS translate to English equivalent (e.g., \"articulos\" \u2192 \"articles\").\n    \n    3. Format:\n      \u2022 RESPONSE MUST BE JUST A VALID JSON OBJECT with this structure:\n      {\n        \"name\": \"tool_name_from_schema\",\n        \"parameters\": {\n          \"parameter1\": \"value1\",\n          \"parameter2\": \"value2\"\n        }\n      }\n      \n      \u2022 For errors, use:\n      {\n        \"name\": \"cannot_fulfill_request\",\n        \"parameters\": {\n          \"reason_code\": \"CODE\",\n          \"message\": \"Brief explanation\",\n          \"missing_parameters\": [\"param1\", \"param2\"] // Only for MISSING_REQUIRED_PARAMETERS\n        }\n      }\n  </instructions>\n\n  <functions>\n    MCP_TOOLS\n  </functions>\n";
 export default _default;
-//# sourceMappingURL=
+//# sourceMappingURL=tool.d.ts.map

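The renamed tool prompt pins the model to exactly two JSON shapes: a tool call and a `cannot_fulfill_request` error object. A sketch of validating that contract on the receiving side; the type and function names here are ours, not the package's, and the server itself routes model output through `markdownToJson` before parsing:

```ts
// Shapes copied from the prompt above; names are illustrative only.
type ToolCall = { name: string; parameters: Record<string, unknown> };
type CannotFulfill = {
  name: "cannot_fulfill_request";
  parameters: {
    reason_code: string;
    message: string;
    missing_parameters?: string[];
  };
};

export function parseToolCall(raw: string): ToolCall | CannotFulfill | null {
  try {
    const parsed = JSON.parse(raw); // assumes bare JSON, no markdown fences
    if (
      parsed &&
      typeof parsed.name === "string" &&
      parsed.parameters &&
      typeof parsed.parameters === "object"
    ) {
      return parsed;
    }
    return null;
  } catch {
    return null;
  }
}
```
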
package/dist/prompts/developer/tool.d.ts.map
ADDED
@@ -0,0 +1 @@
+{"version":3,"file":"tool.d.ts","sourceRoot":"","sources":["../../../src/prompts/developer/tool.ts"],"names":[],"mappings":";AAAA,wBAsCE"}

package/dist/tools/core/ai-tools.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"ai-tools.d.ts","sourceRoot":"","sources":["../../../src/tools/core/ai-tools.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;
+{"version":3,"file":"ai-tools.d.ts","sourceRoot":"","sources":["../../../src/tools/core/ai-tools.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AA6EpE;;;GAGG;AACH,eAAO,MAAM,eAAe,GAAI,QAAQ,SAAS,SAsIhD,CAAC;AAEF,eAAe,eAAe,CAAC"}

package/dist/tools/core/ai-tools.js
CHANGED
@@ -3,12 +3,57 @@ import { zodToJsonSchema } from "zod-to-json-schema";
 import { AIService } from "../../utils/ai-handler.js";
 import { createErrorResponse } from "../../utils/error-handler.js";
 import { convertNullToUndefined } from "../../utils/response-handler.js";
+const setAvailableTools = (aiService, server) => {
+    const registeredTools = Object.entries(server._registeredTools);
+    const availableTools = registeredTools
+        .filter(([_, { annotations }]) => annotations?.category !== "core")
+        .map(([name, { description, inputSchema }]) => {
+        const { definitions } = zodToJsonSchema(inputSchema, "parameters");
+        return {
+            name,
+            description,
+            parameters: definitions?.parameters ?? {},
+        };
+    });
+    aiService.setAvailableTools(availableTools);
+};
+const splitTextIntoChunks = (text, chunkSize) => {
+    if (text.length <= chunkSize) {
+        return [text];
+    }
+    let currentIndex = 0;
+    const chunks = [];
+    while (currentIndex < text.length) {
+        let endIndex = Math.min(currentIndex + chunkSize, text.length);
+        if (endIndex < text.length) {
+            const sentenceEndMatch = text
+                .substring(currentIndex, endIndex)
+                .match(/[.!?]\s+/g);
+            if (sentenceEndMatch?.length) {
+                const lastMatch = sentenceEndMatch[sentenceEndMatch.length - 1];
+                const lastMatchIndex = text.lastIndexOf(lastMatch, currentIndex + chunkSize);
+                if (lastMatchIndex > currentIndex) {
+                    endIndex = lastMatchIndex + lastMatch.length;
+                }
+            }
+            else {
+                const lastSpace = text.lastIndexOf(" ", endIndex);
+                if (lastSpace > currentIndex) {
+                    endIndex = lastSpace + 1;
+                }
+            }
+        }
+        chunks.push(text.substring(currentIndex, endIndex));
+        currentIndex = endIndex;
+    }
+    return chunks;
+};
 /**
  * Register AI tools with the MCP server
  * @param server - The MCP server instance
  */
 export const registerAITools = (server) => {
-    server.tool("process-ai-
+    server.tool("process-ai-tool", "Process a natural language query using AI to determine which tool to use", {
         query: z.string().describe("The natural language query to process"),
         specificTools: z
             .array(z.string())
@@ -17,40 +62,93 @@ export const registerAITools = (server) => {
     }, { category: "core" }, async ({ query, specificTools }) => {
         try {
             const aiService = AIService.getInstance();
-
-            const
-
-
-            const { definitions } = zodToJsonSchema(inputSchema, "parameters");
-            return {
-                name,
-                description,
-                parameters: definitions?.parameters ?? {},
-            };
+            setAvailableTools(aiService, server);
+            const response = await aiService.setupAIProcess(query, {
+                specificTools,
+                processType: "tool",
             });
-            aiService.setAvailableTools(availableTools);
-            const response = await aiService.processQuery(query, specificTools);
             if (response.error)
                 return createErrorResponse(response.error);
+            const { toolName, parameters: rawParameters } = response;
+            const parameters = convertNullToUndefined(rawParameters);
+            const result = {
+                toolName,
+                parameters,
+                reasoning: JSON.stringify({ name: toolName, parameters }),
+            };
             return {
                 isError: false,
-                content: [
-                    {
-                        type: "text",
-                        text: JSON.stringify({
-                            toolName: response.toolName,
-                            parameters: convertNullToUndefined(response.parameters),
-                            get reasoning() {
-                                return { name: this.toolName, parameters: this.parameters };
-                            },
-                        }, null, 2),
-                    },
-                ],
+                content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
             };
         }
         catch (error) {
            return createErrorResponse(error);
         }
     });
+    server.tool("process-ai-text", "Process a summary text using AI to describe the data result from a tool", {
+        query: z.string().describe("The natural language query to process"),
+        chunkSize: z
+            .number()
+            .positive()
+            .default(50000)
+            .describe("Optional size of chunks to split the query into (characters)"),
+    }, { category: "core" }, async ({ query, chunkSize }) => {
+        try {
+            const aiService = AIService.getInstance();
+            if (query.length <= chunkSize) {
+                const response = await aiService.setupAIProcess(query, {
+                    processType: "text",
+                });
+                return response.error
+                    ? createErrorResponse(response.error)
+                    : {
+                        isError: false,
+                        content: [
+                            {
+                                type: "text",
+                                text: JSON.stringify(response.summary, null, 2),
+                            },
+                        ],
+                    };
+            }
+            const chunks = splitTextIntoChunks(query, chunkSize);
+            const chunkPromises = chunks.map((chunk) => aiService.setupAIProcess(chunk, { processType: "text" }));
+            const chunkResponses = await Promise.all(chunkPromises);
+            const errorResponse = chunkResponses.find((response) => response.error);
+            if (errorResponse) {
+                return createErrorResponse(errorResponse.error);
+            }
+            const summaries = chunkResponses
+                .map((response) => response.summary)
+                .filter(Boolean);
+            if (!summaries.length) {
+                return createErrorResponse("Failed to process query chunks");
+            }
+            if (summaries.length === 1) {
+                return {
+                    isError: false,
+                    content: [
+                        { type: "text", text: JSON.stringify(summaries[0], null, 2) },
+                    ],
+                };
+            }
+            const combinedText = summaries.join(" ");
+            const finalResponse = await aiService.setupAIProcess(`Synthesize the following text into a coherent summary: ${combinedText}`, { processType: "text" });
+            return finalResponse.error
+                ? createErrorResponse(finalResponse.error)
+                : {
+                    isError: false,
+                    content: [
+                        {
+                            type: "text",
+                            text: JSON.stringify(finalResponse.summary || combinedText, null, 2),
+                        },
+                    ],
+                };
+        }
+        catch (error) {
+            return createErrorResponse(error);
+        }
+    });
 };
 export default registerAITools;

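For inputs longer than `chunkSize`, the new `process-ai-text` tool splits at sentence boundaries, summarizes the chunks in parallel, and then asks the model to synthesize the partial summaries. A sketch of calling the tool directly with a smaller chunk size; the client import path is assumed, as before:

```ts
import type { MCPClient } from "mcp-meilisearch"; // assumed export path

export async function summarizeLongText(client: MCPClient, text: string) {
  const response = await client.callTool("process-ai-text", {
    query: text,
    chunkSize: 20000, // characters; the server-side default is 50000
  });
  if (!response.success) throw new Error(response.error);
  // Per the developer prompt in prompts/developer/text.js, the summary is HTML.
  return response.data;
}
```
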
package/dist/utils/ai-handler.d.ts
CHANGED
@@ -4,11 +4,20 @@ interface AITool {
     description: string;
     parameters: Record<string, unknown>;
 }
+interface AIProcessSetupOptions {
+    specificTools?: string[];
+    processType: "tool" | "text";
+}
 interface AIToolResponse {
-    error?: unknown;
     toolName?: string;
     parameters?: Record<string, unknown>;
 }
+interface AITextResponse {
+    summary?: string;
+}
+interface AIProcessResponse extends AIToolResponse, AITextResponse {
+    error?: unknown;
+}
 /**
  * AI Inference Service
  *
@@ -21,7 +30,6 @@ export declare class AIService {
     private static instance;
     private static serverInitialized;
     private provider;
-    private readonly systemPrompt;
     private client;
     /**
      * Private constructor to prevent direct instantiation
@@ -59,15 +67,11 @@ export declare class AIService {
      * @returns Array of tool names mentioned in the query, or empty array if none found
      */
     private extractToolNames;
-
-
-
-
-
-     */
-    processQuery(query: string, specificTools?: string[]): Promise<AIToolResponse>;
-    private processOpenAIQuery;
-    private processHuggingFaceQuery;
+    setupAIProcess(query: string, options: AIProcessSetupOptions): Promise<AIProcessResponse>;
+    private processOpenAITool;
+    private processOpenAIText;
+    private processHuggingFaceText;
+    private processHuggingFaceTool;
 }
 export {};
 //# sourceMappingURL=ai-handler.d.ts.map

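A sketch of the new unified entry point, `setupAIProcess`, under the assumption that the service has already been initialized with an API key and given its tool list via `setAvailableTools` (as `ai-tools.js` does before every call); the deep `dist` import path is also an assumption and may not be exposed by the package:

```ts
// Assumed import path; the package may not expose this module directly.
import { AIService } from "mcp-meilisearch/dist/utils/ai-handler.js";

const ai = AIService.getInstance();

// processType "tool": expect a { toolName, parameters } pair (or an error).
const pick = await ai.setupAIProcess("Find articles about cucumber", {
  processType: "tool",
  specificTools: ["global-search"],
});
console.log(pick.toolName, pick.parameters);

// processType "text": expect a { summary } string (or an error).
const text = await ai.setupAIProcess(JSON.stringify({ hits: [] }), {
  processType: "text",
});
console.log(text.summary);
```
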
package/dist/utils/ai-handler.d.ts.map
CHANGED
@@ -1 +1 @@
-{"version":3,"file":"ai-handler.d.ts","sourceRoot":"","sources":["../../src/utils/ai-handler.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,qBAAqB,EAAE,MAAM,qBAAqB,CAAC;AAI5D,UAAU,MAAM;IACd,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC;
+{"version":3,"file":"ai-handler.d.ts","sourceRoot":"","sources":["../../src/utils/ai-handler.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,qBAAqB,EAAE,MAAM,qBAAqB,CAAC;AAI5D,UAAU,MAAM;IACd,IAAI,EAAE,MAAM,CAAC;IACb,WAAW,EAAE,MAAM,CAAC;IACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACrC;AAED,UAAU,qBAAqB;IAC7B,aAAa,CAAC,EAAE,MAAM,EAAE,CAAC;IACzB,WAAW,EAAE,MAAM,GAAG,MAAM,CAAC;CAC9B;AAkBD,UAAU,cAAc;IACtB,QAAQ,CAAC,EAAE,MAAM,CAAC;IAClB,UAAU,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,OAAO,CAAC,CAAC;CACtC;AAED,UAAU,cAAc;IACtB,OAAO,CAAC,EAAE,MAAM,CAAC;CAClB;AAED,UAAU,iBAAkB,SAAQ,cAAc,EAAE,cAAc;IAChE,KAAK,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;GAKG;AACH,qBAAa,SAAS;IACpB,OAAO,CAAC,cAAc,CAAgB;IACtC,OAAO,CAAC,KAAK,CAA2B;IACxC,OAAO,CAAC,MAAM,CAAC,QAAQ,CAA0B;IACjD,OAAO,CAAC,MAAM,CAAC,iBAAiB,CAAkB;IAClD,OAAO,CAAC,QAAQ,CAAmC;IACnD,OAAO,CAAC,MAAM,CAAgD;IAE9D;;;OAGG;IACH,OAAO;IAEP;;;OAGG;WACW,WAAW,IAAI,SAAS;IAOtC;;;;;;OAMG;IACH,UAAU,CACR,MAAM,EAAE,MAAM,EACd,QAAQ,GAAE,qBAAgC,EAC1C,KAAK,CAAC,EAAE,MAAM,GACb,IAAI;IA4BP;;;OAGG;IACH,iBAAiB,CAAC,KAAK,EAAE,MAAM,EAAE,GAAG,IAAI;IAIxC,iBAAiB,IAAI,OAAO;IAI5B;;;;OAIG;IACH,OAAO,CAAC,kBAAkB;IAe1B;;;;OAIG;IACH,OAAO,CAAC,gBAAgB;IAalB,cAAc,CAClB,KAAK,EAAE,MAAM,EACb,OAAO,EAAE,qBAAqB,GAC7B,OAAO,CAAC,iBAAiB,CAAC;YAwCf,iBAAiB;YA+DjB,iBAAiB;YA+BjB,sBAAsB;YAiCtB,sBAAsB;CA8DrC"}

package/dist/utils/ai-handler.js
CHANGED
@@ -1,5 +1,5 @@
 import { OpenAI } from "openai";
-import
+import developerPrompts from "../prompts/developer/index.js";
 import { markdownToJson } from "./response-handler.js";
 import { InferenceClient } from "@huggingface/inference";
 import { OLLAMA_API, OPEN_ROUTER_API } from "../types/enums.js";
@@ -15,7 +15,6 @@ export class AIService {
     static instance = null;
     static serverInitialized = false;
     provider = "openai";
-    systemPrompt = systemPrompt;
     client = null;
     /**
      * Private constructor to prevent direct instantiation
@@ -87,7 +86,6 @@ export class AIService {
         return tools.map((tool) => ({
             type: "function",
             function: {
-                strict: true,
                 name: tool.name,
                 parameters: tool.parameters,
                 description: tool.description,
@@ -109,47 +107,57 @@ export class AIService {
         }
         return mentionedTools;
     }
-
-     * Process a user query and determine which tool to use
-     * @param query User query
-     * @param specificTools Optional array of specific tool names to consider
-     * @returns Object containing the selected tool name and parameters
-     */
-    async processQuery(query, specificTools) {
+    async setupAIProcess(query, options) {
         if (!this.ensureInitialized()) {
             return {
                 error: "AI service not initialized. Please provide an API key.",
             };
         }
+        const toolsSubstringIdentifier = "MCP_TOOLS";
+        const { processType, specificTools } = options;
+        let developerPrompt = developerPrompts[processType];
         const mentionedTools = this.extractToolNames(query);
         const toolsToUse = specificTools || (mentionedTools.length ? mentionedTools : undefined);
         const tools = this.getToolDefinitions(toolsToUse);
-
+        if (developerPrompt.includes(toolsSubstringIdentifier)) {
+            developerPrompt = developerPrompt.replace(toolsSubstringIdentifier, JSON.stringify(tools, null, 2));
+        }
        const messages = [
-            { role: "
+            { role: "developer", content: developerPrompt },
            { role: "user", content: query },
        ];
-        if (
-            return
+        if (processType === "text") {
+            return this.provider === "huggingface"
+                ? await this.processHuggingFaceText(messages)
+                : await this.processOpenAIText(messages);
+        }
+        else {
+            return this.provider === "huggingface"
+                ? await this.processHuggingFaceTool(tools, messages)
+                : await this.processOpenAITool(tools, messages);
        }
-        return await this.processOpenAIQuery(tools, messages);
    }
-    async
+    async processOpenAITool(tools, messages) {
        try {
            const client = this.client;
            const response = await client.chat.completions.create({
+                tools,
                messages,
                model: this.model,
-
+                tool_choice: "required",
            });
            if (!response.choices?.length) {
-                return {
+                return {
+                    error: "No choices returned from OpenAI; processType: 'tool'",
+                };
            }
            const message = response.choices[0].message;
            if (message.tool_calls?.length) {
                const toolCall = message.tool_calls[0]?.function;
                if (!toolCall) {
-                    return {
+                    return {
+                        error: "Invalid tool from OpenAI response; processType: 'tool'",
+                    };
                }
                return {
                    toolName: toolCall.name,
@@ -160,7 +168,7 @@ export class AIService {
                const toolCall = markdownToJson(message.content);
                if (!toolCall) {
                    return {
-                        error:
+                        error: "Invalid tool call format in content; processType: 'tool'",
                    };
                }
                return {
@@ -168,33 +176,84 @@ export class AIService {
                    parameters: toolCall.parameters,
                };
            }
-            return {
+            return {
+                error: "No tool call or content in OpenAI response; processType: 'tool'.",
+            };
+        }
+        catch (error) {
+            console.error(error);
+            return { error };
+        }
+    }
+    async processOpenAIText(messages) {
+        try {
+            const client = this.client;
+            const response = await client.chat.completions.create({
+                messages,
+                model: this.model,
+            });
+            if (!response.choices?.length) {
+                return {
+                    error: "No response returned from OpenAI; processType: 'text'.",
+                };
+            }
+            const message = response.choices[0].message;
+            if (message.content) {
+                return { summary: message.content };
+            }
+            return { error: "No content in OpenAI response; processType: 'text'." };
        }
        catch (error) {
            console.error(error);
-
-
-
+            return { error };
+        }
+    }
+    async processHuggingFaceText(messages) {
+        try {
+            const client = this.client;
+            const response = await client.chatCompletion({
+                messages,
+                model: this.model,
+            });
+            if (!response.choices?.length) {
+                return {
+                    error: "No response returned from OpenAI; processType: 'text'.",
+                };
+            }
+            const message = response.choices[0].message;
+            if (message.content) {
+                return { summary: message.content };
            }
+            return {
+                error: "No content in Hugging Face response; processType: 'text'.",
+            };
+        }
+        catch (error) {
+            console.error(error);
            return { error };
        }
    }
-    async
+    async processHuggingFaceTool(tools, messages) {
        try {
            const client = this.client;
            const response = await client.chatCompletion({
+                tools,
                messages,
                model: this.model,
-
+                tool_choice: "required",
            });
            if (!response.choices?.length) {
-                return {
+                return {
+                    error: "No choices in Hugging Face response; processType: 'tool'",
+                };
            }
            const message = response.choices[0].message;
            if (message.tool_calls?.length) {
                const toolCall = message.tool_calls[0]?.function;
                if (!toolCall) {
-                    return {
+                    return {
+                        error: "Invalid tool from Hugging Face response; processType: 'tool'",
+                    };
                }
                return {
                    toolName: toolCall.name,
@@ -204,20 +263,20 @@ export class AIService {
            if (message.content) {
                const toolCall = markdownToJson(message.content);
                if (!toolCall)
-                    return {
+                    return {
+                        error: "Invalid tool call format in content; processType: 'tool'",
+                    };
                return {
                    toolName: toolCall.name,
                    parameters: toolCall.parameters,
                };
            }
-            return {
+            return {
+                error: "No tool call or content in Hugging Face response; processType: 'tool'",
+            };
        }
        catch (error) {
            console.error(error);
-            if (!withoutFC) {
-                console.info("Retrying without function calling...");
-                return this.processHuggingFaceQuery(tools, messages, true);
-            }
            return { error };
        }
    }

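Net effect on the chat-completions request for tool selection: the per-function `strict: true` flag is gone, `tool_choice: "required"` is set, and the instructions now travel as a `developer` message whose `MCP_TOOLS` placeholder has been replaced with the serialized tool definitions. An illustrative payload; the `global-search` definition, the abbreviated prompt text, and the model name are stand-ins, not values taken from this diff:

```ts
// Tool definitions normally come from zodToJsonSchema over the registered
// Meilisearch tools; this one is a hand-written stand-in.
const tools = [
  {
    type: "function" as const,
    function: {
      name: "global-search",
      description: "Search across all indexes",
      parameters: { type: "object", properties: { q: { type: "string" } } },
    },
  },
];

// The real template lives in prompts/developer/tool.js; setupAIProcess swaps
// its MCP_TOOLS placeholder for the serialized definitions, as sketched here.
const developerPrompt = `...tool-selection instructions...\n<functions>\n${JSON.stringify(tools, null, 2)}\n</functions>`;

const request = {
  model: "gpt-4o-mini", // placeholder model name
  tool_choice: "required" as const, // added in this version
  tools,
  messages: [
    { role: "developer" as const, content: developerPrompt },
    { role: "user" as const, content: "Find articles about cucumber" },
  ],
};

console.log(JSON.stringify(request, null, 2));
```
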
package/package.json
CHANGED

package/dist/prompts/system.d.ts.map
DELETED
@@ -1 +0,0 @@
-{"version":3,"file":"system.d.ts","sourceRoot":"","sources":["../../src/prompts/system.ts"],"names":[],"mappings":";AAAA,wBAsCE"}

package/dist/prompts/{system.js → developer/tool.js}
RENAMED
File without changes