mcp-meilisearch 1.2.8 → 1.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -6,13 +6,15 @@ A Model Context Protocol (MCP) server implementation that provides a bridge betw
6
6
 
7
7
  - **MCP Server**: Exposes Meilisearch APIs as tools using the Model Context Protocol.
8
8
  - **Web Client Demo**: A demo interface showcasing search functionalities.
9
+ - **AI Inference**: Intelligent tool selection based on user queries.
9
10
 
10
11
  ## Key Features
11
12
 
12
13
  - **Multiple Transport Options**: Supports both STDIO and StreamableHTTP transports.
13
14
  - **Real-time Communication**: Enables seamless interaction between clients and the server.
14
15
  - **Meilisearch API Support**: Full access to Meilisearch functionalities.
15
- - **Web Client Demo**: Updated interface for demonstrating search capabilities.
16
+ - **Web Client Demo**: Updated interface showcasing search capabilities and features.
17
+ - **AI Inference**: Leverages LLMs from providers such as OpenAI, Hugging Face, or Anthropic to intelligently determine and utilize the most suitable tool for user queries.
16
18
 
17
19
  ## Getting Started
18
20
 
@@ -21,6 +23,7 @@ A Model Context Protocol (MCP) server implementation that provides a bridge betw
21
23
  - Node.js v20 or higher.
22
24
  - A running Meilisearch instance (local or remote).
23
25
  - API key for Meilisearch (if required).
26
+ - AI provider API key (if using AI inference).
24
27
 
25
28
  ### Installation
26
29
 
@@ -55,20 +58,54 @@ pnpm add mcp-meilisearch
55
58
  - `sessionTimeout`: Session timeout in milliseconds (Default: 3600000)
56
59
  - `sessionCleanupInterval`: Session cleanup interval in milliseconds (Default: 60000)
57
60
 
61
+ #### AI Inference Options
62
+
63
+ - `aiProviderApiKey`: AI provider API key for AI inference
64
+ - `llmModel`: AI model to use (Default: "gpt-3.5-turbo")
65
+
58
66
  ### Using the MCPClient
59
67
 
60
- The package also exports the MCPClient class for client-side integration:
68
+ The package exports the MCPClient class for client-side integration:
61
69
 
62
70
  ```typescript
63
71
  import { MCPClient } from "mcp-meilisearch/client";
64
72
 
65
73
  const client = new MCPClient("mcp-meilisearch-client");
74
+
66
75
  await client.connectToServer("http://localhost:4995/mcp");
67
76
 
68
- // Call a tool
69
77
  const result = await client.callTool("search-across-all-indexes", {
70
78
  q: "search kiosco antonio",
71
79
  });
80
+
81
+ // Use AI inference to choose the most appropriate tool
82
+ // First enable AI inference
83
+ client.setUseAI(true);
84
+
85
+ const aiResult = await client.callToolWithAI("Find articles about cucumber");
86
+ console.log(`Tool used: ${aiResult.toolUsed}`);
87
+ console.log(`Reasoning: ${aiResult.reasoning}`);
88
+ console.log(`Results: ${JSON.stringify(aiResult.data)}`);
89
+ ```
90
+
91
+ #### AI Inference Client Methods
92
+
93
+ - `setUseAI(use: boolean)`: Enable or disable AI inference.
94
+ - `callToolWithAI(query: string, specificTools?: string[])`: Process a user query with AI inference, optionally limiting to specific tools.
95
+
96
+ ### Starting the Server
97
+
98
+ You can start the server programmatically:
99
+
100
+ ```typescript
101
+ import mcpMeilisearchServer from "mcp-meilisearch";
102
+
103
+ await mcpMeilisearchServer({
104
+ meilisearchHost: "http://localhost:7700",
105
+ meilisearchApiKey: "your_meilisearch_api_key",
106
+ providerApiKey: "your_ai_provider_api_key", // Required for AI inference
107
+ llmModel: "gpt-4", // Optional, defaults to gpt-3.5-turbo
108
+ });
72
109
  ```
73
110
 
74
111
  ## Tools
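
As a rough sketch of how the optional `specificTools` argument documented above might be combined with `setUseAI`, reusing the endpoint and tool name from the README example (the query and tool subset are illustrative):

```typescript
import { MCPClient } from "mcp-meilisearch/client";

const client = new MCPClient("mcp-meilisearch-client");
await client.connectToServer("http://localhost:4995/mcp");

// Enable AI inference, then restrict selection to a subset of tools.
client.setUseAI(true);
const aiResult = await client.callToolWithAI("Find articles about cucumber", [
  "search-across-all-indexes", // only tools listed here are considered
]);

if (aiResult.success) {
  console.info(`Tool used: ${aiResult.toolUsed}`);
  console.info(`Reasoning: ${aiResult.reasoning}`);
  console.info(`Results: ${JSON.stringify(aiResult.data)}`);
} else {
  console.error(aiResult.error);
}
```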
package/dist/client.d.ts CHANGED
@@ -1,4 +1,9 @@
1
1
  export declare class MCPClient {
2
+ /**
3
+ * Flag to enable/disable AI inference
4
+ * When enabled, user queries are processed by an AI to determine which tool to use
5
+ */
6
+ useAI: boolean;
2
7
  /**
3
8
  * Indicates whether the client is connected to the MCP server
4
9
  * Used to track the connection state and control async operations
@@ -11,12 +16,18 @@ export declare class MCPClient {
11
16
  tools: {
12
17
  name: string;
13
18
  description: string;
19
+ parameters: Record<string, any>;
14
20
  }[];
15
21
  private client;
16
22
  private tries;
17
23
  private transport;
18
24
  private toolsUpdatedCallback;
19
25
  constructor(serverName: string);
26
+ /**
27
+ * Set whether to use AI inference for tool selection
28
+ * @param use Whether to use AI inference
29
+ */
30
+ setUseAI(use: boolean): void;
20
31
  /**
21
32
  * Registers a callback to be called when the list of available tools changes
22
33
  * @param callback The function to call when tools are updated
@@ -52,6 +63,19 @@ export declare class MCPClient {
52
63
  data?: any;
53
64
  error?: string;
54
65
  }>;
66
+ /**
67
+ * Process a user query through the AI to determine which tool to use
68
+ * @param query The user's query
69
+ * @param specificTools Optional array of specific tools to consider
70
+ * @returns The result of calling the selected tool, or an error
71
+ */
72
+ callToolWithAI(query: string, specificTools?: string[]): Promise<{
73
+ success: boolean;
74
+ data?: any;
75
+ error?: string;
76
+ toolUsed?: string;
77
+ reasoning?: string;
78
+ }>;
55
79
  private setUpTransport;
56
80
  /**
57
81
  * Closes the connection to the server and resets the connection state
@@ -1 +1 @@
1
- {"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAQA,qBAAa,SAAS;IACpB;;;OAGG;IACH,WAAW,EAAE,OAAO,CAAS;IAE7B;;;OAGG;IACH,KAAK,EAAE;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAE,EAAE,CAAM;IAEpD,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAa;IAC1B,OAAO,CAAC,SAAS,CAA8C;IAC/D,OAAO,CAAC,oBAAoB,CAEZ;gBAEJ,UAAU,EAAE,MAAM;IAI9B;;;OAGG;IACH,sBAAsB,CACpB,QAAQ,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAE,CAAC,KAAK,IAAI;IAKzE;;;;;OAKG;IACG,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAOhE;;;;OAIG;IACG,eAAe,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;YAsBzC,SAAS;IAuBvB,OAAO,CAAC,kBAAkB;IAW1B;;;;;;OAMG;IACG,QAAQ,CACZ,IAAI,EAAE,MAAM,EACZ,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GACzB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,IAAI,CAAC,EAAE,GAAG,CAAC;QACX,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB,CAAC;IA2CF,OAAO,CAAC,cAAc;IAKtB;;;OAGG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;CAK/B"}
1
+ {"version":3,"file":"client.d.ts","sourceRoot":"","sources":["../src/client.ts"],"names":[],"mappings":"AAQA,qBAAa,SAAS;IACpB;;;OAGG;IACH,KAAK,EAAE,OAAO,CAAS;IAEvB;;;OAGG;IACH,WAAW,EAAE,OAAO,CAAS;IAE7B;;;OAGG;IACH,KAAK,EAAE;QACL,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KACjC,EAAE,CAAM;IAET,OAAO,CAAC,MAAM,CAAS;IACvB,OAAO,CAAC,KAAK,CAAa;IAC1B,OAAO,CAAC,SAAS,CAA8C;IAC/D,OAAO,CAAC,oBAAoB,CAEZ;gBAEJ,UAAU,EAAE,MAAM;IAI9B;;;OAGG;IACH,QAAQ,CAAC,GAAG,EAAE,OAAO,GAAG,IAAI;IAI5B;;;OAGG;IACH,sBAAsB,CACpB,QAAQ,EAAE,CAAC,KAAK,EAAE,KAAK,CAAC;QAAE,IAAI,EAAE,MAAM,CAAC;QAAC,WAAW,EAAE,MAAM,CAAA;KAAE,CAAC,KAAK,IAAI;IAKzE;;;;;OAKG;IACG,WAAW,CAAC,CAAC,EAAE,QAAQ,EAAE,MAAM,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,OAAO,CAAC,CAAC,CAAC;IAOhE;;;;OAIG;IACG,eAAe,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,IAAI,CAAC;YAuBzC,SAAS;IAwBvB,OAAO,CAAC,kBAAkB;IAW1B;;;;;;OAMG;IACG,QAAQ,CACZ,IAAI,EAAE,MAAM,EACZ,IAAI,CAAC,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,GACzB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,IAAI,CAAC,EAAE,GAAG,CAAC;QACX,KAAK,CAAC,EAAE,MAAM,CAAC;KAChB,CAAC;IA2CF;;;;;OAKG;IACG,cAAc,CAClB,KAAK,EAAE,MAAM,EACb,aAAa,CAAC,EAAE,MAAM,EAAE,GACvB,OAAO,CAAC;QACT,OAAO,EAAE,OAAO,CAAC;QACjB,IAAI,CAAC,EAAE,GAAG,CAAC;QACX,KAAK,CAAC,EAAE,MAAM,CAAC;QACf,QAAQ,CAAC,EAAE,MAAM,CAAC;QAClB,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,CAAC;IAmCF,OAAO,CAAC,cAAc;IAKtB;;;OAGG;IACG,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC;CAK/B"}
package/dist/client.js CHANGED
@@ -1,7 +1,12 @@
1
+ import { TextContentSchema, LoggingMessageNotificationSchema, ToolListChangedNotificationSchema, } from "@modelcontextprotocol/sdk/types.js";
1
2
  import { Client } from "@modelcontextprotocol/sdk/client/index.js";
2
3
  import { StreamableHTTPClientTransport } from "@modelcontextprotocol/sdk/client/streamableHttp.js";
3
- import { TextContentSchema, LoggingMessageNotificationSchema, ToolListChangedNotificationSchema, } from "@modelcontextprotocol/sdk/types.js";
4
4
  export class MCPClient {
5
+ /**
6
+ * Flag to enable/disable AI inference
7
+ * When enabled, user queries are processed by an AI to determine which tool to use
8
+ */
9
+ useAI = false;
5
10
  /**
6
11
  * Indicates whether the client is connected to the MCP server
7
12
  * Used to track the connection state and control async operations
@@ -19,6 +24,13 @@ export class MCPClient {
19
24
  constructor(serverName) {
20
25
  this.client = new Client({ name: serverName, version: "1.0.0" });
21
26
  }
27
+ /**
28
+ * Set whether to use AI inference for tool selection
29
+ * @param use Whether to use AI inference
30
+ */
31
+ setUseAI(use) {
32
+ this.useAI = use;
33
+ }
22
34
  /**
23
35
  * Registers a callback to be called when the list of available tools changes
24
36
  * @param callback The function to call when tools are updated
@@ -53,11 +65,11 @@ export class MCPClient {
53
65
  await this.listTools();
54
66
  this.isConnected = true;
55
67
  }
56
- catch (e) {
68
+ catch (error) {
57
69
  this.tries++;
58
70
  if (this.tries > 5) {
59
71
  this.isConnected = false;
60
- throw e;
72
+ throw error;
61
73
  }
62
74
  await new Promise((resolve) => setTimeout(resolve, this.tries * 1000));
63
75
  await this.connectToServer(serverUrl);
@@ -73,6 +85,7 @@ export class MCPClient {
73
85
  this.tools = toolsResult.tools.map((tool) => ({
74
86
  name: tool.name,
75
87
  description: tool.description ?? "",
88
+ parameters: tool.parameters || {},
76
89
  }));
77
90
  }
78
91
  else {
@@ -137,6 +150,42 @@ export class MCPClient {
137
150
  return { success: false, error: errorMessage };
138
151
  }
139
152
  }
153
+ /**
154
+ * Process a user query through the AI to determine which tool to use
155
+ * @param query The user's query
156
+ * @param specificTools Optional array of specific tools to consider
157
+ * @returns The result of calling the selected tool, or an error
158
+ */
159
+ async callToolWithAI(query, specificTools) {
160
+ try {
161
+ const result = await this.callTool("process-ai-query", {
162
+ query,
163
+ specificTools,
164
+ });
165
+ if (!result.success)
166
+ return result;
167
+ const { toolName, parameters, reasoning } = result.data;
168
+ if (!toolName) {
169
+ return {
170
+ success: false,
171
+ error: "AI could not determine which tool to use for this query",
172
+ };
173
+ }
174
+ const toolResult = await this.callTool(toolName, parameters);
175
+ return {
176
+ ...toolResult,
177
+ reasoning,
178
+ toolUsed: toolName,
179
+ };
180
+ }
181
+ catch (error) {
182
+ const errorMessage = error instanceof Error ? error.message : String(error);
183
+ return {
184
+ success: false,
185
+ error: `AI inference error: ${errorMessage}`,
186
+ };
187
+ }
188
+ }
140
189
  setUpTransport() {
141
190
  if (this.transport == null)
142
191
  return;
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAG7B,OAAO,EAAE,aAAa,EAAE,MAAM,oBAAoB,CAAC;AAKnD;;;;GAIG;AACH,wBAAsB,oBAAoB,CACxC,OAAO,GAAE,aAGR,GACA,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,CA8GtB;AAkCD,eAAe,oBAAoB,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,WAAW,CAAC;AAO7B,OAAO,EAAyB,aAAa,EAAE,MAAM,oBAAoB,CAAC;AAU1E;;;;GAIG;AACH,wBAAsB,oBAAoB,CACxC,OAAO,GAAE,aAA8B,GACtC,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,CA2HtB;AAuCD,eAAe,oBAAoB,CAAC"}
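
The `callToolWithAI` method added above is essentially a two-step wrapper: it asks the server's `process-ai-query` tool which tool to run, then calls that tool with the returned parameters. A minimal sketch of the same flow performed manually, assuming `client` is an already-connected `MCPClient`:

```typescript
// Assumes `client` is a connected MCPClient instance.
const selection = await client.callTool("process-ai-query", {
  query: "Find articles about cucumber",
});

if (selection.success && selection.data?.toolName) {
  // process-ai-query returns { toolName, parameters, reasoning } as shown above.
  const { toolName, parameters, reasoning } = selection.data;
  console.info(`AI selected ${toolName}: ${reasoning}`);
  const toolResult = await client.callTool(toolName, parameters);
  console.info(JSON.stringify(toolResult.data));
}
```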
package/dist/index.js CHANGED
@@ -1,19 +1,37 @@
1
1
  import { createServer } from "node:http";
2
2
  import { parse as parseUrl } from "node:url";
3
+ import { AIService } from "./utils/ai-handler.js";
3
4
  import { initServer } from "./server.js";
4
5
  import { configHandler } from "./utils/config-handler.js";
5
6
  import { createErrorResponse } from "./utils/error-handler.js";
7
+ const defaultOptions = {
8
+ aiProviderApiKey: "",
9
+ meilisearchApiKey: "",
10
+ llmModel: "gpt-3.5-turbo",
11
+ aiProviderName: "openai",
12
+ meilisearchHost: "http://localhost:7700",
13
+ };
6
14
  /**
7
15
  * Start a MCP server
8
16
  * @param options Configuration options for the MCP server
9
17
  * @returns A promise that resolves to the HTTP server instance
10
18
  */
11
- export async function mcpMeilisearchServer(options = {
12
- meilisearchApiKey: "",
13
- meilisearchHost: "http://localhost:7700",
14
- }) {
19
+ export async function mcpMeilisearchServer(options = defaultOptions) {
20
+ configHandler.setLlmModel(options.llmModel);
21
+ configHandler.setAiProviderName(options.aiProviderName);
15
22
  configHandler.setMeilisearchHost(options.meilisearchHost);
23
+ configHandler.setAiProviderApiKey(options.aiProviderApiKey);
16
24
  configHandler.setMeilisearchApiKey(options.meilisearchApiKey);
25
+ const aiService = AIService.getInstance();
26
+ const apiKey = configHandler.getAiProviderApiKey();
27
+ if (apiKey) {
28
+ const llmModel = configHandler.getLlmModel();
29
+ const aiProviderName = configHandler.getAiProviderName();
30
+ aiService.initialize(apiKey, aiProviderName, llmModel);
31
+ }
32
+ else {
33
+ console.warn("AI provider API key not found. AI will not be available");
34
+ }
17
35
  const httpPort = options.httpPort || 4995;
18
36
  const transport = options.transport || "http";
19
37
  let mcpServerInstance = null;
@@ -73,14 +91,13 @@ export async function mcpMeilisearchServer(options = {
73
91
  });
74
92
  await new Promise((resolve) => {
75
93
  server.listen(httpPort, () => {
76
- console.log(`MCP server listening on port ${httpPort}`);
94
+ console.info(`MCP server listening on port ${httpPort}`);
77
95
  resolve();
78
96
  });
79
97
  });
80
98
  try {
81
99
  const serverInstances = await initServer(transport, options);
82
100
  mcpServerInstance = serverInstances.mcpServer;
83
- console.log("MCP server initialized successfully");
84
101
  }
85
102
  catch (error) {
86
103
  console.error("Failed to initialize MCP server:", error);
@@ -88,7 +105,7 @@ export async function mcpMeilisearchServer(options = {
88
105
  throw error;
89
106
  }
90
107
  const shutdownHandler = () => {
91
- console.log("Shutting down MCP server...");
108
+ console.info("Shutting down MCP server...");
92
109
  if (mcpServerInstance && typeof mcpServerInstance.shutdown === "function") {
93
110
  try {
94
111
  mcpServerInstance.shutdown();
@@ -105,10 +122,7 @@ export async function mcpMeilisearchServer(options = {
105
122
  }
106
123
  if (import.meta.url === `file://${process.argv?.[1]}`) {
107
124
  const args = process.argv.slice(2);
108
- const options = {
109
- meilisearchHost: "http://localhost:7700",
110
- meilisearchApiKey: "",
111
- };
125
+ const options = defaultOptions;
112
126
  for (let i = 0; i < args.length; i += 2) {
113
127
  const key = args[i].replace("--", "");
114
128
  const value = args[i + 1];
@@ -125,10 +139,18 @@ if (import.meta.url === `file://${process.argv?.[1]}`) {
125
139
  case "apiKey":
126
140
  options.meilisearchApiKey = value;
127
141
  break;
142
+ case "aiApiKey":
143
+ options.aiProviderApiKey = value;
+ break;
144
+ case "aiProvider":
145
+ options.aiProviderName = value;
146
+ break;
147
+ case "llmModel":
148
+ options.llmModel = value;
149
+ break;
128
150
  }
129
151
  }
130
152
  mcpMeilisearchServer(options)
131
- .then(() => console.log("MCP server running"))
153
+ .then(() => console.info("MCP server running"))
132
154
  .catch((err) => {
133
155
  console.error("Failed to start server:", err);
134
156
  process.exit(1);
@@ -0,0 +1,3 @@
1
+ declare const _default: "\n\t\tAnswer the user's request using the relevant tool(s), if they are available. Check that all the required parameters for each tool call are provided or can reasonably be inferred from context. IF there are no relevant tools or there are missing values for required parameters, ask the user to supply these values; otherwise proceed with the tool calls. If the user provides a specific value for a parameter (for example provided in quotes), make sure to use that value EXACTLY. DO NOT make up values for or ask about optional parameters. Carefully analyze descriptive terms in the request as they may indicate required parameter values that should be included even if not explicitly quoted.\n\n\t\t<identity>\n\t\tYou are an AI programming assistant.\n\t\tWhen asked for your name, you must respond with \"Pati\".\n\t\tFollow the user's requirements carefully & to the letter.\n\t\tIf you are asked to generate content that is harmful, hateful, racist, sexist, lewd, violent, or completely irrelevant to software engineering, only respond with \"Sorry, I can't assist with that.\"\n\t\tKeep your answers short and impersonal.\n\t\t</identity>\n\n\t\t<instructions>\n\t\tYou are a highly sophisticated automated coding agent with expert-level knowledge across many different programming languages and frameworks.\n\t\tThe user will ask a question, or ask you to perform a task, and it may require lots of research to answer correctly. There is a selection of tools that let you perform actions or retrieve helpful context to answer the user's question.\n\t\tIf you can infer the project type (languages, frameworks, and libraries) from the user's query or the context that you have, make sure to keep them in mind when making changes.\n\t\tIf the user wants you to implement a feature and they have not specified the files to edit, first break down the user's request into smaller concepts and think about the kinds of files you need to grasp each concept.\n\t\tIf you aren't sure which tool is relevant, you can call multiple tools. You can call tools repeatedly to take actions or gather as much context as needed until you have completed the task fully. Don't give up unless you are sure the request cannot be fulfilled with the tools you have. It's YOUR RESPONSIBILITY to make sure that you have done all you can to collect necessary context.\n\t\tPrefer using the semantic_search tool to search for context unless you know the exact string or filename pattern you're searching for.\n\t\tDon't make assumptions about the situation- gather context first, then perform the task or answer the question.\n\t\tThink creatively and explore the workspace in order to make a complete fix.\n\t\tDon't repeat yourself after a tool call, pick up where you left off.\n\t\tNEVER print out a codeblock with file changes unless the user asked for it. Use the insert_edit_into_file tool instead.\n\t\tNEVER print out a codeblock with a terminal command to run unless the user asked for it. Use the run_in_terminal tool instead.\n\t\tYou don't need to read a file if it's already provided in context.\n\t\t</instructions>\n\n\t\t<toolUseInstructions>\n\t\tWhen using a tool, follow the json schema very carefully and make sure to include ALL required properties.\n\t\tAlways output valid JSON when using a tool.\n\t\tIf a tool exists to do a task, use the tool instead of asking the user to manually take an action.\n\t\tIf you say that you will take an action, then go ahead and use the tool to do it. 
No need to ask permission.\n\t\tNever use multi_tool_use.parallel or any tool that does not exist. Use tools using the proper procedure, DO NOT write out a json codeblock with the tool inputs.\n\t\tNever say the name of a tool to a user. For example, instead of saying that you'll use the run_in_terminal tool, say \"I'll run the command in a terminal\".\n\t\tIf you think running multiple tools can answer the user's question, prefer calling them in parallel whenever possible, but do not call semantic_search in parallel.\n\t\tIf semantic_search returns the full contents of the text files in the workspace, you have all the workspace context.\n\t\tDon't call the run_in_terminal tool multiple times in parallel. Instead, run one command and wait for the output before running the next command.\n\t\tAfter you have performed the user's task, if the user corrected something you did, expressed a coding preference, or communicated a fact that you need to remember, use the update_user_preferences tool to save their preferences.\n\t\t</toolUseInstructions>\n\n\t\t<editFileInstructions>\n\t\tDon't try to edit an existing file without reading it first, so you can make changes properly.\n\t\tUse the insert_edit_into_file tool to edit files. When editing files, group your changes by file.\n\t\tNEVER show the changes to the user, just call the tool, and the edits will be applied and shown to the user.\n\t\tNEVER print a codeblock that represents a change to a file, use insert_edit_into_file instead.\n\t\tFor each file, give a short description of what needs to be changed, then use the insert_edit_into_file tool. You can use any tool multiple times in a response, and you can keep writing text after using a tool.\n\t\tFollow best practices when editing files. If a popular external library exists to solve a problem, use it and properly install the package e.g. with \"npm install\" or creating a \"requirements.txt\".\n\t\tAfter editing a file, you MUST call get_errors to validate the change. Fix the errors if they are relevant to your change or the prompt, and remember to validate that they were actually fixed.\n\t\tThe insert_edit_into_file tool is very smart and can understand how to apply your edits to the user's files, you just need to provide minimal hints.\n\t\tWhen you use the insert_edit_into_file tool, avoid repeating existing code, instead use comments to represent regions of unchanged code. The tool prefers that you are as concise as possible. For example:\n\t\t// ...existing code...\n\t\tchanged code\n\t\t// ...existing code...\n\t\tchanged code\n\t\t// ...existing code...\n\n\t\tHere is an example of how you should format an edit to an existing Person class:\n\t\tclass Person {\n\t\t\t// ...existing code...\n\t\t\tage: number;\n\t\t\t// ...existing code...\n\t\t\tgetAge() {\n\t\t\t\treturn this.age;\n\t\t\t}\n\t\t}\n\t\t</editFileInstructions>\n\n\t\t<functions>\n\t\tMCP_TOOLS\n\t\t</functions>\n\n\t\t<context>\n\t\tMy current OS is: Linux\n\t\tI am working in a workspace that has the following structure:\n\t\t```\n\t\texample.txt\n\t\traw_complete_instructions.txt\n\t\traw_instructions.txt\n\t\t```\n\t\tThis view of the workspace structure may be truncated. 
You can use tools to collect more context if needed.\n\t\t</context>\n\n\t\t<reminder>\n\t\tWhen using the insert_edit_into_file tool, avoid repeating existing code, instead use a line comment with `...existing code...` to represent regions of unchanged code.\n\t\t</reminder>\n\n\t\t<tool_format>\n\t\t<function_calls>\n\t\t<invoke name=\"[tool_name]\">\n\t\t<parameter name=\"[param_name]\">[param_value]\n\t\t";
2
+ export default _default;
3
+ //# sourceMappingURL=general.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"general.d.ts","sourceRoot":"","sources":["../../src/prompts/general.ts"],"names":[],"mappings":";AAAA,wBAyFI"}
@@ -0,0 +1,90 @@
1
+ export default `
2
+ Answer the user's request using the relevant tool(s), if they are available. Check that all the required parameters for each tool call are provided or can reasonably be inferred from context. IF there are no relevant tools or there are missing values for required parameters, ask the user to supply these values; otherwise proceed with the tool calls. If the user provides a specific value for a parameter (for example provided in quotes), make sure to use that value EXACTLY. DO NOT make up values for or ask about optional parameters. Carefully analyze descriptive terms in the request as they may indicate required parameter values that should be included even if not explicitly quoted.
3
+
4
+ <identity>
5
+ You are an AI programming assistant.
6
+ When asked for your name, you must respond with "Pati".
7
+ Follow the user's requirements carefully & to the letter.
8
+ If you are asked to generate content that is harmful, hateful, racist, sexist, lewd, violent, or completely irrelevant to software engineering, only respond with "Sorry, I can't assist with that."
9
+ Keep your answers short and impersonal.
10
+ </identity>
11
+
12
+ <instructions>
13
+ You are a highly sophisticated automated coding agent with expert-level knowledge across many different programming languages and frameworks.
14
+ The user will ask a question, or ask you to perform a task, and it may require lots of research to answer correctly. There is a selection of tools that let you perform actions or retrieve helpful context to answer the user's question.
15
+ If you can infer the project type (languages, frameworks, and libraries) from the user's query or the context that you have, make sure to keep them in mind when making changes.
16
+ If the user wants you to implement a feature and they have not specified the files to edit, first break down the user's request into smaller concepts and think about the kinds of files you need to grasp each concept.
17
+ If you aren't sure which tool is relevant, you can call multiple tools. You can call tools repeatedly to take actions or gather as much context as needed until you have completed the task fully. Don't give up unless you are sure the request cannot be fulfilled with the tools you have. It's YOUR RESPONSIBILITY to make sure that you have done all you can to collect necessary context.
18
+ Prefer using the semantic_search tool to search for context unless you know the exact string or filename pattern you're searching for.
19
+ Don't make assumptions about the situation- gather context first, then perform the task or answer the question.
20
+ Think creatively and explore the workspace in order to make a complete fix.
21
+ Don't repeat yourself after a tool call, pick up where you left off.
22
+ NEVER print out a codeblock with file changes unless the user asked for it. Use the insert_edit_into_file tool instead.
23
+ NEVER print out a codeblock with a terminal command to run unless the user asked for it. Use the run_in_terminal tool instead.
24
+ You don't need to read a file if it's already provided in context.
25
+ </instructions>
26
+
27
+ <toolUseInstructions>
28
+ When using a tool, follow the json schema very carefully and make sure to include ALL required properties.
29
+ Always output valid JSON when using a tool.
30
+ If a tool exists to do a task, use the tool instead of asking the user to manually take an action.
31
+ If you say that you will take an action, then go ahead and use the tool to do it. No need to ask permission.
32
+ Never use multi_tool_use.parallel or any tool that does not exist. Use tools using the proper procedure, DO NOT write out a json codeblock with the tool inputs.
33
+ Never say the name of a tool to a user. For example, instead of saying that you'll use the run_in_terminal tool, say "I'll run the command in a terminal".
34
+ If you think running multiple tools can answer the user's question, prefer calling them in parallel whenever possible, but do not call semantic_search in parallel.
35
+ If semantic_search returns the full contents of the text files in the workspace, you have all the workspace context.
36
+ Don't call the run_in_terminal tool multiple times in parallel. Instead, run one command and wait for the output before running the next command.
37
+ After you have performed the user's task, if the user corrected something you did, expressed a coding preference, or communicated a fact that you need to remember, use the update_user_preferences tool to save their preferences.
38
+ </toolUseInstructions>
39
+
40
+ <editFileInstructions>
41
+ Don't try to edit an existing file without reading it first, so you can make changes properly.
42
+ Use the insert_edit_into_file tool to edit files. When editing files, group your changes by file.
43
+ NEVER show the changes to the user, just call the tool, and the edits will be applied and shown to the user.
44
+ NEVER print a codeblock that represents a change to a file, use insert_edit_into_file instead.
45
+ For each file, give a short description of what needs to be changed, then use the insert_edit_into_file tool. You can use any tool multiple times in a response, and you can keep writing text after using a tool.
46
+ Follow best practices when editing files. If a popular external library exists to solve a problem, use it and properly install the package e.g. with "npm install" or creating a "requirements.txt".
47
+ After editing a file, you MUST call get_errors to validate the change. Fix the errors if they are relevant to your change or the prompt, and remember to validate that they were actually fixed.
48
+ The insert_edit_into_file tool is very smart and can understand how to apply your edits to the user's files, you just need to provide minimal hints.
49
+ When you use the insert_edit_into_file tool, avoid repeating existing code, instead use comments to represent regions of unchanged code. The tool prefers that you are as concise as possible. For example:
50
+ // ...existing code...
51
+ changed code
52
+ // ...existing code...
53
+ changed code
54
+ // ...existing code...
55
+
56
+ Here is an example of how you should format an edit to an existing Person class:
57
+ class Person {
58
+ // ...existing code...
59
+ age: number;
60
+ // ...existing code...
61
+ getAge() {
62
+ return this.age;
63
+ }
64
+ }
65
+ </editFileInstructions>
66
+
67
+ <functions>
68
+ MCP_TOOLS
69
+ </functions>
70
+
71
+ <context>
72
+ My current OS is: Linux
73
+ I am working in a workspace that has the following structure:
74
+ \`\`\`
75
+ example.txt
76
+ raw_complete_instructions.txt
77
+ raw_instructions.txt
78
+ \`\`\`
79
+ This view of the workspace structure may be truncated. You can use tools to collect more context if needed.
80
+ </context>
81
+
82
+ <reminder>
83
+ When using the insert_edit_into_file tool, avoid repeating existing code, instead use a line comment with \`...existing code...\` to represent regions of unchanged code.
84
+ </reminder>
85
+
86
+ <tool_format>
87
+ <function_calls>
88
+ <invoke name="[tool_name]">
89
+ <parameter name="[param_name]">[param_value]
90
+ `;
package/dist/server.d.ts CHANGED
@@ -4,6 +4,8 @@ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
4
4
  * Configuration for the MCP server
5
5
  */
6
6
  interface ServerConfig {
7
+ host: string;
8
+ apiKey: string;
7
9
  httpPort: number;
8
10
  mcpEndpoint: string;
9
11
  sessionTimeout: number;
@@ -1 +1 @@
1
- {"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../src/server.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,eAAe,EAAE,cAAc,EAAE,MAAM,MAAM,CAAC;AAevD,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAIpE;;GAEG;AACH,UAAU,YAAY;IACpB,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,cAAc,EAAE,MAAM,CAAC;IACvB,sBAAsB,EAAE,MAAM,CAAC;CAChC;AAED;;GAEG;AACH,UAAU,cAAc;IACtB,SAAS,EAAE,SAAS,CAAC;CACtB;AAiBD;;GAEG;AACH,qBAAa,SAAS;IACpB,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAClC,OAAO,CAAC,QAAQ,CAAC,sBAAsB,CAAoB;IAE3D,OAAO,CAAC,MAAM,CAAY;IAC1B,OAAO,CAAC,MAAM,CAAe;IAC7B,OAAO,CAAC,eAAe,CAA+B;IACtD,OAAO,CAAC,QAAQ,CAAuC;IAEvD;;;;OAIG;gBACS,MAAM,EAAE,SAAS,EAAE,MAAM,GAAE,OAAO,CAAC,YAAY,CAAM;IAOjE;;;;OAIG;IACG,gBAAgB,CACpB,GAAG,EAAE,eAAe,EACpB,GAAG,EAAE,cAAc,GAClB,OAAO,CAAC,IAAI,CAAC;IA+BhB;;;;;OAKG;IACG,iBAAiB,CACrB,GAAG,EAAE,eAAe,EACpB,GAAG,EAAE,cAAc,EACnB,IAAI,EAAE,GAAG,GACR,OAAO,CAAC,IAAI,CAAC;IA+BhB;;OAEG;IACH,QAAQ,IAAI,IAAI;IAqBhB;;;;;OAKG;YACW,uBAAuB;IAwCrC;;OAEG;IACH,OAAO,CAAC,+BAA+B;IAWvC;;OAEG;YACW,gBAAgB;IAmB9B;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAa3B;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAKxB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAO7B;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAUzB;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAM3B;;OAEG;IACH,OAAO,CAAC,sBAAsB;CA0B/B;AAsED;;;;;GAKG;AACH,eAAO,MAAM,UAAU,GACrB,WAAW,OAAO,GAAG,MAAM,EAC3B,SAAS,OAAO,CAAC,YAAY,CAAC,KAC7B,OAAO,CAAC,cAAc,CAcxB,CAAC"}
1
+ {"version":3,"file":"server.d.ts","sourceRoot":"","sources":["../src/server.ts"],"names":[],"mappings":"AACA,OAAO,EAAE,eAAe,EAAE,cAAc,EAAE,MAAM,MAAM,CAAC;AAgBvD,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAIpE;;GAEG;AACH,UAAU,YAAY;IACpB,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,MAAM,CAAC;IACf,QAAQ,EAAE,MAAM,CAAC;IACjB,WAAW,EAAE,MAAM,CAAC;IACpB,cAAc,EAAE,MAAM,CAAC;IACvB,sBAAsB,EAAE,MAAM,CAAC;CAChC;AAED;;GAEG;AACH,UAAU,cAAc;IACtB,SAAS,EAAE,SAAS,CAAC;CACtB;AAmBD;;GAEG;AACH,qBAAa,SAAS;IACpB,OAAO,CAAC,QAAQ,CAAC,QAAQ,CAAS;IAClC,OAAO,CAAC,QAAQ,CAAC,sBAAsB,CAAoB;IAE3D,OAAO,CAAC,MAAM,CAAY;IAC1B,OAAO,CAAC,MAAM,CAAe;IAC7B,OAAO,CAAC,eAAe,CAA+B;IACtD,OAAO,CAAC,QAAQ,CAAuC;IAEvD;;;;OAIG;gBACS,MAAM,EAAE,SAAS,EAAE,MAAM,GAAE,OAAO,CAAC,YAAY,CAAM;IAOjE;;;;OAIG;IACG,gBAAgB,CACpB,GAAG,EAAE,eAAe,EACpB,GAAG,EAAE,cAAc,GAClB,OAAO,CAAC,IAAI,CAAC;IA+BhB;;;;;OAKG;IACG,iBAAiB,CACrB,GAAG,EAAE,eAAe,EACpB,GAAG,EAAE,cAAc,EACnB,IAAI,EAAE,GAAG,GACR,OAAO,CAAC,IAAI,CAAC;IA+BhB;;OAEG;IACH,QAAQ,IAAI,IAAI;IAqBhB;;;;;OAKG;YACW,uBAAuB;IAuCrC;;OAEG;IACH,OAAO,CAAC,+BAA+B;IAWvC;;OAEG;YACW,gBAAgB;IAmB9B;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAa3B;;OAEG;IACH,OAAO,CAAC,gBAAgB;IAKxB;;OAEG;IACH,OAAO,CAAC,qBAAqB;IAO7B;;OAEG;IACH,OAAO,CAAC,iBAAiB;IAUzB;;OAEG;IACH,OAAO,CAAC,mBAAmB;IAM3B;;OAEG;IACH,OAAO,CAAC,sBAAsB;CA0B/B;AA+DD;;;;;GAKG;AACH,eAAO,MAAM,UAAU,GACrB,WAAW,OAAO,GAAG,MAAM,EAC3B,SAAS,OAAO,CAAC,YAAY,CAAC,KAC7B,OAAO,CAAC,cAAc,CAcxB,CAAC"}
package/dist/server.js CHANGED
@@ -1,5 +1,6 @@
1
1
  import { randomUUID } from "node:crypto";
2
2
  import { InitializeRequestSchema, } from "@modelcontextprotocol/sdk/types.js";
3
+ import registerAITools from "./tools/ai-tools.js";
3
4
  import registerTaskTools from "./tools/task-tools.js";
4
5
  import registerIndexTools from "./tools/index-tools.js";
5
6
  import registerSearchTools from "./tools/search-tools.js";
@@ -11,11 +12,13 @@ import { createErrorResponse } from "./utils/error-handler.js";
11
12
  import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
12
13
  import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
13
14
  import { StreamableHTTPServerTransport } from "@modelcontextprotocol/sdk/server/streamableHttp.js";
14
- const DEFAULT_CONFIG = {
15
+ const defaultConfig = {
15
16
  httpPort: 4995,
16
17
  mcpEndpoint: "/mcp",
17
18
  sessionTimeout: 3600000,
18
19
  sessionCleanupInterval: 60000,
20
+ apiKey: process.env.MEILISEARCH_API_KEY || "",
21
+ host: process.env.MEILISEARCH_HOST || "http://localhost:7700",
19
22
  };
20
23
  /**
21
24
  * Implementation of an MCP server for Meilisearch
@@ -34,7 +37,7 @@ export class MCPServer {
34
37
  */
35
38
  constructor(server, config = {}) {
36
39
  this.server = server;
37
- this.config = { ...DEFAULT_CONFIG, ...config };
40
+ this.config = { ...defaultConfig, ...config };
38
41
  this.startSessionCleanup();
39
42
  }
40
43
  /**
@@ -43,14 +46,14 @@ export class MCPServer {
43
46
  * @param res The HTTP response
44
47
  */
45
48
  async handleGetRequest(req, res) {
46
- console.log("GET request received");
49
+ console.info("GET request received");
47
50
  const sessionId = this.extractSessionId(req);
48
51
  if (!sessionId || !this.sessions.has(sessionId)) {
49
52
  console.error(`Invalid session ID: ${sessionId}`);
50
53
  this.sendErrorResponse(res, 400, "Bad Request: invalid session ID");
51
54
  return;
52
55
  }
53
- console.log(`Establishing HTTP stream for session ${sessionId}`);
56
+ console.info(`Establishing HTTP stream for session ${sessionId}`);
54
57
  const sessionInfo = this.sessions.get(sessionId);
55
58
  const transport = sessionInfo.transport;
56
59
  try {
@@ -76,7 +79,7 @@ export class MCPServer {
76
79
  const sessionId = this.extractSessionId(req);
77
80
  try {
78
81
  if (sessionId && this.sessions.has(sessionId)) {
79
- console.log(`POST request for existing session ${sessionId}`);
82
+ console.info(`POST request for existing session ${sessionId}`);
80
83
  const sessionInfo = this.sessions.get(sessionId);
81
84
  await sessionInfo.transport.handleRequest(req, res, body);
82
85
  this.updateSessionActivity(sessionId);
@@ -98,14 +101,14 @@ export class MCPServer {
98
101
  * Clean up and release server resources
99
102
  */
100
103
  shutdown() {
101
- console.log("Shutting down MCP server...");
104
+ console.info("Shutting down MCP server...");
102
105
  if (this.cleanupInterval) {
103
106
  clearInterval(this.cleanupInterval);
104
107
  this.cleanupInterval = null;
105
108
  }
106
109
  for (const [sessionId, sessionInfo] of this.sessions.entries()) {
107
110
  try {
108
- console.log(`Closing session ${sessionId}`);
111
+ console.info(`Closing session ${sessionId}`);
109
112
  sessionInfo.transport.close();
110
113
  }
111
114
  catch (error) {
@@ -113,7 +116,7 @@ export class MCPServer {
113
116
  }
114
117
  }
115
118
  this.sessions.clear();
116
- console.log("MCP server shutdown complete");
119
+ console.info("MCP server shutdown complete");
117
120
  }
118
121
  /**
119
122
  * Handles the initial connection request
@@ -122,7 +125,6 @@ export class MCPServer {
122
125
  * @param body The request body
123
126
  */
124
127
  async handleInitializeRequest(req, res, body) {
125
- console.log("Handling initialize request");
126
128
  const newSessionId = randomUUID();
127
129
  const transport = new StreamableHTTPServerTransport({
128
130
  sessionIdGenerator: () => newSessionId,
@@ -138,7 +140,7 @@ export class MCPServer {
138
140
  lastActivity: Date.now(),
139
141
  });
140
142
  this.sendToolListChangedNotification(transport);
141
- console.log(`New session established: ${newSessionId}`);
143
+ console.info(`New session established: ${newSessionId}`);
142
144
  }
143
145
  catch (error) {
144
146
  console.error("Error handling initialize request:", error);
@@ -166,7 +168,7 @@ export class MCPServer {
166
168
  jsonrpc: this.JSON_RPC,
167
169
  };
168
170
  await transport.send(rpcNotification);
169
- console.log(`Sent notification: ${notification.method}`);
171
+ console.info(`Sent notification: ${notification.method}`);
170
172
  }
171
173
  catch (error) {
172
174
  console.error(`Failed to send notification ${notification.method}:`, error);
@@ -229,7 +231,7 @@ export class MCPServer {
229
231
  }
230
232
  }
231
233
  if (expiredIds.length) {
232
- console.log(`Cleaning up ${expiredIds.length} expired sessions`);
234
+ console.info(`Cleaning up ${expiredIds.length} expired sessions`);
233
235
  for (const sessionId of expiredIds) {
234
236
  try {
235
237
  const info = this.sessions.get(sessionId);
@@ -249,10 +251,6 @@ export class MCPServer {
249
251
  * Initialize the MCP server with HTTP transport using Vite
250
252
  */
251
253
  const initServerHTTPTransport = async (customConfig) => {
252
- const config = {
253
- ...DEFAULT_CONFIG,
254
- ...customConfig,
255
- };
256
254
  const serverInstance = new McpServer({
257
255
  version: "1.0.0",
258
256
  name: "mcp-meilisearch",
@@ -264,19 +262,15 @@ const initServerHTTPTransport = async (customConfig) => {
264
262
  registerVectorTools(serverInstance);
265
263
  registerSystemTools(serverInstance);
266
264
  registerTaskTools(serverInstance);
267
- const server = new MCPServer(serverInstance, config);
265
+ registerAITools(serverInstance);
266
+ const server = new MCPServer(serverInstance, customConfig);
268
267
  return { mcpServer: server };
269
268
  };
270
269
  /**
271
270
  * Initialize the MCP server with stdio transport
272
271
  * @returns MCP server instance
273
272
  */
274
- const initServerStdioTransport = async () => {
275
- const config = {
276
- ...DEFAULT_CONFIG,
277
- host: process.env.MEILISEARCH_HOST,
278
- apiKey: process.env.MEILISEARCH_API_KEY,
279
- };
273
+ const initServerStdioTransport = async (customConfig) => {
280
274
  const serverInstance = new McpServer({
281
275
  version: "1.0.0",
282
276
  name: "mcp-meilisearch",
@@ -288,12 +282,13 @@ const initServerStdioTransport = async () => {
288
282
  registerVectorTools(serverInstance);
289
283
  registerSystemTools(serverInstance);
290
284
  registerTaskTools(serverInstance);
291
- const server = new MCPServer(serverInstance, config);
285
+ registerAITools(serverInstance);
286
+ const server = new MCPServer(serverInstance, customConfig);
292
287
  const transport = new StdioServerTransport();
293
288
  await serverInstance.connect(transport);
294
- console.log("Meilisearch MCP Server is running on stdio transport");
289
+ console.info("Meilisearch MCP Server is running on stdio transport");
295
290
  process.on("SIGINT", () => {
296
- console.log("Shutting down stdio server...");
291
+ console.info("Shutting down stdio server...");
297
292
  process.exit(0);
298
293
  });
299
294
  return { mcpServer: server };
@@ -308,7 +303,7 @@ export const initServer = async (transport, config) => {
308
303
  try {
309
304
  switch (transport) {
310
305
  case "stdio":
311
- return await initServerStdioTransport();
306
+ return await initServerStdioTransport(config);
312
307
  case "http":
313
308
  return await initServerHTTPTransport(config);
314
309
  default:
package/dist/stdio.js CHANGED
@@ -1,6 +1,5 @@
1
1
  import { mcpMeilisearchServer } from "./index.js";
2
2
  mcpMeilisearchServer({
3
3
  transport: "stdio",
4
- meilisearchHost: process.env.VITE_MEILISEARCH_HOST,
5
- meilisearchApiKey: process.env.VITE_MEILISEARCH_API_KEY,
4
+ meilisearchHost: process.env.MEILISEARCH_HOST,
6
5
  });
@@ -0,0 +1,8 @@
1
+ import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
2
+ /**
3
+ * Register AI tools with the MCP server
4
+ * @param server - The MCP server instance
5
+ */
6
+ export declare const registerAITools: (server: McpServer) => void;
7
+ export default registerAITools;
8
+ //# sourceMappingURL=ai-tools.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-tools.d.ts","sourceRoot":"","sources":["../../src/tools/ai-tools.ts"],"names":[],"mappings":"AAEA,OAAO,EAAE,SAAS,EAAE,MAAM,yCAAyC,CAAC;AAcpE;;;GAGG;AACH,eAAO,MAAM,eAAe,GAAI,QAAQ,SAAS,SAqEhD,CAAC;AAEF,eAAe,eAAe,CAAC"}
@@ -0,0 +1,66 @@
1
+ import { z } from "zod";
2
+ import { AIService } from "../utils/ai-handler.js";
3
+ import { createErrorResponse } from "../utils/error-handler.js";
4
+ /**
5
+ * Register AI tools with the MCP server
6
+ * @param server - The MCP server instance
7
+ */
8
+ export const registerAITools = (server) => {
9
+ server.tool("process-ai-query", "Process a natural language query using AI to determine which tool to use", {
10
+ query: z.string().describe("The natural language query to process"),
11
+ specificTools: z
12
+ .array(z.string())
13
+ .optional()
14
+ .describe("Optional array of specific tool names to consider"),
15
+ }, async ({ query, specificTools }) => {
16
+ try {
17
+ const aiService = AIService.getInstance();
18
+ const availableTools = Object.entries(server._registeredTools)
19
+ .filter(([name]) => name !== "process-ai-query")
20
+ .map(([name, { description }]) => ({
21
+ name,
22
+ description,
23
+ parameters: {},
24
+ }));
25
+ aiService.setAvailableTools(availableTools);
26
+ const result = await aiService.processQuery(query, specificTools);
27
+ if (!aiService.ensureInitialized()) {
28
+ return {
29
+ isError: true,
30
+ content: [
31
+ {
32
+ type: "text",
33
+ text: "AI service not initialized. Please provide an API key.",
34
+ },
35
+ ],
36
+ };
37
+ }
38
+ if (!result) {
39
+ return {
40
+ content: [
41
+ {
42
+ type: "text",
43
+ text: "AI couldn't determine an appropriate tool to use for this query.",
44
+ },
45
+ ],
46
+ };
47
+ }
48
+ return {
49
+ content: [
50
+ {
51
+ type: "text",
52
+ text: JSON.stringify({
53
+ toolName: result.toolName,
54
+ parameters: result.parameters,
55
+ reasoning: result.reasoning || "No explanation provided",
56
+ }, null, 2),
57
+ },
58
+ ],
59
+ };
60
+ }
61
+ catch (error) {
62
+ return createErrorResponse(error);
63
+ }
64
+ });
65
+ };
66
+ export default registerAITools;
@@ -147,7 +147,7 @@ export const registerSearchTools = (server) => {
147
147
  params: { limit: 1000 },
148
148
  });
149
149
  const indexUids = indexesResponse.data.results.map((index) => index.uid);
150
- if (!indexUids || indexUids.length === 0) {
150
+ if (!indexUids?.length) {
151
151
  return {
152
152
  content: [
153
153
  {
@@ -1,15 +1,14 @@
1
+ export type AiProviderNameOptions = "openai" | "huggingface" | "anthropic";
1
2
  export interface ServerOptions {
2
3
  /**
3
4
  * The URL of the Meilisearch instance
4
- * @required
5
- * @example "http://localhost:7700"
5
+ * @default "http://localhost:7700"
6
6
  */
7
- meilisearchHost: string;
7
+ meilisearchHost?: string;
8
8
  /**
9
9
  * The API key for authenticating with Meilisearch
10
- * @required
11
10
  */
12
- meilisearchApiKey: string;
11
+ meilisearchApiKey?: string;
13
12
  /**
14
13
  * Transport type for MCP server ("http" | "stdio")
15
14
  * @default "http"
@@ -35,5 +34,19 @@ export interface ServerOptions {
35
34
  * @default 60000 (1 minute)
36
35
  */
37
36
  sessionCleanupInterval?: number;
37
+ /**
38
+ * AI inference provider name
39
+ * @default "openai"
40
+ */
41
+ aiProviderName?: AiProviderNameOptions;
42
+ /**
43
+ * AI provider API key for AI inference
44
+ */
45
+ aiProviderApiKey?: string;
46
+ /**
47
+ * AI model to use for inference
48
+ * @default "gpt-3.5-turbo"
49
+ */
50
+ llmModel?: string;
38
51
  }
39
52
  //# sourceMappingURL=options.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"options.d.ts","sourceRoot":"","sources":["../../src/types/options.ts"],"names":[],"mappings":"AAAA,MAAM,WAAW,aAAa;IAC5B;;;;OAIG;IACH,eAAe,EAAE,MAAM,CAAC;IAExB;;;OAGG;IACH,iBAAiB,EAAE,MAAM,CAAC;IAC1B;;;OAGG;IACH,SAAS,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAE7B;;;OAGG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;IAElB;;;OAGG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IAExB;;;OAGG;IACH,sBAAsB,CAAC,EAAE,MAAM,CAAC;CACjC"}
1
+ {"version":3,"file":"options.d.ts","sourceRoot":"","sources":["../../src/types/options.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,qBAAqB,GAAG,QAAQ,GAAG,aAAa,GAAG,WAAW,CAAC;AAE3E,MAAM,WAAW,aAAa;IAC5B;;;OAGG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IAEzB;;OAEG;IACH,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B;;;OAGG;IACH,SAAS,CAAC,EAAE,MAAM,GAAG,OAAO,CAAC;IAE7B;;;OAGG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;IAElB;;;OAGG;IACH,WAAW,CAAC,EAAE,MAAM,CAAC;IAErB;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IAExB;;;OAGG;IACH,sBAAsB,CAAC,EAAE,MAAM,CAAC;IAEhC;;;OAGG;IACH,cAAc,CAAC,EAAE,qBAAqB,CAAC;IAEvC;;OAEG;IACH,gBAAgB,CAAC,EAAE,MAAM,CAAC;IAE1B;;;OAGG;IACH,QAAQ,CAAC,EAAE,MAAM,CAAC;CACnB"}
@@ -0,0 +1,72 @@
1
+ import { AiProviderNameOptions } from "../types/options.js";
2
+ /**
3
+ * AI Inference Service
4
+ *
5
+ * This service handles the interaction with the AI to determine the appropriate tools
6
+ * to use based on the user's query
7
+ */
8
+ export declare class AIService {
9
+ private model;
10
+ private systemPrompt;
11
+ private static instance;
12
+ private static serverInitialized;
13
+ private provider;
14
+ private client;
15
+ private availableTools;
16
+ /**
17
+ * Private constructor to prevent direct instantiation
18
+ * Use getInstance() method instead
19
+ */
20
+ private constructor();
21
+ /**
22
+ * Get the singleton instance of AIService
23
+ * @returns The singleton AIService instance
24
+ */
25
+ static getInstance(): AIService;
26
+ /**
27
+ * Initialize the AI service with an API key and optionally set the model
28
+ * This should ONLY be called from the server side
29
+ * @param apiKey AI provider API key (required)
30
+ * @param provider AI provider name (defaults to openai)
31
+ * @param model Optional model to use (defaults to gpt-3.5-turbo)
32
+ */
33
+ initialize(apiKey: string, provider?: AiProviderNameOptions, model?: string): void;
34
+ /**
35
+ * Set the available tools that can be used by the AI
36
+ * @param tools Array of tools with name, description, and parameters
37
+ */
38
+ setAvailableTools(tools: {
39
+ name: string;
40
+ description: string;
41
+ parameters: Record<string, any>;
42
+ }[]): void;
43
+ ensureInitialized(): boolean;
44
+ /**
45
+ * Get tool definitions for the AI from the available tools
46
+ * @param toolNames Optional array of tool names to filter by (if not provided, all tools will be included)
47
+ * @returns Array of tool definitions
48
+ */
49
+ private getToolDefinitions;
50
+ /**
51
+ * Attempt to extract specific tool names from the user query
52
+ * @param query The user's query
53
+ * @returns Array of tool names mentioned in the query, or empty array if none found
54
+ */
55
+ private extractToolNames;
56
+ /**
57
+ * Process a user query and determine which tool to use
58
+ * @param query User query
59
+ * @param specificTools Optional array of specific tool names to consider
60
+ * @returns Object containing the selected tool name and parameters
61
+ */
62
+ processQuery(query: string, specificTools?: string[]): Promise<{
63
+ toolName: string;
64
+ parameters: Record<string, any>;
65
+ reasoning?: string;
66
+ } | null>;
67
+ private processHuggingFaceQuery;
68
+ private processAnthropicQuery;
69
+ private processOpenAIQuery;
70
+ private setSystemPrompt;
71
+ }
72
+ //# sourceMappingURL=ai-handler.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"ai-handler.d.ts","sourceRoot":"","sources":["../../src/utils/ai-handler.ts"],"names":[],"mappings":"AAIA,OAAO,EAAE,qBAAqB,EAAE,MAAM,qBAAqB,CAAC;AAsB5D;;;;;GAKG;AACH,qBAAa,SAAS;IACpB,OAAO,CAAC,KAAK,CAA2B;IACxC,OAAO,CAAC,YAAY,CAAyB;IAC7C,OAAO,CAAC,MAAM,CAAC,QAAQ,CAA0B;IACjD,OAAO,CAAC,MAAM,CAAC,iBAAiB,CAAkB;IAClD,OAAO,CAAC,QAAQ,CAAmC;IACnD,OAAO,CAAC,MAAM,CAAqD;IACnE,OAAO,CAAC,cAAc,CAIb;IAET;;;OAGG;IACH,OAAO;IAEP;;;OAGG;WACW,WAAW,IAAI,SAAS;IAOtC;;;;;;OAMG;IACH,UAAU,CACR,MAAM,EAAE,MAAM,EACd,QAAQ,GAAE,qBAAgC,EAC1C,KAAK,CAAC,EAAE,MAAM,GACb,IAAI;IAyBP;;;OAGG;IACH,iBAAiB,CACf,KAAK,EAAE;QACL,IAAI,EAAE,MAAM,CAAC;QACb,WAAW,EAAE,MAAM,CAAC;QACpB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;KACjC,EAAE,GACF,IAAI;IAUP,iBAAiB,IAAI,OAAO;IAI5B;;;;OAIG;IACH,OAAO,CAAC,kBAAkB;IAwB1B;;;;OAIG;IACH,OAAO,CAAC,gBAAgB;IAaxB;;;;;OAKG;IACG,YAAY,CAChB,KAAK,EAAE,MAAM,EACb,aAAa,CAAC,EAAE,MAAM,EAAE,GACvB,OAAO,CAAC;QACT,QAAQ,EAAE,MAAM,CAAC;QACjB,UAAU,EAAE,MAAM,CAAC,MAAM,EAAE,GAAG,CAAC,CAAC;QAChC,SAAS,CAAC,EAAE,MAAM,CAAC;KACpB,GAAG,IAAI,CAAC;YAgCK,uBAAuB;YA2BvB,qBAAqB;YAmCrB,kBAAkB;IA2BhC,OAAO,CAAC,eAAe;CAGxB"}
@@ -0,0 +1,218 @@
1
+ import { OpenAI } from "openai";
2
+ import Anthropic from "@anthropic-ai/sdk";
3
+ import generalPrompt from "../prompts/general.js";
4
+ import { InferenceClient } from "@huggingface/inference";
5
+ /**
6
+ * AI Inference Service
7
+ *
8
+ * This service handles the interaction with the AI to determine the appropriate tools
9
+ * to use based on the user's query
10
+ */
11
+ export class AIService {
12
+ model = "gpt-3.5-turbo";
13
+ systemPrompt = generalPrompt;
14
+ static instance = null;
15
+ static serverInitialized = false;
16
+ provider = "openai";
17
+ client = null;
18
+ availableTools = [];
19
+ /**
20
+ * Private constructor to prevent direct instantiation
21
+ * Use getInstance() method instead
22
+ */
23
+ constructor() { }
24
+ /**
25
+ * Get the singleton instance of AIService
26
+ * @returns The singleton AIService instance
27
+ */
28
+ static getInstance() {
29
+ if (!AIService.instance) {
30
+ AIService.instance = new AIService();
31
+ }
32
+ return AIService.instance;
33
+ }
34
+ /**
35
+ * Initialize the AI service with an API key and optionally set the model
36
+ * This should ONLY be called from the server side
37
+ * @param apiKey AI provider API key (required)
38
+ * @param provider AI provider name (defaults to openai)
39
+ * @param model Optional model to use (defaults to gpt-3.5-turbo)
40
+ */
41
+ initialize(apiKey, provider = "openai", model) {
42
+ if (AIService.serverInitialized) {
43
+ console.warn("AIService has already been initialized by the server.");
44
+ return;
45
+ }
46
+ this.provider = provider;
47
+ if (model)
48
+ this.model = model;
49
+ switch (this.provider) {
50
+ case "openai":
51
+ this.client = new OpenAI({ apiKey });
52
+ break;
53
+ case "anthropic":
54
+ this.client = new Anthropic({ apiKey });
55
+ break;
56
+ case "huggingface":
57
+ this.client = new InferenceClient(apiKey);
58
+ break;
59
+ default:
60
+ throw new Error(`Unsupported AI provider: ${this.provider}`);
61
+ }
62
+ AIService.serverInitialized = true;
63
+ }
64
+ /**
65
+ * Set the available tools that can be used by the AI
66
+ * @param tools Array of tools with name, description, and parameters
67
+ */
68
+ setAvailableTools(tools) {
69
+ this.availableTools = tools;
70
+ this.setSystemPrompt(this.systemPrompt.replace("MCP_TOOLS", JSON.stringify(this.availableTools, null, 2)));
71
+ }
72
+ ensureInitialized() {
73
+ return this.client !== null;
74
+ }
75
+ /**
76
+ * Get tool definitions for the AI from the available tools
77
+ * @param toolNames Optional array of tool names to filter by (if not provided, all tools will be included)
78
+ * @returns Array of tool definitions
79
+ */
80
+ getToolDefinitions(toolNames) {
81
+ if (!toolNames?.length) {
82
+ return this.availableTools.map((tool) => ({
83
+ type: "function",
84
+ function: {
85
+ name: tool.name,
86
+ description: tool.description,
87
+ parameters: tool.parameters,
88
+ },
89
+ }));
90
+ }
91
+ return this.availableTools
92
+ .filter((tool) => toolNames.includes(tool.name))
93
+ .map((tool) => ({
94
+ type: "function",
95
+ function: {
96
+ name: tool.name,
97
+ description: tool.description,
98
+ parameters: tool.parameters,
99
+ },
100
+ }));
101
+ }
102
+ /**
103
+ * Attempt to extract specific tool names from the user query
104
+ * @param query The user's query
105
+ * @returns Array of tool names mentioned in the query, or empty array if none found
106
+ */
107
+ extractToolNames(query) {
108
+ const mentionedTools = [];
109
+ for (const tool of this.availableTools) {
110
+ const toolNameRegex = new RegExp(`\\b${tool.name}\\b`, "i");
111
+ if (toolNameRegex.test(query)) {
112
+ mentionedTools.push(tool.name);
113
+ }
114
+ }
115
+ return mentionedTools;
116
+ }
117
+ /**
118
+ * Process a user query and determine which tool to use
119
+ * @param query User query
120
+ * @param specificTools Optional array of specific tool names to consider
121
+ * @returns Object containing the selected tool name and parameters
122
+ */
123
+ async processQuery(query, specificTools) {
124
+ if (!this.ensureInitialized())
125
+ return null;
126
+ try {
127
+ const mentionedTools = this.extractToolNames(query);
128
+ const toolsToUse = specificTools || (mentionedTools.length ? mentionedTools : undefined);
129
+ const tools = this.getToolDefinitions(toolsToUse);
130
+ const messages = [
131
+ { role: "system", content: this.systemPrompt },
132
+ { role: "user", content: query },
133
+ ];
134
+ if (this.provider === "openai") {
135
+ return this.processOpenAIQuery(tools, messages);
136
+ }
137
+ if (this.provider === "anthropic") {
138
+ return this.processAnthropicQuery(tools, messages);
139
+ }
140
+ if (this.provider === "huggingface") {
141
+ return this.processHuggingFaceQuery(tools, messages);
142
+ }
143
+ return null;
144
+ }
145
+ catch (error) {
146
+ if (error instanceof Error) {
147
+ throw new Error(error.message);
148
+ }
149
+ throw error;
150
+ }
151
+ }
152
+ async processHuggingFaceQuery(tools, messages) {
153
+ const response = await this.client.chatCompletion({
154
+ tools,
155
+ messages,
156
+ max_tokens: 512,
157
+ model: this.model,
158
+ });
159
+ if (!response.choices?.length)
160
+ return null;
161
+ const message = response.choices[0].message;
162
+ if (message.tool_calls?.length) {
163
+ const toolCall = message.tool_calls[0];
164
+ return {
165
+ toolName: toolCall.function.name,
166
+ reasoning: message.content || undefined,
167
+ parameters: JSON.parse(toolCall.function.arguments),
168
+ };
169
+ }
170
+ return null;
171
+ }
172
+ async processAnthropicQuery(tools, messages) {
173
+ const response = await this.client.messages.create({
174
+ tools,
175
+ messages,
176
+ max_tokens: 1024,
177
+ model: this.model,
178
+ });
179
+ const content = response.content;
180
+ if (Array.isArray(content) && content.length) {
181
+ const toolCallItem = content.find((item) => item.type === "tool_call");
182
+ if (toolCallItem?.tool_call) {
183
+ const textItems = content.filter((item) => item.type === "text" &&
184
+ content.indexOf(item) < content.indexOf(toolCallItem));
185
+ const reasoning = textItems.map((item) => item.text).join(" ");
186
+ return {
187
+ reasoning: reasoning || undefined,
188
+ toolName: toolCallItem.tool_call.name,
189
+ parameters: JSON.parse(toolCallItem.tool_call.input),
190
+ };
191
+ }
192
+ }
193
+ return null;
194
+ }
195
+ async processOpenAIQuery(tools, messages) {
196
+ const response = await this.client.chat.completions.create({
197
+ model: this.model,
198
+ messages,
199
+ tools,
200
+ tool_choice: "auto",
201
+ });
202
+ if (!response.choices?.length)
203
+ return null;
204
+ const message = response.choices[0].message;
205
+ if (message.tool_calls?.length) {
206
+ const toolCall = message.tool_calls[0];
207
+ return {
208
+ toolName: toolCall.function.name,
209
+ reasoning: message.content || undefined,
210
+ parameters: JSON.parse(toolCall.function.arguments),
211
+ };
212
+ }
213
+ return null;
214
+ }
215
+ setSystemPrompt(prompt) {
216
+ this.systemPrompt = prompt;
217
+ }
218
+ }
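
Taken together, the `AIService` declarations above suggest the following server-side flow. This is a minimal sketch with a placeholder environment variable name and a hand-written tool list; normally `index.js` initializes the service and `ai-tools.js` supplies the registered tools:

```typescript
import { AIService } from "./utils/ai-handler.js";

const ai = AIService.getInstance();
// Intended to be called once, server side; AI_PROVIDER_API_KEY is a placeholder variable name.
ai.initialize(process.env.AI_PROVIDER_API_KEY ?? "", "openai", "gpt-4");

// Normally derived from the server's registered tools; hand-written here for illustration.
ai.setAvailableTools([
  {
    name: "search-across-all-indexes",
    description: "Search all Meilisearch indexes",
    parameters: { q: { type: "string", description: "Search query" } },
  },
]);

if (ai.ensureInitialized()) {
  const choice = await ai.processQuery("Find articles about cucumber");
  if (choice) {
    console.info(choice.toolName, choice.parameters, choice.reasoning ?? "");
  }
}
```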
@@ -1,19 +1,23 @@
1
+ import { AiProviderNameOptions } from "../types/options.js";
1
2
  /**
2
3
  * Configuration service to store and retrieve Meilisearch configuration
3
4
  */
4
5
  declare class ConfigHandler {
6
+ private _llmModel;
7
+ private _aiProviderName;
5
8
  private _meilisearchHost;
9
+ private _aiProviderApiKey;
6
10
  private _meilisearchApiKey;
7
11
  /**
8
12
  * Set the Meilisearch host URL
9
13
  * @param host The URL of the Meilisearch instance
10
14
  */
11
- setMeilisearchHost(host: string): void;
15
+ setMeilisearchHost(host?: string): void;
12
16
  /**
13
17
  * Set the Meilisearch API key
14
18
  * @param apiKey The API key for Meilisearch
15
19
  */
16
- setMeilisearchApiKey(apiKey: string): void;
20
+ setMeilisearchApiKey(apiKey?: string): void;
17
21
  /**
18
22
  * Get the current Meilisearch host URL
19
23
  * @returns The URL of the Meilisearch instance
@@ -24,6 +28,36 @@ declare class ConfigHandler {
24
28
  * @returns The API key for Meilisearch
25
29
  */
26
30
  getMeilisearchApiKey(): string;
31
+ /**
32
+ * Set the provider for AI inference
33
+ * @param provider The provider name: openai, huggingface, or anthropic.
34
+ */
35
+ setAiProviderName(provider?: AiProviderNameOptions): void;
36
+ /**
37
+ * Get the current provider for AI inference
38
+ * @returns The provider name
39
+ */
40
+ getAiProviderName(): AiProviderNameOptions;
41
+ /**
42
+ * Set the provider API key
43
+ * @param apiKey The API key for provider
44
+ */
45
+ setAiProviderApiKey(apiKey?: string): void;
46
+ /**
47
+ * Get the current provider API key
48
+ * @returns The API key for provider
49
+ */
50
+ getAiProviderApiKey(): string;
51
+ /**
52
+ * Set the AI model to use
53
+ * @param model The model name (e.g., gpt-3.5-turbo, gpt-4)
54
+ */
55
+ setLlmModel(model?: string): void;
56
+ /**
57
+ * Get the current AI model
58
+ * @returns The model name
59
+ */
60
+ getLlmModel(): string;
27
61
  }
28
62
  export declare const configHandler: ConfigHandler;
29
63
  export default configHandler;
@@ -1 +1 @@
1
- {"version":3,"file":"config-handler.d.ts","sourceRoot":"","sources":["../../src/utils/config-handler.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,cAAM,aAAa;IACjB,OAAO,CAAC,gBAAgB,CAAM;IAC9B,OAAO,CAAC,kBAAkB,CAAM;IAEhC;;;OAGG;IACH,kBAAkB,CAAC,IAAI,EAAE,MAAM,GAAG,IAAI;IAItC;;;OAGG;IACH,oBAAoB,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI;IAI1C;;;OAGG;IACH,kBAAkB,IAAI,MAAM;IAI5B;;;OAGG;IACH,oBAAoB,IAAI,MAAM;CAG/B;AAED,eAAO,MAAM,aAAa,eAAsB,CAAC;AAEjD,eAAe,aAAa,CAAC"}
1
+ {"version":3,"file":"config-handler.d.ts","sourceRoot":"","sources":["../../src/utils/config-handler.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,qBAAqB,EAAE,MAAM,qBAAqB,CAAC;AAE5D;;GAEG;AACH,cAAM,aAAa;IACjB,OAAO,CAAC,SAAS,CAAM;IACvB,OAAO,CAAC,eAAe,CAAM;IAC7B,OAAO,CAAC,gBAAgB,CAAM;IAC9B,OAAO,CAAC,iBAAiB,CAAM;IAC/B,OAAO,CAAC,kBAAkB,CAAM;IAEhC;;;OAGG;IACH,kBAAkB,CAAC,IAAI,CAAC,EAAE,MAAM,GAAG,IAAI;IAIvC;;;OAGG;IACH,oBAAoB,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,IAAI;IAI3C;;;OAGG;IACH,kBAAkB,IAAI,MAAM;IAI5B;;;OAGG;IACH,oBAAoB,IAAI,MAAM;IAI9B;;;OAGG;IACH,iBAAiB,CAAC,QAAQ,CAAC,EAAE,qBAAqB,GAAG,IAAI;IAIzD;;;OAGG;IACH,iBAAiB,IACgB,qBAAqB;IAGtD;;;OAGG;IACH,mBAAmB,CAAC,MAAM,CAAC,EAAE,MAAM,GAAG,IAAI;IAI1C;;;OAGG;IACH,mBAAmB,IAAI,MAAM;IAI7B;;;OAGG;IACH,WAAW,CAAC,KAAK,CAAC,EAAE,MAAM,GAAG,IAAI;IAIjC;;;OAGG;IACH,WAAW,IAAI,MAAM;CAGtB;AAED,eAAO,MAAM,aAAa,eAAsB,CAAC;AAEjD,eAAe,aAAa,CAAC"}
@@ -2,14 +2,17 @@
2
2
  * Configuration service to store and retrieve Meilisearch configuration
3
3
  */
4
4
  class ConfigHandler {
5
+ _llmModel = "";
6
+ _aiProviderName = "";
5
7
  _meilisearchHost = "";
8
+ _aiProviderApiKey = "";
6
9
  _meilisearchApiKey = "";
7
10
  /**
8
11
  * Set the Meilisearch host URL
9
12
  * @param host The URL of the Meilisearch instance
10
13
  */
11
14
  setMeilisearchHost(host) {
12
- this._meilisearchHost = host;
15
+ this._meilisearchHost = host || "http://localhost:7700";
13
16
  }
14
17
  /**
15
18
  * Set the Meilisearch API key
@@ -32,6 +35,48 @@ class ConfigHandler {
32
35
  getMeilisearchApiKey() {
33
36
  return this._meilisearchApiKey;
34
37
  }
38
+ /**
39
+ * Set the provider for AI inference
40
+ * @param provider The provider name: openai, huggingface, or anthropic.
41
+ */
42
+ setAiProviderName(provider) {
43
+ this._aiProviderName = provider || "openai";
44
+ }
45
+ /**
46
+ * Get the current provider for AI inference
47
+ * @returns The provider name
48
+ */
49
+ getAiProviderName() {
50
+ return this._aiProviderName;
51
+ }
52
+ /**
53
+ * Set the provider API key
54
+ * @param apiKey The API key for provider
55
+ */
56
+ setAiProviderApiKey(apiKey) {
57
+ this._aiProviderApiKey = apiKey || "";
58
+ }
59
+ /**
60
+ * Get the current provider API key
61
+ * @returns The API key for provider
62
+ */
63
+ getAiProviderApiKey() {
64
+ return this._aiProviderApiKey;
65
+ }
66
+ /**
67
+ * Set the AI model to use
68
+ * @param model The model name (e.g., gpt-3.5-turbo, gpt-4)
69
+ */
70
+ setLlmModel(model) {
71
+ this._llmModel = model || "gpt-3.5-turbo";
72
+ }
73
+ /**
74
+ * Get the current AI model
75
+ * @returns The model name
76
+ */
77
+ getLlmModel() {
78
+ return this._llmModel;
79
+ }
35
80
  }
36
81
  export const configHandler = new ConfigHandler();
37
82
  export default configHandler;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "mcp-meilisearch",
3
- "version": "1.2.8",
3
+ "version": "1.3.1",
4
4
  "description": "Model Context Protocol (MCP) implementation for Meilisearch",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",
@@ -23,14 +23,17 @@
23
23
  "demo"
24
24
  ],
25
25
  "scripts": {
26
- "build": "tsc && tsc --project tsconfig.types.json",
26
+ "build": "tsc && tsc -p tsconfig.types.json",
27
27
  "demo": "npm run build & npm run dev --workspace=demo",
28
28
  "server": "npm run build && node --env-file=.env dist/index.js",
29
29
  "prepublishOnly": "rm -rf dist && npm version patch && npm run build"
30
30
  },
31
31
  "dependencies": {
32
+ "@anthropic-ai/sdk": "^0.50.4",
33
+ "@huggingface/inference": "^3.13.0",
32
34
  "@modelcontextprotocol/sdk": "^1.11.2",
33
35
  "axios": "^1.9.0",
36
+ "openai": "^4.98.0",
34
37
  "zod": "^3.24.4"
35
38
  },
36
39
  "devDependencies": {