@laskarks/mcp-rag-node 1.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,181 @@
1
+ # RAG MCP Server
2
+
3
+ MCP (Model Context Protocol) server for RAG (Retrieval-Augmented Generation) using Pinecone, OpenAI-compatible embedding APIs, and the official MCP SDK. Save documents and search by semantic similarity via MCP tools.
4
+
5
+ ## Tools
6
+
7
+ | Tool | Description |
8
+ | ------------------------ | ------------------------------------------------------ |
9
+ | `save_document_to_rag` | Chunk text, create embeddings, and save to Pinecone. |
10
+ | `search_document_on_rag` | Search documents by keyword using semantic similarity. |
11
+
12
+ ## Installation
13
+
14
+ ```bash
15
+ npm install @laskarks/mcp-rag-node
16
+ # or
17
+ npx @laskarks/mcp-rag-node
18
+ ```
19
+
20
+ ## Environment Variables
21
+
22
+ ### Required
23
+
24
+ | Variable | Description | Example |
25
+ | ------------------ | ---------------------------------------------------------- | --------------------------------------------------------- |
26
+ | `APIKEY` | OpenAI or OpenRouter API key for embeddings | `sk-...` |
27
+ | `EMBEDDING_MODEL` | Embedding model ID | `text-embedding-3-small`, `openai/text-embedding-3-small` |
28
+ | `PINECONE_API_KEY` | Pinecone API key | `...` |
29
+ | `PINECONE_INDEX` | Pinecone index name (dimension must match embedding model) | `rag-index` |
30
+ | `PROVIDER` | AI provider (allowed values: `openai`, `openrouter`) | `openai` or `openrouter` |
31
+
32
+ > **Important:** Create your Pinecone index with the same dimension as your embedding model.
33
+
34
+ ### Embedding models and vector dimensions
35
+
36
+ Use the **Dimension** column when creating your Pinecone index.
37
+
38
+ | Model | Dimension | Provider |
39
+ | ------------------------------------------------ | --------- | ------------------------ |
40
+ | `text-embedding-3-small` | 1536 | OpenAI, OpenRouter |
41
+ | `text-embedding-3-large` | 3072 | OpenAI, OpenRouter |
42
+ | `text-embedding-ada-002` | 1536 | OpenAI, OpenRouter |
43
+ | `text-embedding-3-small` (with dimensions param) | 512–1536 | OpenAI |
44
+ | `voyage-3` | 1024 | Voyage (via OpenRouter) |
45
+ | `nomic-embed-text-v1.5` | 768 | Nomic (via OpenRouter) |
46
+ | `mistral-embed` | 1024 | Mistral (via OpenRouter) |
47
+ | `cohere/embed-english-v3.0` | 1024 | Cohere (via OpenRouter) |
48
+
49
+ For OpenRouter, use the model ID format, e.g. `openai/text-embedding-3-small` or `voyage/voyage-3`.
50
+
51
+ ### Optional
52
+
53
+ | Variable | Description | Default |
54
+ | ---------------------- | ------------------------------------- | ------- |
55
+ | `RAG_CHUNK_MAX_TOKENS` | Max tokens per chunk before embedding | `512` |
56
+ | `RAG_CHUNK_OVERLAP` | Overlap tokens between chunks | `50` |
57
+
58
+ ## Usage
59
+
60
+ ### Run the server
61
+
62
+ ```bash
63
+ npm run build
64
+ npm start
65
+ ```
66
+
67
+ Or with env file:
68
+
69
+ ```bash
70
+ # .env
71
+ APIKEY=sk-...
72
+ EMBEDDING_MODEL=text-embedding-3-small
73
+ PINECONE_API_KEY=...
74
+ PINECONE_INDEX=rag-index
75
+ PROVIDER=openai
76
+ ```
77
+
78
+ ```bash
79
+ npm start
80
+ ```
81
+
82
+ ### Add to MCP clients
83
+
84
+ **Claude Desktop** (`claude_desktop_config.json`):
85
+
86
+ ```json
87
+ {
88
+ "mcpServers": {
89
+ "rag": {
90
+ "command": "node",
91
+ "args": ["/path/to/rag-mcp-nodejs/dist/index.js"],
92
+ "env": {
93
+ "APIKEY": "sk-...",
94
+ "EMBEDDING_MODEL": "text-embedding-3-small",
95
+ "PINECONE_API_KEY": "...",
96
+ "PINECONE_INDEX": "rag-index",
97
+ "PROVIDER": "openai"
98
+ }
99
+ }
100
+ }
101
+ }
102
+ ```
103
+
104
+ **Cursor** (`.cursor/mcp.json` or MCP settings):
105
+
106
+ ```json
107
+ {
108
+ "mcpServers": {
109
+ "rag": {
110
+ "command": "node",
111
+ "args": ["/path/to/rag-mcp-nodejs/dist/index.js"],
112
+ "env": {
113
+ "APIKEY": "sk-...",
114
+ "EMBEDDING_MODEL": "text-embedding-3-small",
115
+ "PINECONE_API_KEY": "...",
116
+ "PINECONE_INDEX": "rag-index",
117
+ "PROVIDER": "openai"
118
+ }
119
+ }
120
+ }
121
+ }
122
+ ```
123
+
124
+ ### Development
125
+
126
+ ```bash
127
+ # Install dependencies
128
+ npm install
129
+
130
+ # Build
131
+ npm run build
132
+
133
+ # Run server (from compiled JS)
134
+ npm start
135
+
136
+ # Run server (dev, from TypeScript)
137
+ npm run dev
138
+
139
+ # Run sample client
140
+ npm run client
141
+ ```
142
+
143
+ ## Project structure
144
+
145
+ ```
146
+ src/
147
+ ├── index.ts # MCP server entry, tools registration
148
+ ├── ai.ts # AI controller (chunking, embeddings, Pinecone)
149
+ ├── env.ts # Environment loading
150
+ └── client.ts # Example MCP client for testing
151
+ dist/ # Compiled output (after npm run build)
152
+ ```
153
+
154
+ ## Publish to npm
155
+
156
+ Before publishing:
157
+
158
+ 1. Add `files` to `package.json` to include only `dist/` and docs:
159
+
160
+ ```json
161
+ "files": ["dist", "README.md"]
162
+ ```
163
+
164
+ 2. Ensure `npm run build` succeeds and `dist/` is committed or built on publish.
165
+ 3. Add `bin` entry for `npx rag-mcp-nodejs` (optional):
166
+
167
+ ```json
168
+ "bin": { "rag-mcp-nodejs": "dist/index.js" }
169
+ ```
170
+
171
+ Note: MCP servers are usually run via `node dist/index.js`; a `bin` is optional.
+
+ 4. Set a unique package name (npm may require a scoped name, e.g. `@yourname/rag-mcp-nodejs`).
+ 5. Add `repository`, `homepage`, and `engines.node` in `package.json` (optional but recommended).
172
+
173
+ ## Requirements
174
+
175
+ - Node.js >= 18
176
+ - Pinecone account
177
+ - OpenAI or OpenRouter API key
178
+
179
+ ## License
180
+
181
+ ISC
package/dist/ai.js ADDED
@@ -0,0 +1,149 @@
1
import OpenAI from "openai";
import { encodingForModel } from "js-tiktoken";
import { Pinecone } from "@pinecone-database/pinecone";
import { env } from "./env.js";

/**
 * AI controller.
 *
 * Responsibilities:
 *  - token-based chunking of input text (js-tiktoken),
 *  - embedding creation via an OpenAI-compatible API (OpenAI or OpenRouter),
 *  - persisting/querying embeddings in a Pinecone index.
 *
 * Constructor parameters:
 *   apikey: string
 *   embeddingModel: string
 *   provider: "openrouter" | "openai"
 *   pineconeKey: string
 *   pineconeIndex: string
 */
class AI {
    constructor(args) {
        this.embeddingModel = args.embeddingModel;
        this.provider = args.provider;
        this.pineconeKey = args.pineconeKey;
        this.pineconeIndex = args.pineconeIndex;
        if (args.provider === "openrouter") {
            // OpenRouter speaks the OpenAI wire protocol under its own base URL.
            this.MCP_AI = new OpenAI({
                apiKey: args.apikey,
                baseURL: "https://openrouter.ai/api/v1",
                defaultHeaders: {
                    "HTTP-Referer": "http://localhost:3000",
                    "X-Title": "OpenRouter RAG",
                    "Cache-Control": "no-cache, no-store, must-revalidate",
                    Pragma: "no-cache",
                    Expires: "0",
                },
            });
        }
        else {
            this.MCP_AI = new OpenAI({
                apiKey: args.apikey,
                defaultHeaders: {
                    "Cache-Control": "no-cache, no-store, must-revalidate",
                    Pragma: "no-cache",
                    Expires: "0",
                },
            });
        }
    }
    // ================================================================================================== //
    // CHUNK TEXT
    // ================================================================================================== //
    // Split `text` into chunks of at most `maxTokens` tokens, with `overlap`
    // tokens shared between consecutive chunks.
    chunkByToken(text, maxTokens, overlap) {
        const envMax = Number(env.RAG_CHUNK_MAX_TOKENS);
        // Guard against NaN from an unset/garbage env var; fall back to 512.
        const defaultMax = Number.isFinite(envMax) && envMax > 0 ? envMax : 512;
        const limit = maxTokens ?? defaultMax;
        const overlapTokens = overlap ?? 50;
        // BUG FIX: encodingForModel() expects a model id; the old code passed
        // the provider name ("openai") on the OpenAI path, which throws at runtime.
        let model = this.embeddingModel;
        if (this.provider === "openrouter" && model.includes("/")) {
            model = model.split("/")[1]; // "openai/text-embedding-3-small" -> "text-embedding-3-small"
        }
        let enc;
        try {
            enc = encodingForModel(model);
        }
        catch {
            // Unknown model id — fall back to the tokenizer used by OpenAI embedding models.
            enc = encodingForModel("text-embedding-3-small");
        }
        const tokens = Array.from(enc.encode(text));
        const chunks = [];
        // BUG FIX: a non-positive step (overlap >= limit) previously looped forever.
        const step = Math.max(1, limit - overlapTokens);
        for (let start = 0; start < tokens.length; start += step) {
            chunks.push(enc.decode(tokens.slice(start, start + limit)));
            if (start + limit >= tokens.length)
                break; // final chunk reached the end of the text
        }
        return chunks;
    }
    // ================================================================================================== //
    // SAVE_TO_RAG
    // ================================================================================================== //
    // Chunk `content`, embed every chunk, and upsert all vectors into Pinecone.
    // Returns a human-readable status string (errors are stringified, not thrown).
    async save_to_rag(content) {
        try {
            const chunkText = this.chunkByToken(content);
            const response = await this.MCP_AI.embeddings.create({
                model: this.embeddingModel,
                input: chunkText,
            });
            if (response?.data[0]?.object === "embedding") {
                // Use the key passed to the constructor (consistent with search_documents).
                const pinecone = new Pinecone({ apiKey: this.pineconeKey });
                const index = pinecone.index(this.pineconeIndex);
                const now = Date.now();
                const createdAt = new Date().toISOString();
                // BUG FIX: persist every chunk's embedding, not only the first one.
                const vectors = response.data.map((item, i) => ({
                    id: `${now}-${i}`,
                    values: item.embedding,
                    metadata: {
                        text: chunkText[i],
                        createdAt,
                    },
                }));
                // BUG FIX: the Pinecone SDK's upsert() takes the record array directly.
                await index.upsert(vectors);
                return "Document successfully saved!";
            }
            else
                return `${response?.data[0] || "Unexpected error"} `;
        }
        catch (error) {
            return `${error}`;
        }
    }
    // ================================================================================================== //
    // SEARCH_DOCUMENT
    // ================================================================================================== //
    // Embed `content` and return the text of the 3 most similar stored chunks,
    // joined by blank lines. Errors are stringified, not thrown.
    async search_documents(content) {
        try {
            const chunkText = this.chunkByToken(content);
            const response = await this.MCP_AI.embeddings.create({
                model: this.embeddingModel,
                input: chunkText,
            });
            if (response?.data[0]?.object === "embedding") {
                const questionVector = response.data[0].embedding;
                const pinecone = new Pinecone({ apiKey: this.pineconeKey });
                const index = pinecone.index(this.pineconeIndex);
                const results = await index.query({
                    vector: questionVector,
                    topK: 3,
                    includeMetadata: true,
                });
                // BUG FIX: return readable text; the tool layer previously
                // stringified the raw query result to "[object Object]".
                const context = (results.matches ?? [])
                    .map((match) => `${match.metadata?.text ?? ""}`)
                    .join("\n\n");
                return context || "No matching documents found.";
            }
            else {
                return `${response?.data[0] || "Unexpected error"}`;
            }
        }
        catch (error) {
            return `${error}` || "Unexpected error";
        }
    }
}
export default AI;
package/dist/env.js ADDED
@@ -0,0 +1,10 @@
1
import { config } from "dotenv";
// Load .env before any variable is read.
config();
// BUG FIX: `${process.env.X}` turns an unset variable into the literal string
// "undefined", which is truthy and breaks downstream checks such as
// `env.RAG_CHUNK_MAX_TOKENS ? Number(...) : 512` (Number("undefined") is NaN).
// Missing variables now become "" (falsy) instead.
export const env = {
    PINECONE_API_KEY: process.env.PINECONE_API_KEY ?? "",
    APIKEY: process.env.APIKEY ?? "",
    EMBEDDING_MODEL: process.env.EMBEDDING_MODEL ?? "",
    PINECONE_INDEX: process.env.PINECONE_INDEX ?? "",
    RAG_CHUNK_MAX_TOKENS: process.env.RAG_CHUNK_MAX_TOKENS ?? "",
    PROVIDER: process.env.PROVIDER ?? "",
};
package/dist/index.js ADDED
@@ -0,0 +1,72 @@
1
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { z } from "zod";
// BUG FIX: `config` is dotenv's .env loader; it was wrongly imported from "zod".
import { config } from "dotenv";
import AI from "./ai.js";
import { env } from "./env.js";

const PROVIDER_OPTIONS = ["openai", "openrouter"];

// Validate and normalise the PROVIDER env value; throws on unknown providers.
function parseProvider(value) {
    const v = (value ?? "").toLowerCase();
    if (v === "openai" || v === "openrouter")
        return v;
    throw new Error(`Invalid PROVIDER: "${value}". Must be one of: ${PROVIDER_OPTIONS.join(", ")}`);
}

// Entry point: build the AI controller from environment variables, register
// the MCP tools, and serve over stdio.
async function main() {
    config(); // load .env (env.js also does this on import; kept for safety)
    const provider = parseProvider(env.PROVIDER);
    const CallAI = new AI({
        apikey: env.APIKEY,
        embeddingModel: env.EMBEDDING_MODEL,
        pineconeIndex: env.PINECONE_INDEX,
        pineconeKey: env.PINECONE_API_KEY,
        provider,
    });
    const server = new McpServer({
        name: "rag-mcp-nodejs",
        version: "1.0.1", // keep in sync with package.json
    });
    // ================================================================================================================= //
    // TOOLS
    // ================================================================================================================= //
    // 1. SAVE_DOCUMENT ---------------------------------------------------- >
    server.registerTool("save_document_to_rag", {
        title: "Save Document To RAG",
        description: "Save document or information to RAG",
        inputSchema: {
            text: z
                .string()
                .describe("Document or information that will be saved to RAG"),
        },
    }, async ({ text }) => {
        const response = await CallAI.save_to_rag(text);
        return {
            content: [
                {
                    type: "text",
                    text: response,
                },
            ],
        };
    });
    // 2. SEARCH_DOCUMENT ------------------------------------------------ >
    server.registerTool("search_document_on_rag", {
        title: "Search Document On RAG",
        description: "Search Document on RAG with keyword",
        inputSchema: {
            keyword: z
                .string()
                .describe("Query / keyword that will search on RAG with similarity"),
        },
    }, async ({ keyword }) => {
        const response = await CallAI.search_documents(keyword);
        return {
            content: [{ type: "text", text: `${response}` }],
        };
    });
    const transport = new StdioServerTransport();
    await server.connect(transport);
}

main().catch((error) => {
    console.error("Server error:", error);
    process.exit(1);
});
package/package.json ADDED
@@ -0,0 +1,43 @@
1
+ {
2
+ "name": "@laskarks/mcp-rag-node",
3
+ "version": "1.0.1",
4
+ "description": "Simple MCP RAG server using @modelcontextprotocol/sdk",
5
+ "main": "dist/index.js",
6
+ "type": "module",
7
+ "scripts": {
8
+ "build": "tsc",
9
+ "start": "node dist/index.js",
10
+ "dev": "node --loader ts-node/esm src/index.ts",
11
+ "client": "node --loader ts-node/esm src/client.ts"
12
+ },
13
+ "keywords": [
14
+ "mcp",
15
+ "modelcontextprotocol",
16
+ "sdk"
17
+ ],
18
+ "author": "laskarksatria",
19
+ "license": "ISC",
20
+ "repository": {
21
+ "type": "git",
22
+ "url": "https://github.com/laskar-ksatria/rag-mcp-nodejs"
23
+ },
24
+ "homepage": "https://github.com/laskar-ksatria/rag-mcp-nodejs#readme",
25
+ "bugs": {
26
+ "url": "https://github.com/laskar-ksatria/rag-mcp-nodejs/issues"
27
+ },
28
+ "publishConfig": {
29
+ "access": "public"
30
+ },
31
+ "dependencies": {
32
+ "@modelcontextprotocol/sdk": "^1.27.1",
33
+ "@pinecone-database/pinecone": "^7.1.0",
34
+ "dotenv": "^17.3.1",
35
+ "js-tiktoken": "^1.0.21",
36
+ "openai": "^6.27.0",
37
+ "zod": "^4.3.6"
38
+ },
39
+ "devDependencies": {
40
+ "ts-node": "^10.9.2",
41
+ "typescript": "^5.9.3"
42
+ }
43
+ }
package/src/ai.ts ADDED
@@ -0,0 +1,155 @@
1
+ import OpenAI from "openai";
2
+ import { encodingForModel, TiktokenModel } from "js-tiktoken";
3
+ import {
4
+ Pinecone,
5
+ PineconeRecord,
6
+ RecordMetadata,
7
+ } from "@pinecone-database/pinecone";
8
+ import { env } from "./env.js";
9
+
10
+ export type TProvider = "openrouter" | "openai" | null;
11
+ export interface IContructorsPayload {
12
+ apikey: string;
13
+ embeddingModel: string;
14
+ provider: TProvider;
15
+ pineconeKey: string;
16
+ pineconeIndex: string;
17
+ }
18
+
19
+ /**
20
+ * Ai Controller
21
+ * Contructor parameters
22
+ * apikey: string
23
+ * model: string
24
+ * embeddingModel: string
25
+ * provider: "openrouter" | "openai"
26
+ */
27
+ class AI {
28
+ // States
29
+ private pineconeKey: string = "";
30
+ private embeddingModel: string = "";
31
+ private pineconeIndex: string = "";
32
+ private MCP_AI: OpenAI;
33
+ private provider: "openrouter" | "openai" | null = null;
34
+
35
+ // Contructors
36
+ constructor(args: IContructorsPayload) {
37
+ this.embeddingModel = args.embeddingModel;
38
+ this.provider = args.provider;
39
+ this.pineconeKey = args.pineconeKey;
40
+ this.pineconeIndex = args.pineconeIndex;
41
+ if (args.provider === "openrouter") {
42
+ this.MCP_AI = new OpenAI({
43
+ apiKey: args.apikey,
44
+ baseURL: `https://openrouter.ai/api/v1`,
45
+ defaultHeaders: {
46
+ "HTTP-Referer": "http://localhost:3000",
47
+ "X-Title": "OpenRouter RAG",
48
+ "Cache-Control": "no-cache, no-store, must-revalidate",
49
+ Pragma: "no-cache",
50
+ Expires: "0",
51
+ },
52
+ });
53
+ } else {
54
+ this.MCP_AI = new OpenAI({
55
+ apiKey: args.apikey,
56
+ defaultHeaders: {
57
+ "Cache-Control": "no-cache, no-store, must-revalidate",
58
+ Pragma: "no-cache",
59
+ Expires: "0",
60
+ },
61
+ });
62
+ }
63
+ }
64
+
65
+ // ================================================================================================== //
66
+ // CHUNK TEXT
67
+ // ================================================================================================== //
68
+ chunkByToken(text: string, maxTokens?: number, overlap?: number): string[] {
69
+ const defaultMax = env.RAG_CHUNK_MAX_TOKENS
70
+ ? Number(env.RAG_CHUNK_MAX_TOKENS)
71
+ : 512;
72
+ const defaultOverlap = 50;
73
+ const limit = maxTokens ?? defaultMax;
74
+ const overlapTokens = overlap ?? defaultOverlap;
75
+
76
+ let model: TProvider | string = null;
77
+ if (this.provider === "openrouter") {
78
+ model = this.embeddingModel.split("/")[1];
79
+ } else model = this.provider;
80
+ const enc = encodingForModel(model as TiktokenModel);
81
+ const tokens = Array.from(enc.encode(text));
82
+ const chunks: string[] = [];
83
+ let start = 0;
84
+ while (start < tokens.length) {
85
+ const slice = tokens.slice(start, start + limit);
86
+ chunks.push(enc.decode(slice));
87
+ start += limit - overlapTokens;
88
+ }
89
+ return chunks;
90
+ }
91
+
92
+ // ================================================================================================== //
93
+ // SAVE_TO_RAG
94
+ // ================================================================================================== //
95
+ async save_to_rag(content: string) {
96
+ try {
97
+ const chunkText = this.chunkByToken(content);
98
+ const response = await this.MCP_AI.embeddings.create({
99
+ model: this.embeddingModel,
100
+ input: chunkText,
101
+ });
102
+ if (response?.data[0]?.object === "embedding") {
103
+ const pinecone = new Pinecone({
104
+ apiKey: env.PINECONE_API_KEY as string,
105
+ });
106
+ const index = pinecone.index(this.pineconeIndex);
107
+ const vectors: PineconeRecord<RecordMetadata>[] = [
108
+ {
109
+ id: `${Date.now()}`,
110
+ values: response.data[0]?.embedding,
111
+ metadata: {
112
+ text: chunkText[0],
113
+ createdAt: new Date().toISOString(),
114
+ },
115
+ },
116
+ ];
117
+ await index.upsert({ records: vectors });
118
+ return "Document successfully saved!";
119
+ } else return `${response?.data[0] || "Unexpected error"} `;
120
+ } catch (error: any) {
121
+ return `${error}`;
122
+ // return error?.error?.message || "Unexpected error!";
123
+ }
124
+ }
125
+ // ================================================================================================== //
126
+ // SEARCH_DOCUMENT
127
+ // ================================================================================================== //
128
+ async search_documents(content: string) {
129
+ try {
130
+ const chunkText = this.chunkByToken(content);
131
+ const response = await this.MCP_AI.embeddings.create({
132
+ model: this.embeddingModel,
133
+ input: chunkText,
134
+ });
135
+ if (response?.data[0]?.object === "embedding") {
136
+ const questionVector = response.data[0].embedding;
137
+ // search
138
+ const pinecone = new Pinecone({ apiKey: this.pineconeKey });
139
+ const index = pinecone.index(this.pineconeIndex);
140
+ const results = await index.query({
141
+ vector: questionVector,
142
+ topK: 3,
143
+ includeMetadata: true,
144
+ });
145
+ return results;
146
+ } else {
147
+ return response?.data[0] || "Unexpected error";
148
+ }
149
+ } catch (error) {
150
+ return `${error}` || "Unexpected error";
151
+ }
152
+ }
153
+ }
154
+
155
+ export default AI;
package/src/env.ts ADDED
@@ -0,0 +1,12 @@
1
+ import { config } from "dotenv";
2
+
3
+ config();
4
+
5
+ export const env = {
6
+ PINECONE_API_KEY: `${process.env.PINECONE_API_KEY}`,
7
+ APIKEY: `${process.env.APIKEY}`,
8
+ EMBEDDING_MODEL: `${process.env.EMBEDDING_MODEL}`,
9
+ PINECONE_INDEX: `${process.env.PINECONE_INDEX}`,
10
+ RAG_CHUNK_MAX_TOKENS: `${process.env.RAG_CHUNK_MAX_TOKENS}`,
11
+ PROVIDER: `${process.env.PROVIDER}`,
12
+ };
package/src/index.ts ADDED
@@ -0,0 +1,93 @@
1
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
// BUG FIX: `config` is dotenv's .env loader; it was wrongly imported from "zod"
// (zod exposes no such .env-loading export — this broke the build).
import { config } from "dotenv";
import { string, z } from "zod";
import AI from "./ai.js";
import { env } from "./env.js";
7
+
8
+ const PROVIDER_OPTIONS = ["openai", "openrouter"] as const;
9
+ type ProviderOption = (typeof PROVIDER_OPTIONS)[number];
10
+
11
+ function parseProvider(value: string | undefined): ProviderOption {
12
+ const v = (value ?? "").toLowerCase();
13
+ if (v === "openai" || v === "openrouter") return v;
14
+ throw new Error(
15
+ `Invalid PROVIDER: "${value}". Must be one of: ${PROVIDER_OPTIONS.join(", ")}`,
16
+ );
17
+ }
18
+
19
+ async function main() {
20
+ config();
21
+
22
+ const provider = parseProvider(env.PROVIDER);
23
+
24
+ const CallAI = new AI({
25
+ apikey: env.APIKEY,
26
+ embeddingModel: env.EMBEDDING_MODEL,
27
+ pineconeIndex: env.PINECONE_INDEX,
28
+ pineconeKey: env.PINECONE_API_KEY,
29
+ provider,
30
+ });
31
+
32
+ const server = new McpServer({
33
+ name: "rag-mcp-nodejs",
34
+ version: "1.0.0",
35
+ });
36
+
37
+ // ================================================================================================================= //
38
+ // TOOLS
39
+ // ================================================================================================================= //
40
+
41
+ // 1. SAVE_DOCUMENT ---------------------------------------------------- >
42
+ server.registerTool(
43
+ "save_document_to_rag",
44
+ {
45
+ title: "Save Document To RAG",
46
+ description: "Save document or information to RAG",
47
+ inputSchema: {
48
+ text: z
49
+ .string()
50
+ .describe("Document or information that would be save to RAG"),
51
+ },
52
+ },
53
+ async ({ text }: { text: string }) => {
54
+ const response = await CallAI.save_to_rag(text);
55
+ return {
56
+ content: [
57
+ {
58
+ type: "text",
59
+ text: response,
60
+ },
61
+ ],
62
+ };
63
+ },
64
+ );
65
+
66
+ // 2. SEARCH_DOCUMENT ------------------------------------------------ >
67
+ server.registerTool(
68
+ "search_document_on_rag",
69
+ {
70
+ title: "Search Document On RAG",
71
+ description: "Search Document on RAG with keyword",
72
+ inputSchema: {
73
+ keyword: z
74
+ .string()
75
+ .describe("Query / keyword that will search on RAG with similarity"),
76
+ },
77
+ },
78
+ async ({ keyword }: { keyword: string }) => {
79
+ const response = await CallAI.search_documents(keyword);
80
+ return {
81
+ content: [{ type: "text", text: `${response}` }],
82
+ };
83
+ },
84
+ );
85
+
86
+ const transport = new StdioServerTransport();
87
+ await server.connect(transport);
88
+ }
89
+
90
+ main().catch((error) => {
91
+ console.error("Server error:", error);
92
+ process.exit(1);
93
+ });
package/tsconfig.json ADDED
@@ -0,0 +1,14 @@
1
+ {
2
+ "compilerOptions": {
3
+ "target": "ES2020",
4
+ "module": "NodeNext",
5
+ "moduleResolution": "NodeNext",
6
+ "outDir": "dist",
7
+ "rootDir": "src",
8
+ "strict": true,
9
+ "esModuleInterop": true,
10
+ "forceConsistentCasingInFileNames": true,
11
+ "skipLibCheck": true
12
+ },
13
+ "include": ["src"]
14
+ }