@gotza02/sequential-thinking 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +262 -0
- package/dist/graph.js +214 -0
- package/dist/graph.test.js +79 -0
- package/dist/graph_repro.test.js +63 -0
- package/dist/index.js +33 -0
- package/dist/lib.js +189 -0
- package/dist/notes.js +77 -0
- package/dist/repro_search.test.js +79 -0
- package/dist/server.test.js +95 -0
- package/dist/tools/filesystem.js +152 -0
- package/dist/tools/graph.js +79 -0
- package/dist/tools/notes.js +55 -0
- package/dist/tools/thinking.js +113 -0
- package/dist/tools/web.js +134 -0
- package/dist/utils.js +35 -0
- package/dist/verify_edit.test.js +66 -0
- package/dist/verify_notes.test.js +36 -0
- package/dist/verify_viz.test.js +25 -0
- package/package.json +49 -0

package/dist/tools/notes.js
ADDED
@@ -0,0 +1,55 @@
import { z } from "zod";
export function registerNoteTools(server, notesManager) {
    // 15. manage_notes
    server.tool("manage_notes", "Manage long-term memory/notes. Use this to save important information, rules, or learnings that should persist across sessions.", {
        action: z.enum(['add', 'list', 'search', 'update', 'delete']).describe("Action to perform"),
        title: z.string().optional().describe("Title of the note (for add/update)"),
        content: z.string().optional().describe("Content of the note (for add/update)"),
        tags: z.array(z.string()).optional().describe("Tags for categorization (for add/update)"),
        searchQuery: z.string().optional().describe("Query to search notes (for search)"),
        noteId: z.string().optional().describe("ID of the note (for update/delete)")
    }, async ({ action, title, content, tags, searchQuery, noteId }) => {
        try {
            switch (action) {
                case 'add':
                    if (!title || !content) {
                        return { content: [{ type: "text", text: "Error: 'title' and 'content' are required for add action." }], isError: true };
                    }
                    const newNote = await notesManager.addNote(title, content, tags);
                    return { content: [{ type: "text", text: `Note added successfully.\nID: ${newNote.id}` }] };
                case 'list':
                    const notes = await notesManager.listNotes();
                    return { content: [{ type: "text", text: JSON.stringify(notes, null, 2) }] };
                case 'search':
                    if (!searchQuery) {
                        return { content: [{ type: "text", text: "Error: 'searchQuery' is required for search action." }], isError: true };
                    }
                    const searchResults = await notesManager.searchNotes(searchQuery);
                    return { content: [{ type: "text", text: searchResults.length > 0 ? JSON.stringify(searchResults, null, 2) : "No matching notes found." }] };
                case 'update':
                    if (!noteId) {
                        return { content: [{ type: "text", text: "Error: 'noteId' is required for update action." }], isError: true };
                    }
                    const updatedNote = await notesManager.updateNote(noteId, { title, content, tags });
                    if (!updatedNote) {
                        return { content: [{ type: "text", text: `Error: Note with ID ${noteId} not found.` }], isError: true };
                    }
                    return { content: [{ type: "text", text: `Note updated successfully.` }] };
                case 'delete':
                    if (!noteId) {
                        return { content: [{ type: "text", text: "Error: 'noteId' is required for delete action." }], isError: true };
                    }
                    const deleted = await notesManager.deleteNote(noteId);
                    return { content: [{ type: "text", text: deleted ? "Note deleted successfully." : `Error: Note with ID ${noteId} not found.` }], isError: !deleted };
                default:
                    return { content: [{ type: "text", text: `Error: Unknown action '${action}'` }], isError: true };
            }
        }
        catch (error) {
            return {
                content: [{ type: "text", text: `Notes Error: ${error instanceof Error ? error.message : String(error)}` }],
                isError: true
            };
        }
    });
}
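
Illustrative sketch only (not part of the published package): a client built on @modelcontextprotocol/sdk could exercise manage_notes roughly as below. The launch command, note title, and content are assumptions; the tool name and argument shape come from the schema above.

// Hypothetical client-side call to manage_notes (sketch, not shipped with this package).
import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

// Assumes the server is started from its package directory via the compiled entry point.
const transport = new StdioClientTransport({ command: "node", args: ["dist/index.js"] });
const client = new Client({ name: "example-client", version: "0.0.1" });
await client.connect(transport);

const result = await client.callTool({
    name: "manage_notes",
    arguments: {
        action: "add",
        title: "Release checklist",               // hypothetical note
        content: "Run vitest before publishing.", // hypothetical content
        tags: ["process"]
    }
});
console.log(result.content[0].text); // expected to report the new note's ID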

package/dist/tools/thinking.js
ADDED
@@ -0,0 +1,113 @@
import { z } from "zod";
export function registerThinkingTools(server, thinkingServer) {
    // --- Sequential Thinking Tool ---
    server.tool("sequentialthinking", `A detailed tool for dynamic and reflective problem-solving through thoughts.
This tool helps analyze problems through a flexible thinking process that can adapt and evolve.
Each thought can build on, question, or revise previous insights as understanding deepens.

When to use this tool:
- Breaking down complex problems into steps
- Planning and design with room for revision
- Analysis that might need course correction
- Problems where the full scope might not be clear initially
- Problems that require a multi-step solution
- Tasks that need to maintain context over multiple steps
- Situations where irrelevant information needs to be filtered out

Key features:
- You can adjust total_thoughts up or down as you progress
- You can question or revise previous thoughts
- You can add more thoughts even after reaching what seemed like the end
- You can express uncertainty and explore alternative approaches
- Not every thought needs to build linearly - you can branch or backtrack
- Iterative Reasoning: Think step-by-step in a structured manner
- Tree of Thoughts: Generate and evaluate multiple options (Conservative/Balanced/Aggressive)
- Self-Critique: Check for risks, biases, and errors in thinking
- Branch Merging: Combine insights from multiple divergent paths
- Hypothesis Testing: Formulate and verify hypotheses
- Generates a solution hypothesis
- Verifies the hypothesis based on the Chain of Thought steps
- Repeats the process until satisfied
- Provides a correct answer

Parameters explained:
- thought: Your current thinking step, which can include:
* Regular analytical steps
* Revisions of previous thoughts
* Questions about previous decisions
* Realizations about needing more analysis
* Changes in approach
* Hypothesis generation
* Hypothesis verification
- nextThoughtNeeded: True if you need more thinking, even if at what seemed like the end
- thoughtNumber: Current number in sequence (can go beyond initial total if needed)
- totalThoughts: Current estimate of thoughts needed (can be adjusted up/down)
- isRevision: A boolean indicating if this thought revises previous thinking
- revisesThought: If is_revision is true, which thought number is being reconsidered
- branchFromThought: If branching, which thought number is the branching point
- branchId: Identifier for the current branch (if any)
- needsMoreThoughts: If reaching end but realizing more thoughts needed
- thoughtType: The type of thought (analysis, generation, evaluation, reflexion, selection)
- score: Score for evaluation (1-10)
- options: List of options generated
- selectedOption: The option selected

You should:
1. Start with an initial estimate of needed thoughts, but be ready to adjust
2. Feel free to question or revise previous thoughts
3. Don't hesitate to add more thoughts if needed, even at the "end"
4. Express uncertainty when present
5. Mark thoughts that revise previous thinking or branch into new paths
6. Ignore information that is irrelevant to the current step
7. Generate a solution hypothesis when appropriate
8. Verify the hypothesis based on the Chain of Thought steps
9. Repeat the process until satisfied with the solution
10. Provide a single, ideally correct answer as the final output
11. Only set nextThoughtNeeded to false when truly done and a satisfactory answer is reached`, {
        thought: z.string().describe("Your current thinking step"),
        nextThoughtNeeded: z.boolean().describe("Whether another thought step is needed"),
        thoughtNumber: z.number().int().min(1).describe("Current thought number (numeric value, e.g., 1, 2, 3)"),
        totalThoughts: z.number().int().min(1).describe("Estimated total thoughts needed (numeric value, e.g., 5, 10)"),
        isRevision: z.boolean().optional().describe("Whether this revises previous thinking"),
        revisesThought: z.number().int().min(1).optional().describe("Which thought is being reconsidered"),
        branchFromThought: z.number().int().min(1).optional().describe("Branching point thought number"),
        branchId: z.string().optional().describe("Branch identifier"),
        needsMoreThoughts: z.boolean().optional().describe("If more thoughts are needed"),
        thoughtType: z.enum(['analysis', 'generation', 'evaluation', 'reflexion', 'selection']).optional().describe("The type of thought"),
        score: z.number().min(1).max(10).optional().describe("Score for evaluation (1-10)"),
        options: z.array(z.string()).optional().describe("List of options generated"),
        selectedOption: z.string().optional().describe("The option selected")
    }, async (args) => {
        const result = await thinkingServer.processThought(args);
        return {
            content: result.content,
            isError: result.isError
        };
    });
    // 11. clear_thought_history
    server.tool("clear_thought_history", "Clear the sequential thinking history.", {}, async () => {
        await thinkingServer.clearHistory();
        return {
            content: [{ type: "text", text: "Thought history cleared." }]
        };
    });
    // 12. summarize_history
    server.tool("summarize_history", "Compress multiple thoughts into a single summary thought to save space/context.", {
        startIndex: z.number().int().min(1).describe("The starting thought number to summarize"),
        endIndex: z.number().int().min(1).describe("The ending thought number to summarize"),
        summary: z.string().describe("The summary text that replaces the range")
    }, async ({ startIndex, endIndex, summary }) => {
        try {
            const result = await thinkingServer.archiveHistory(startIndex, endIndex, summary);
            return {
                content: [{ type: "text", text: `Successfully summarized thoughts ${startIndex}-${endIndex}. New history length: ${result.newHistoryLength}` }]
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: `Archive Error: ${error instanceof Error ? error.message : String(error)}` }],
                isError: true
            };
        }
    });
}
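
For orientation (not part of the package), a single sequentialthinking call that revises an earlier thought could carry arguments shaped like this; the values are invented, the field names mirror the zod schema above.

// Hypothetical arguments for one "sequentialthinking" tool call (illustrative values only).
const thoughtArgs = {
    thought: "Revisiting thought 2: the earlier estimate ignored caching, so the plan needs fewer steps.",
    thoughtNumber: 4,
    totalThoughts: 6,            // current estimate, may be adjusted up or down later
    nextThoughtNeeded: true,
    isRevision: true,
    revisesThought: 2,
    thoughtType: "evaluation",
    score: 7
};
// This object would be sent as the `arguments` of a tools/call request for "sequentialthinking".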

package/dist/tools/web.js
ADDED
@@ -0,0 +1,134 @@
import { z } from "zod";
import { fetchWithRetry } from "../utils.js";
import { JSDOM } from 'jsdom';
import { Readability } from '@mozilla/readability';
import TurndownService from 'turndown';
export function registerWebTools(server) {
    // 1. web_search
    server.tool("web_search", "Search the web using Brave or Exa APIs (requires API keys in environment variables: BRAVE_API_KEY or EXA_API_KEY).", {
        query: z.string().min(1).describe("The search query"),
        provider: z.enum(['brave', 'exa', 'google']).optional().describe("Preferred search provider")
    }, async ({ query, provider }) => {
        try {
            // Priority: User Preference > Brave > Exa > Google
            let selectedProvider = provider;
            if (!selectedProvider) {
                if (process.env.BRAVE_API_KEY)
                    selectedProvider = 'brave';
                else if (process.env.EXA_API_KEY)
                    selectedProvider = 'exa';
                else if (process.env.GOOGLE_SEARCH_API_KEY)
                    selectedProvider = 'google';
                else
                    return { content: [{ type: "text", text: "Error: No search provider configured. Please set BRAVE_API_KEY, EXA_API_KEY, or GOOGLE_SEARCH_API_KEY." }], isError: true };
            }
            if (selectedProvider === 'brave') {
                if (!process.env.BRAVE_API_KEY)
                    throw new Error("BRAVE_API_KEY not found");
                const response = await fetchWithRetry(`https://api.search.brave.com/res/v1/web/search?q=${encodeURIComponent(query)}&count=5`, {
                    headers: { 'X-Subscription-Token': process.env.BRAVE_API_KEY }
                });
                if (!response.ok)
                    throw new Error(`Brave API error: ${response.statusText}`);
                const data = await response.json();
                return { content: [{ type: "text", text: JSON.stringify(data.web?.results || data, null, 2) }] };
            }
            if (selectedProvider === 'exa') {
                if (!process.env.EXA_API_KEY)
                    throw new Error("EXA_API_KEY not found");
                const response = await fetchWithRetry('https://api.exa.ai/search', {
                    method: 'POST',
                    headers: {
                        'x-api-key': process.env.EXA_API_KEY,
                        'Content-Type': 'application/json'
                    },
                    body: JSON.stringify({ query, numResults: 5 })
                });
                if (!response.ok)
                    throw new Error(`Exa API error: ${response.statusText}`);
                const data = await response.json();
                return { content: [{ type: "text", text: JSON.stringify(data.results || data, null, 2) }] };
            }
            if (selectedProvider === 'google') {
                if (!process.env.GOOGLE_SEARCH_API_KEY)
                    throw new Error("GOOGLE_SEARCH_API_KEY not found");
                if (!process.env.GOOGLE_SEARCH_CX)
                    throw new Error("GOOGLE_SEARCH_CX (Search Engine ID) not found");
                const response = await fetchWithRetry(`https://www.googleapis.com/customsearch/v1?key=${process.env.GOOGLE_SEARCH_API_KEY}&cx=${process.env.GOOGLE_SEARCH_CX}&q=${encodeURIComponent(query)}&num=5`);
                if (!response.ok)
                    throw new Error(`Google API error: ${response.statusText}`);
                const data = await response.json();
                // Extract relevant fields to keep output clean
                const results = data.items?.map((item) => ({
                    title: item.title,
                    link: item.link,
                    snippet: item.snippet
                })) || [];
                return { content: [{ type: "text", text: JSON.stringify(results, null, 2) }] };
            }
            return { content: [{ type: "text", text: "Error: Unsupported or unconfigured provider." }], isError: true };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: `Search Error: ${error instanceof Error ? error.message : String(error)}` }],
                isError: true
            };
        }
    });
    // 2. fetch
    server.tool("fetch", "Perform an HTTP request to a specific URL.", {
        url: z.string().url().describe("The URL to fetch"),
        method: z.enum(['GET', 'POST', 'PUT', 'DELETE']).optional().default('GET').describe("HTTP Method"),
        headers: z.record(z.string(), z.string()).optional().describe("HTTP Headers"),
        body: z.string().optional().describe("Request body (for POST/PUT)")
    }, async ({ url, method, headers, body }) => {
        try {
            const response = await fetchWithRetry(url, {
                method,
                headers: headers || {},
                body: body
            });
            const text = await response.text();
            return {
                content: [{
                    type: "text",
                    text: `Status: ${response.status}\n\n${text.substring(0, 10000)}${text.length > 10000 ? '\n...(truncated)' : ''}`
                }]
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: `Fetch Error: ${error instanceof Error ? error.message : String(error)}` }],
                isError: true
            };
        }
    });
    // 9. read_webpage
    server.tool("read_webpage", "Read a webpage and convert it to clean Markdown (removes ads, navs, etc.).", {
        url: z.string().url().describe("The URL to read")
    }, async ({ url }) => {
        try {
            const response = await fetchWithRetry(url);
            const html = await response.text();
            const doc = new JSDOM(html, { url });
            const reader = new Readability(doc.window.document);
            const article = reader.parse();
            if (!article)
                throw new Error("Could not parse article content");
            const turndownService = new TurndownService();
            const markdown = turndownService.turndown(article.content || "");
            return {
                content: [{
                    type: "text",
                    text: `Title: ${article.title}\n\n${markdown}`
                }]
            };
        }
        catch (error) {
            return {
                content: [{ type: "text", text: `Read Error: ${error instanceof Error ? error.message : String(error)}` }],
                isError: true
            };
        }
    });
}
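
For reference (illustrative, not shipped in the package), the read_webpage handler above reduces to this standalone HTML-to-Markdown pipeline; the example URL is hypothetical and plain fetch stands in for fetchWithRetry.

// Minimal sketch of the JSDOM + Readability + Turndown pipeline used by read_webpage.
import { JSDOM } from 'jsdom';
import { Readability } from '@mozilla/readability';
import TurndownService from 'turndown';

const url = "https://example.com/article";         // hypothetical URL
const html = await (await fetch(url)).text();
const dom = new JSDOM(html, { url });               // passing url lets relative links resolve
const article = new Readability(dom.window.document).parse(); // strips nav/ads, keeps the main content
const markdown = new TurndownService().turndown(article?.content || "");
console.log(`Title: ${article?.title}\n\n${markdown.slice(0, 200)}`);
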
package/dist/utils.js
ADDED
@@ -0,0 +1,35 @@
import { exec } from 'child_process';
import { promisify } from 'util';
export const execAsync = promisify(exec);
export const DEFAULT_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
};
export async function fetchWithRetry(url, options = {}, retries = 3, backoff = 1000) {
    const fetchOptions = {
        ...options,
        headers: { ...DEFAULT_HEADERS, ...options.headers }
    };
    try {
        const response = await fetch(url, fetchOptions);
        if (response.status === 429 && retries > 0) {
            const retryAfter = response.headers.get('Retry-After');
            const waitTime = retryAfter ? parseInt(retryAfter) * 1000 : backoff;
            await new Promise(resolve => setTimeout(resolve, waitTime));
            return fetchWithRetry(url, options, retries - 1, backoff * 2);
        }
        if (!response.ok && retries > 0 && response.status >= 500) {
            await new Promise(resolve => setTimeout(resolve, backoff));
            return fetchWithRetry(url, options, retries - 1, backoff * 2);
        }
        return response;
    }
    catch (error) {
        if (retries > 0) {
            await new Promise(resolve => setTimeout(resolve, backoff));
            return fetchWithRetry(url, options, retries - 1, backoff * 2);
        }
        throw error;
    }
}
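
A minimal usage sketch for fetchWithRetry (the endpoint and header are assumptions): it retries network failures, 5xx responses, and 429s, doubling the backoff on each attempt and honoring a Retry-After header when one is present.

// Hypothetical caller of fetchWithRetry: at most 2 retries, starting from a 500 ms backoff.
import { fetchWithRetry } from './utils.js';

const response = await fetchWithRetry(
    "https://api.example.com/data",                   // hypothetical endpoint
    { headers: { 'Accept': 'application/json' } },    // merged over DEFAULT_HEADERS
    2,    // retries
    500   // initial backoff in ms, doubled on each retry
);
if (response.ok) {
    console.log(await response.json());
}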

package/dist/verify_edit.test.js
ADDED
@@ -0,0 +1,66 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import * as fs from 'fs/promises';
import * as path from 'path';
// Replicating the logic from src/index.ts for testing
async function editFileLogic(path, oldText, newText, allowMultiple = false) {
    try {
        const content = await fs.readFile(path, 'utf-8');
        const escapeRegExp = (string) => string.replace(/[.*+?^${}()|[\\]/g, '\\$&');
        const regex = new RegExp(escapeRegExp(oldText), 'g');
        const matchCount = (content.match(regex) || []).length;
        if (matchCount === 0) {
            return { error: "Error: 'oldText' not found" };
        }
        if (matchCount > 1 && !allowMultiple) {
            return { error: `Error: Found ${matchCount} occurrences` };
        }
        const newContent = content.replace(allowMultiple ? regex : oldText, newText);
        await fs.writeFile(path, newContent, 'utf-8');
        return { success: true };
    }
    catch (error) {
        return { error: String(error) };
    }
}
describe('edit_file logic', () => {
    const testFile = path.join(__dirname, 'test_edit.txt');
    beforeEach(async () => {
        await fs.writeFile(testFile, 'Line 1\nTarget\nLine 3\nTarget again');
    });
    afterEach(async () => {
        try {
            await fs.unlink(testFile);
        }
        catch { }
    });
    it('should replace single occurrence', async () => {
        await fs.writeFile(testFile, 'Line 1\nUnique\nLine 3');
        const result = await editFileLogic(testFile, 'Unique', 'Replaced');
        expect(result.error).toBeUndefined();
        const content = await fs.readFile(testFile, 'utf-8');
        expect(content).toContain('Line 1\nReplaced\nLine 3');
    });
    it('should fail if text not found', async () => {
        const result = await editFileLogic(testFile, 'Missing', 'New');
        expect(result.error).toContain("not found");
    });
    it('should fail if multiple found and allowMultiple=false', async () => {
        const result = await editFileLogic(testFile, 'Target', 'New');
        expect(result.error).toContain("Found 2 occurrences");
    });
    it('should replace multiple if allowMultiple=true', async () => {
        const result = await editFileLogic(testFile, 'Target', 'New', true);
        expect(result.error).toBeUndefined();
        const content = await fs.readFile(testFile, 'utf-8');
        expect(content).toBe('Line 1\nNew\nLine 3\nNew again');
    });
    it('should handle special regex characters in text', async () => {
        await fs.writeFile(testFile, 'func(a, b) { return a+b; }');
        const oldText = 'func(a, b) { return a+b; }';
        const newText = 'replacement';
        const result = await editFileLogic(testFile, oldText, newText);
        expect(result.error).toBeUndefined();
        const content = await fs.readFile(testFile, 'utf-8');
        expect(content).toBe('replacement');
    });
});

package/dist/verify_notes.test.js
ADDED
@@ -0,0 +1,36 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { NotesManager } from './notes.js';
import * as fs from 'fs/promises';
import * as path from 'path';
describe('Notes Manager', () => {
    const testFile = path.join(__dirname, 'test_notes.json');
    let manager;
    beforeEach(async () => {
        manager = new NotesManager(testFile);
    });
    afterEach(async () => {
        try {
            await fs.unlink(testFile);
        }
        catch { }
    });
    it('should add and list notes', async () => {
        await manager.addNote("My Note", "Content", ["tag1"]);
        const notes = await manager.listNotes();
        expect(notes.length).toBe(1);
        expect(notes[0].title).toBe("My Note");
    });
    it('should search notes', async () => {
        await manager.addNote("React Tips", "Use hooks", ["react"]);
        await manager.addNote("Vue Tips", "Use composition", ["vue"]);
        const results = await manager.searchNotes("hooks");
        expect(results.length).toBe(1);
        expect(results[0].title).toBe("React Tips");
    });
    it('should delete note', async () => {
        const note = await manager.addNote("To Delete", "...");
        await manager.deleteNote(note.id);
        const notes = await manager.listNotes();
        expect(notes.length).toBe(0);
    });
});

package/dist/verify_viz.test.js
ADDED
@@ -0,0 +1,25 @@
import { describe, it, expect, beforeEach, afterEach } from 'vitest';
import { ProjectKnowledgeGraph } from './graph.js';
import * as fs from 'fs/promises';
import * as path from 'path';
describe('Graph Visualization', () => {
    const testDir = path.join(__dirname, 'test_viz_env');
    const graph = new ProjectKnowledgeGraph();
    beforeEach(async () => {
        await fs.mkdir(testDir, { recursive: true });
        await fs.writeFile(path.join(testDir, 'a.ts'), 'import { b } from "./b";');
        await fs.writeFile(path.join(testDir, 'b.ts'), 'export const b = 1;');
    });
    afterEach(async () => {
        await fs.rm(testDir, { recursive: true, force: true });
    });
    it('should generate mermaid diagram', async () => {
        await graph.build(testDir);
        const mermaid = graph.toMermaid();
        console.log("Generated Mermaid:", mermaid);
        expect(mermaid).toContain('graph TD');
        expect(mermaid).toMatch(/N\d+\["a\.ts"\]/);
        expect(mermaid).toMatch(/N\d+\["b\.ts"\]/);
        expect(mermaid).toContain('-->');
    });
});

package/package.json
ADDED
@@ -0,0 +1,49 @@
{
  "name": "@gotza02/sequential-thinking",
  "version": "1.0.0",
  "publishConfig": {
    "access": "public"
  },
  "description": "MCP server for sequential thinking and problem solving (Extended with Web Search & Graph)",
  "license": "MIT",
  "mcpName": "sequential-thinking-extended",
  "author": "Anthropic, PBC (https://anthropic.com)",
  "homepage": "https://modelcontextprotocol.io",
  "bugs": "https://github.com/modelcontextprotocol/servers/issues",
  "repository": {
    "type": "git",
    "url": "git+https://github.com/modelcontextprotocol/servers.git"
  },
  "type": "module",
  "bin": {
    "mcp-server-sequential-thinking": "dist/index.js"
  },
  "files": [
    "dist"
  ],
  "scripts": {
    "build": "tsc",
    "start": "node dist/index.js",
    "prepare": "npm run build",
    "watch": "tsc --watch",
    "test": "vitest run --coverage"
  },
  "dependencies": {
    "@modelcontextprotocol/sdk": "^1.24.0",
    "@mozilla/readability": "^0.6.0",
    "chalk": "^5.3.0",
    "jsdom": "^27.4.0",
    "turndown": "^7.2.2",
    "typescript": "^5.3.3",
    "yargs": "^17.7.2"
  },
  "devDependencies": {
    "@types/jsdom": "^27.0.0",
    "@types/node": "^22",
    "@types/turndown": "^5.0.6",
    "@types/yargs": "^17.0.32",
    "@vitest/coverage-v8": "^2.1.8",
    "shx": "^0.3.4",
    "vitest": "^2.1.8"
  }
}