@nbiish/cognitive-tools-mcp 0.9.4 → 0.9.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +0 -31
- package/build/index.js +10 -7
- package/package.json +1 -1
- package/integration-prompts/new-prompts/latest.md +0 -134
- package/integration-prompts/old-prompts/integration-prompt-01.md +0 -71
- package/integration-prompts/old-prompts/integration-prompt-02.md +0 -32
- package/integration-prompts/old-prompts/integration-prompt-03.md +0 -71
- package/integration-prompts/old-prompts/integration-prompt-04.md +0 -144
- package/integration-prompts/old-prompts/integration-prompt-05.md +0 -84
- package/integration-prompts/old-prompts/integration-prompt-06.md +0 -91
- package/integration-prompts/old-prompts/integration-prompt-07.md +0 -88
- package/integration-prompts/old-prompts/integration-prompt-08.md +0 -86
- package/integration-prompts/old-prompts/integration-prompt-09.md +0 -86
- package/integration-prompts/old-prompts/integration-prompt-10.md +0 -100
- package/integration-prompts/old-prompts/integration-prompt-11.md +0 -79
- package/integration-prompts/old-prompts/integration-prompt-12.md +0 -93
- package/integration-prompts/old-prompts/integration-prompt-13.md +0 -81
- package/integration-prompts/old-prompts/integration-prompt-14.md +0 -81
- package/integration-prompts/old-prompts/integration-prompt-15.md +0 -80
- package/integration-prompts/old-prompts/integration-prompt-16.md +0 -96
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-01.ts +0 -171
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-02.ts +0 -216
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-03.ts +0 -225
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-04.ts +0 -221
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-05.ts +0 -230
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-06.ts +0 -506
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-07.ts +0 -293
- package/integration-tool-descriptions/old-descriptions/tool-descriptions-08.ts +0 -458
package/integration-tool-descriptions/old-descriptions/tool-descriptions-01.ts

@@ -1,171 +0,0 @@
-#!/usr/bin/env node
-import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
-import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
-import { z } from "zod";
-
-// Create the MCP server
-const server = new McpServer({
-  name: "gikendaasowin-aabajichiganan-mcp",
-  // Version reflects significant enhancement in descriptive guidance
-  version: "0.4.0",
-  description: "ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.4.0): An MCP server providing a suite of advanced internal reasoning tools designed to elevate an LLM agent's performance in complex problem-solving, particularly in pair programming contexts. Emphasizes structured thought (like Anthropic's 'think' tool research), strategic planning, explicit reasoning (CoT), and iterative self-correction (Reflection) to achieve higher reliability and benchmark-level cognitive capabilities."
-});
-
-// --- Core Cognitive Tool ---
-
-server.tool(
-  "think",
-  // Main Description: Reinforced role as the central cognitive hub, linking to robustness and pair programming clarity.
-  "MANDATORY Cognitive Hub & Navigator's Log. Use this internal workspace for structured deliberation BEFORE any external action/response and AFTER using ANY other cognitive tool. This logs your detailed reasoning, enhancing traceability, reliability, and facilitating effective pair programming communication. Essential for complex tasks requiring policy adherence, sequential decision-making, and robust error handling.",
-  {
-    // Parameter Description: Added emphasis on quality, structure elements, and pair programming role.
-    thought: z.string().describe("Your detailed, structured internal monologue (Navigator's Log). MUST explicitly cover: 1) **Analysis** (Deconstruct request/situation/tool output), 2) **Planning** (Concrete next steps, potential tool use), 3) **Verification** (Check against requirements, constraints, best practices), 4) **Risk Assessment** (Identify potential issues, edge cases, errors), 5) **Self-Correction** (Explicitly state corrections to prior reasoning/plans). Use clear headings/structure (e.g., ## Analysis, ## Plan). Quality and completeness of reasoning are paramount for robust performance.")
-  },
-  // Implementation: Logs the structured thought process.
-  async ({ thought }) => {
-    if (!thought || typeof thought !== 'string' || thought.trim().length === 0) {
-      throw new Error('Invalid thought: Must be a non-empty string containing substantive reasoning.');
-    }
-    console.error(`[CognitiveToolsServer] Think Tool Logged: ${thought.substring(0, 100)}...`);
-    return {
-      content: [{
-        type: "text" as const,
-        text: thought // Return the logged thought for context preservation
-      }]
-    };
-  }
-);
-
-// --- Supporting Cognitive Strategy Tools ---
-// Note: These guide the LLM's internal generation process *before* the tool call.
-// The subsequent mandatory 'think' call analyzes the *result* of that internal generation.
-
-server.tool(
-  "chain_of_thought",
-  // Main Description: Emphasizes use for complex reasoning, explainability, and reducing errors.
-  "Guides the LLM to generate and articulate an explicit, step-by-step logical deduction path. Crucial for complex problem decomposition, algorithmic reasoning, debugging logic, and tasks demanding high explainability to minimize reasoning errors. The generated CoT MUST be analyzed via `think`.",
-  {
-    // Parameter Description: Instructs LLM on the required *internal* generation task.
-    problem_statement: z.string().describe("Input the specific problem/question requiring detailed step-by-step reasoning. *You* (the LLM) must now *internally generate* your detailed Chain of Thought process for this problem *before* calling this tool. This tool call signals the completion of that internal CoT generation.")
-  },
-  // Implementation: Signals CoT was performed for the given problem.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) {
-      throw new Error('Invalid problem statement: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] ChainOfThought Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    return {
-      content: [{
-        type: "text" as const,
-        text: `Chain of Thought generation signaled for problem: ${problem_statement}`
-      }]
-    };
-  }
-);
-
-server.tool(
-  "reflection",
-  // Main Description: Positions as key for iterative refinement, accuracy, and benchmark performance.
-  "Guides the LLM to perform critical self-evaluation of its own prior reasoning, plans, or generated code. Essential for iterative refinement, identifying hidden flaws, improving robustness, and achieving higher accuracy on complex benchmarks. The critique MUST be analyzed via `think`.",
-  {
-    // Parameter Description: Instructs LLM on the self-critique task.
-    input_reasoning_or_plan: z.string().describe("Input the specific reasoning, plan, or code segment *you* (the LLM) must now critically evaluate *before* calling this tool. Your internal critique should identify logical fallacies, overlooked assumptions, potential inefficiencies, biases, or edge cases, and propose concrete improvements.")
-  },
-  // Implementation: Signals Reflection was performed.
-  async ({ input_reasoning_or_plan }) => {
-    if (!input_reasoning_or_plan || typeof input_reasoning_or_plan !== 'string' || input_reasoning_or_plan.trim().length === 0) {
-      throw new Error('Invalid input reasoning/plan: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] Reflection Tool Signaled for analysis.`);
-    return {
-      content: [{
-        type: "text" as const,
-        text: `Reflection generation signaled for input: ${input_reasoning_or_plan.substring(0, 150)}...`
-      }]
-    };
-  }
-);
-
-server.tool(
-  "plan_and_solve",
-  // Main Description: Highlights role in structuring complex tasks and managing agentic workflows.
-  "Guides the LLM to decompose a complex objective into a high-level, structured strategic plan. Outlines necessary phases, potential sub-tasks, and anticipated tool usage, improving manageability of multi-step agentic workflows. The generated plan MUST be validated and detailed via `think`.",
-  {
-    // Parameter Description: Instructs LLM on plan generation.
-    task_objective: z.string().describe("Input the high-level objective. *You* (the LLM) must now *internally generate* a structured, multi-step plan (roadmap) to achieve this objective *before* calling this tool. Consider dependencies and necessary intermediate steps.")
-  },
-  // Implementation: Signals Planning was performed.
-  async ({ task_objective }) => {
-    if (!task_objective || typeof task_objective !== 'string' || task_objective.trim().length === 0) {
-      throw new Error('Invalid task objective: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] PlanAndSolve Tool Signaled for: ${task_objective.substring(0, 100)}...`);
-    return {
-      content: [{
-        type: "text" as const,
-        text: `Planning generation signaled for objective: ${task_objective}`
-      }]
-    };
-  }
-);
-
-server.tool(
-  "chain_of_draft",
-  // Main Description: Positions for efficient exploration and hypothesis generation.
-  "Guides the LLM to generate concise, iterative reasoning drafts ('thought-sketches'). Useful for efficiently exploring multiple solution paths, brainstorming hypotheses, or outlining approaches when full CoT verbosity is premature. Drafts MUST be analyzed comparatively via `think`.",
-  {
-    // Parameter Description: Instructs LLM on draft generation.
-    problem_statement: z.string().describe("Input the problem or question for exploration. *You* (the LLM) must now *internally generate* brief, iterative reasoning drafts (key steps, pros/cons, core ideas) for potential approaches *before* calling this tool.")
-  },
-  // Implementation: Signals Drafting was performed.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) {
-      throw new Error('Invalid problem statement: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] ChainOfDraft Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    return {
-      content: [{
-        type: "text" as const,
-        text: `Chain of Draft generation signaled for problem: ${problem_statement}`
-      }]
-    };
-  }
-);
-
-
-// --- Server Lifecycle and Error Handling ---
-
-process.on('SIGINT', async () => {
-  console.error('[CognitiveToolsServer] Received SIGINT, shutting down.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('SIGTERM', async () => {
-  console.error('[CognitiveToolsServer] Received SIGTERM, shutting down.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('uncaughtException', (error) => {
-  console.error('[CognitiveToolsServer] Uncaught exception:', error);
-});
-
-process.on('unhandledRejection', (reason, promise) => {
-  console.error('[CognitiveToolsServer] Unhandled promise rejection:', reason);
-});
-
-// Start the server
-async function main() {
-  try {
-    const transport = new StdioServerTransport();
-    await server.connect(transport);
-    console.error('ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.4.0) MCP Server running on stdio');
-  }
-  catch (error) {
-    console.error('Fatal error in main():', error);
-    process.exit(1);
-  }
-}
-
-main();
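Every tool handler in these removed files repeats the same input guard before logging: reject missing, non-string, or whitespace-only arguments. A minimal standalone sketch of that shared pattern (the requireNonEmpty helper name is hypothetical, not part of the package):

function requireNonEmpty(value: unknown, label: string): string {
  // Same check each handler above performs inline on its single string argument.
  if (!value || typeof value !== 'string' || value.trim().length === 0) {
    throw new Error(`Invalid ${label}: Must be a non-empty string.`);
  }
  return value;
}

// Example: guarding the 'think' argument before the truncated log line used above.
const thought = requireNonEmpty("## Analysis\nUser wants X...", "thought");
console.error(`[CognitiveToolsServer] Think Tool Logged: ${thought.substring(0, 100)}...`);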
package/integration-tool-descriptions/old-descriptions/tool-descriptions-02.ts

@@ -1,216 +0,0 @@
-#!/usr/bin/env node
-import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
-import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
-import { z } from "zod";
-
-// Create the MCP server
-const server = new McpServer({
-  name: "gikendaasowin-aabajichiganan",
-  // Version reflects novel tools and enhanced guidance
-  version: "0.6.2",
-  description: "ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.6.2): SOTA internal reasoning suite for LLM agents. Features advanced deliberation (`think`), rapid checks (`quick_think`), explicit complexity assessment, context synthesis (`synthesize`), confidence gauging, planning, CoT, and reflection. Designed to maximize reliability, traceability, and performance on complex cognitive tasks, pushing beyond current research."
-});
-
-// --- Core Cognitive Deliberation Tools ---
-
-server.tool(
-  "think",
-  // Main Description: For High Cognitive Load situations.
-  "MANDATORY Cognitive Hub for **High Complexity/Uncertainty/Consequence/Novelty**. Use for deep analysis, planning, verification, risk assessment, self-correction, and integrating complex outputs (CoT, Plans, Critiques, Syntheses, Low Confidence Gauges). Logs detailed reasoning.",
-  {
-    // Parameter Description: Must analyze inputs including novel tool outputs.
-    thought: z.string().describe("Your **detailed** internal monologue for complex situations. MUST explicitly analyze prior steps/generated text (CoT, Plans, Critiques, Synthesized summaries, Confidence justifications). Structure: ## Analysis, ## Plan, ## Verification, ## Risk Assessment, ## Self-Correction. Ensure depth and clear linkage.")
-  },
-  async ({ thought }) => {
-    if (!thought || typeof thought !== 'string' || thought.trim().length === 0) { throw new Error('Invalid thought: Must be non-empty.'); }
-    console.error(`[CognitiveToolsServer] Think Tool Logged: ${thought.substring(0, 100)}...`);
-    return { content: [{ type: "text" as const, text: `Deep Thought Logged: ${thought}` }] };
-  }
-);
-
-server.tool(
-  "quick_think",
-  // Main Description: For Low Cognitive Load situations. Explicitly contrasted with 'think'.
-  "Cognitive Checkpoint for **Low Complexity/Uncertainty/Consequence**. Use ONLY for simple confirmations, acknowledgements, minor step decisions, or sanity checks where deep analysis is clearly unnecessary. Logs brief thought.",
-  {
-    brief_thought: z.string().describe("Your **concise** thought for simple situations (e.g., 'Acknowledged.', 'Proceeding with planned step X.', 'API call successful, extracting data.'). DO NOT use for analyzing complex outputs or making significant plans.")
-  },
-  async ({ brief_thought }) => {
-    if (!brief_thought || typeof brief_thought !== 'string' || brief_thought.trim().length === 0) { throw new Error('Invalid brief_thought: Must be non-empty.'); }
-    console.error(`[CognitiveToolsServer] QuickThink Tool Logged: ${brief_thought.substring(0, 100)}...`);
-    return { content: [{ type: "text" as const, text: `Quick Thought Logged: ${brief_thought}` }] };
-  }
-);
-
-// --- Novel Meta-Cognitive & Context Management Tools ---
-
-server.tool(
-  "assess_cuc_n_mode",
-  // Main Description: Forces explicit decision between think/quick_think.
-  "**Mandatory Pre-Thought Assessment.** Guides the LLM to explicitly evaluate the upcoming cognitive step's complexity, uncertainty, consequence, and novelty, and *commit* to using either `think` or `quick_think` next. Enhances deliberate cognitive resource allocation.",
-  {
-    // Parameter Description: LLM provides its assessment and chosen mode.
-    assessment_and_choice: z.string().describe("Input your assessment: 1) Briefly describe the situation/next step. 2) Rate Complexity (Low/Med/High), Uncertainty (L/M/H), Consequence (L/M/H), Novelty (L/M/H). 3) State your choice: 'Selected Mode: think' or 'Selected Mode: quick_think'. *You* (LLM) make this assessment *before* calling.")
-  },
-  async ({ assessment_and_choice }) => {
-    if (!assessment_and_choice || typeof assessment_and_choice !== 'string' || (!assessment_and_choice.includes("Selected Mode: think") && !assessment_and_choice.includes("Selected Mode: quick_think"))) {
-      throw new Error('Invalid assessment: Must include complexity/uncertainty/consequence/novelty ratings and explicit mode selection ("Selected Mode: think" or "Selected Mode: quick_think").');
-    }
-    console.error(`[CognitiveToolsServer] AssessComplexity Tool Signaled: ${assessment_and_choice.substring(0, 150)}...`);
-    // Output confirms the assessment was made and which mode was selected.
-    const mode = assessment_and_choice.includes("Selected Mode: think") ? "think" : "quick_think";
-    return { content: [{ type: "text" as const, text: `Complexity Assessment Completed. Selected Next Mode: ${mode}. Assessment: ${assessment_and_choice}` }] };
-  }
-);
-
-server.tool(
-  "synthesize_prior_reasoning",
-  // Main Description: Manages context window and focuses reasoning.
-  "Context Management Tool. Guides the LLM to **generate a concise summary text** of preceding lengthy reasoning chains (multiple `think` logs, CoT outputs). Used to manage context limits and refocus attention before major subsequent `think` steps.",
-  {
-    // Parameter Description: LLM generates the summary internally first.
-    context_to_summarize_description: z.string().describe("Briefly describe the span of reasoning you are summarizing (e.g., 'Summary of planning phase', 'Key takeaways from debugging CoT'). *You* (LLM) must now *internally generate the concise summary text* before calling this tool. This signals the summary is ready.")
-  },
-  async ({ context_to_summarize_description }) => {
-    if (!context_to_summarize_description || typeof context_to_summarize_description !== 'string' || context_to_summarize_description.trim().length === 0) { throw new Error('Invalid context description: Must be non-empty.'); }
-    console.error(`[CognitiveToolsServer] SynthesizeReasoning Tool Signaled for: ${context_to_summarize_description}...`);
-    // Output confirms context and implies summary text is available internally
-    return { content: [{ type: "text" as const, text: `Synthesis internally generated for context: '${context_to_summarize_description}'. Ready for 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "gauge_confidence",
-  // Main Description: Explicit meta-cognition about certainty.
-  "Meta-Cognitive Checkpoint. Guides the LLM to explicitly **state its confidence level (High/Medium/Low) and justification** regarding a specific plan, analysis, conclusion, or proposed action *before* proceeding. Low confidence may trigger Reflection or deeper Thinking.",
-  {
-    // Parameter Description: LLM provides its confidence assessment.
-    assessment_and_confidence: z.string().describe("Input the item being assessed (e.g., 'Confidence in current plan', 'Confidence in generated code correctness'). Then state: 1) Confidence Level (High/Medium/Low). 2) Brief Justification for this level. *You* (LLM) make this assessment *before* calling.")
-  },
-  async ({ assessment_and_confidence }) => {
-    const confidenceRegex = /Confidence Level: (High|Medium|Low)/i;
-    if (!assessment_and_confidence || typeof assessment_and_confidence !== 'string' || !confidenceRegex.test(assessment_and_confidence)) {
-      throw new Error('Invalid confidence assessment: Must include "Confidence Level: High/Medium/Low" and justification.');
-    }
-    const match = assessment_and_confidence.match(confidenceRegex);
-    const level = match ? match[1] : "Unknown";
-    console.error(`[CognitiveToolsServer] GaugeConfidence Tool Signaled: Level ${level}`);
-    // Output confirms assessment and level
-    return { content: [{ type: "text" as const, text: `Confidence Gauge Completed. Level: ${level}. Assessment: ${assessment_and_confidence}` }] };
-  }
-);
-
-// --- Supporting Cognitive Strategy Tools (Enhanced Descriptions) ---
-
-server.tool(
-  "plan_and_solve",
-  // Main Description: Highlights role in structuring complex tasks and managing agentic workflows.
-  "Guides the LLM to **generate a structured, multi-step plan text** for a complex objective. Outlines necessary phases, potential sub-tasks, and anticipated tool usage, improving manageability of multi-step agentic workflows. The generated plan MUST be validated and detailed via `think`, and can optionally be passed to `reflection` for critique.",
-  {
-    // Parameter Description: Instructs LLM on plan generation.
-    task_objective: z.string().describe("Input the high-level objective. *You* (the LLM) must now *internally generate the structured plan text* before calling this tool. This signals the plan text is ready for analysis/critique.")
-  },
-  // Implementation: Signals Planning was performed.
-  async ({ task_objective }) => {
-    if (!task_objective || typeof task_objective !== 'string' || task_objective.trim().length === 0) {
-      throw new Error('Invalid task objective: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] PlanAndSolve Tool Signaled for: ${task_objective.substring(0, 100)}...`);
-    return { content: [{ type: "text" as const, text: `Planning generation signaled for objective: ${task_objective}. Ready for 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "chain_of_thought",
-  // Main Description: Emphasizes generating text for later analysis.
-  "Guides the LLM to **generate detailed, step-by-step reasoning text**. Used for complex logic or explainability. The *generated CoT text* MUST then be analyzed in a subsequent `think` call.",
-  {
-    // Parameter Description: Focus on the input problem.
-    problem_statement: z.string().describe("Input the problem requiring detailed step-by-step reasoning. *You* (the LLM) must now *internally generate the full CoT text* before calling this tool. This signals that CoT text is ready for analysis in the next `think` step.")
-  },
-  // Implementation: Signals CoT was performed for the given problem.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) {
-      throw new Error('Invalid problem statement: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] ChainOfThought Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    return { content: [{ type: "text" as const, text: `Chain of Thought internally generated for problem: ${problem_statement}. Ready for 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "chain_of_draft",
-  // Main Description: Positions for efficient exploration and hypothesis generation.
-  "Guides the LLM to **generate concise, iterative reasoning draft texts** ('thought-sketches'). Useful for efficiently exploring multiple solution paths, brainstorming hypotheses, or outlining approaches when full CoT verbosity is premature. Drafts MUST be analyzed comparatively via `think`.",
-  {
-    // Parameter Description: Instructs LLM on draft generation.
-    problem_statement: z.string().describe("Input the problem or question for exploration. *You* (the LLM) must now *internally generate brief, iterative draft texts* (key steps, pros/cons, core ideas) for potential approaches *before* calling this tool.")
-  },
-  // Implementation: Signals Drafting was performed.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) {
-      throw new Error('Invalid problem statement: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] ChainOfDraft Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    return { content: [{ type: "text" as const, text: `Chain of Draft generation signaled for problem: ${problem_statement}. Ready for 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "reflection",
-  // Main Description: Explicitly mentions taking prior text as input for critique.
-  "Guides the LLM to perform critical self-evaluation on **previously generated text** (reasoning, plans, code concepts). Essential for iterative refinement and improving accuracy. The *generated critique text* MUST be analyzed via `think`.",
-  {
-    // Parameter Description: Input is the text to be critiqued.
-    input_reasoning_or_plan: z.string().describe("Input the **exact text** (e.g., from a prior `think` log, or internally generated plan/CoT/code concept) that *you* (the LLM) must now *internally generate a critique for*. Your critique should identify flaws and suggest improvements.")
-  },
-  // Implementation: Signals Reflection was performed.
-  async ({ input_reasoning_or_plan }) => {
-    if (!input_reasoning_or_plan || typeof input_reasoning_or_plan !== 'string' || input_reasoning_or_plan.trim().length === 0) {
-      throw new Error('Invalid input reasoning/plan: Must be a non-empty string.');
-    }
-    console.error(`[CognitiveToolsServer] Reflection Tool Signaled for analysis.`);
-    return { content: [{ type: "text" as const, text: `Reflection internally generated for input text: '${input_reasoning_or_plan.substring(0, 100)}...'. Ready for 'think' analysis.` }] };
-  }
-);
-
-
-// --- Server Lifecycle and Error Handling ---
-
-process.on('SIGINT', async () => {
-  console.error('[CognitiveToolsServer] Received SIGINT, shutting down.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('SIGTERM', async () => {
-  console.error('[CognitiveToolsServer] Received SIGTERM, shutting down.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('uncaughtException', (error) => {
-  console.error('[CognitiveToolsServer] Uncaught exception:', error);
-  // Depending on severity, you might want to gracefully shutdown or just log
-});
-
-process.on('unhandledRejection', (reason, promise) => {
-  console.error('[CognitiveToolsServer] Unhandled promise rejection:', reason);
-  // Depending on severity, you might want to gracefully shutdown or just log
-});
-
-// Start the server
-async function main() {
-  try {
-    const transport = new StdioServerTransport();
-    await server.connect(transport);
-    console.error('ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.6.2) MCP Server running on stdio');
-  }
-  catch (error) {
-    console.error('[CognitiveToolsServer] Fatal error during startup:', error);
-    process.exit(1);
-  }
-}
-
-// Execute the main function to start the server
-main();
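The gauge_confidence handler above parses the stated level with a case-insensitive regex before echoing it back. A small isolated sketch of that parse (the example assessment string is illustrative only):

const confidenceRegex = /Confidence Level: (High|Medium|Low)/i;
const assessment = "Confidence in current plan. Confidence Level: Medium. Justification: external API behaviour is unverified.";
// test() gates the error path; match()[1] captures the level, here "Medium".
if (!confidenceRegex.test(assessment)) {
  throw new Error('Invalid confidence assessment: Must include "Confidence Level: High/Medium/Low" and justification.');
}
const match = assessment.match(confidenceRegex);
const level = match ? match[1] : "Unknown";
console.error(`[CognitiveToolsServer] GaugeConfidence Tool Signaled: Level ${level}`);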
package/integration-tool-descriptions/old-descriptions/tool-descriptions-03.ts

@@ -1,225 +0,0 @@
-#!/usr/bin/env node
-import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
-import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
-import { z } from "zod";
-
-// Create the MCP server
-const server = new McpServer({
-  name: "gikendaasowin-aabajichiganan-mcp",
-  // Version reflects refined tool integration guidance
-  version: "0.7.1",
-  description: "ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.7.1): SOTA internal reasoning suite aligned with AI Pair Programmer Prompt v0.7.1+. Features advanced deliberation (`think`), rapid checks (`quick_think`), mandatory complexity assessment & strategy selection, context synthesis, confidence gauging, proactive planning, explicit reasoning (CoT), and reflection, designed for robust agentic workflows including potential executable actions and dynamic tool discovery."
-});
-
-// --- Core Cognitive Deliberation Tools ---
-
-server.tool(
-  "think",
-  // Main Description: For High Cognitive Load situations.
-  "MANDATORY Cognitive Hub for **High/Medium Complexity, Uncertainty, Consequence, or Novelty (CUC-N)** situations. Use for deep analysis, planning, verification, risk assessment, self-correction, and integrating complex outputs (CoT, Plans, Critiques, Syntheses, Confidence Gauges). Logs detailed reasoning.",
-  {
-    // Parameter Description: Must analyze inputs including novel tool outputs.
-    thought: z.string().describe("Your **detailed** internal monologue for complex situations, triggered after CUC-N assessment indicates 'think' mode. MUST explicitly analyze prior steps & generated text (e.g., CoT text, Plan text including Anticipated Challenges, Reflection critique text, Synthesized summary text, Confidence justification text). Structure MANDATORY: ## Analysis, ## Plan, ## Verification, ## Anticipated Challenges Analysis & Contingency, ## Risk Assessment, ## Lookahead, ## Self-Correction & Learning.")
-  },
-  async ({ thought }) => {
-    if (!thought || typeof thought !== 'string' || thought.trim().length === 0) { throw new Error('Invalid thought: Must be non-empty, structured reasoning.'); }
-    console.error(`[CognitiveToolsServer] Think Tool Logged: ${thought.substring(0, 100)}...`);
-    // Output confirms deep thought logged, ready for next assessment or action.
-    return { content: [{ type: "text" as const, text: `Deep Thought (structured analysis/plan/etc.) logged successfully.` }] };
-  }
-);
-
-server.tool(
-  "quick_think",
-  // Main Description: For Low Cognitive Load situations. Explicitly contrasted with 'think'.
-  "Cognitive Checkpoint ONLY for situations explicitly assessed as **strictly Low CUC-N AND simple task nature** (e.g., confirmation, acknowledgement). Logs brief thought. **DO NOT USE** for analysis, planning, or after other cognitive tools.",
-  {
-    // Parameter Description: Simpler input for brief thoughts.
-    brief_thought: z.string().describe("Your **concise** thought for strictly simple, low CUC-N situations (e.g., 'Acknowledged user input.', 'Proceeding with confirmed step 3.', 'Code execution successful.').")
-  },
-  async ({ brief_thought }) => {
-    if (!brief_thought || typeof brief_thought !== 'string' || brief_thought.trim().length === 0) { throw new Error('Invalid brief_thought: Must be non-empty.'); }
-    console.error(`[CognitiveToolsServer] QuickThink Tool Logged: ${brief_thought.substring(0, 100)}...`);
-    // Output confirms brief thought logged.
-    return { content: [{ type: "text" as const, text: `Quick Thought logged successfully.` }] };
-  }
-);
-
-// --- Novel Meta-Cognitive & Context Management Tools ---
-
-server.tool(
-  "assess_cuc_n",
-  // Main Description: Forces explicit decision between think/quick_think.
-  "**Mandatory Pre-Cognitive Assessment.** Must be called BEFORE every `think` or `quick_think`. Guides the LLM to explicitly evaluate CUC-N, recommend an initial strategy, and commit to the next thought mode (`think` or `quick_think`).",
-  {
-    // Parameter Description: LLM provides its assessment and chosen mode.
-    assessment_and_choice: z.string().describe("Input your assessment *before* calling. MUST include: 1) Situation/Next Step Description. 2) CUC-N Ratings: Complexity(L/M/H), Uncertainty(L/M/H), Consequence(L/M/H), Novelty(L/M/H). 3) Recommended Initial Strategy (e.g., 'Start `think` analysis', 'Use `plan_and_solve`'). 4) Explicit Mode Selection: 'Selected Mode: think' or 'Selected Mode: quick_think'.")
-  },
-  async ({ assessment_and_choice }) => {
-    // Enhanced validation to check for key phrases expected from the prompt's instructions
-    const requiredPhrases = ["Complexity", "Uncertainty", "Consequence", "Novelty", "Recommended Initial Strategy", "Selected Mode:"];
-    const hasRequiredPhrases = requiredPhrases.every(phrase => assessment_and_choice.includes(phrase));
-    const hasModeSelection = assessment_and_choice.includes("Selected Mode: think") || assessment_and_choice.includes("Selected Mode: quick_think");
-
-    if (!assessment_and_choice || typeof assessment_and_choice !== 'string' || !hasRequiredPhrases || !hasModeSelection) {
-      throw new Error('Invalid assessment: String must include CUC-N ratings, Recommended Initial Strategy, and explicit Selected Mode ("think" or "quick_think").');
-    }
-    console.error(`[CognitiveToolsServer] AssessComplexity Tool Signaled: ${assessment_and_choice.substring(0, 150)}...`);
-    const mode = assessment_and_choice.includes("Selected Mode: think") ? "think" : "quick_think";
-    // Output confirms the assessment was made and guides the next step.
-    return { content: [{ type: "text" as const, text: `Cognitive Assessment Completed. Proceeding with selected mode: ${mode}. Full Assessment: ${assessment_and_choice}` }] };
-  }
-);
-
-server.tool(
-  "synthesize_prior_reasoning",
-  // Main Description: Manages context window and focuses reasoning.
-  "Context Management Tool. Guides the LLM to **internally generate a structured summary text** of preceding lengthy reasoning, focusing on **`Key Decisions Made`** and **`Open Questions/Uncertainties`**. Used to manage context and refocus attention.",
-  {
-    // Parameter Description: Reminds LLM of the required internal summary structure.
-    context_to_summarize_description: z.string().describe("Describe the reasoning span being summarized. *You* (LLM) must now *internally generate the structured summary text (including Key Decisions, Open Questions)* before calling. This signals the summary is ready for `think` analysis.")
-  },
-  async ({ context_to_summarize_description }) => {
-    if (!context_to_summarize_description || typeof context_to_summarize_description !== 'string' || context_to_summarize_description.trim().length === 0) { throw new Error('Invalid context description: Must be non-empty.'); }
-    console.error(`[CognitiveToolsServer] SynthesizeReasoning Tool Signaled for: ${context_to_summarize_description}...`);
-    // Output implies structured summary is ready for analysis.
-    return { content: [{ type: "text" as const, text: `Structured synthesis internally generated for context: '${context_to_summarize_description}'. Ready for detailed analysis in next 'think' step.` }] };
-  }
-);
-
-server.tool(
-  "gauge_confidence",
-  // Main Description: Explicit meta-cognition about certainty.
-  "Meta-Cognitive Checkpoint. Guides the LLM to explicitly **state confidence (High/Medium/Low) and justification** regarding a specific item (plan, conclusion, action). Output MUST be analyzed in next `think` step; Low/Medium confidence requires specific action.",
-  {
-    // Parameter Description: Matches prompt requirements.
-    assessment_and_confidence: z.string().describe("Input the item being assessed (e.g., 'Confidence in proposed refactoring plan'). Then *internally determine and state*: 1) Confidence Level (High/Medium/Low). 2) Justification for this level. Call this tool *after* making the assessment.")
-  },
-  async ({ assessment_and_confidence }) => {
-    const confidenceRegex = /Confidence Level: (High|Medium|Low)/i;
-    if (!assessment_and_confidence || typeof assessment_and_confidence !== 'string' || !confidenceRegex.test(assessment_and_confidence)) {
-      throw new Error('Invalid confidence assessment: String must include "Confidence Level: High/Medium/Low" and justification.');
-    }
-    const match = assessment_and_confidence.match(confidenceRegex);
-    const level = match ? match[1] : "Unknown";
-    console.error(`[CognitiveToolsServer] GaugeConfidence Tool Signaled: Level ${level}`);
-    // Output confirms level and prepares for analysis.
-    return { content: [{ type: "text" as const, text: `Confidence Gauge Completed. Level: ${level}. Assessment Text: ${assessment_and_confidence}. Ready for mandatory 'think' analysis (action required if Low/Medium).` }] };
-  }
-);
-
-// --- Supporting Cognitive Strategy Tools ---
-
-server.tool(
-  "plan_and_solve",
-  // Main Description: Emphasizes plan text structure and potential for other tools.
-  "Guides the LLM to **internally generate structured plan text**, including **`Anticipated Challenges/Risks`**. The plan steps might logically suggest the need for other tools (known or discovered). The generated plan text MUST be validated/detailed via `think`.",
-  {
-    // Parameter Description: Reminds LLM of required internal generation content and openness.
-    task_objective: z.string().describe("Input the objective. *You* (LLM) must now *internally generate the structured plan text, including Anticipated Challenges/Risks, and noting steps where other tools might be applicable,* before calling. Signals plan text is ready for `think` analysis.")
-  },
-  async ({ task_objective }) => {
-    if (!task_objective || typeof task_objective !== 'string' || task_objective.trim().length === 0) { throw new Error('Invalid task objective.'); }
-    console.error(`[CognitiveToolsServer] PlanAndSolve Tool Signaled for: ${task_objective.substring(0, 100)}...`);
-    // Output implies plan text *with risks and potential tool needs* is ready.
-    return { content: [{ type: "text" as const, text: `Structured plan (incl. Risks/Challenges, potential tool needs) internally generated for objective: ${task_objective}. Ready for mandatory 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "chain_of_thought",
-  // Main Description: Refined to explicitly mention potential for other tools within the reasoning.
-  "Guides the LLM to **internally generate detailed, step-by-step reasoning text (CoT)**. Steps within the CoT might logically identify points requiring external data, computation, code execution, or checks for other available tools. The generated CoT text MUST be analyzed via `think`.",
-  {
-    // Parameter Description: Explicitly guides internal generation to consider tool needs.
-    problem_statement: z.string().describe("Input the specific problem requiring detailed CoT. *You* (LLM) must now *internally generate the full CoT text*, structuring it clearly and explicitly noting any steps where other tools (e.g., code execution, file access, web search, list_tools) might be needed *after* this CoT is analyzed. Call this tool *after* generating the text.")
-  },
-  // Implementation: Signals CoT was performed and is ready for analysis.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) { throw new Error('Invalid problem statement.'); }
-    console.error(`[CognitiveToolsServer] ChainOfThought Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    // Output implies CoT text *potentially identifying tool needs* is ready for analysis.
-    return { content: [{ type: "text" as const, text: `Detailed CoT (potentially identifying needs for other tools) internally generated for problem: ${problem_statement}. Ready for mandatory 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "chain_of_draft",
-  // Main Description: Positions for efficient exploration, results analyzed by 'think'.
-  "Guides the LLM to **internally generate concise, iterative reasoning draft texts** ('thought-sketches'). Useful for efficiently exploring multiple solution paths or brainstorming hypotheses. Drafts MUST be analyzed comparatively via `think`.",
-  {
-    // Parameter Description: Instructs LLM on internal draft generation.
-    problem_statement: z.string().describe("Input the problem or question for exploration. *You* (LLM) must now *internally generate brief, iterative draft texts* (e.g., key steps, pros/cons) for potential approaches before calling this tool. Signals drafts are ready for `think` analysis.")
-  },
-  // Implementation: Signals Drafting was performed.
-  async ({ problem_statement }) => {
-    if (!problem_statement || typeof problem_statement !== 'string' || problem_statement.trim().length === 0) { throw new Error('Invalid problem statement.'); }
-    console.error(`[CognitiveToolsServer] ChainOfDraft Tool Signaled for: ${problem_statement.substring(0, 100)}...`);
-    // Output implies draft texts are ready for comparative analysis.
-    return { content: [{ type: "text" as const, text: `Reasoning drafts internally generated for problem: ${problem_statement}. Ready for mandatory 'think' analysis.` }] };
-  }
-);
-
-server.tool(
-  "reflection",
-  // Main Description: Explicitly mentions taking prior text as input for critique, results analyzed by 'think'.
-  "Guides the LLM to perform critical self-evaluation on **previously generated text** (reasoning, plans, code concepts). Essential for iterative refinement and improving accuracy. The *generated critique text* MUST be analyzed via `think`.",
-  {
-    // Parameter Description: Input is the specific text to be critiqued internally.
-    input_reasoning_or_plan: z.string().describe("Input the **exact text** (e.g., from a prior `think` log, or internally generated plan/CoT/code concept) that *you* (the LLM) must now *internally generate a critique for*. Your critique should identify flaws and suggest improvements. Call this tool *after* generating the critique.")
-  },
-  // Implementation: Signals Reflection was performed.
-  async ({ input_reasoning_or_plan }) => {
-    if (!input_reasoning_or_plan || typeof input_reasoning_or_plan !== 'string' || input_reasoning_or_plan.trim().length === 0) { throw new Error('Invalid input reasoning/plan.'); }
-    console.error(`[CognitiveToolsServer] Reflection Tool Signaled for analysis.`);
-    // Output implies critique text is ready for analysis.
-    return { content: [{ type: "text" as const, text: `Reflection critique internally generated for input text: '${input_reasoning_or_plan.substring(0, 100)}...'. Ready for mandatory 'think' analysis.` }] };
-  }
-);
-
-
-// --- Server Lifecycle and Error Handling ---
-
-process.on('SIGINT', async () => {
-  console.error('\n[CognitiveToolsServer] Received SIGINT, shutting down gracefully.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('SIGTERM', async () => {
-  console.error('\n[CognitiveToolsServer] Received SIGTERM, shutting down gracefully.');
-  await server.close();
-  process.exit(0);
-});
-
-process.on('uncaughtException', (error) => {
-  console.error('[CognitiveToolsServer] FATAL: Uncaught Exception:', error);
-  // Attempt graceful shutdown, but prioritize process exit
-  server.close().catch(err => console.error('[CognitiveToolsServer] Error during shutdown on uncaughtException:', err)).finally(() => {
-    process.exit(1); // Exit on fatal error
-  });
-});
-
-process.on('unhandledRejection', (reason, promise) => {
-  console.error('[CognitiveToolsServer] FATAL: Unhandled Promise Rejection:', reason);
-  // Attempt graceful shutdown, but prioritize process exit
-  server.close().catch(err => console.error('[CognitiveToolsServer] Error during shutdown on unhandledRejection:', err)).finally(() => {
-    process.exit(1); // Exit on fatal error
-  });
-});
-
-// Start the server
-async function main() {
-  try {
-    const transport = new StdioServerTransport();
-    await server.connect(transport);
-    console.error('ᑭᑫᓐᑖᓱᐎᓐ ᐋᐸᒋᒋᑲᓇᓐ - Gikendaasowin Aabajichiganan - (Cognitive Tools v0.7.1) MCP Server running on stdio');
-  }
-  catch (error) {
-    console.error('[CognitiveToolsServer] Fatal error during startup:', error);
-    process.exit(1);
-  }
-}
-
-// Execute the main function to start the server
-main();
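For context on how registrations like the ones removed above are exercised, a minimal client-side sketch, assuming the MCP TypeScript SDK client API (Client, StdioClientTransport, callTool) and a hypothetical launch command; it is not part of the package:

import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

async function demo() {
  // Hypothetical command: adjust to however the built server entry point is run locally.
  const transport = new StdioClientTransport({ command: "node", args: ["build/index.js"] });
  const client = new Client({ name: "demo-client", version: "0.0.1" }, { capabilities: {} });
  await client.connect(transport);

  // Call the 'think' tool; the server replies with a text content item confirming the logged thought.
  const result = await client.callTool({
    name: "think",
    arguments: { thought: "## Analysis\n...\n## Plan\n..." }
  });
  console.log(result.content);

  await client.close();
}

demo().catch((err) => {
  console.error(err);
  process.exit(1);
});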