converse-mcp-server 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +177 -0
- package/README.md +425 -0
- package/bin/converse.js +45 -0
- package/docs/API.md +897 -0
- package/docs/ARCHITECTURE.md +552 -0
- package/docs/EXAMPLES.md +736 -0
- package/package.json +101 -0
- package/src/config.js +521 -0
- package/src/continuationStore.js +340 -0
- package/src/index.js +216 -0
- package/src/providers/google.js +441 -0
- package/src/providers/index.js +87 -0
- package/src/providers/openai.js +348 -0
- package/src/providers/xai.js +305 -0
- package/src/router.js +497 -0
- package/src/systemPrompts.js +90 -0
- package/src/tools/chat.js +336 -0
- package/src/tools/consensus.js +478 -0
- package/src/tools/index.js +156 -0
- package/src/transport/httpTransport.js +548 -0
- package/src/utils/console.js +64 -0
- package/src/utils/contextProcessor.js +475 -0
- package/src/utils/errorHandler.js +555 -0
- package/src/utils/logger.js +450 -0
- package/src/utils/tokenLimiter.js +217 -0
package/.env.example
ADDED
|
@@ -0,0 +1,177 @@
|
|
|
1
|
+
# Converse MCP Server Environment Configuration
|
|
2
|
+
# Copy this file to .env and fill in your values
|
|
3
|
+
|
|
4
|
+
# API Keys - At least one is required
|
|
5
|
+
#
|
|
6
|
+
# IMPORTANT: Choose ONE approach:
|
|
7
|
+
# - Native APIs (Gemini/OpenAI/XAI) for direct access
|
|
8
|
+
# - DIAL for unified enterprise access
|
|
9
|
+
# - OpenRouter for unified cloud access
|
|
10
|
+
# Having multiple unified providers creates ambiguity about which serves each model.
|
|
11
|
+
#
|
|
12
|
+
# Option 1: Use native APIs (recommended for direct access)
|
|
13
|
+
# Get your Gemini API key from: https://makersuite.google.com/app/apikey
|
|
14
|
+
GEMINI_API_KEY=your_gemini_api_key_here
|
|
15
|
+
|
|
16
|
+
# Get your OpenAI API key from: https://platform.openai.com/api-keys
|
|
17
|
+
OPENAI_API_KEY=your_openai_api_key_here
|
|
18
|
+
|
|
19
|
+
# Get your X.AI API key from: https://console.x.ai/
|
|
20
|
+
XAI_API_KEY=your_xai_api_key_here
|
|
21
|
+
|
|
22
|
+
# Get your DIAL API key and configure host URL
|
|
23
|
+
# DIAL provides unified access to multiple AI models through a single API
|
|
24
|
+
DIAL_API_KEY=your_dial_api_key_here
|
|
25
|
+
# DIAL_API_HOST=https://core.dialx.ai # Optional: Base URL without /openai suffix (auto-appended)
|
|
26
|
+
# DIAL_API_VERSION=2025-01-01-preview # Optional: API version header for DIAL requests
|
|
27
|
+
|
|
28
|
+
# Option 2: Use OpenRouter for access to multiple models through one API
|
|
29
|
+
# Get your OpenRouter API key from: https://openrouter.ai/
|
|
30
|
+
# If using OpenRouter, comment out the native API keys above
|
|
31
|
+
OPENROUTER_API_KEY=your_openrouter_api_key_here
|
|
32
|
+
|
|
33
|
+
# Option 3: Use custom API endpoints for local models (Ollama, vLLM, LM Studio, etc.)
|
|
34
|
+
# CUSTOM_API_URL=http://localhost:11434/v1 # Ollama example
|
|
35
|
+
# CUSTOM_API_KEY= # Empty for Ollama (no auth needed)
|
|
36
|
+
# CUSTOM_MODEL_NAME=llama3.2 # Default model name
|
|
37
|
+
|
|
38
|
+
# Optional: Default model to use
|
|
39
|
+
# Options: 'auto' (Claude picks best model), 'pro', 'flash', 'o3', 'o3-mini', 'o4-mini', 'o4-mini-high',
|
|
40
|
+
# 'grok', 'opus-4', 'sonnet-4', or any DIAL model if DIAL is configured
|
|
41
|
+
# When set to 'auto', Claude will select the best model for each task
|
|
42
|
+
# Defaults to 'auto' if not specified
|
|
43
|
+
DEFAULT_MODEL=auto
|
|
44
|
+
|
|
45
|
+
# Optional: Default thinking mode for ThinkDeep tool
|
|
46
|
+
# NOTE: Only applies to models that support extended thinking (e.g., Gemini 2.5 Pro)
|
|
47
|
+
# Flash models (2.0) will use system prompt engineering instead
|
|
48
|
+
# Token consumption per mode:
|
|
49
|
+
# minimal: 128 tokens - Quick analysis, fastest response
|
|
50
|
+
# low: 2,048 tokens - Light reasoning tasks
|
|
51
|
+
# medium: 8,192 tokens - Balanced reasoning (good for most cases)
|
|
52
|
+
# high: 16,384 tokens - Complex analysis (recommended for thinkdeep)
|
|
53
|
+
# max: 32,768 tokens - Maximum reasoning depth, slowest but most thorough
|
|
54
|
+
# Defaults to 'high' if not specified
|
|
55
|
+
DEFAULT_THINKING_MODE_THINKDEEP=high
|
|
56
|
+
|
|
57
|
+
# Optional: Model usage restrictions
|
|
58
|
+
# Limit which models can be used from each provider for cost control, compliance, or standardization
|
|
59
|
+
# Format: Comma-separated list of allowed model names (case-insensitive, whitespace tolerant)
|
|
60
|
+
# Empty or unset = all models allowed (default behavior)
|
|
61
|
+
# If you want to disable a provider entirely, don't set its API key
|
|
62
|
+
#
|
|
63
|
+
# Supported OpenAI models:
|
|
64
|
+
# - o3 (200K context, high reasoning)
|
|
65
|
+
# - o3-mini (200K context, balanced)
|
|
66
|
+
# - o4-mini (200K context, latest balanced, temperature=1.0 only)
|
|
67
|
+
# - o4-mini-high (200K context, enhanced reasoning, temperature=1.0 only)
|
|
68
|
+
# - mini (shorthand for o4-mini)
|
|
69
|
+
#
|
|
70
|
+
# Supported Google/Gemini models:
|
|
71
|
+
# - gemini-2.5-flash (1M context, fast, supports thinking)
|
|
72
|
+
# - gemini-2.5-pro (1M context, powerful, supports thinking)
|
|
73
|
+
# - flash (shorthand for gemini-2.5-flash)
|
|
74
|
+
# - pro (shorthand for gemini-2.5-pro)
|
|
75
|
+
#
|
|
76
|
+
# Supported X.AI GROK models:
|
|
77
|
+
# - grok-3 (131K context, advanced reasoning)
|
|
78
|
+
# - grok-3-fast (131K context, higher performance but more expensive)
|
|
79
|
+
# - grok (shorthand for grok-3)
|
|
80
|
+
# - grok3 (shorthand for grok-3)
|
|
81
|
+
# - grokfast (shorthand for grok-3-fast)
|
|
82
|
+
#
|
|
83
|
+
# Supported DIAL models (when available in your DIAL deployment):
|
|
84
|
+
# - o3-2025-04-16 (200K context, latest O3 release)
|
|
85
|
+
# - o4-mini-2025-04-16 (200K context, latest O4 mini)
|
|
86
|
+
# - o3 (shorthand for o3-2025-04-16)
|
|
87
|
+
# - o4-mini (shorthand for o4-mini-2025-04-16)
|
|
88
|
+
# - anthropic.claude-sonnet-4-20250514-v1:0 (200K context, Claude 4 Sonnet)
|
|
89
|
+
# - anthropic.claude-sonnet-4-20250514-v1:0-with-thinking (200K context, Claude 4 Sonnet with thinking mode)
|
|
90
|
+
# - anthropic.claude-opus-4-20250514-v1:0 (200K context, Claude 4 Opus)
|
|
91
|
+
# - anthropic.claude-opus-4-20250514-v1:0-with-thinking (200K context, Claude 4 Opus with thinking mode)
|
|
92
|
+
# - sonnet-4 (shorthand for Claude 4 Sonnet)
|
|
93
|
+
# - sonnet-4-thinking (shorthand for Claude 4 Sonnet with thinking)
|
|
94
|
+
# - opus-4 (shorthand for Claude 4 Opus)
|
|
95
|
+
# - opus-4-thinking (shorthand for Claude 4 Opus with thinking)
|
|
96
|
+
# - gemini-2.5-pro-preview-03-25-google-search (1M context, with Google Search)
|
|
97
|
+
# - gemini-2.5-pro-preview-05-06 (1M context, latest preview)
|
|
98
|
+
# - gemini-2.5-flash-preview-05-20 (1M context, latest flash preview)
|
|
99
|
+
# - gemini-2.5-pro (shorthand for gemini-2.5-pro-preview-05-06)
|
|
100
|
+
# - gemini-2.5-pro-search (shorthand for gemini-2.5-pro-preview-03-25-google-search)
|
|
101
|
+
# - gemini-2.5-flash (shorthand for gemini-2.5-flash-preview-05-20)
|
|
102
|
+
#
|
|
103
|
+
# Examples:
|
|
104
|
+
# OPENAI_ALLOWED_MODELS=o3-mini,o4-mini,mini # Only allow mini models (cost control)
|
|
105
|
+
# GOOGLE_ALLOWED_MODELS=flash # Only allow Flash (fast responses)
|
|
106
|
+
# XAI_ALLOWED_MODELS=grok-3 # Only allow standard GROK (not fast variant)
|
|
107
|
+
# OPENAI_ALLOWED_MODELS=o4-mini # Single model standardization
|
|
108
|
+
# GOOGLE_ALLOWED_MODELS=flash,pro # Allow both Gemini models
|
|
109
|
+
# XAI_ALLOWED_MODELS=grok,grok-3-fast # Allow both GROK variants
|
|
110
|
+
# DIAL_ALLOWED_MODELS=o3,o4-mini # Only allow O3/O4 models via DIAL
|
|
111
|
+
# DIAL_ALLOWED_MODELS=opus-4,sonnet-4 # Only Claude 4 models (without thinking)
|
|
112
|
+
# DIAL_ALLOWED_MODELS=opus-4-thinking,sonnet-4-thinking # Only Claude 4 with thinking mode
|
|
113
|
+
# DIAL_ALLOWED_MODELS=gemini-2.5-pro,gemini-2.5-flash # Only Gemini 2.5 models via DIAL
|
|
114
|
+
#
|
|
115
|
+
# Note: These restrictions apply even in 'auto' mode - Claude will only pick from allowed models
|
|
116
|
+
# OPENAI_ALLOWED_MODELS=
|
|
117
|
+
# GOOGLE_ALLOWED_MODELS=
|
|
118
|
+
# XAI_ALLOWED_MODELS=
|
|
119
|
+
# DIAL_ALLOWED_MODELS=
|
|
120
|
+
|
|
121
|
+
# Optional: Custom model configuration file path
|
|
122
|
+
# Override the default location of custom_models.json
|
|
123
|
+
# CUSTOM_MODELS_CONFIG_PATH=/path/to/your/custom_models.json
|
|
124
|
+
|
|
125
|
+
# Note: Conversations are stored in memory during the session
|
|
126
|
+
|
|
127
|
+
# Optional: Conversation timeout (hours)
|
|
128
|
+
# How long AI-to-AI conversation threads persist before expiring
|
|
129
|
+
# Longer timeouts use more memory but allow resuming conversations later
|
|
130
|
+
# Defaults to 3 hours if not specified
|
|
131
|
+
CONVERSATION_TIMEOUT_HOURS=3
|
|
132
|
+
|
|
133
|
+
# Optional: Max conversation turns
|
|
134
|
+
# Maximum number of turns allowed in an AI-to-AI conversation thread
|
|
135
|
+
# Each exchange (Claude asks, Gemini responds) counts as 2 turns
|
|
136
|
+
# So 20 turns = 10 exchanges. Defaults to 20 if not specified
|
|
137
|
+
MAX_CONVERSATION_TURNS=20
|
|
138
|
+
|
|
139
|
+
# Optional: Logging level (DEBUG, INFO, WARNING, ERROR)
|
|
140
|
+
# DEBUG: Shows detailed operational messages for troubleshooting (default)
|
|
141
|
+
# INFO: Shows general operational messages
|
|
142
|
+
# WARNING: Shows only warnings and errors
|
|
143
|
+
# ERROR: Shows only errors
|
|
144
|
+
LOG_LEVEL=DEBUG
|
|
145
|
+
|
|
146
|
+
# Optional: Tool Selection
|
|
147
|
+
# Comma-separated list of tools to disable. If not set, all tools are enabled.
|
|
148
|
+
# Essential tools (version, listmodels) cannot be disabled.
|
|
149
|
+
# Available tools: chat, thinkdeep, planner, consensus, codereview, precommit,
|
|
150
|
+
# debug, docgen, analyze, refactor, tracer, testgen
|
|
151
|
+
# Examples:
|
|
152
|
+
# DISABLED_TOOLS= # All tools enabled (default)
|
|
153
|
+
# DISABLED_TOOLS=debug,tracer # Disable debug and tracer tools
|
|
154
|
+
# DISABLED_TOOLS=planner,consensus # Disable planning tools
|
|
155
|
+
|
|
156
|
+
# Optional: Language/Locale for AI responses
|
|
157
|
+
# When set, all AI tools will respond in the specified language
|
|
158
|
+
# while maintaining their analytical capabilities
|
|
159
|
+
# Examples: "fr-FR", "en-US", "zh-CN", "zh-TW", "ja-JP", "ko-KR", "es-ES"
|
|
160
|
+
# Leave empty for default language (English)
|
|
161
|
+
# LOCALE=fr-FR
|
|
162
|
+
|
|
163
|
+
# ===========================================
|
|
164
|
+
# Docker Configuration
|
|
165
|
+
# ===========================================
|
|
166
|
+
|
|
167
|
+
# Container name for Docker Compose
|
|
168
|
+
# Used when running with docker-compose.yml
|
|
169
|
+
COMPOSE_PROJECT_NAME=zen-mcp
|
|
170
|
+
|
|
171
|
+
# Timezone for Docker containers
|
|
172
|
+
# Ensures consistent time handling in containerized environments
|
|
173
|
+
TZ=UTC
|
|
174
|
+
|
|
175
|
+
# Maximum log file size (default: 10MB)
|
|
176
|
+
# Applicable when using file-based logging
|
|
177
|
+
LOG_MAX_SIZE=10MB
|
package/README.md
ADDED
|
@@ -0,0 +1,425 @@
|
|
|
1
|
+
# Converse MCP Server
|
|
2
|
+
|
|
3
|
+
A simplified, functional Node.js implementation of an MCP (Model Context Protocol) server with chat and consensus tools. Built with modern Node.js practices and official SDKs for seamless AI provider integration.
|
|
4
|
+
|
|
5
|
+
## Quick Start
|
|
6
|
+
|
|
7
|
+
### Option 1: Direct from GitHub (Recommended)
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# Using npx (recommended)
|
|
11
|
+
npx FallDownTheSystem/converse
|
|
12
|
+
|
|
13
|
+
# Using pnpm dlx (alternative)
|
|
14
|
+
pnpm dlx FallDownTheSystem/converse
|
|
15
|
+
|
|
16
|
+
# Using yarn dlx (alternative)
|
|
17
|
+
yarn dlx FallDownTheSystem/converse
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
### Option 2: Clone and Install
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
# Clone the repository
|
|
24
|
+
git clone https://github.com/FallDownTheSystem/converse.git
|
|
25
|
+
cd converse
|
|
26
|
+
|
|
27
|
+
# Install dependencies
|
|
28
|
+
npm install
|
|
29
|
+
|
|
30
|
+
# Copy and configure environment
|
|
31
|
+
cp .env.example .env
|
|
32
|
+
# Edit .env with your API keys
|
|
33
|
+
|
|
34
|
+
# Start the server
|
|
35
|
+
npm start
|
|
36
|
+
```
|
|
37
|
+
|
|
38
|
+
## Requirements
|
|
39
|
+
|
|
40
|
+
- **Node.js**: >= 20.0.0 (LTS recommended)
|
|
41
|
+
- **Package Manager**: npm, pnpm, or yarn
|
|
42
|
+
- **API Keys**: At least one of OpenAI, Google, or X.AI
|
|
43
|
+
|
|
44
|
+
## Configuration
|
|
45
|
+
|
|
46
|
+
### 1. Environment Variables
|
|
47
|
+
|
|
48
|
+
Create a `.env` file in your project root:
|
|
49
|
+
|
|
50
|
+
```bash
|
|
51
|
+
# Required: At least one API key
|
|
52
|
+
OPENAI_API_KEY=sk-proj-your_openai_key_here
|
|
53
|
+
GOOGLE_API_KEY=your_google_api_key_here
|
|
54
|
+
XAI_API_KEY=xai-your_xai_key_here
|
|
55
|
+
|
|
56
|
+
# Optional: Server configuration
|
|
57
|
+
PORT=3000
|
|
58
|
+
LOG_LEVEL=info
|
|
59
|
+
MAX_MCP_OUTPUT_TOKENS=200000
|
|
60
|
+
|
|
61
|
+
# Optional: Provider-specific settings
|
|
62
|
+
GOOGLE_LOCATION=us-central1
|
|
63
|
+
XAI_BASE_URL=https://api.x.ai/v1
|
|
64
|
+
```
|
|
65
|
+
|
|
66
|
+
### 2. Get API Keys
|
|
67
|
+
|
|
68
|
+
| Provider | Where to Get | Example Format |
|
|
69
|
+
|----------|-------------|----------------|
|
|
70
|
+
| **OpenAI** | [platform.openai.com/api-keys](https://platform.openai.com/api-keys) | `sk-proj-...` |
|
|
71
|
+
| **Google** | [makersuite.google.com/app/apikey](https://makersuite.google.com/app/apikey) | `AIzaSy...` |
|
|
72
|
+
| **X.AI** | [console.x.ai](https://console.x.ai/) | `xai-...` |
|
|
73
|
+
|
|
74
|
+
### 3. MCP Client Configuration
|
|
75
|
+
|
|
76
|
+
Add to your MCP client configuration (e.g., Claude Desktop):
|
|
77
|
+
|
|
78
|
+
```json
|
|
79
|
+
{
|
|
80
|
+
"mcpServers": {
|
|
81
|
+
"converse": {
|
|
82
|
+
"command": "npx",
|
|
83
|
+
"args": ["FallDownTheSystem/converse"],
|
|
84
|
+
"env": {
|
|
85
|
+
"OPENAI_API_KEY": "your_key_here",
|
|
86
|
+
"GOOGLE_API_KEY": "your_key_here",
|
|
87
|
+
"XAI_API_KEY": "your_key_here"
|
|
88
|
+
}
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
}
|
|
92
|
+
```
|
|
93
|
+
|
|
94
|
+
## Available Tools
|
|
95
|
+
|
|
96
|
+
### 1. Chat Tool
|
|
97
|
+
|
|
98
|
+
General conversational AI with context and continuation support.
|
|
99
|
+
|
|
100
|
+
```javascript
|
|
101
|
+
// Example usage
|
|
102
|
+
{
|
|
103
|
+
"prompt": "How should I structure the authentication module for this Express.js API?",
|
|
104
|
+
"model": "gemini-2.5-flash",
|
|
105
|
+
"files": ["/path/to/src/auth.js", "/path/to/config.json"],
|
|
106
|
+
"images": ["/path/to/architecture.png"],
|
|
107
|
+
"temperature": 0.5,
|
|
108
|
+
"reasoning_effort": "medium",
|
|
109
|
+
"use_websearch": false
|
|
110
|
+
}
|
|
111
|
+
```
|
|
112
|
+
|
|
113
|
+
### 2. Consensus Tool
|
|
114
|
+
|
|
115
|
+
Multi-provider parallel execution with cross-model feedback.
|
|
116
|
+
|
|
117
|
+
```javascript
|
|
118
|
+
// Example usage
|
|
119
|
+
{
|
|
120
|
+
"prompt": "Should we use microservices or monolith architecture for our e-commerce platform?",
|
|
121
|
+
"models": [
|
|
122
|
+
{"model": "o3"},
|
|
123
|
+
{"model": "gemini-2.5-flash"},
|
|
124
|
+
{"model": "grok-4-0709"}
|
|
125
|
+
],
|
|
126
|
+
"relevant_files": ["/path/to/requirements.md"],
|
|
127
|
+
"enable_cross_feedback": true,
|
|
128
|
+
"temperature": 0.2
|
|
129
|
+
}
|
|
130
|
+
```
|
|
131
|
+
|
|
132
|
+
## Supported Models
|
|
133
|
+
|
|
134
|
+
### OpenAI Models
|
|
135
|
+
- **o3**: Strong reasoning (200K context)
|
|
136
|
+
- **o3-mini**: Fast O3 variant (200K context)
|
|
137
|
+
- **o4-mini**: Latest reasoning model (200K context)
|
|
138
|
+
- **gpt-4o**: Multimodal flagship (128K context)
|
|
139
|
+
- **gpt-4o-mini**: Fast multimodal (128K context)
|
|
140
|
+
|
|
141
|
+
### Google/Gemini Models
|
|
142
|
+
- **gemini-2.5-flash** (alias: `flash`): Ultra-fast (1M context)
|
|
143
|
+
- **gemini-2.5-pro** (alias: `pro`): Deep reasoning (1M context)
|
|
144
|
+
- **gemini-2.0-flash**: Latest with experimental thinking
|
|
145
|
+
|
|
146
|
+
### X.AI/Grok Models
|
|
147
|
+
- **grok-4-0709** (alias: `grok`): Latest advanced model (256K context)
|
|
148
|
+
- **grok-3**: Previous generation (131K context)
|
|
149
|
+
- **grok-3-fast**: Higher performance variant
|
|
150
|
+
|
|
151
|
+
## Development
|
|
152
|
+
|
|
153
|
+
### Install from Source
|
|
154
|
+
|
|
155
|
+
```bash
|
|
156
|
+
# Clone and setup
|
|
157
|
+
git clone https://github.com/FallDownTheSystem/converse.git
|
|
158
|
+
cd converse
|
|
159
|
+
npm install
|
|
160
|
+
|
|
161
|
+
# Development with hot reload
|
|
162
|
+
npm run dev
|
|
163
|
+
|
|
164
|
+
# Run tests
|
|
165
|
+
npm test
|
|
166
|
+
|
|
167
|
+
# Run with specific log level
|
|
168
|
+
LOG_LEVEL=debug npm run dev
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
### Scripts Available
|
|
172
|
+
|
|
173
|
+
```bash
|
|
174
|
+
# Server management
|
|
175
|
+
npm start # Start server (auto-kills existing server on port 3000)
|
|
176
|
+
npm run start:clean # Start server without killing existing processes
|
|
177
|
+
npm run start:port # Start server on port 3001 (avoids port conflicts)
|
|
178
|
+
npm run dev # Development with hot reload (auto-kills existing server)
|
|
179
|
+
npm run dev:clean # Development without killing existing processes
|
|
180
|
+
npm run dev:port # Development on port 3001 (avoids port conflicts)
|
|
181
|
+
npm run dev:quiet # Development with minimal logging
|
|
182
|
+
npm run kill-server # Kill any server running on port 3000
|
|
183
|
+
|
|
184
|
+
# Testing
|
|
185
|
+
npm test # Run all tests
|
|
186
|
+
npm run test:unit # Unit tests only
|
|
187
|
+
npm run test:integration # Integration tests
|
|
188
|
+
npm run test:real-api # Real API tests (requires keys)
|
|
189
|
+
npm run test:coverage # Coverage report
|
|
190
|
+
|
|
191
|
+
# Code quality
|
|
192
|
+
npm run lint # Check code style
|
|
193
|
+
npm run lint:fix # Fix code style issues
|
|
194
|
+
npm run format # Format code with Prettier
|
|
195
|
+
npm run validate # Full validation (lint + test)
|
|
196
|
+
|
|
197
|
+
# Utilities
|
|
198
|
+
npm run build # Build for production
|
|
199
|
+
npm run debug # Start with debugger
|
|
200
|
+
npm run check-deps # Check for outdated dependencies
|
|
201
|
+
npm run kill-server # Kill any server running on port 3000
|
|
202
|
+
```
|
|
203
|
+
|
|
204
|
+
### Development Notes
|
|
205
|
+
|
|
206
|
+
**Port Management**: The server runs on port 3000 by default for HTTP transport. If you encounter "EADDRINUSE" errors:
|
|
207
|
+
|
|
208
|
+
1. **Automatic cleanup**: `npm start` and `npm run dev` will automatically attempt to kill existing processes on port 3000
|
|
209
|
+
2. **Manual cleanup**: Run `npm run kill-server` to manually free up port 3000
|
|
210
|
+
3. **Clean start**: Use `:clean` variants (`npm run start:clean`, `npm run dev:clean`) to skip auto-cleanup
|
|
211
|
+
4. **Persistent issues**: If port conflicts persist, manually kill Node.js processes or restart your terminal
|
|
212
|
+
|
|
213
|
+
**Troubleshooting EADDRINUSE errors**:
|
|
214
|
+
```bash
|
|
215
|
+
# Try manual cleanup first
|
|
216
|
+
npm run kill-server
|
|
217
|
+
|
|
218
|
+
# Or use a different port
|
|
219
|
+
PORT=3001 npm start
|
|
220
|
+
|
|
221
|
+
# Or use stdio transport instead
|
|
222
|
+
npm start -- --transport=stdio
|
|
223
|
+
```
|
|
224
|
+
|
|
225
|
+
**Transport Modes**:
|
|
226
|
+
- **HTTP Transport** (default): `http://localhost:3000/mcp` - Better for development and debugging
|
|
227
|
+
- **Stdio Transport**: Use `--transport=stdio` or set `MCP_TRANSPORT=stdio` for traditional stdio communication
|
|
228
|
+
|
|
229
|
+
### Testing with Real APIs
|
|
230
|
+
|
|
231
|
+
```bash
|
|
232
|
+
# Set up your API keys in .env first
|
|
233
|
+
OPENAI_API_KEY=sk-proj-...
|
|
234
|
+
GOOGLE_API_KEY=AIzaSy...
|
|
235
|
+
XAI_API_KEY=xai-...
|
|
236
|
+
|
|
237
|
+
# Run real API tests
|
|
238
|
+
npm run test:real-api
|
|
239
|
+
|
|
240
|
+
# Run comprehensive integration tests
|
|
241
|
+
node final-integration-test.js
|
|
242
|
+
|
|
243
|
+
# Validate server functionality
|
|
244
|
+
npm run validate
|
|
245
|
+
```
|
|
246
|
+
|
|
247
|
+
### Validation Steps
|
|
248
|
+
|
|
249
|
+
After installation, verify everything is working:
|
|
250
|
+
|
|
251
|
+
```bash
|
|
252
|
+
# 1. Quick server test (should show startup message)
|
|
253
|
+
npm start
|
|
254
|
+
|
|
255
|
+
# 2. Run basic functionality tests
|
|
256
|
+
npm test
|
|
257
|
+
|
|
258
|
+
# 3. Test real API connectivity (requires API keys)
|
|
259
|
+
npm run test:real-api
|
|
260
|
+
|
|
261
|
+
# 4. Comprehensive validation
|
|
262
|
+
node final-integration-test.js
|
|
263
|
+
```
|
|
264
|
+
|
|
265
|
+
**Expected Results:**
|
|
266
|
+
- Server starts without errors on port 3000
|
|
267
|
+
- All unit tests pass
|
|
268
|
+
- Real API tests connect successfully (if keys configured)
|
|
269
|
+
- Integration tests achieve >70% success rate
|
|
270
|
+
|
|
271
|
+
## Project Structure
|
|
272
|
+
|
|
273
|
+
```
|
|
274
|
+
converse/
|
|
275
|
+
āāā src/
|
|
276
|
+
ā āāā index.js # Main server entry point
|
|
277
|
+
ā āāā config.js # Configuration management
|
|
278
|
+
ā āāā router.js # Central request dispatcher
|
|
279
|
+
ā āāā continuationStore.js # State management
|
|
280
|
+
ā āāā systemPrompts.js # Tool system prompts
|
|
281
|
+
ā āāā providers/ # AI provider implementations
|
|
282
|
+
ā ā āāā index.js # Provider registry
|
|
283
|
+
ā ā āāā openai.js # OpenAI provider
|
|
284
|
+
ā ā āāā xai.js # XAI provider
|
|
285
|
+
ā ā āāā google.js # Google provider
|
|
286
|
+
ā āāā tools/ # MCP tool implementations
|
|
287
|
+
ā ā āāā index.js # Tool registry
|
|
288
|
+
ā ā āāā chat.js # Chat tool
|
|
289
|
+
ā ā āāā consensus.js # Consensus tool
|
|
290
|
+
ā āāā utils/ # Utility modules
|
|
291
|
+
ā āāā contextProcessor.js # File/image processing
|
|
292
|
+
ā āāā errorHandler.js # Error handling
|
|
293
|
+
ā āāā logger.js # Logging utilities
|
|
294
|
+
āāā tests/ # Comprehensive test suite
|
|
295
|
+
āāā docs/ # API and architecture docs
|
|
296
|
+
āāā package.json # Dependencies and scripts
|
|
297
|
+
```
|
|
298
|
+
|
|
299
|
+
## Configuration Options
|
|
300
|
+
|
|
301
|
+
### Environment Variables
|
|
302
|
+
|
|
303
|
+
| Variable | Description | Default | Example |
|
|
304
|
+
|----------|-------------|---------|---------|
|
|
305
|
+
| `PORT` | Server port | `3000` | `3000` |
|
|
306
|
+
| `LOG_LEVEL` | Logging level | `info` | `debug`, `info`, `error` |
|
|
307
|
+
| `MAX_MCP_OUTPUT_TOKENS` | Token response limit | `25000` | `200000` |
|
|
308
|
+
| `GOOGLE_LOCATION` | Google API region | `us-central1` | `us-central1` |
|
|
309
|
+
| `XAI_BASE_URL` | XAI API endpoint | `https://api.x.ai/v1` | Custom endpoint |
|
|
310
|
+
|
|
311
|
+
### Model Selection
|
|
312
|
+
|
|
313
|
+
Use `"auto"` for automatic model selection, or specify exact models:
|
|
314
|
+
|
|
315
|
+
```javascript
|
|
316
|
+
// Auto-selection (recommended)
|
|
317
|
+
{ "model": "auto" }
|
|
318
|
+
|
|
319
|
+
// Specific models
|
|
320
|
+
{ "model": "gemini-2.5-flash" }
|
|
321
|
+
{ "model": "o3" }
|
|
322
|
+
{ "model": "grok-4-0709" }
|
|
323
|
+
|
|
324
|
+
// Using aliases
|
|
325
|
+
{ "model": "flash" } // -> gemini-2.5-flash
|
|
326
|
+
{ "model": "pro" } // -> gemini-2.5-pro
|
|
327
|
+
{ "model": "grok" } // -> grok-4-0709
|
|
328
|
+
```
|
|
329
|
+
|
|
330
|
+
## Troubleshooting
|
|
331
|
+
|
|
332
|
+
### Common Issues
|
|
333
|
+
|
|
334
|
+
**Server won't start:**
|
|
335
|
+
```bash
|
|
336
|
+
# Check Node.js version
|
|
337
|
+
node --version # Should be >= 20.0.0
|
|
338
|
+
|
|
339
|
+
# Check for port conflicts
|
|
340
|
+
PORT=3001 npm start
|
|
341
|
+
```
|
|
342
|
+
|
|
343
|
+
**API key errors:**
|
|
344
|
+
```bash
|
|
345
|
+
# Verify your .env file format
|
|
346
|
+
cat .env
|
|
347
|
+
|
|
348
|
+
# Test API keys
|
|
349
|
+
npm run test:real-api
|
|
350
|
+
```
|
|
351
|
+
|
|
352
|
+
**Module import errors:**
|
|
353
|
+
```bash
|
|
354
|
+
# Clear cache and reinstall
|
|
355
|
+
npm run clean
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
### Debug Mode
|
|
359
|
+
|
|
360
|
+
```bash
|
|
361
|
+
# Enable debug logging
|
|
362
|
+
LOG_LEVEL=debug npm run dev
|
|
363
|
+
|
|
364
|
+
# Start with debugger
|
|
365
|
+
npm run debug
|
|
366
|
+
|
|
367
|
+
# Trace all operations
|
|
368
|
+
LOG_LEVEL=trace npm run dev
|
|
369
|
+
```
|
|
370
|
+
|
|
371
|
+
## Documentation
|
|
372
|
+
|
|
373
|
+
- **API Reference**: [docs/API.md](docs/API.md)
|
|
374
|
+
- **Architecture Guide**: [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
|
|
375
|
+
- **Integration Examples**: [docs/EXAMPLES.md](docs/EXAMPLES.md)
|
|
376
|
+
|
|
377
|
+
## Contributing
|
|
378
|
+
|
|
379
|
+
1. Fork the repository
|
|
380
|
+
2. Create a feature branch: `git checkout -b feature/amazing-feature`
|
|
381
|
+
3. Make your changes
|
|
382
|
+
4. Run tests: `npm run validate`
|
|
383
|
+
5. Commit changes: `git commit -m 'Add amazing feature'`
|
|
384
|
+
6. Push to branch: `git push origin feature/amazing-feature`
|
|
385
|
+
7. Open a Pull Request
|
|
386
|
+
|
|
387
|
+
### Development Setup
|
|
388
|
+
|
|
389
|
+
```bash
|
|
390
|
+
# Fork and clone your fork
|
|
391
|
+
git clone https://github.com/yourusername/converse.git
|
|
392
|
+
cd converse
|
|
393
|
+
|
|
394
|
+
# Install dependencies
|
|
395
|
+
npm install
|
|
396
|
+
|
|
397
|
+
# Create feature branch
|
|
398
|
+
git checkout -b feature/your-feature
|
|
399
|
+
|
|
400
|
+
# Make changes and test
|
|
401
|
+
npm run validate
|
|
402
|
+
|
|
403
|
+
# Commit and push
|
|
404
|
+
git add .
|
|
405
|
+
git commit -m "Description of changes"
|
|
406
|
+
git push origin feature/your-feature
|
|
407
|
+
```
|
|
408
|
+
|
|
409
|
+
## Acknowledgments
|
|
410
|
+
|
|
411
|
+
This MCP Server was inspired by and builds upon the excellent work from [BeehiveInnovations/zen-mcp-server](https://github.com/BeehiveInnovations/zen-mcp-server). We're grateful for their pioneering implementation and innovative approach to MCP server development.
|
|
412
|
+
|
|
413
|
+
## License
|
|
414
|
+
|
|
415
|
+
MIT License - see [LICENSE](LICENSE) file for details.
|
|
416
|
+
|
|
417
|
+
## Links
|
|
418
|
+
|
|
419
|
+
- **GitHub**: https://github.com/FallDownTheSystem/converse
|
|
420
|
+
- **Issues**: https://github.com/FallDownTheSystem/converse/issues
|
|
421
|
+
- **NPM Package**: https://www.npmjs.com/package/converse-mcp-server
|
|
422
|
+
|
|
423
|
+
---
|
|
424
|
+
|
|
425
|
+
**Built with ❤️ using Node.js and modern AI APIs**
|
package/bin/converse.js
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
/**
|
|
4
|
+
* Converse MCP Server - CLI Entry Point
|
|
5
|
+
*
|
|
6
|
+
* This script allows the MCP server to be run via npx/pnpm dlx for easy installation and execution.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { fileURLToPath, pathToFileURL } from 'url';
|
|
10
|
+
import { dirname, join } from 'path';
|
|
11
|
+
import { createRequire } from 'module';
|
|
12
|
+
|
|
13
|
+
// Get the directory of this script
|
|
14
|
+
const __filename = fileURLToPath(import.meta.url);
|
|
15
|
+
const __dirname = dirname(__filename);
|
|
16
|
+
|
|
17
|
+
// Get the project root (parent of bin directory)
|
|
18
|
+
const projectRoot = dirname(__dirname);
|
|
19
|
+
|
|
20
|
+
// Import and start the server
|
|
21
|
+
try {
|
|
22
|
+
const indexPath = join(projectRoot, 'src/index.js');
|
|
23
|
+
const { startServer } = await import(pathToFileURL(indexPath).href);
|
|
24
|
+
|
|
25
|
+
console.log('š Starting Converse MCP Server...');
|
|
26
|
+
console.log(`š Project root: ${projectRoot}`);
|
|
27
|
+
|
|
28
|
+
await startServer();
|
|
29
|
+
} catch (error) {
|
|
30
|
+
console.error('ā Failed to start Converse MCP Server:', error.message);
|
|
31
|
+
|
|
32
|
+
if (error.code === 'ERR_MODULE_NOT_FOUND') {
|
|
33
|
+
console.error('\nš” Troubleshooting:');
|
|
34
|
+
console.error(' 1. Ensure you have Node.js >= 20.0.0');
|
|
35
|
+
console.error(' 2. Try: npm install (if running from source)');
|
|
36
|
+
console.error(' 3. Check that all dependencies are installed');
|
|
37
|
+
} else if (error.message.includes('API key')) {
|
|
38
|
+
console.error('\nš API Key Configuration:');
|
|
39
|
+
console.error(' 1. Create a .env file with your API keys');
|
|
40
|
+
console.error(' 2. Set environment variables in your MCP client');
|
|
41
|
+
console.error(' 3. See README.md for detailed setup instructions');
|
|
42
|
+
}
|
|
43
|
+
|
|
44
|
+
process.exit(1);
|
|
45
|
+
}
|