resuml 1.9.0 → 1.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/scripts/mcp-call.mjs +99 -0
- package/scripts/test-mcp.mjs +583 -0
package/package.json
CHANGED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Simple MCP client — sends one tool/resource/prompt call and prints the result.
|
|
4
|
+
*
|
|
5
|
+
* Usage:
|
|
6
|
+
* node scripts/mcp-call.mjs tool <toolName> '<json args>'
|
|
7
|
+
* node scripts/mcp-call.mjs resource <uri>
|
|
8
|
+
* node scripts/mcp-call.mjs prompt <name> '<json args>'
|
|
9
|
+
* node scripts/mcp-call.mjs list-tools
|
|
10
|
+
* node scripts/mcp-call.mjs list-resources
|
|
11
|
+
* node scripts/mcp-call.mjs list-prompts
|
|
12
|
+
*/
|
|
13
|
+
import { spawn } from 'child_process';
|
|
14
|
+
import { resolve, dirname } from 'path';
|
|
15
|
+
import { fileURLToPath } from 'url';
|
|
16
|
+
import { existsSync } from 'fs';
|
|
17
|
+
|
|
18
|
+
// Resolve the package root relative to this script (scripts/ → package root).
const __dirname = dirname(fileURLToPath(import.meta.url));
const ROOT = resolve(__dirname, '..');
// Prefer the ESM build when it exists; otherwise fall back to the CJS bundle.
const entry = existsSync(resolve(ROOT, 'dist/index.js'))
  ? './dist/index.js' : './dist/index.cjs';

// CLI: subcommand plus up to two positional arguments (name/uri, JSON args).
const [,, cmd, arg1, arg2] = process.argv;
|
|
24
|
+
|
|
25
|
+
/**
 * Spawn the MCP server, perform the JSON-RPC 2.0 handshake over stdio,
 * execute the single requested command, print the result (or error) as
 * pretty JSON, and shut the server down.
 */
async function run() {
  const proc = spawn('node', [entry, 'mcp'], { cwd: ROOT, stdio: ['pipe', 'pipe', 'pipe'] });
  let buf = '';
  const pending = new Map();
  let msgId = 0;

  // FIX: fail fast if the server dies before answering — otherwise the
  // unsettled promises in `pending` would hang this script forever.
  // (test-mcp.mjs already guards its client the same way.)
  proc.on('close', (code) => {
    if (pending.size > 0) {
      console.error(`MCP server exited with code ${code} before responding`);
      process.exit(1);
    }
  });

  // Responses arrive as newline-delimited JSON on stdout.
  proc.stdout.on('data', (chunk) => {
    buf += chunk.toString();
    let idx;
    while ((idx = buf.indexOf('\n')) !== -1) {
      const line = buf.slice(0, idx).trim();
      buf = buf.slice(idx + 1);
      if (!line) continue;
      try {
        const msg = JSON.parse(line);
        if (msg.id != null && pending.has(msg.id)) {
          const { resolve } = pending.get(msg.id);
          pending.delete(msg.id);
          resolve(msg);
        }
      } catch {
        // Non-JSON output (stray logging) is ignored on purpose.
      }
    }
  });

  // Send one JSON-RPC request; resolves with the full response message.
  // Note: `resolveMsg` deliberately avoids shadowing path.resolve.
  function send(method, params) {
    return new Promise((resolveMsg) => {
      const id = ++msgId;
      pending.set(id, { resolve: resolveMsg });
      proc.stdin.write(JSON.stringify({ jsonrpc: '2.0', id, method, params }) + '\n');
    });
  }

  // Initialize
  await send('initialize', {
    protocolVersion: '2025-03-26',
    capabilities: {},
    clientInfo: { name: 'mcp-call', version: '1.0.0' },
  });
  proc.stdin.write(JSON.stringify({ jsonrpc: '2.0', method: 'notifications/initialized', params: {} }) + '\n');

  let result;
  switch (cmd) {
    case 'tool': {
      const args = arg2 ? JSON.parse(arg2) : {};
      result = await send('tools/call', { name: arg1, arguments: args });
      break;
    }
    case 'resource':
      result = await send('resources/read', { uri: arg1 });
      break;
    case 'prompt': {
      const args = arg2 ? JSON.parse(arg2) : {};
      result = await send('prompts/get', { name: arg1, arguments: args });
      break;
    }
    case 'list-tools':
      result = await send('tools/list', {});
      break;
    case 'list-resources':
      result = await send('resources/list', {});
      break;
    case 'list-prompts':
      result = await send('prompts/list', {});
      break;
    default:
      console.error('Usage: mcp-call.mjs tool|resource|prompt|list-tools|list-resources|list-prompts');
      process.exit(1);
  }

  // A JSON-RPC response carries either `result` or `error`; print whichever.
  console.log(JSON.stringify(result.result ?? result.error, null, 2));
  proc.stdin.end();
  proc.kill();
}

run().catch(e => { console.error(e); process.exit(1); });
|
|
@@ -0,0 +1,583 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* MCP Server Integration Test
|
|
4
|
+
*
|
|
5
|
+
* Spawns the resuml MCP server and exercises every tool, resource, and prompt
|
|
6
|
+
* using the JSON-RPC 2.0 protocol over stdio.
|
|
7
|
+
*
|
|
8
|
+
* Usage: node scripts/test-mcp.mjs
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { spawn } from 'child_process';
|
|
12
|
+
import { resolve, dirname } from 'path';
|
|
13
|
+
import { fileURLToPath } from 'url';
|
|
14
|
+
import { existsSync } from 'fs';
|
|
15
|
+
|
|
16
|
+
const __dirname = dirname(fileURLToPath(import.meta.url));
|
|
17
|
+
const ROOT = resolve(__dirname, '..');
|
|
18
|
+
|
|
19
|
+
// ── Job Description ─────────────────────────────────────────────────
|
|
20
|
+
|
|
21
|
+
const JOB_DESCRIPTION = `
|
|
22
|
+
Summary
|
|
23
|
+
|
|
24
|
+
We are the ML Data Team of the Intelligent System Experience (ISE) group at Apple.
|
|
25
|
+
We are responsible for building high quality datasets at scale. Our team produces
|
|
26
|
+
datasets used in the training of ML and AI-centric features for many Apple products,
|
|
27
|
+
including iPhone, iPad, Mac, Apple Watch and AirPods.
|
|
28
|
+
|
|
29
|
+
We're looking for an exceptional software & data engineer who is passionate about
|
|
30
|
+
Apple products; who has a passion for data, is comfortable in a fast pace environment
|
|
31
|
+
and who is committed to improving ML data pipelines.
|
|
32
|
+
|
|
33
|
+
Responsibilities:
|
|
34
|
+
- Design consistent and robust data models
|
|
35
|
+
- Design and implement data pipelines to process data at scale (Petabyte scale)
|
|
36
|
+
- Automate data flows, build self service tooling
|
|
37
|
+
- Production-ize synthetic data workflows
|
|
38
|
+
- Preprocess, transform and clean data (tabular, image, video, text) at scale
|
|
39
|
+
- Interact with ML models to optimize human-in-the-loop workflows
|
|
40
|
+
- Support day-to-day operations of the data team
|
|
41
|
+
|
|
42
|
+
Minimum Qualifications:
|
|
43
|
+
- Bachelors, Masters or PhD in Computer Science, Mathematics, Physics or related field
|
|
44
|
+
- Excellent programming skills in Python with strong CS foundations (data structures, parallelization)
|
|
45
|
+
- Experience in Machine Learning (model training) in NLP or Computer Vision
|
|
46
|
+
- Design, prototype and put in production robust data components that scale
|
|
47
|
+
|
|
48
|
+
Preferred Qualifications:
|
|
49
|
+
- Data orchestration frameworks: Airflow, SQL/NoSQL, Docker, Kubernetes, Spark, Databricks
|
|
50
|
+
- Resilient in fast pace environment, comfortable with ambiguity
|
|
51
|
+
- Excellent written and verbal communication skills
|
|
52
|
+
- Experience designing and implementing agentic workflows
|
|
53
|
+
`.trim();
|
|
54
|
+
|
|
55
|
+
// ── Test Resume YAML ────────────────────────────────────────────────
|
|
56
|
+
|
|
57
|
+
const RESUME_YAML = `
|
|
58
|
+
basics:
|
|
59
|
+
name: "Alex Chen"
|
|
60
|
+
label: "Senior ML Data Engineer"
|
|
61
|
+
email: "alex.chen@example.com"
|
|
62
|
+
phone: "+1-408-555-0142"
|
|
63
|
+
url: "https://alexchen.dev"
|
|
64
|
+
summary: >-
|
|
65
|
+
Senior ML Data Engineer with 7+ years of experience building
|
|
66
|
+
production-grade data pipelines for machine learning at scale.
|
|
67
|
+
Specialized in computer vision and NLP data processing, with deep
|
|
68
|
+
expertise in Python, distributed systems, and data orchestration.
|
|
69
|
+
location:
|
|
70
|
+
city: "Cupertino"
|
|
71
|
+
countryCode: "US"
|
|
72
|
+
region: "California"
|
|
73
|
+
profiles:
|
|
74
|
+
- network: "LinkedIn"
|
|
75
|
+
username: "alexchen-ml"
|
|
76
|
+
url: "https://linkedin.com/in/alexchen-ml"
|
|
77
|
+
- network: "GitHub"
|
|
78
|
+
username: "alexchen"
|
|
79
|
+
url: "https://github.com/alexchen"
|
|
80
|
+
|
|
81
|
+
work:
|
|
82
|
+
- name: "Meta"
|
|
83
|
+
position: "Senior ML Data Engineer"
|
|
84
|
+
url: "https://meta.com"
|
|
85
|
+
startDate: "2021-06-01"
|
|
86
|
+
summary: "Led data pipeline architecture for ML training infrastructure"
|
|
87
|
+
highlights:
|
|
88
|
+
- "Designed and implemented petabyte-scale data pipelines processing 50TB+ daily for computer vision model training"
|
|
89
|
+
- "Built automated data quality validation framework reducing bad training data by 73%"
|
|
90
|
+
- "Led team of 5 engineers to production-ize synthetic data generation workflows for NLP models"
|
|
91
|
+
- "Optimized Spark jobs reducing preprocessing time by 60% across 2000+ GPU cluster"
|
|
92
|
+
- "Implemented human-in-the-loop annotation pipeline serving 200+ annotators with 99.5% uptime"
|
|
93
|
+
|
|
94
|
+
- name: "Amazon Web Services"
|
|
95
|
+
position: "Data Engineer"
|
|
96
|
+
url: "https://aws.amazon.com"
|
|
97
|
+
startDate: "2018-03-01"
|
|
98
|
+
endDate: "2021-05-31"
|
|
99
|
+
summary: "Built ML data infrastructure for Alexa and Rekognition teams"
|
|
100
|
+
highlights:
|
|
101
|
+
- "Designed robust data models for multi-modal ML datasets spanning text, image, and video domains"
|
|
102
|
+
- "Implemented Airflow-based orchestration processing 10TB+ daily with 99.9% reliability"
|
|
103
|
+
- "Built self-service data tooling enabling PMs and researchers to iterate 3x faster on dataset creation"
|
|
104
|
+
- "Preprocessed and transformed training data for NLP models powering Alexa voice recognition"
|
|
105
|
+
- "Reduced data pipeline costs by 40% through Kubernetes-based auto-scaling and resource optimization"
|
|
106
|
+
|
|
107
|
+
- name: "Nvidia"
|
|
108
|
+
position: "Junior Data Engineer"
|
|
109
|
+
url: "https://nvidia.com"
|
|
110
|
+
startDate: "2016-07-01"
|
|
111
|
+
endDate: "2018-02-28"
|
|
112
|
+
summary: "Data pipeline development for autonomous driving ML team"
|
|
113
|
+
highlights:
|
|
114
|
+
- "Built data preprocessing pipelines for computer vision training on autonomous driving datasets"
|
|
115
|
+
- "Implemented data cleaning workflows processing 500K+ images daily with automated quality checks"
|
|
116
|
+
- "Developed Python tools for dataset versioning and lineage tracking across ML experiments"
|
|
117
|
+
|
|
118
|
+
education:
|
|
119
|
+
- institution: "Stanford University"
|
|
120
|
+
area: "Computer Science"
|
|
121
|
+
studyType: "Master"
|
|
122
|
+
startDate: "2014-09-01"
|
|
123
|
+
endDate: "2016-06-01"
|
|
124
|
+
|
|
125
|
+
- institution: "UC Berkeley"
|
|
126
|
+
area: "Mathematics and Computer Science"
|
|
127
|
+
studyType: "Bachelor"
|
|
128
|
+
startDate: "2010-09-01"
|
|
129
|
+
endDate: "2014-06-01"
|
|
130
|
+
|
|
131
|
+
skills:
|
|
132
|
+
- name: "Programming"
|
|
133
|
+
level: "Expert"
|
|
134
|
+
keywords: ["Python", "SQL", "Scala", "Bash", "Go"]
|
|
135
|
+
|
|
136
|
+
- name: "Data Engineering"
|
|
137
|
+
level: "Expert"
|
|
138
|
+
keywords: ["Spark", "Airflow", "Databricks", "Kafka", "ETL", "Data Pipelines", "Data Models"]
|
|
139
|
+
|
|
140
|
+
- name: "Machine Learning"
|
|
141
|
+
level: "Advanced"
|
|
142
|
+
keywords: ["Computer Vision", "NLP", "Model Training", "PyTorch", "TensorFlow", "Synthetic Data"]
|
|
143
|
+
|
|
144
|
+
- name: "Infrastructure"
|
|
145
|
+
level: "Advanced"
|
|
146
|
+
keywords: ["Docker", "Kubernetes", "AWS", "GCP", "CI/CD", "Distributed Systems"]
|
|
147
|
+
|
|
148
|
+
- name: "Data Storage"
|
|
149
|
+
level: "Advanced"
|
|
150
|
+
keywords: ["PostgreSQL", "NoSQL", "MongoDB", "Redis", "S3", "HDFS", "Delta Lake"]
|
|
151
|
+
|
|
152
|
+
projects:
|
|
153
|
+
- name: "ML Data Quality Framework"
|
|
154
|
+
description: "Open-source framework for automated ML training data validation"
|
|
155
|
+
highlights:
|
|
156
|
+
- "Implemented statistical drift detection across 15+ data quality dimensions"
|
|
157
|
+
- "Adopted by 3 internal teams, reducing data incidents by 85%"
|
|
158
|
+
keywords: ["Python", "Data Quality", "ML", "Open Source"]
|
|
159
|
+
startDate: "2023-01-01"
|
|
160
|
+
url: "https://github.com/alexchen/ml-data-quality"
|
|
161
|
+
|
|
162
|
+
- name: "Agentic Data Pipeline Orchestrator"
|
|
163
|
+
description: "LLM-powered agent for automated data pipeline debugging and optimization"
|
|
164
|
+
highlights:
|
|
165
|
+
- "Designed agentic workflow that autonomously diagnoses and fixes 60% of pipeline failures"
|
|
166
|
+
- "Reduced on-call burden by 45% through intelligent alert routing"
|
|
167
|
+
keywords: ["Agentic Workflows", "LLM", "Python", "Airflow"]
|
|
168
|
+
startDate: "2024-06-01"
|
|
169
|
+
|
|
170
|
+
certificates:
|
|
171
|
+
- name: "AWS Certified Data Engineer - Associate"
|
|
172
|
+
date: "2023-03-15"
|
|
173
|
+
issuer: "Amazon Web Services"
|
|
174
|
+
|
|
175
|
+
- name: "Databricks Certified Data Engineer Professional"
|
|
176
|
+
date: "2024-01-10"
|
|
177
|
+
issuer: "Databricks"
|
|
178
|
+
|
|
179
|
+
languages:
|
|
180
|
+
- language: "English"
|
|
181
|
+
fluency: "Native speaker"
|
|
182
|
+
- language: "Mandarin"
|
|
183
|
+
fluency: "Native speaker"
|
|
184
|
+
`.trim();
|
|
185
|
+
|
|
186
|
+
// ── MCP Client ──────────────────────────────────────────────────────
|
|
187
|
+
|
|
188
|
+
/**
 * Minimal JSON-RPC 2.0 client for the resuml MCP server, speaking
 * newline-delimited JSON over a spawned child process's stdio.
 */
class McpTestClient {
  constructor() {
    this.msgId = 0;            // monotonically increasing request id
    this.pending = new Map();  // id → { resolve, reject } for in-flight requests
    this.buffer = '';          // accumulates partial stdout data until a newline
  }

  /**
   * Spawn the MCP server and run the `initialize` handshake.
   * Resolves with the server's initialize result; rejects on spawn failure,
   * premature exit, or a JSON-RPC error response.
   */
  start() {
    // FIX: compute the entry point *before* entering the Promise executor.
    // Previously this expression lived inside `new Promise((resolve, reject) => …)`,
    // where the executor's `resolve` parameter shadowed path.resolve — so
    // `resolve(ROOT, 'dist/index.js')` settled the promise immediately with
    // ROOT (not a joined path), and the existsSync() probe never saw a real
    // path (existsSync swallows the bad argument and returns false).
    const entry = existsSync(resolve(ROOT, 'dist/index.js'))
      ? './dist/index.js'
      : './dist/index.cjs';

    return new Promise((resolveStart, rejectStart) => {
      this.proc = spawn('node', [entry, 'mcp'], {
        cwd: ROOT,
        stdio: ['pipe', 'pipe', 'pipe'],
      });

      this.proc.stdout.on('data', (chunk) => this.onData(chunk));
      this.proc.stderr.on('data', (chunk) => {
        // MCP debug output goes to stderr, ignore
      });
      this.proc.on('error', rejectStart);
      this.proc.on('close', (code) => {
        // Fail any requests still in flight so callers never hang forever.
        if (this.pending.size > 0) {
          for (const [, { reject: rej }] of this.pending) {
            rej(new Error(`MCP server exited with code ${code}`));
          }
        }
      });

      // Initialize the MCP connection
      this.send('initialize', {
        protocolVersion: '2025-03-26',
        capabilities: {},
        clientInfo: { name: 'test-client', version: '1.0.0' },
      }).then((result) => {
        // Send initialized notification
        this.notify('notifications/initialized', {});
        resolveStart(result);
      }).catch(rejectStart);
    });
  }

  /** Parse newline-delimited JSON-RPC responses and settle matching requests. */
  onData(chunk) {
    this.buffer += chunk.toString();
    // MCP messages are newline-delimited JSON
    let newlineIdx;
    while ((newlineIdx = this.buffer.indexOf('\n')) !== -1) {
      const line = this.buffer.slice(0, newlineIdx).trim();
      this.buffer = this.buffer.slice(newlineIdx + 1);
      if (!line) continue;
      try {
        const msg = JSON.parse(line);
        if (msg.id != null && this.pending.has(msg.id)) {
          const { resolve, reject } = this.pending.get(msg.id);
          this.pending.delete(msg.id);
          if (msg.error) reject(new Error(JSON.stringify(msg.error)));
          else resolve(msg.result);
        }
      } catch {
        // skip non-JSON lines
      }
    }
  }

  /** Send one JSON-RPC request; resolves with `result`, rejects on `error`. */
  send(method, params) {
    return new Promise((resolveMsg, rejectMsg) => {
      const id = ++this.msgId;
      this.pending.set(id, { resolve: resolveMsg, reject: rejectMsg });
      const msg = JSON.stringify({ jsonrpc: '2.0', id, method, params });
      this.proc.stdin.write(msg + '\n');
    });
  }

  /** Fire-and-forget JSON-RPC notification (no id, no response expected). */
  notify(method, params) {
    const msg = JSON.stringify({ jsonrpc: '2.0', method, params });
    this.proc.stdin.write(msg + '\n');
  }

  async callTool(name, args = {}) {
    return this.send('tools/call', { name, arguments: args });
  }

  async listTools() {
    return this.send('tools/list', {});
  }

  async listResources() {
    return this.send('resources/list', {});
  }

  async readResource(uri) {
    return this.send('resources/read', { uri });
  }

  async listPrompts() {
    return this.send('prompts/list', {});
  }

  async getPrompt(name, args = {}) {
    return this.send('prompts/get', { name, arguments: args });
  }

  /** Close stdin and kill the child process (safe to call if never started). */
  stop() {
    if (this.proc) {
      this.proc.stdin.end();
      this.proc.kill();
    }
  }
}
|
|
298
|
+
|
|
299
|
+
// ── Helpers ─────────────────────────────────────────────────────────
|
|
300
|
+
|
|
301
|
+
// Print a cyan double-rule banner around `text`.
function header(text) {
  const rule = '═'.repeat(70);
  const banner = [
    `\n\x1b[36m${rule}\x1b[0m`,
    `\x1b[1;36m  ${text}\x1b[0m`,
    `\x1b[36m${rule}\x1b[0m\n`,
  ];
  for (const row of banner) {
    console.log(row);
  }
}

// Print a yellow single-rule section divider, padded to a fixed width.
function subheader(text) {
  const pad = '─'.repeat(Math.max(0, 64 - text.length));
  console.log(`\n\x1b[33m── ${text} ${pad}\x1b[0m\n`);
}

// One-line status helpers: green check, red cross, dim informational text.
function ok(msg) {
  console.log(`  \x1b[32m✓\x1b[0m ${msg}`);
}
function fail(msg) {
  console.log(`  \x1b[31m✗\x1b[0m ${msg}`);
}
function info(msg) {
  console.log(`  \x1b[90m${msg}\x1b[0m`);
}
|
|
315
|
+
|
|
316
|
+
// Clamp `str` to at most `max` characters, appending a single ellipsis
// when anything was cut off.
function truncate(str, max = 200) {
  return str.length > max ? `${str.slice(0, max)}…` : str;
}
|
|
320
|
+
|
|
321
|
+
// Running totals for the final report printed by main().
let passed = 0;
let failed = 0;

// Record one test outcome: print a ✓/✗ line (optionally with a detail
// suffix on failure) and bump the matching counter.
function assert(condition, label, detail) {
  if (!condition) {
    const suffix = detail ? ': ' + detail : '';
    fail(`${label}${suffix}`);
    failed++;
    return;
  }
  ok(label);
  passed++;
}
|
|
328
|
+
|
|
329
|
+
// ── Main Test ───────────────────────────────────────────────────────
|
|
330
|
+
|
|
331
|
+
/**
 * End-to-end integration test: builds the package, spawns the MCP server,
 * and exercises every registered tool, resource, and prompt in sequence,
 * tallying results via the local assert()/ok()/fail() helpers.
 * Sets a non-zero exit code on any failure; always stops the client.
 */
async function main() {
  console.log('\x1b[1m\n🚀 resuml MCP Server Integration Test\x1b[0m');
  console.log('   Testing all tools, resources, and prompts\n');

  // Build first
  header('0. Build');
  console.log('   Building dist/index.js...');
  const { execSync } = await import('child_process');
  try {
    execSync('npm run build:lib', { cwd: ROOT, stdio: 'pipe' });
    ok('Build succeeded');
  } catch (e) {
    // No point running protocol tests against a stale/missing build.
    fail('Build failed: ' + e.message);
    process.exit(1);
  }

  const client = new McpTestClient();

  try {
    // ── Initialize ────────────────────────────────────────────────

    header('1. Initialize MCP Connection');
    const initResult = await client.start();
    assert(initResult != null, 'Server initialized successfully');
    info(`Init response keys: ${Object.keys(initResult).join(', ')}`);
    info(`Protocol version: ${initResult.protocolVersion || 'negotiated'}`);

    // ── List Tools ────────────────────────────────────────────────

    header('2. List Tools');
    const { tools } = await client.listTools();
    const toolNames = tools.map(t => t.name);
    info(`Found ${tools.length} tools: ${toolNames.join(', ')}`);
    // Every tool the server is expected to register must be present.
    for (const expected of ['resuml_init_resume', 'resuml_validate', 'resuml_ats_check', 'resuml_render', 'resuml_list_themes', 'resuml_export_pdf']) {
      assert(toolNames.includes(expected), `Tool "${expected}" registered`);
    }

    // ── List Resources ────────────────────────────────────────────

    header('3. List Resources');
    const { resources } = await client.listResources();
    const resourceUris = resources.map(r => r.uri);
    info(`Found ${resources.length} resources:`);
    for (const r of resources) {
      info(`  ${r.uri} — ${r.description?.slice(0, 60) || r.name}`);
    }
    assert(resourceUris.includes('resuml://schema/json-resume'), 'Resource: json-resume schema');
    assert(resourceUris.includes('resuml://docs/ats-scoring'), 'Resource: ats-scoring rubric');
    assert(resourceUris.includes('resuml://themes/catalog'), 'Resource: theme catalog');

    // ── Read Resources ────────────────────────────────────────────

    header('4. Read Resources');

    // Spot-check each resource's payload rather than its full content.
    subheader('4a. JSON Resume Schema');
    const schema = await client.readResource('resuml://schema/json-resume');
    const schemaText = schema.contents[0].text;
    assert(schemaText.includes('basics'), 'Schema contains "basics" section');
    assert(schemaText.includes('work'), 'Schema contains "work" section');
    assert(schemaText.includes('ISO 8601'), 'Schema mentions ISO 8601 dates');
    info(`Schema length: ${schemaText.length} chars`);

    subheader('4b. ATS Scoring Rubric');
    const rubric = await client.readResource('resuml://docs/ats-scoring');
    const rubricText = rubric.contents[0].text;
    assert(rubricText.includes('contact-complete'), 'Rubric lists contact-complete check');
    assert(rubricText.includes('action-verbs'), 'Rubric lists action-verbs check');
    assert(rubricText.includes('90-100'), 'Rubric defines scoring scale');
    info(`Rubric length: ${rubricText.length} chars`);

    subheader('4c. Theme Catalog');
    const catalog = await client.readResource('resuml://themes/catalog');
    const catalogData = JSON.parse(catalog.contents[0].text);
    assert(catalogData.themes.length > 0, `Theme catalog has ${catalogData.themes.length} themes`);
    assert(catalogData.themes.some(t => t.name === 'even'), 'Catalog includes "even" theme');
    assert(catalogData.themes.some(t => t.name === 'stackoverflow'), 'Catalog includes "stackoverflow" theme');
    const installedThemes = catalogData.themes.filter(t => t.installed);
    info(`Installed themes: ${installedThemes.map(t => t.name).join(', ') || 'none'}`);

    // ── List Prompts ──────────────────────────────────────────────

    header('5. List Prompts');
    const { prompts } = await client.listPrompts();
    const promptNames = prompts.map(p => p.name);
    info(`Found ${prompts.length} prompts: ${promptNames.join(', ')}`);
    assert(promptNames.includes('tailor-resume-to-jd'), 'Prompt: tailor-resume-to-jd');
    assert(promptNames.includes('optimize-ats-score'), 'Prompt: optimize-ats-score');
    assert(promptNames.includes('review-resume'), 'Prompt: review-resume');

    // ── Get Prompt ────────────────────────────────────────────────

    header('6. Get Prompt: tailor-resume-to-jd');
    const prompt = await client.getPrompt('tailor-resume-to-jd', {
      jobDescription: JOB_DESCRIPTION,
      candidateName: 'Alex Chen',
      candidateEmail: 'alex.chen@example.com',
      candidateBackground: '7 years ML data engineering at Meta, AWS, Nvidia. Expert in Python, Spark, Airflow.',
    });
    assert(prompt.messages?.length > 0, 'Prompt returns messages');
    const promptText = prompt.messages[0].content.text;
    assert(promptText.includes('Job Description'), 'Prompt includes JD');
    assert(promptText.includes('Alex Chen'), 'Prompt includes candidate name');
    assert(promptText.includes('resuml_validate'), 'Prompt instructs to use resuml_validate');
    assert(promptText.includes('resuml_ats_check'), 'Prompt instructs to use resuml_ats_check');
    info(`Prompt text length: ${promptText.length} chars`);
    info(`Preview: ${truncate(promptText, 150)}`);

    // ── Tool: resuml_init_resume ──────────────────────────────────

    header('7. Tool: resuml_init_resume');
    const initRes = await client.callTool('resuml_init_resume', {
      name: 'Alex Chen',
      title: 'Senior ML Data Engineer',
      email: 'alex.chen@example.com',
    });
    const initYaml = initRes.content[0].text;
    assert(initYaml.includes('Alex Chen'), 'Init template has candidate name');
    assert(initYaml.includes('Senior ML Data Engineer'), 'Init template has title');
    assert(initYaml.includes('alex.chen@example.com'), 'Init template has email');
    info(`Template length: ${initYaml.length} chars`);

    // ── Tool: resuml_validate ─────────────────────────────────────

    header('8. Tool: resuml_validate');

    subheader('8a. Valid resume');
    const valResult = await client.callTool('resuml_validate', { yaml: RESUME_YAML });
    const valData = JSON.parse(valResult.content[0].text);
    assert(valData.valid === true, `Validation passed (valid=${valData.valid})`);
    assert(valData.errors.length === 0, `No errors (count=${valData.errors.length})`);

    subheader('8b. Invalid resume');
    // Deliberately malformed YAML must be reported, not crash the server.
    const badResult = await client.callTool('resuml_validate', { yaml: 'this is not valid yaml: [' });
    const badData = JSON.parse(badResult.content[0].text);
    assert(badData.valid === false, `Invalid YAML detected (valid=${badData.valid})`);
    assert(badData.errors.length > 0, `Errors reported (count=${badData.errors.length})`);
    info(`Error: ${truncate(badData.errors[0], 100)}`);

    // ── Tool: resuml_ats_check ────────────────────────────────────

    header('9. Tool: resuml_ats_check');

    subheader('9a. ATS check without JD');
    const atsBasic = await client.callTool('resuml_ats_check', { yaml: RESUME_YAML });
    const atsBasicData = JSON.parse(atsBasic.content[0].text);
    assert(typeof atsBasicData.score === 'number', `Score is a number (${atsBasicData.score})`);
    assert(atsBasicData.rating, `Rating: ${atsBasicData.rating}`);
    assert(Array.isArray(atsBasicData.checks), `Has ${atsBasicData.checks.length} checks`);
    assert(atsBasicData.summary, 'Has summary text');
    info(`Score: ${atsBasicData.score}/100 — Rating: ${atsBasicData.rating}`);
    info(`Summary: ${truncate(atsBasicData.summary, 150)}`);

    const passedChecks = atsBasicData.checks.filter(c => c.passed);
    const failedChecks = atsBasicData.checks.filter(c => !c.passed);
    info(`Checks: ${passedChecks.length} passed, ${failedChecks.length} failed`);
    for (const c of failedChecks) {
      info(`  ⚠ [${c.id}] ${c.message}`);
    }

    subheader('9b. ATS check WITH Apple JD');
    // With a JD supplied the response additionally carries keyword-match data.
    const atsJd = await client.callTool('resuml_ats_check', {
      yaml: RESUME_YAML,
      jobDescription: JOB_DESCRIPTION,
      language: 'en',
    });
    const atsJdData = JSON.parse(atsJd.content[0].text);
    assert(typeof atsJdData.score === 'number', `Score: ${atsJdData.score}/100`);
    assert(atsJdData.keywords, 'Has keyword match data');
    assert(Array.isArray(atsJdData.keywords?.matched), 'Has matched keywords');
    assert(Array.isArray(atsJdData.keywords?.missing), 'Has missing keywords');
    assert(typeof atsJdData.keywords?.matchPercentage === 'number', 'Has match percentage');
    info(`Score: ${atsJdData.score}/100 — Rating: ${atsJdData.rating}`);
    info(`Keyword match: ${atsJdData.keywords.matchPercentage}%`);
    info(`Matched (${atsJdData.keywords.matched.length}): ${atsJdData.keywords.matched.join(', ')}`);
    info(`Missing (${atsJdData.keywords.missing.length}): ${atsJdData.keywords.missing.join(', ')}`);

    // ── Tool: resuml_list_themes ──────────────────────────────────

    header('10. Tool: resuml_list_themes');
    const themesResult = await client.callTool('resuml_list_themes');
    const themesData = JSON.parse(themesResult.content[0].text);
    assert(Array.isArray(themesData.themes), `Has ${themesData.themes.length} themes`);
    const installed = themesData.themes.filter(t => t.installed);
    info(`Total themes: ${themesData.themes.length}, Installed: ${installed.length}`);
    for (const t of installed) {
      info(`  📦 ${t.name} (${t.version})`);
    }

    // ── Tool: resuml_render ───────────────────────────────────────

    header('11. Tool: resuml_render');
    const renderResult = await client.callTool('resuml_render', {
      yaml: RESUME_YAML,
      theme: 'stackoverflow',
      locale: 'en',
    });
    const html = renderResult.content[0].text;
    // Render failures may come back as isError or as a JSON error envelope.
    const renderIsError = renderResult.isError || html.startsWith('{"error"');
    assert(!renderIsError, 'Render succeeded (no error)');
    if (renderIsError) {
      info(`Render error: ${truncate(html, 150)}`);
    } else {
      assert(html.includes('Alex Chen'), 'HTML contains candidate name');
      assert(html.includes('Senior ML') || html.includes('Meta') || html.includes('Stanford'), 'HTML contains resume content');
      assert(html.length > 1000, `HTML is substantial (${html.length} chars)`);
      info(`HTML output: ${html.length} chars`);
      info(`Preview: ${truncate(html.replace(/<[^>]+>/g, ' ').replace(/\s+/g, ' '), 150)}`);
    }

    // ── Tool: resuml_export_pdf (skip if no Playwright) ──────────

    header('12. Tool: resuml_export_pdf');
    info('Attempting PDF export (requires Playwright)...');
    const pdfResult = await client.callTool('resuml_export_pdf', {
      yaml: RESUME_YAML,
      theme: 'stackoverflow',
      format: 'A4',
      locale: 'en',
      margin: '15mm',
    });
    const pdfContent = JSON.parse(pdfResult.content[0].text);
    if (pdfContent.error?.includes('Playwright')) {
      // Missing Playwright is treated as an expected, graceful condition.
      info('⏭ Skipped — Playwright not installed (expected in CI)');
      ok('Graceful error when Playwright missing');
      passed++;
    } else if (pdfContent.pdf) {
      assert(pdfContent.encoding === 'base64', 'PDF is base64 encoded');
      assert(pdfContent.format === 'A4', 'Format is A4');
      const pdfSize = Buffer.from(pdfContent.pdf, 'base64').length;
      assert(pdfSize > 1000, `PDF has content (${(pdfSize / 1024).toFixed(1)} KB)`);
      info(`PDF size: ${(pdfSize / 1024).toFixed(1)} KB`);
    } else {
      fail('Unexpected PDF response: ' + truncate(JSON.stringify(pdfContent), 150));
      failed++;
    }

    // ── Summary ───────────────────────────────────────────────────

    header('Results');
    console.log(`  \x1b[32m${passed} passed\x1b[0m, \x1b[31m${failed} failed\x1b[0m`);
    console.log('');

    if (failed > 0) process.exitCode = 1;

  } catch (err) {
    console.error('\n\x1b[31mFatal error:\x1b[0m', err.message || err);
    process.exitCode = 1;
  } finally {
    // Always tear the server down, even after a fatal error.
    client.stop();
  }
}

main();
|