mcp-sequential-research 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,464 @@
1
+ # Example Workflow: Photonic Computing Patent Research
2
+
3
+ This example demonstrates the complete sequential research workflow for investigating photonic computing technology for AI acceleration.
4
+
5
+ ## Scenario
6
+
7
+ **Research Goal**: Investigate the current state of photonic computing for neural network inference, identify key patents, and produce a citable research report.
8
+
9
+ ---
10
+
11
+ ## Step 1: Generate Research Plan
12
+
13
+ ### Request
14
+
15
+ ```json
16
+ {
17
+ "method": "tools/call",
18
+ "params": {
19
+ "name": "sequential_research_plan",
20
+ "arguments": {
21
+ "topic": "photonic computing for neural network inference acceleration",
22
+ "depth": "standard",
23
+ "focus_areas": [
24
+ "silicon photonics integration",
25
+ "optical matrix multiplication",
26
+ "energy efficiency vs GPUs"
27
+ ],
28
+ "constraints": [
29
+ "patent-focused",
30
+ "post-2020 priority",
31
+ "english only"
32
+ ],
33
+ "output_format": "markdown"
34
+ }
35
+ }
36
+ }
37
+ ```
38
+
39
+ ### Response (Abbreviated)
40
+
41
+ ```json
42
+ {
43
+ "plan_id": "plan-abc123",
44
+ "topic": "photonic computing for neural network inference acceleration",
45
+ "created_at": "2025-01-13T10:00:00.000Z",
46
+ "summary": "Research plan with 8 queries at standard depth. Focus areas: silicon photonics integration, optical matrix multiplication, energy efficiency vs GPUs. Execution in 3 phases.",
47
+ "queries": [
48
+ {
49
+ "query_id": "q1",
50
+ "query_text": "What is photonic computing for neural network inference acceleration",
51
+ "query_type": "definition",
52
+ "extraction_goals": ["Primary definition", "Key characteristics", "Common examples", "Related terminology"],
53
+ "priority": "critical"
54
+ },
55
+ {
56
+ "query_id": "q2",
57
+ "query_text": "Current state of photonic computing for neural network inference acceleration",
58
+ "query_type": "current_state",
59
+ "extraction_goals": ["Current status", "Recent developments", "Key players or stakeholders", "Trends"],
60
+ "priority": "critical",
61
+ "depends_on": ["q1"]
62
+ },
63
+ {
64
+ "query_id": "q3",
65
+ "query_text": "Compare photonic computing for neural network inference acceleration",
66
+ "query_type": "comparison",
67
+ "extraction_goals": ["Key similarities", "Key differences", "Trade-offs", "Use case recommendations"],
68
+ "priority": "medium",
69
+ "depends_on": ["q1"]
70
+ },
71
+ {
72
+ "query_id": "q4",
73
+ "query_text": "photonic computing for neural network inference acceleration: silicon photonics integration",
74
+ "query_type": "current_state",
75
+ "priority": "high",
76
+ "depends_on": ["q1"]
77
+ },
78
+ {
79
+ "query_id": "q5",
80
+ "query_text": "photonic computing for neural network inference acceleration: optical matrix multiplication",
81
+ "query_type": "current_state",
82
+ "priority": "high",
83
+ "depends_on": ["q1"]
84
+ },
85
+ {
86
+ "query_id": "q6",
87
+ "query_text": "photonic computing for neural network inference acceleration: energy efficiency vs GPUs",
88
+ "query_type": "current_state",
89
+ "priority": "high",
90
+ "depends_on": ["q1"]
91
+ }
92
+ ],
93
+ "execution_order": [
94
+ ["q1"],
95
+ ["q2", "q3", "q4", "q5", "q6"]
96
+ ],
97
+ "estimated_sources": 18
98
+ }
99
+ ```
100
+
101
+ ---
102
+
103
+ ## Step 2: Execute Queries
104
+
105
+ Execute queries using appropriate MCP tools. Here's how each query maps to data sources:
106
+
107
+ | Query | Source | MCP Tool |
108
+ |-------|--------|----------|
109
+ | q1 (definition) | Google Patents + Web | `search_patents`, Google Custom Search |
110
+ | q2 (current_state) | News, Patents | Google Custom Search, `search_patents` |
111
+ | q3 (comparison) | Technical papers | Google Custom Search |
112
+ | q4-q6 (focus areas) | Patents primarily | `search_patents` |
113
+
114
+ ### Example: Query q1 via Google Patents MCP
115
+
116
+ ```json
117
+ {
118
+ "method": "tools/call",
119
+ "params": {
120
+ "name": "search_patents",
121
+ "arguments": {
122
+ "query": "photonic computing neural network inference",
123
+ "num_results": 5,
124
+ "country": "US",
125
+ "after": "2020-01-01"
126
+ }
127
+ }
128
+ }
129
+ ```
130
+
131
+ ### Example: Query q1 via Google Search MCP
132
+
133
+ ```json
134
+ {
135
+ "method": "tools/call",
136
+ "params": {
137
+ "name": "google_custom_search",
138
+ "arguments": {
139
+ "query": "what is photonic computing for neural networks",
140
+ "num": 5
141
+ }
142
+ }
143
+ }
144
+ ```
145
+
146
+ ---
147
+
148
+ ## Step 3: Normalize Results
149
+
150
+ Transform raw API responses into the standard `raw_results` format:
151
+
152
+ ```json
153
+ [
154
+ {
155
+ "query_id": "q1",
156
+ "success": true,
157
+ "data": {
158
+ "definition": "Photonic computing for neural network inference uses optical signals to perform matrix operations, achieving orders of magnitude lower latency and power consumption compared to electronic processors.",
159
+ "characteristics": [
160
+ "Parallel matrix multiplication via light interference",
161
+ "Near-zero energy for passive optical operations",
162
+ "Wavelength division multiplexing for high throughput",
163
+ "Femtosecond-scale operation speed"
164
+ ],
165
+ "examples": [
166
+ "Lightmatter Envise photonic chip",
167
+ "Intel Silicon Photonics accelerator",
168
+ "MIT photonic neural network prototype"
169
+ ]
170
+ },
171
+ "sources": [
172
+ {
173
+ "id": "S1",
174
+ "source_type": "document",
175
+ "title": "US11544545B2 - Photonic neural network system",
176
+ "url": "https://patents.google.com/patent/US11544545B2",
177
+ "accessed_date": "2025-01-13",
178
+ "excerpt": "A photonic neural network system comprising an array of Mach-Zehnder interferometers configured to perform matrix-vector multiplication..."
179
+ },
180
+ {
181
+ "id": "S2",
182
+ "source_type": "document",
183
+ "title": "US20230004850A1 - Optical inference accelerator",
184
+ "url": "https://patents.google.com/patent/US20230004850A1",
185
+ "accessed_date": "2025-01-13",
186
+ "excerpt": "An optical inference accelerator for deep learning workloads..."
187
+ },
188
+ {
189
+ "id": "S3",
190
+ "source_type": "web",
191
+ "title": "Photonic Computing Explained - MIT Technology Review",
192
+ "url": "https://technologyreview.com/photonic-computing-explained",
193
+ "accessed_date": "2025-01-13"
194
+ }
195
+ ]
196
+ },
197
+ {
198
+ "query_id": "q2",
199
+ "success": true,
200
+ "data": {
201
+ "current_status": "Photonic computing for AI inference is in early commercialization stage, with several startups shipping development kits and major tech companies investing in R&D.",
202
+ "recent_developments": [
203
+ "Lightmatter raised $154M Series C (2023)",
204
+ "Intel demonstrated 100Gbps silicon photonics (2024)",
205
+ "NVIDIA announced photonic interconnect research program"
206
+ ],
207
+ "key_players": [
208
+ "Lightmatter (Boston)",
209
+ "Luminous Computing (California)",
210
+ "Intel Silicon Photonics",
211
+ "Ayar Labs",
212
+ "MIT Photonics Research Lab"
213
+ ]
214
+ },
215
+ "sources": [
216
+ {
217
+ "id": "S4",
218
+ "source_type": "document",
219
+ "title": "US11675187B2 - Scalable photonic computing architecture",
220
+ "url": "https://patents.google.com/patent/US11675187B2",
221
+ "accessed_date": "2025-01-13"
222
+ },
223
+ {
224
+ "id": "S5",
225
+ "source_type": "web",
226
+ "title": "Lightmatter Series C Announcement",
227
+ "url": "https://lightmatter.co/news/series-c",
228
+ "accessed_date": "2025-01-13"
229
+ }
230
+ ]
231
+ },
232
+ {
233
+ "query_id": "q3",
234
+ "success": true,
235
+ "data": {
236
+ "similarities": [
237
+ "Both photonic and electronic accelerators target matrix operations",
238
+ "Both require custom software stacks",
239
+ "Both face manufacturing scaling challenges"
240
+ ],
241
+ "differences": [
242
+ "Photonic: passive operations consume near-zero energy",
243
+ "Electronic: mature ecosystem and tooling",
244
+ "Photonic: fundamentally analog computation",
245
+ "Electronic: digital precision but higher power"
246
+ ],
247
+ "recommendation": "Photonic accelerators are best suited for inference workloads with high matrix dimensions where latency and power are critical. Electronic accelerators remain preferred for training and general-purpose computing."
248
+ },
249
+ "sources": [
250
+ {
251
+ "id": "S6",
252
+ "source_type": "web",
253
+ "title": "Photonic vs Electronic AI Accelerators - IEEE Spectrum",
254
+ "url": "https://spectrum.ieee.org/photonic-vs-electronic-ai",
255
+ "accessed_date": "2025-01-13"
256
+ }
257
+ ]
258
+ },
259
+ {
260
+ "query_id": "q4",
261
+ "success": true,
262
+ "data": {
263
+ "current_status": "Silicon photonics integration enables manufacturing photonic chips using existing CMOS fabs, dramatically reducing costs.",
264
+ "recent_developments": [
265
+ "GlobalFoundries offering silicon photonics PDK",
266
+ "TSMC announced photonics integration roadmap",
267
+ "Intel shipping Ponte Vecchio with silicon photonics"
268
+ ],
269
+ "key_players": [
270
+ "Intel",
271
+ "GlobalFoundries",
272
+ "TSMC",
273
+ "Tower Semiconductor"
274
+ ]
275
+ },
276
+ "sources": [
277
+ {
278
+ "id": "S7",
279
+ "source_type": "document",
280
+ "title": "US11550101B2 - Silicon photonic integration method",
281
+ "url": "https://patents.google.com/patent/US11550101B2",
282
+ "accessed_date": "2025-01-13"
283
+ }
284
+ ]
285
+ },
286
+ {
287
+ "query_id": "q5",
288
+ "success": true,
289
+ "data": {
290
+ "current_status": "Optical matrix multiplication is the core operation enabling photonic neural networks, using interference patterns to compute dot products.",
291
+ "recent_developments": [
292
+ "Demonstrated 97% accuracy on ImageNet classification",
293
+ "10 TOPS/W efficiency achieved in lab conditions",
294
+ "Wavelength-multiplexed systems reaching 64 parallel channels"
295
+ ],
296
+ "key_players": [
297
+ "Lightmatter",
298
+ "MIT",
299
+ "Stanford Photonics Lab"
300
+ ]
301
+ },
302
+ "sources": [
303
+ {
304
+ "id": "S8",
305
+ "source_type": "document",
306
+ "title": "US11385449B2 - Optical matrix multiplication apparatus",
307
+ "url": "https://patents.google.com/patent/US11385449B2",
308
+ "accessed_date": "2025-01-13"
309
+ }
310
+ ]
311
+ },
312
+ {
313
+ "query_id": "q6",
314
+ "success": true,
315
+ "data": {
316
+ "current_status": "Photonic accelerators demonstrate 10-100x better energy efficiency than GPUs for inference, measured in TOPS/W.",
317
+ "recent_developments": [
318
+ "Lightmatter claims 1000 TOPS/W theoretical peak",
319
+ "NVIDIA H100 achieves ~10 TOPS/W for inference",
320
+ "Academic prototypes reaching 100 TOPS/W"
321
+ ],
322
+ "key_players": [
323
+ "Lightmatter",
324
+ "NVIDIA (benchmark baseline)",
325
+ "Luminous Computing"
326
+ ]
327
+ },
328
+ "sources": [
329
+ {
330
+ "id": "S9",
331
+ "source_type": "web",
332
+ "title": "Energy Efficiency Comparison: Photonic vs GPU",
333
+ "url": "https://example.com/photonic-gpu-efficiency",
334
+ "accessed_date": "2025-01-13"
335
+ }
336
+ ]
337
+ }
338
+ ]
339
+ ```
340
+
341
+ ---
342
+
343
+ ## Step 4: Compile Report
344
+
345
+ ### Request
346
+
347
+ ```json
348
+ {
349
+ "method": "tools/call",
350
+ "params": {
351
+ "name": "sequential_research_compile",
352
+ "arguments": {
353
+ "plan": { /* full plan from step 1 */ },
354
+ "raw_results": [ /* array from step 3 */ ],
355
+ "include_sources": true,
356
+ "include_methodology": true,
357
+ "citation_style": "inline"
358
+ }
359
+ }
360
+ }
361
+ ```
362
+
363
+ ### Response (markdown_report excerpt)
364
+
365
+ ```markdown
366
+ # Research Report: photonic computing for neural network inference acceleration
367
+
368
+ ## Executive Summary
369
+
370
+ This report presents research findings on **photonic computing for neural network inference acceleration**. The analysis covers 6 research queries across 5 sections. Key topics include: Overview, Current State, Comparison, Types and Categories.
371
+
372
+ ## Methodology
373
+
374
+ This report was compiled using a sequential research approach:
375
+ 1. Research queries were generated based on the topic and focus areas
376
+ 2. Each query was executed to gather relevant information
377
+ 3. Results were compiled and organized into thematic sections
378
+ 4. Sources were consolidated and citations were verified
379
+
380
+ ## Overview
381
+
382
+ Photonic computing for neural network inference uses optical signals to perform matrix operations, achieving orders of magnitude lower latency and power consumption compared to electronic processors. [1], [2]
383
+
384
+ **Key Characteristics:**
385
+ - Parallel matrix multiplication via light interference
386
+ - Near-zero energy for passive optical operations
387
+ - Wavelength division multiplexing for high throughput
388
+ - Femtosecond-scale operation speed
389
+
390
+ **Examples:**
391
+ - Lightmatter Envise photonic chip
392
+ - Intel Silicon Photonics accelerator
393
+ - MIT photonic neural network prototype
394
+
395
+ ## Current State
396
+
397
+ Photonic computing for AI inference is in early commercialization stage, with several startups shipping development kits and major tech companies investing in R&D. [4], [5]
398
+
399
+ **Recent Developments:**
400
+ - Lightmatter raised $154M Series C (2023)
401
+ - Intel demonstrated 100Gbps silicon photonics (2024)
402
+ - NVIDIA announced photonic interconnect research program
403
+
404
+ **Key Players:**
405
+ - Lightmatter (Boston)
406
+ - Luminous Computing (California)
407
+ - Intel Silicon Photonics
408
+ - Ayar Labs
409
+ - MIT Photonics Research Lab
410
+
411
+ ## Comparison
412
+
413
+ **Similarities:**
414
+ - Both photonic and electronic accelerators target matrix operations
415
+ - Both require custom software stacks
416
+ - Both face manufacturing scaling challenges
417
+
418
+ **Differences:**
419
+ - Photonic: passive operations consume near-zero energy
420
+ - Electronic: mature ecosystem and tooling
421
+ - Photonic: fundamentally analog computation
422
+ - Electronic: digital precision but higher power
423
+
424
+ **Recommendation:** Photonic accelerators are best suited for inference workloads with high matrix dimensions where latency and power are critical. Electronic accelerators remain preferred for training and general-purpose computing. [6]
425
+
426
+ ---
427
+
428
+ ### References
429
+
430
+ - **[1]**: US11544545B2 - Photonic neural network system <https://patents.google.com/patent/US11544545B2>
431
+ - **[2]**: US20230004850A1 - Optical inference accelerator <https://patents.google.com/patent/US20230004850A1>
432
+ - **[3]**: Photonic Computing Explained - MIT Technology Review <https://technologyreview.com/photonic-computing-explained> (web)
433
+ - **[4]**: US11675187B2 - Scalable photonic computing architecture <https://patents.google.com/patent/US11675187B2>
434
+ - **[5]**: Lightmatter Series C Announcement <https://lightmatter.co/news/series-c> (web)
435
+ - **[6]**: Photonic vs Electronic AI Accelerators - IEEE Spectrum <https://spectrum.ieee.org/photonic-vs-electronic-ai> (web)
436
+ - **[7]**: US11550101B2 - Silicon photonic integration method <https://patents.google.com/patent/US11550101B2>
437
+ - **[8]**: US11385449B2 - Optical matrix multiplication apparatus <https://patents.google.com/patent/US11385449B2>
438
+ - **[9]**: Energy Efficiency Comparison: Photonic vs GPU <https://example.com/photonic-gpu-efficiency> (web)
439
+ ```
440
+
441
+ ---
442
+
443
+ ## Step 5: Save Outputs
444
+
445
+ ```bash
446
+ # Create output directory
447
+ mkdir -p research/photonic-computing-inference
448
+
449
+ # Save files (in Claude Code, use Write tool)
450
+ # research/photonic-computing-inference/report.md <- markdown_report
451
+ # research/photonic-computing-inference/sources.json <- sources array
452
+ # research/photonic-computing-inference/plan.json <- original plan
453
+ # research/photonic-computing-inference/raw_results.json <- normalized results
454
+ ```
455
+
456
+ ---
457
+
458
+ ## Key Takeaways
459
+
460
+ 1. **Plan first** — The structured plan ensures comprehensive coverage
461
+ 2. **Parallel execution** — Queries in the same execution group run concurrently
462
+ 3. **Normalize early** — Convert to standard schema immediately after each query
463
+ 4. **Stable citations** — stable `S#` source IDs in the normalized results map to numbered `[#]` citations in the compiled report, enabling downstream claim-mining
464
+ 5. **Save everything** — Raw results allow re-processing without re-querying
package/package.json ADDED
@@ -0,0 +1,38 @@
1
+ {
2
+ "name": "mcp-sequential-research",
3
+ "version": "1.0.0",
4
+ "description": "MCP server for sequential research planning and compilation",
5
+ "type": "module",
6
+ "main": "dist/index.js",
7
+ "types": "dist/index.d.ts",
8
+ "bin": {
9
+ "mcp-sequential-research": "./dist/index.js"
10
+ },
11
+ "scripts": {
12
+ "build": "tsc",
13
+ "dev": "tsx src/index.ts",
14
+ "lint": "eslint src --ext .ts",
15
+ "test": "echo \"Tests not yet implemented\" && exit 0"
16
+ },
17
+ "keywords": [
18
+ "mcp",
19
+ "model-context-protocol",
20
+ "research",
21
+ "sequential-thinking"
22
+ ],
23
+ "author": "",
24
+ "license": "MIT",
25
+ "dependencies": {
26
+ "@modelcontextprotocol/sdk": "^1.0.0",
27
+ "zod": "^3.23.0"
28
+ },
29
+ "devDependencies": {
30
+ "@types/node": "^20.11.0",
31
+ "eslint": "^8.57.0",
32
+ "tsx": "^4.7.0",
33
+ "typescript": "^5.3.0"
34
+ },
35
+ "engines": {
36
+ "node": ">=18.0.0"
37
+ }
38
+ }