@gonzih/research-rabbit 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41) hide show
  1. package/LICENSE +21 -0
  2. package/README.md +330 -0
  3. package/SKILL.md +109 -0
  4. package/dist/analysis.d.ts +12 -0
  5. package/dist/analysis.d.ts.map +1 -0
  6. package/dist/analysis.js +217 -0
  7. package/dist/analysis.js.map +1 -0
  8. package/dist/citations.d.ts +3 -0
  9. package/dist/citations.d.ts.map +1 -0
  10. package/dist/citations.js +52 -0
  11. package/dist/citations.js.map +1 -0
  12. package/dist/index.d.ts +3 -0
  13. package/dist/index.d.ts.map +1 -0
  14. package/dist/index.js +187 -0
  15. package/dist/index.js.map +1 -0
  16. package/dist/search.d.ts +3 -0
  17. package/dist/search.d.ts.map +1 -0
  18. package/dist/search.js +61 -0
  19. package/dist/search.js.map +1 -0
  20. package/dist/sources/arxiv.d.ts +3 -0
  21. package/dist/sources/arxiv.d.ts.map +1 -0
  22. package/dist/sources/arxiv.js +51 -0
  23. package/dist/sources/arxiv.js.map +1 -0
  24. package/dist/sources/openalex.d.ts +3 -0
  25. package/dist/sources/openalex.d.ts.map +1 -0
  26. package/dist/sources/openalex.js +47 -0
  27. package/dist/sources/openalex.js.map +1 -0
  28. package/dist/sources/pubmed.d.ts +3 -0
  29. package/dist/sources/pubmed.d.ts.map +1 -0
  30. package/dist/sources/pubmed.js +68 -0
  31. package/dist/sources/pubmed.js.map +1 -0
  32. package/dist/sources/semantic_scholar.d.ts +3 -0
  33. package/dist/sources/semantic_scholar.d.ts.map +1 -0
  34. package/dist/sources/semantic_scholar.js +38 -0
  35. package/dist/sources/semantic_scholar.js.map +1 -0
  36. package/dist/types.d.ts +51 -0
  37. package/dist/types.d.ts.map +1 -0
  38. package/dist/types.js +2 -0
  39. package/dist/types.js.map +1 -0
  40. package/llms.txt +18 -0
  41. package/package.json +37 -0
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 Maksim Soltan
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,330 @@
1
+ # research-rabbit
2
+
3
+ An MCP server that helps students do real academic research. It searches ArXiv, Semantic Scholar, OpenAlex, and PubMed simultaneously, analyzes the literature with Claude, finds research gaps, and builds argument scaffolds — all grounded in real papers with verifiable URLs.
4
+
5
+ No hallucinated citations. No made-up abstracts. Real sources, real analysis.
6
+
7
+ ## Quick Start
8
+
9
+ ### Install and build
10
+
11
+ ```bash
12
+ npm install
13
+ npm run build
14
+ ```
15
+
16
+ ### Configure with Claude Desktop
17
+
18
+ Add to your `claude_desktop_config.json`:
19
+
20
+ ```json
21
+ {
22
+ "mcpServers": {
23
+ "research-rabbit": {
24
+ "command": "node",
25
+ "args": ["/path/to/research-rabbit/dist/index.js"],
26
+ "env": {
27
+ "ANTHROPIC_API_KEY": "your-api-key-here"
28
+ }
29
+ }
30
+ }
31
+ }
32
+ ```
33
+
34
+ ### Configure with Cursor
35
+
36
+ Add to your Cursor MCP settings:
37
+
38
+ ```json
39
+ {
40
+ "research-rabbit": {
41
+ "command": "node",
42
+ "args": ["/path/to/research-rabbit/dist/index.js"],
43
+ "env": {
44
+ "ANTHROPIC_API_KEY": "your-api-key-here"
45
+ }
46
+ }
47
+ }
48
+ ```
49
+
50
+ ### Run directly
51
+
52
+ ```bash
53
+ ANTHROPIC_API_KEY=your-key node dist/index.js
54
+ ```
55
+
56
+ ## Tools Reference
57
+
58
+ ### `search_papers`
59
+
60
+ Search multiple academic databases simultaneously and return real paper metadata.
61
+
62
+ **Parameters:**
63
+ - `query` (string, required): Search query. Be specific — include populations, methods, outcomes.
64
+ - `sources` (array, optional): Which databases to search. Options: `"arxiv"`, `"semantic_scholar"`, `"openalex"`, `"pubmed"`. Default: all four.
65
+ - `limit` (number, optional): Papers to return. Range: 1–25. Default: 10.
66
+ - `yearFrom` (number, optional): Earliest publication year.
67
+
68
+ **Example:**
69
+ ```json
70
+ {
71
+ "query": "social media depression teenagers longitudinal",
72
+ "sources": ["semantic_scholar", "pubmed"],
73
+ "limit": 10,
74
+ "yearFrom": 2018
75
+ }
76
+ ```
77
+
78
+ **Returns:** Array of Paper objects with `id`, `source`, `title`, `authors`, `year`, `abstract`, `citationCount`, `doi`, `url`, `openAccessPdfUrl`.
79
+
80
+ ---
81
+
82
+ ### `summarize_paper`
83
+
84
+ Generate a structured summary of a paper from its abstract using Claude. Identifies the main claim, methodology, key evidence, and limitations — and assesses how relevant it is to the student's topic.
85
+
86
+ **Parameters:**
87
+ - `paperId` (string): Paper ID from `search_papers` results.
88
+ - `title` (string): Paper title.
89
+ - `abstract` (string): Full abstract text.
90
+ - `topic` (string): The student's research topic (used to assess relevance).
91
+
92
+ **Example:**
93
+ ```json
94
+ {
95
+ "paperId": "pubmed:38293847",
96
+ "title": "Instagram use and adolescent depression: a longitudinal study",
97
+ "abstract": "Background: ...",
98
+ "topic": "mental health effects of social media on teenagers"
99
+ }
100
+ ```
101
+
102
+ **Returns:** `PaperSummary` with `mainClaim`, `methodology`, `keyEvidence[]`, `limitations[]`, `relevanceToTopic`.
103
+
104
+ ---
105
+
106
+ ### `map_literature`
107
+
108
+ Analyze a set of papers to produce a structured map of the field. Identifies what researchers agree on, where they disagree, methodological tensions, and how thinking has evolved over time.
109
+
110
+ **Parameters:**
111
+ - `papers` (array, required): 2–20 Paper objects from `search_papers`.
112
+ - `topic` (string, required): The research topic.
113
+
114
+ **Returns:** `LiteratureMap` with `consensus[]`, `debates[]`, `methodologicalTensions[]`, `temporalTrends[]`. Each entry cites specific papers using [1], [2] notation.
115
+
116
+ ---
117
+
118
+ ### `find_gaps`
119
+
120
+ Identify what the literature is NOT studying — where a student's original contribution can live. Categorizes gaps by type and marks confidence levels.
121
+
122
+ **Parameters:**
123
+ - `papers` (array, required): 2–20 Paper objects.
124
+ - `topic` (string, required): The research topic.
125
+
126
+ **Returns:**
127
+ - `gaps[]`: Each gap has `description`, `type` (population/temporal/methodological/geographic/theoretical/other), `confidence` (high/medium/low), and `rationale`.
128
+ - `originalContributionAngles[]`: Specific suggestions for what a student could add.
129
+
130
+ ---
131
+
132
+ ### `build_argument`
133
+
134
+ Build a structured argument scaffold for a research paper. Maps available evidence to supporting arguments, anticipates counterarguments with rebuttals, and identifies missing evidence the student still needs.
135
+
136
+ **Parameters:**
137
+ - `topic` (string): The research topic.
138
+ - `thesis` (string): The student's thesis statement.
139
+ - `papers` (array, required): 1–20 Paper objects to draw evidence from.
140
+
141
+ **Returns:**
142
+ - `argumentScaffold`: `thesisStatement`, `supportingArguments[]` (each with `claim`, `evidence[]`, `paperIds[]`), `counterarguments[]` (each with `claim`, `rebuttal`, `paperIds[]`), `suggestedStructure[]`.
143
+ - `missingEvidence[]`: What the student still needs to find.
144
+
145
+ ---
146
+
147
+ ### `format_citation`
148
+
149
+ Format a paper in APA, MLA, or Chicago style using real metadata. Only formats papers returned from `search_papers` — never invents citations.
150
+
151
+ **Parameters:**
152
+ - `paper`: A Paper object from `search_papers`.
153
+ - `style`: `"apa"`, `"mla"`, or `"chicago"`.
154
+
155
+ **Returns:** Formatted citation string.
156
+
157
+ **Example output (APA):**
158
+ ```
159
+ Twenge, J., Haidt, J. (2019). This is our chance to stop social media from harming teen girls' mental health. Semantic Scholar. https://www.semanticscholar.org/paper/abc123
160
+ ```
161
+
162
+ ---
163
+
164
+ ### `search_and_summarize`
165
+
166
+ All-in-one research assistant. Searches all databases, summarizes the top papers, maps the literature, and finds gaps — in a single tool call. Best for getting oriented on a new topic quickly.
167
+
168
+ **Parameters:**
169
+ - `query` (string, required): Research topic or question.
170
+ - `depth` (`"quick"` | `"thorough"`, optional): `"quick"` searches 5 papers; `"thorough"` searches 10 with full gap analysis. Default: `"quick"`.
171
+
172
+ **Returns:** Combined object with `papers[]`, `summaries[]`, `literatureMap`, `gaps`.
173
+
174
+ ---
175
+
176
+ ## Research Methodology Primer
177
+
178
+ ### What is a literature review?
179
+
180
+ A literature review is a systematic survey of what has already been written about your topic. It does three things:
181
+
182
+ 1. **Maps the terrain** — what has been studied, by whom, with what methods
183
+ 2. **Shows the state of knowledge** — what is settled, what is debated
184
+ 3. **Identifies the gap** — what hasn't been studied yet, where your work fits
185
+
186
+ A strong literature review is not a list of summaries. It is a synthesis that builds a case for why your research question matters and hasn't been answered yet.
187
+
188
+ ### How to find research gaps
189
+
190
+ A research gap is a question the literature hasn't answered. Gaps come in several types:
191
+
192
+ - **Population gaps**: Studies have only examined one demographic group (e.g., only adults, only Americans)
193
+ - **Temporal gaps**: The phenomenon hasn't been studied recently, or long-term effects are unknown
194
+ - **Methodological gaps**: Most studies use surveys; no one has done qualitative interviews, or vice versa
195
+ - **Geographic gaps**: Findings from Western countries may not apply elsewhere
196
+ - **Theoretical gaps**: Existing frameworks don't explain a phenomenon well
197
+ - **Interaction gaps**: Two variables have been studied separately but never together
198
+
199
+ Use `find_gaps` to identify these automatically, then verify by checking whether the gap is actually present in the literature.
200
+
201
+ ### How to build a research argument
202
+
203
+ A research argument has four parts:
204
+
205
+ 1. **Thesis**: Your central claim (what you are arguing is true)
206
+ 2. **Warrant**: Why the claim matters (why anyone should care)
207
+ 3. **Evidence**: Papers that support your claim
208
+ 4. **Counterargument**: The strongest objection to your thesis, and your response
209
+
210
+ Use `build_argument` to generate a scaffold, then fill it in with your own analysis. The tool will tell you what evidence is missing so you know what to search for next.
211
+
212
+ ---
213
+
214
+ ## Example Workflow: Mental Health and Social Media
215
+
216
+ ### Step 1: Get oriented
217
+
218
+ ```
219
+ search_and_summarize(
220
+ query="social media mental health adolescents depression anxiety",
221
+ depth="thorough"
222
+ )
223
+ ```
224
+
225
+ This returns 10 papers, summarizes each, maps the literature, and identifies gaps. You now know the main debates (correlational vs. causal evidence, passive vs. active use) and can see which populations are understudied.
226
+
227
+ ### Step 2: Go deeper on a specific angle
228
+
229
+ The gap analysis suggests that "effects on boys vs. girls" is understudied. Search specifically:
230
+
231
+ ```
232
+ search_papers(
233
+ query="social media depression gender differences adolescents boys girls",
234
+ sources=["pubmed", "semantic_scholar"],
235
+ limit=10,
236
+ yearFrom=2015
237
+ )
238
+ ```
239
+
240
+ ### Step 3: Summarize key papers
241
+
242
+ For each paper with a substantial abstract:
243
+
244
+ ```
245
+ summarize_paper(
246
+ paperId="pubmed:38293847",
247
+ title="...",
248
+ abstract="...",
249
+ topic="gender differences in social media effects on adolescent mental health"
250
+ )
251
+ ```
252
+
253
+ ### Step 4: Map the gender-specific literature
254
+
255
+ ```
256
+ map_literature(
257
+ papers=[...the papers from Step 2...],
258
+ topic="gender differences in social media and adolescent mental health"
259
+ )
260
+ ```
261
+
262
+ ### Step 5: Draft your thesis and build an argument
263
+
264
+ ```
265
+ build_argument(
266
+ topic="social media and adolescent mental health",
267
+ thesis="Passive consumption of Instagram harms girls' self-esteem more than boys' due to appearance-related social comparison, while active social use affects both genders equally",
268
+ papers=[...all collected papers...]
269
+ )
270
+ ```
271
+
272
+ The output shows which papers support each sub-claim and which evidence is still missing (e.g., "experimental studies manipulating active vs. passive use by gender").
273
+
274
+ ### Step 6: Format citations for your reference list
275
+
276
+ ```
277
+ format_citation(paper={...}, style="apa")
278
+ ```
279
+
280
+ ---
281
+
282
+ ## Responsible Use
283
+
284
+ ### What this tool guarantees
285
+ - All papers are fetched from real academic APIs in real time
286
+ - Every paper includes a verifiable URL
287
+ - Gap confidence levels are explicit (high/medium/low)
288
+ - Analysis is labeled as AI interpretation, not ground truth
289
+
290
+ ### What you should always do
291
+ - Click the `url` of any paper you cite to verify it exists and says what the summary claims
292
+ - For important arguments, read the full paper, not just the abstract
293
+ - Cross-check citations against official style guides before final submission
294
+ - Treat `confidence: "low"` gaps as hypotheses to investigate, not established facts
295
+
296
+ ### Academic integrity
297
+ This tool helps you find and analyze real literature faster. It does not write your paper for you, and it never should. The synthesis, argument, and original analysis must be yours. Using AI to fabricate sources is academic dishonesty; using AI to find and understand real sources is good research practice.
298
+
299
+ ---
300
+
301
+ ## API Sources
302
+
303
+ | Database | Coverage | Best for |
304
+ |---|---|---|
305
+ | ArXiv | CS, Math, Physics, Economics preprints | STEM, cutting-edge research |
306
+ | Semantic Scholar | Cross-disciplinary, 200M+ papers | Citation counts, open access |
307
+ | OpenAlex | Cross-disciplinary, 250M+ works | Social sciences, humanities |
308
+ | PubMed | Biomedical and life sciences | Medicine, clinical research |
309
+
310
+ All APIs are free and do not require authentication for basic usage. Semantic Scholar may rate-limit heavy usage; the server handles this gracefully by returning results from other sources.
311
+
312
+ ---
313
+
314
+ ## Development
315
+
316
+ ```bash
317
+ # Install dependencies
318
+ npm install
319
+
320
+ # Run in development (no build step)
321
+ npm run dev
322
+
323
+ # Build
324
+ npm run build
325
+
326
+ # Run built version
327
+ npm start
328
+ ```
329
+
330
+ Requirements: Node.js 18+, `ANTHROPIC_API_KEY` environment variable.
package/SKILL.md ADDED
@@ -0,0 +1,109 @@
1
+ # research-rabbit Skill Guide
2
+
3
+ A guide to using research-rabbit effectively as an AI-assisted research workflow.
4
+
5
+ ## What This Skill Does
6
+
7
+ research-rabbit turns your AI assistant into a real academic research partner. It connects to live academic databases (ArXiv, Semantic Scholar, OpenAlex, PubMed) and uses Claude to intelligently analyze the literature — no hallucinated citations, no made-up papers.
8
+
9
+ ## Core Workflow Patterns
10
+
11
+ ### Pattern 1: Quick Topic Exploration
12
+
13
+ Use `search_and_summarize` with `depth: "quick"` when you want a fast overview of a topic. This runs all steps in sequence and returns papers, summaries, a literature map, and gap analysis in one shot.
14
+
15
+ ```
16
+ search_and_summarize(query="CRISPR gene editing ethics", depth="quick")
17
+ ```
18
+
19
+ ### Pattern 2: Deep Literature Review
20
+
21
+ For a thorough literature review, chain the tools:
22
+
23
+ 1. `search_papers` — retrieve papers from multiple sources
24
+ 2. `summarize_paper` — summarize each paper individually (good for diving deep into specific papers)
25
+ 3. `map_literature` — get the big picture of consensus and debates
26
+ 4. `find_gaps` — identify where your contribution can live
27
+ 5. `build_argument` — scaffold your thesis with real evidence
28
+ 6. `format_citation` — generate properly formatted citations
29
+
30
+ ### Pattern 3: Targeted Database Search
31
+
32
+ When you know which database is most relevant to your field:
33
+
34
+ - **STEM / Computer Science / Math**: use `sources: ["arxiv", "semantic_scholar"]`
35
+ - **Life Sciences / Medicine**: use `sources: ["pubmed", "openalex"]`
36
+ - **Social Sciences / Humanities**: use `sources: ["openalex", "semantic_scholar"]`
37
+ - **Everything**: omit `sources` to search all four simultaneously
38
+
39
+ ### Pattern 4: Citation Management
40
+
41
+ After running `search_papers`, pass individual paper objects to `format_citation` with your preferred style. This guarantees citations are grounded in real metadata.
42
+
43
+ ## Tips for Better Results
44
+
45
+ **Query construction**: Be specific. "social media mental health teenagers depression" will outperform "social media effects". Include key terms, populations, and outcomes you care about.
46
+
47
+ **Year filtering**: Use `yearFrom` to focus on recent literature (e.g., `yearFrom: 2020` for the last 5 years). Crucial for fast-moving fields.
48
+
49
+ **Limit tuning**: Start with `limit: 10` (default). Increase to 20–25 when you need broad coverage. Decrease to 5 for speed.
50
+
51
+ **Gap confidence**: When reading gap analysis output, prioritize `confidence: "high"` gaps — these are directly evidenced by the paper abstracts. Treat `confidence: "low"` gaps as hypotheses to investigate further.
52
+
53
+ **Iterative refinement**: Run `search_papers` with different query phrasings and merge the results before running `map_literature`. This gives a richer picture than a single search.
54
+
55
+ ## Understanding the Output
56
+
57
+ ### Paper object
58
+ Every paper returned includes:
59
+ - `id`: unique identifier with source prefix (`arxiv:`, `ss:`, `openalex:`, `pubmed:`)
60
+ - `source`: which database it came from
61
+ - `url`: always a real, verifiable URL
62
+ - `openAccessPdfUrl`: when available, a direct link to the full PDF
63
+
64
+ ### Literature Map
65
+ - `consensus`: findings most papers agree on — your background facts
66
+ - `debates`: active disagreements — your opportunity to take a side
67
+ - `methodologicalTensions`: how different research methods produce different results — useful for methodology sections
68
+ - `temporalTrends`: how the field has evolved — useful for introductions
69
+
70
+ ### Gap Analysis
71
+ - `gaps`: what the literature is missing, categorized by type (population, temporal, methodological, geographic, theoretical)
72
+ - `originalContributionAngles`: specific suggestions for where your work can add value
73
+
74
+ ### Argument Scaffold
75
+ - `thesisStatement`: a refined version of your thesis
76
+ - `supportingArguments`: each argument linked to specific papers and evidence
77
+ - `counterarguments`: anticipated objections with paper-backed rebuttals
78
+ - `suggestedStructure`: a proposed outline for your paper
79
+ - `missingEvidence`: what you still need to find — your next search queries
80
+
81
+ ## Responsible Use
82
+
83
+ - All sources are real papers from real APIs. Verify important claims by clicking the `url` links.
84
+ - Abstracts are sometimes incomplete or misleading. For important arguments, read the full paper.
85
+ - Gap analysis is Claude's interpretation of the literature, not ground truth. Use it as a starting point, not a conclusion.
86
+ - Citation formatting is best-effort. Always verify against official style guides for final submissions.
87
+ - PubMed and OpenAlex have usage policies. This tool respects their rate limits and terms of service.
88
+
89
+ ## Field-Specific Guidance
90
+
91
+ ### STEM / Engineering
92
+ - ArXiv has preprints that may not be peer-reviewed. Check publication status.
93
+ - Citation counts from Semantic Scholar are reliable for assessing paper impact.
94
+ Use `yearFrom` aggressively — 2–3 years is often sufficient for fast-moving fields.
95
+
96
+ ### Social Sciences
97
+ - OpenAlex covers more social science journals than ArXiv.
98
+ - Look for methodological debates (quantitative vs. qualitative) in the literature map.
99
+ - Geographic gaps are common — consider whether findings from one country generalize.
100
+
101
+ ### Medicine / Life Sciences
102
+ - PubMed is the authoritative source — include it always.
103
+ - Abstract structure is often standardized (Background/Methods/Results/Conclusions) — the summarizer handles this well.
104
+ - Clinical trial papers will have strong methodology sections worth examining.
105
+
106
+ ### Humanities
107
+ - Coverage is thinner than in STEM. Supplement with manual library searches.
108
+ - Use gap analysis for theoretical angles rather than empirical gaps.
109
+ - Chicago citation style is standard in most humanities disciplines.
@@ -0,0 +1,12 @@
// Type declarations for the Claude-backed analysis layer (dist/analysis.js).
// NOTE(review): appears to be tsc-generated (see analysis.d.ts.map) — prefer
// regenerating from src/analysis.ts over hand-editing.
import type { Paper, PaperSummary, LiteratureMap, Gap, ArgumentScaffold } from './types.js';
/** Summarize one paper's abstract relative to the student's research topic. */
export declare function summarizePaper(paperId: string, title: string, abstract: string, topic: string): Promise<PaperSummary>;
/** Map consensus, debates, methodological tensions, and trends across a paper set. */
export declare function mapLiterature(papers: Paper[], topic: string): Promise<LiteratureMap>;
/** Identify literature gaps plus angles for an original student contribution. */
export declare function findGaps(papers: Paper[], topic: string): Promise<{
    gaps: Gap[];
    originalContributionAngles: string[];
}>;
/** Build an evidence-grounded argument scaffold for the given thesis. */
export declare function buildArgument(topic: string, thesis: string, papers: Paper[]): Promise<{
    argumentScaffold: ArgumentScaffold;
    missingEvidence: string[];
}>;
//# sourceMappingURL=analysis.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"analysis.d.ts","sourceRoot":"","sources":["../src/analysis.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,KAAK,EAAE,YAAY,EAAE,aAAa,EAAE,GAAG,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAa5F,wBAAsB,cAAc,CAClC,OAAO,EAAE,MAAM,EACf,KAAK,EAAE,MAAM,EACb,QAAQ,EAAE,MAAM,EAChB,KAAK,EAAE,MAAM,GACZ,OAAO,CAAC,YAAY,CAAC,CAoDvB;AAED,wBAAsB,aAAa,CAAC,MAAM,EAAE,KAAK,EAAE,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC,aAAa,CAAC,CA6C1F;AAED,wBAAsB,QAAQ,CAAC,MAAM,EAAE,KAAK,EAAE,EAAE,KAAK,EAAE,MAAM,GAAG,OAAO,CAAC;IAAE,IAAI,EAAE,GAAG,EAAE,CAAC;IAAC,0BAA0B,EAAE,MAAM,EAAE,CAAA;CAAE,CAAC,CAmD7H;AAED,wBAAsB,aAAa,CACjC,KAAK,EAAE,MAAM,EACb,MAAM,EAAE,MAAM,EACd,MAAM,EAAE,KAAK,EAAE,GACd,OAAO,CAAC;IAAE,gBAAgB,EAAE,gBAAgB,CAAC;IAAC,eAAe,EAAE,MAAM,EAAE,CAAA;CAAE,CAAC,CA8D5E"}
@@ -0,0 +1,217 @@
1
+ import Anthropic from '@anthropic-ai/sdk';
// Shared Anthropic API client used by all analysis functions in this module.
// Constructed with no options, so the SDK resolves credentials itself — per the
// package README, the ANTHROPIC_API_KEY environment variable must be set.
const client = new Anthropic();
// Render an array of Paper objects as a numbered plain-text list for embedding
// in an LLM prompt. Entries are labeled [1], [2], ... so the model can cite
// papers by number.
//
// - Shows at most the first 3 authors, appending " et al." beyond that.
// - Truncates abstracts to 500 characters to bound prompt size.
// - Robustness fix: the original dereferenced p.abstract unconditionally and
//   threw for papers whose source returned no abstract; missing abstracts now
//   render as empty, and a nullish citationCount renders as "unknown" (as before).
function papersToText(papers) {
    return papers.map((p, i) => {
        const authorList = p.authors.slice(0, 3).join(', ')
            + (p.authors.length > 3 ? ' et al.' : '');
        const abs = p.abstract ?? '';
        const abstractSnippet = abs.substring(0, 500) + (abs.length > 500 ? '...' : '');
        return `[${i + 1}] "${p.title}" (${p.year}) by ${authorList}
ID: ${p.id}
Abstract: ${abstractSnippet}
Citations: ${p.citationCount ?? 'unknown'}`;
    }).join('\n\n');
}
// Summarize a single paper's abstract with Claude, relative to the student's topic.
//
// paperId/title — echoed into the result; abstract — full abstract text;
// topic — the student's research topic, used to assess relevance.
// Returns a PaperSummary. Unparseable model output degrades gracefully to a
// fallback summary; SDK/network errors propagate to the caller.
export async function summarizePaper(paperId, title, abstract, topic) {
    // Abstracts under 50 chars carry too little signal to summarize —
    // short-circuit without spending an API call.
    if (abstract.trim().length < 50) {
        return {
            paperId,
            title,
            mainClaim: 'Abstract too short to summarize',
            methodology: 'Unknown',
            keyEvidence: [],
            limitations: [],
            relevanceToTopic: 'Could not assess relevance — abstract is too brief',
        };
    }
    const response = await client.messages.create({
        model: 'claude-opus-4-6',
        max_tokens: 1024,
        messages: [{
                role: 'user',
                content: `Analyze this academic paper abstract for a student researching: "${topic}"

Title: ${title}
Abstract: ${abstract}

Respond with ONLY a JSON object (no markdown, no explanation) in this exact format:
{
"mainClaim": "the paper's central finding or argument in one sentence",
"methodology": "the research method used (e.g., randomized controlled trial, meta-analysis, survey, etc.)",
"keyEvidence": ["evidence point 1", "evidence point 2"],
"limitations": ["limitation 1", "limitation 2"],
"relevanceToTopic": "how this paper relates to the student's topic in 1-2 sentences"
}`,
            }],
    });
    // Pull the first text block from the response; other block types are ignored.
    const text = response.content.find(b => b.type === 'text')?.text ?? '';
    try {
        // Grab the outermost {...} in case the model wrapped the JSON in prose.
        const jsonMatch = text.match(/\{[\s\S]*\}/);
        if (!jsonMatch)
            throw new Error('No JSON found');
        const parsed = JSON.parse(jsonMatch[0]);
        // Robustness fix: the original spread `parsed` unvalidated, so a response
        // missing a key produced undefined fields, and stray keys could clobber
        // paperId/title. Normalize to the full PaperSummary shape instead.
        return {
            paperId,
            title,
            mainClaim: typeof parsed.mainClaim === 'string' ? parsed.mainClaim : 'Unknown',
            methodology: typeof parsed.methodology === 'string' ? parsed.methodology : 'Unknown',
            keyEvidence: Array.isArray(parsed.keyEvidence) ? parsed.keyEvidence : [],
            limitations: Array.isArray(parsed.limitations) ? parsed.limitations : [],
            relevanceToTopic: typeof parsed.relevanceToTopic === 'string' ? parsed.relevanceToTopic : '',
        };
    }
    catch {
        // Model produced no parseable JSON: surface the raw text rather than fail.
        return {
            paperId,
            title,
            mainClaim: text.substring(0, 200),
            methodology: 'Could not parse',
            keyEvidence: [],
            limitations: [],
            relevanceToTopic: 'Analysis failed — see raw text',
        };
    }
}
// Map a set of papers into a LiteratureMap: consensus, debates, methodological
// tensions, and temporal trends, each citing papers by [n] index.
//
// papers — Paper objects to analyze; topic — the student's research topic.
// Returns an empty map for an empty paper set without calling the API.
// Unparseable model output degrades to placing the raw text under `consensus`.
export async function mapLiterature(papers, topic) {
    if (papers.length === 0) {
        return { consensus: [], debates: [], methodologicalTensions: [], temporalTrends: [] };
    }
    const papersText = papersToText(papers);
    const response = await client.messages.create({
        model: 'claude-opus-4-6',
        max_tokens: 2048,
        messages: [{
                role: 'user',
                content: `You are helping a student map the academic literature on: "${topic}"

Here are ${papers.length} papers from the literature:

${papersText}

Based ONLY on these papers (do not invent findings not present in the abstracts), identify:
1. Areas of CONSENSUS — what do most papers agree on?
2. Active DEBATES — where do papers disagree or conflict?
3. METHODOLOGICAL TENSIONS — conflicts between research approaches (e.g., experimental vs. observational)?
4. TEMPORAL TRENDS — how has thinking evolved over time?

Respond with ONLY a JSON object:
{
"consensus": ["finding 1 with paper reference", "finding 2"],
"debates": ["debate 1 with conflicting evidence", "debate 2"],
"methodologicalTensions": ["tension 1", "tension 2"],
"temporalTrends": ["trend 1", "trend 2"]
}

Be specific and cite paper numbers like [1], [2]. If insufficient data, say so briefly.`,
            }],
    });
    const text = response.content.find(b => b.type === 'text')?.text ?? '';
    try {
        const jsonMatch = text.match(/\{[\s\S]*\}/);
        if (!jsonMatch)
            throw new Error('No JSON found');
        const parsed = JSON.parse(jsonMatch[0]);
        // Robustness fix: the original returned the parsed object unvalidated, so
        // a response missing a key left undefined fields in the LiteratureMap.
        // Normalize every field to an array.
        return {
            consensus: Array.isArray(parsed.consensus) ? parsed.consensus : [],
            debates: Array.isArray(parsed.debates) ? parsed.debates : [],
            methodologicalTensions: Array.isArray(parsed.methodologicalTensions) ? parsed.methodologicalTensions : [],
            temporalTrends: Array.isArray(parsed.temporalTrends) ? parsed.temporalTrends : [],
        };
    }
    catch {
        // No parseable JSON: keep the raw model text visible to the student.
        return { consensus: [text], debates: [], methodologicalTensions: [], temporalTrends: [] };
    }
}
// Identify gaps in the literature and angles for an original student
// contribution, based only on the provided papers' abstracts.
//
// papers — Paper objects to analyze; topic — the student's research topic.
// Returns { gaps, originalContributionAngles }; empty for an empty paper set
// without calling the API. Unparseable model output degrades to a single
// low-confidence gap carrying the raw text.
export async function findGaps(papers, topic) {
    if (papers.length === 0) {
        return { gaps: [], originalContributionAngles: [] };
    }
    const papersText = papersToText(papers);
    const response = await client.messages.create({
        model: 'claude-opus-4-6',
        max_tokens: 2048,
        messages: [{
                role: 'user',
                content: `You are helping a student find ORIGINAL RESEARCH ANGLES for a paper on: "${topic}"

Here are ${papers.length} papers from the literature:

${papersText}

Based on what these papers DO and DON'T cover, identify:
1. Genuine GAPS in the literature (populations unstudied, time periods missing, methods not tried, etc.)
2. Angles for ORIGINAL STUDENT CONTRIBUTION (what could a student add that isn't already covered?)

IMPORTANT: Mark speculative gaps with confidence "low". Only mark "high" confidence for gaps clearly evident from the abstracts.

Respond with ONLY a JSON object:
{
"gaps": [
{
"description": "clear description of what's missing",
"type": "population|temporal|methodological|geographic|theoretical|other",
"confidence": "high|medium|low",
"rationale": "why this is a gap based on the papers reviewed"
}
],
"originalContributionAngles": [
"specific angle a student could take",
"another angle"
]
}`,
            }],
    });
    const text = response.content.find(b => b.type === 'text')?.text ?? '';
    try {
        const jsonMatch = text.match(/\{[\s\S]*\}/);
        if (!jsonMatch)
            throw new Error('No JSON found');
        const parsed = JSON.parse(jsonMatch[0]);
        // Robustness fix: the original returned the parsed object unvalidated;
        // normalize both fields to arrays so consumers never see undefined.
        return {
            gaps: Array.isArray(parsed.gaps) ? parsed.gaps : [],
            originalContributionAngles: Array.isArray(parsed.originalContributionAngles) ? parsed.originalContributionAngles : [],
        };
    }
    catch {
        // No parseable JSON: surface the raw text as a low-confidence gap.
        return { gaps: [{ description: text, type: 'other', confidence: 'low', rationale: 'Parse error' }], originalContributionAngles: [] };
    }
}
// Build a structured argument scaffold for the student's thesis, grounding
// supporting arguments, counterarguments, and rebuttals in the provided papers.
//
// topic — the research topic; thesis — the student's thesis statement;
// papers — Paper objects to draw evidence from.
// Returns { argumentScaffold, missingEvidence }. Unparseable model output
// degrades to an empty scaffold echoing the original thesis.
export async function buildArgument(topic, thesis, papers) {
    // Consistency fix: like mapLiterature/findGaps, short-circuit on an empty
    // paper set instead of spending an API call on a prompt with no evidence.
    if (papers.length === 0) {
        return {
            argumentScaffold: {
                thesisStatement: thesis,
                supportingArguments: [],
                counterarguments: [],
                suggestedStructure: [],
            },
            missingEvidence: ['No papers provided — run search_papers first'],
        };
    }
    const papersText = papersToText(papers);
    const response = await client.messages.create({
        model: 'claude-opus-4-6',
        max_tokens: 3000,
        messages: [{
                role: 'user',
                content: `You are helping a student build a research argument for their paper.

Topic: "${topic}"
Student's Thesis: "${thesis}"

Available papers:
${papersText}

Create a structured argument scaffold using ONLY the papers provided. Do not invent citations.

Respond with ONLY a JSON object:
{
"argumentScaffold": {
"thesisStatement": "refined version of the student's thesis",
"supportingArguments": [
{
"claim": "supporting argument 1",
"evidence": ["specific evidence from paper X", "evidence from paper Y"],
"paperIds": ["arxiv:...", "ss:..."]
}
],
"counterarguments": [
{
"claim": "likely counterargument",
"rebuttal": "how to respond using the literature",
"paperIds": ["paper id that supports rebuttal"]
}
],
"suggestedStructure": ["Introduction: ...", "Section 2: ...", "Conclusion: ..."]
},
"missingEvidence": [
"type of evidence still needed that wasn't found in the provided papers"
]
}`,
            }],
    });
    const text = response.content.find(b => b.type === 'text')?.text ?? '';
    try {
        const jsonMatch = text.match(/\{[\s\S]*\}/);
        if (!jsonMatch)
            throw new Error('No JSON found');
        const parsed = JSON.parse(jsonMatch[0]);
        // Robustness fix: the original returned the parsed object unvalidated, so
        // a response missing argumentScaffold (or any nested key) produced
        // undefined fields. Normalize to the documented shape.
        const scaffold = parsed.argumentScaffold ?? {};
        return {
            argumentScaffold: {
                thesisStatement: typeof scaffold.thesisStatement === 'string' ? scaffold.thesisStatement : thesis,
                supportingArguments: Array.isArray(scaffold.supportingArguments) ? scaffold.supportingArguments : [],
                counterarguments: Array.isArray(scaffold.counterarguments) ? scaffold.counterarguments : [],
                suggestedStructure: Array.isArray(scaffold.suggestedStructure) ? scaffold.suggestedStructure : [],
            },
            missingEvidence: Array.isArray(parsed.missingEvidence) ? parsed.missingEvidence : [],
        };
    }
    catch {
        // No parseable JSON: return an empty scaffold with the original thesis.
        return {
            argumentScaffold: {
                thesisStatement: thesis,
                supportingArguments: [],
                counterarguments: [],
                suggestedStructure: ['Could not generate structure'],
            },
            missingEvidence: ['Analysis failed'],
        };
    }
}
217
+ //# sourceMappingURL=analysis.js.map