scientify 1.13.6 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100) hide show
  1. package/README.en.md +350 -0
  2. package/README.md +148 -358
  3. package/dist/index.d.ts +8 -2
  4. package/dist/index.d.ts.map +1 -1
  5. package/dist/index.js +131 -122
  6. package/dist/index.js.map +1 -1
  7. package/dist/src/cli/research.d.ts +1 -6
  8. package/dist/src/cli/research.d.ts.map +1 -1
  9. package/dist/src/cli/research.js +227 -123
  10. package/dist/src/cli/research.js.map +1 -1
  11. package/dist/src/commands/metabolism-status.d.ts +3 -3
  12. package/dist/src/commands/metabolism-status.d.ts.map +1 -1
  13. package/dist/src/commands/metabolism-status.js +72 -75
  14. package/dist/src/commands/metabolism-status.js.map +1 -1
  15. package/dist/src/commands.d.ts +1 -1
  16. package/dist/src/commands.d.ts.map +1 -1
  17. package/dist/src/commands.js +0 -55
  18. package/dist/src/commands.js.map +1 -1
  19. package/dist/src/hooks/cron-skill-inject.d.ts +6 -7
  20. package/dist/src/hooks/cron-skill-inject.d.ts.map +1 -1
  21. package/dist/src/hooks/cron-skill-inject.js +6 -15
  22. package/dist/src/hooks/cron-skill-inject.js.map +1 -1
  23. package/dist/src/hooks/research-mode.d.ts +1 -1
  24. package/dist/src/hooks/research-mode.d.ts.map +1 -1
  25. package/dist/src/hooks/research-mode.js +24 -101
  26. package/dist/src/hooks/research-mode.js.map +1 -1
  27. package/dist/src/hooks/scientify-signature.d.ts +1 -1
  28. package/dist/src/hooks/scientify-signature.d.ts.map +1 -1
  29. package/dist/src/hooks/scientify-signature.js +2 -5
  30. package/dist/src/hooks/scientify-signature.js.map +1 -1
  31. package/dist/src/knowledge-state/render.d.ts +1 -9
  32. package/dist/src/knowledge-state/render.d.ts.map +1 -1
  33. package/dist/src/knowledge-state/render.js +33 -187
  34. package/dist/src/knowledge-state/render.js.map +1 -1
  35. package/dist/src/knowledge-state/store.d.ts.map +1 -1
  36. package/dist/src/knowledge-state/store.js +65 -1100
  37. package/dist/src/knowledge-state/store.js.map +1 -1
  38. package/dist/src/knowledge-state/types.d.ts +0 -76
  39. package/dist/src/knowledge-state/types.d.ts.map +1 -1
  40. package/dist/src/literature/subscription-state.d.ts +0 -2
  41. package/dist/src/literature/subscription-state.d.ts.map +1 -1
  42. package/dist/src/literature/subscription-state.js +7 -1375
  43. package/dist/src/literature/subscription-state.js.map +1 -1
  44. package/dist/src/research-subscriptions/constants.d.ts +1 -1
  45. package/dist/src/research-subscriptions/constants.js +1 -1
  46. package/dist/src/research-subscriptions/cron-client.d.ts +1 -1
  47. package/dist/src/research-subscriptions/cron-client.d.ts.map +1 -1
  48. package/dist/src/research-subscriptions/delivery.d.ts +1 -1
  49. package/dist/src/research-subscriptions/delivery.d.ts.map +1 -1
  50. package/dist/src/research-subscriptions/handlers.d.ts +1 -1
  51. package/dist/src/research-subscriptions/handlers.d.ts.map +1 -1
  52. package/dist/src/research-subscriptions/handlers.js +10 -20
  53. package/dist/src/research-subscriptions/handlers.js.map +1 -1
  54. package/dist/src/research-subscriptions/parse.d.ts.map +1 -1
  55. package/dist/src/research-subscriptions/parse.js +0 -25
  56. package/dist/src/research-subscriptions/parse.js.map +1 -1
  57. package/dist/src/research-subscriptions/prompt.d.ts +1 -1
  58. package/dist/src/research-subscriptions/prompt.d.ts.map +1 -1
  59. package/dist/src/research-subscriptions/prompt.js +195 -244
  60. package/dist/src/research-subscriptions/prompt.js.map +1 -1
  61. package/dist/src/research-subscriptions/types.d.ts +1 -3
  62. package/dist/src/research-subscriptions/types.d.ts.map +1 -1
  63. package/dist/src/templates/bootstrap.d.ts.map +1 -1
  64. package/dist/src/templates/bootstrap.js +32 -19
  65. package/dist/src/templates/bootstrap.js.map +1 -1
  66. package/dist/src/tools/arxiv-download.d.ts +1 -2
  67. package/dist/src/tools/arxiv-download.d.ts.map +1 -1
  68. package/dist/src/tools/arxiv-search.d.ts +1 -2
  69. package/dist/src/tools/arxiv-search.d.ts.map +1 -1
  70. package/dist/src/tools/github-search-tool.d.ts +1 -2
  71. package/dist/src/tools/github-search-tool.d.ts.map +1 -1
  72. package/dist/src/tools/openalex-search.d.ts +1 -2
  73. package/dist/src/tools/openalex-search.d.ts.map +1 -1
  74. package/dist/src/tools/openreview-lookup.d.ts +1 -2
  75. package/dist/src/tools/openreview-lookup.d.ts.map +1 -1
  76. package/dist/src/tools/paper-browser.d.ts +1 -2
  77. package/dist/src/tools/paper-browser.d.ts.map +1 -1
  78. package/dist/src/tools/result.d.ts +3 -5
  79. package/dist/src/tools/result.d.ts.map +1 -1
  80. package/dist/src/tools/result.js +5 -7
  81. package/dist/src/tools/result.js.map +1 -1
  82. package/dist/src/tools/scientify-cron.d.ts +4 -11
  83. package/dist/src/tools/scientify-cron.d.ts.map +1 -1
  84. package/dist/src/tools/scientify-cron.js +19 -524
  85. package/dist/src/tools/scientify-cron.js.map +1 -1
  86. package/dist/src/tools/scientify-literature-state.d.ts +1 -76
  87. package/dist/src/tools/scientify-literature-state.d.ts.map +1 -1
  88. package/dist/src/tools/scientify-literature-state.js +46 -363
  89. package/dist/src/tools/scientify-literature-state.js.map +1 -1
  90. package/dist/src/tools/unpaywall-download.d.ts +1 -2
  91. package/dist/src/tools/unpaywall-download.d.ts.map +1 -1
  92. package/dist/src/types.d.ts +16 -0
  93. package/dist/src/types.d.ts.map +1 -0
  94. package/dist/src/types.js +2 -0
  95. package/dist/src/types.js.map +1 -0
  96. package/openclaw.plugin.json +4 -2
  97. package/package.json +1 -1
  98. package/skills/metabolism/SKILL.md +2 -0
  99. package/skills/research-subscription/SKILL.md +1 -29
  100. package/README.zh.md +0 -494
package/README.md CHANGED
@@ -1,277 +1,226 @@
1
- # Scientify
2
-
3
- **AI-powered research workflow automation for OpenClaw.**
4
-
5
- Scientify is an [OpenClaw](https://github.com/openclaw/openclaw) plugin that automates the full academic research pipeline — from literature survey to experiment execution — using LLM-driven sub-agents.
6
-
7
- **Website:** [scientify.tech](https://scientify.tech) | [中文文档](./README.zh.md)
1
+ <h1 align="center">Scientify</h1>
2
+ <p align="center">
3
+ <em>持续新陈代谢的 AI 科研系统</em>
4
+ </p>
5
+
6
+ <p align="center">
7
+ <a href="https://www.npmjs.com/package/scientify"><img src="https://img.shields.io/npm/v/scientify?style=for-the-badge&logo=npm&logoColor=white" alt="npm version"></a>
8
+ <a href="https://github.com/tsingyuai/scientify"><img src="https://img.shields.io/github/stars/tsingyuai/scientify?style=for-the-badge&logo=github" alt="GitHub stars"></a>
9
+ <a href="LICENSE"><img src="https://img.shields.io/badge/License-MIT-blue.svg?style=for-the-badge" alt="MIT License"></a>
10
+ <a href="https://github.com/openclaw/openclaw"><img src="https://img.shields.io/badge/OpenClaw-plugin-00FF9F?style=for-the-badge" alt="OpenClaw plugin"></a>
11
+ </p>
12
+
13
+ <p align="center">
14
+ <a href="https://scientify.tech">官网</a> · <a href="./README.en.md">English</a> · <a href="https://github.com/tsingyuai/scientify/issues">Issues</a>
15
+ </p>
8
16
 
9
17
  ---
10
18
 
11
- ## What It Does
12
-
13
- Scientify turns a single research prompt into a complete automated pipeline. Each phase runs as an independent sub-agent — the orchestrator verifies outputs between steps and passes context forward.
14
-
15
- ### Scenario 1 — End-to-End Research Pipeline
16
-
17
- > *"Research scaling laws for classical ML classifiers on Fashion-MNIST"*
18
-
19
- The **research-pipeline** orchestrator runs all 6 phases in sequence, spawning a dedicated sub-agent for each:
20
-
21
- ```mermaid
22
- flowchart LR
23
- A["Literature\nSurvey"] --> B["Deep\nAnalysis"] --> C["Implementation\nPlan"] --> D["Code\nImplementation"] --> E["Automated\nReview"] --> F["Full\nExperiment"]
24
- ```
19
+ ## 它能做什么
25
20
 
26
- <details>
27
- <summary><b>What each phase produces</b></summary>
21
+ > [!IMPORTANT]
22
+ > Scientify 不是一个"问一次答一次"的 AI 工具。它像一个真正的研究伙伴——**持续思考、持续积累、持续交付**。
28
23
 
29
- | Phase | What Happens | Output File |
30
- |:------|:-------------|:------------|
31
- | **1. Literature Survey** | Search arXiv + OpenAlex, filter, download .tex sources, cluster by direction | `survey/report.md` |
32
- | **2. Deep Analysis** | Extract formulas, map methods to code, build cross-comparison | `survey_res.md` |
33
- | **3. Implementation Plan** | Design 4-part plan — Dataset / Model / Training / Testing | `plan_res.md` |
34
- | **4. Code Implementation** | Write ML code in `uv`-isolated venv, validate with 2-epoch run | `project/run.py` |
35
- | **5. Automated Review** | Review code → fix issues → rerun → re-review (up to 3 rounds) | `iterations/judge_v*.md` |
36
- | **6. Full Experiment** | Complete training + ablation studies with final analysis | `experiment_res.md` |
24
+ ### 1. 新陈代谢:持续思考,而非一次性回答
37
25
 
38
- </details>
26
+ 现有 AI 科研工具的工作方式是**批处理**——给个问题,跑一遍 pipeline,输出报告,结束。下次再问同一个方向,从零开始。跑 10 次和跑 1 次没有本质区别。
39
27
 
40
- ---
28
+ 但人类研究者不是这样工作的。你每天在读、在跑、在想。昨天的失败改变了今天的阅读,上周的对话改变了这周的实验设计。
41
29
 
42
- ### Scenario 2 — Idea Generation
30
+ Scientify 采用**新陈代谢模式**——持续地摄入、消化、沉淀、再摄入:
43
31
 
44
- > *"Explore recent advances in protein folding and generate innovative research ideas"*
32
+ - **持续摄入**:每天自动跟进前沿论文,不需要你手动触发
33
+ - **消化沉淀**:将新知识与已有积累关联,写入持久化知识库
34
+ - **假设进化**:淘汰无效假设,进化有效路径,每一轮失败都是下一轮的养料
35
+ - **主动交付**:发现值得关注的进展后自动验证,验证通过主动推送给你
45
36
 
46
- The **idea-generation** skill surveys the field, then:
37
+ 用得越久,它研究越深入。
47
38
 
48
- 1. Generates **5 diverse research ideas** grounded in real papers
49
- 2. Scores each on novelty, feasibility, and impact
50
- 3. Selects the best and produces an **enhanced proposal** with detailed methodology
39
+ <p align="center">
40
+ <img src="docs/assets/showcase/3.png" width="50%" alt="Scientify 通过飞书主动推送研究进展">
41
+ <br>
42
+ <sub>Scientify 通过飞书主动向研究者推送最新发现,并结合知识库产生思考</sub>
43
+ </p>
51
44
 
52
- > [!TIP]
53
- > **Output:** `ideas/selected_idea.md` — a ready-to-develop research proposal.
45
+ ### 2. 端到端自主研究:做到 SOTA 级成果
54
46
 
55
- ---
47
+ 给它一个课题,它自己把研究做完,跑出性能超越外部文献水平的新算法。
56
48
 
57
- ### Scenario 3 — Standalone Literature Survey
49
+ Agent 迭代驱动:编排器持有假设和全部积累,只调度不写代码;每轮 spawn 独立子 agent 执行实现、审查、实验;每一轮失败都沉淀为下一轮的经验,假设越修正越精确,直到发现更优的方法。
58
50
 
59
- > *"Survey the latest papers on vision-language models for medical imaging"*
51
+ ### Showcase:自主发现 KV2 算法并达到领域领先性能
60
52
 
61
- Run just the survey phase when you need a structured reading list without running the full pipeline:
53
+ > **目标**:针对长上下文 LLM 推理,设计一种策略,同时降低首 token 时延和单请求通信量。
54
+ >
55
+ > Scientify 自主完成文献调研、假设生成、代码实现与消融实验验证,提出 **KV2 算法**,相较于现有研究,TTFT p95和bytes/request均有不同程度降低,性能达到 SOTA 水平。
62
56
 
63
- - Searches **arXiv** (CS/ML) and **OpenAlex** (cross-disciplinary, broader coverage)
64
- - Downloads `.tex` source files; retrieves open-access PDFs via **Unpaywall**
65
- - Clusters papers by sub-topic and extracts key findings
66
- - Generates a structured survey report
57
+ <p align="center">
58
+ <img src="docs/assets/showcase/1.png" width="80%" alt="KV2 算法实验结果">
59
+ <br>
60
+ <sub>Scientify 独立产出的学术论文,报道KV2的设计思路与结果</sub>
61
+ </p>
67
62
 
68
- > [!TIP]
69
- > **Output:** `survey/report.md` + raw papers in `papers/_downloads/`
63
+ <p align="center">
64
+ <img src="docs/assets/showcase/2.png" width="80%" alt="KV2 与现有方法对比">
65
+ <br>
66
+ <sub>KV2 与现有方法的 SOTA 对比</sub>
67
+ </p>
70
68
 
71
69
  ---
72
70
 
73
- ### Scenario 4 — Review Paper Drafting
74
-
75
- > *"Write a survey paper based on my project's research outputs"*
71
+ ## 架构
76
72
 
77
- After completing a research pipeline (or just a literature survey + deep analysis), the **write-review-paper** skill assembles a draft:
78
-
79
- - Synthesizes survey reports, analysis notes, and comparison tables
80
- - Structures the paper with Introduction, Related Work, Methods, and Discussion
81
- - Produces a publication-ready draft in Markdown
82
-
83
- > [!TIP]
84
- > **Output:** a survey/review paper draft based on all accumulated project artifacts.
85
-
86
- ---
87
-
88
- ### Advanced Scenarios — Combining OpenClaw Platform Capabilities
89
-
90
- As an OpenClaw plugin, Scientify can leverage the platform's MCP servers, browser automation, multi-session concurrency, and more to build powerful composite workflows.
91
-
92
- ---
93
-
94
- ### Scenario 5 — Literature Monitoring Bot
95
-
96
- > *"Automatically search for new diffusion model papers every day and push a digest to our Slack channel"*
97
-
98
- Combine OpenClaw's **MCP integration** (Slack / Feishu / Email) with **scheduled triggers** to build automated literature monitoring:
99
-
100
- ```mermaid
101
- flowchart LR
102
- A["Scheduled Trigger\n(cron / webhook)"] --> B["arxiv_search\n+ openalex_search"]
103
- B --> C["LLM Filtering\n+ Summary"]
104
- C --> D["Push to\nSlack / Feishu / Email"]
105
73
  ```
106
-
107
- 1. External cron job or OpenClaw webhook triggers a session periodically
108
- 2. Scientify's `arxiv_search` + `openalex_search` fetch the latest papers
109
- 3. LLM scores and filters by your research interests, generates concise summaries
110
- 4. MCP tools push the digest to Slack, Feishu, or email
111
-
112
- > [!NOTE]
113
- > **Requires:** A configured MCP server (e.g., `slack-mcp`, `feishu-mcp`). OpenClaw supports declaring MCP servers in `openclaw.json`.
114
-
115
- ---
116
-
117
- ### Scenario 6 — Download Paywalled Papers via Browser
118
-
119
- > *"Download these 5 IEEE papers using my university VPN"*
120
-
121
- Scientify's built-in `arxiv_download` and `unpaywall_download` only handle open-access papers. For paywalled content, combine with OpenClaw's **browser automation** (Playwright MCP):
122
-
123
- ```mermaid
124
- flowchart LR
125
- A["Scientify\nprovides paper URLs"] --> B["Playwright MCP\nopens browser"]
126
- B --> C["Institutional Proxy\nauto-authenticate"]
127
- C --> D["Navigate to Publisher\ndownload PDF"]
74
+ ┌─────────────────────────────────────────────────────────────┐
75
+ │ 研究者 │
76
+ │ 对话 · 投喂材料 · 判断假设 │
77
+ └──────────────┬──────────────────────────────┬───────────────┘
78
+ ↓ ↓
79
+ ┌──────────────────────────┐ ┌──────────────────────────────┐
80
+ │ Agent 层 │ │ 知识库(持久化) │
81
+ │ │ │ │
82
+ │ Heartbeat 每天定时唤醒 │←→│ _index.md │
83
+ │ Reflection 自主跨领域探索│ │ topic-*.md │
84
+ │ Pipeline 假设验证执行 │ │ hypotheses/ │
85
+ │ │ │ experiments/ │
86
+ └──────────┬───────────────┘ │ conversations/ │
87
+ ↓ │ │
88
+ ┌──────────────────────────┐ │ Markdown 文件 · Git 管理 │
89
+ │ 工具层 │ │ 完全可审计 · 你也能编辑 │
90
+ │ │──→│ │
91
+ │ arxiv_search │ └──────────────────────────────┘
92
+ │ openalex_search │
93
+ │ github_search │
94
+ │ paper_browser │
95
+ │ code_executor │
96
+ └──────────────────────────┘
128
97
  ```
129
98
 
130
- - OpenClaw launches a controlled browser via Playwright MCP server
131
- - The browser accesses publisher sites through your institutional proxy / VPN
132
- - Automatically navigates to the paper page and downloads the PDF to `papers/_downloads/`
133
- - Works with IEEE, Springer, Elsevier, ACM, and other subscription-based publishers
134
-
135
- > [!NOTE]
136
- > **Requires:** Playwright MCP server configured, and institutional network access to the papers.
137
-
138
- ---
139
-
140
- ### Scenario 7 — Multi-Topic Parallel Research
141
-
142
- > *"Research 3 directions simultaneously: LoRA fine-tuning, MoE architectures, KV-Cache optimization"*
99
+ 四个部分,各司其职:
143
100
 
144
- Leverage OpenClaw's **multi-session concurrency** (`sessions_spawn`) to run multiple research pipelines in parallel:
101
+ ### 研究者
145
102
 
146
- ```mermaid
147
- flowchart TD
148
- O["Main Agent\n(Orchestrator)"] --> A["Sub-session 1\nLoRA Fine-tuning"]
149
- O --> B["Sub-session 2\nMoE Architectures"]
150
- O --> C["Sub-session 3\nKV-Cache Optimization"]
151
- A --> D["Independent project dirs\nisolated from each other"]
152
- B --> D
153
- C --> D
154
- ```
103
+ 你是系统的一部分。通过对话注入判断、投喂材料、确认或否决假设。你的参与让新陈代谢的方向更准确,让研究假设更精确。
155
104
 
156
- - Each sub-topic runs a full pipeline with its own project directory
157
- - The main agent collects results and produces a cross-topic comparative analysis
158
- - Ideal for quickly scouting multiple directions during the topic-selection phase of a survey paper
105
+ ### Agent
159
106
 
160
- ---
107
+ 三个循环驱动新陈代谢:
161
108
 
162
- ### Scenario 8 — Interactive Paper Reading Assistant
109
+ | Agent | 做什么 | 触发方式 |
110
+ |-------|--------|---------|
111
+ | **Heartbeat** | 每天跟进前沿论文,发现关联后自主验证,验证通过主动推送给你 | 定时自动唤醒 |
112
+ | **Reflection** | 跨领域探索,将不同主题的知识关联起来,发现意想不到的联系 | Heartbeat 触发 / 研究者触发 |
113
+ | **Pipeline** | 端到端研究执行——文献调研 → 深度分析 → 实现 → 审查 → 实验 | 研究者触发 / Reflection 触发 |
163
114
 
164
- > *"Walk me through 'Attention Is All You Need' section by section, explain every formula"*
115
+ Pipeline 内部是多 Agent 迭代:编排器持有假设,spawn agent 执行实现(`implement`)、审查(`review`)、实验(`experiment`)。每轮失败沉淀为经验,假设越修正越精确。
165
116
 
166
- Combine OpenClaw's conversational interface with Scientify's `paper_browser` tool for interactive deep reading:
117
+ ### 工具层
167
118
 
168
- - `paper_browser` loads papers page-by-page, avoiding context overflow
169
- - Discuss section by section: LLM explains derivations, compares with related work, highlights contributions
170
- - Follow up on implementation details — LLM uses `github_search` to find corresponding open-source code
171
- - All analysis notes are saved to `notes/paper_{id}.md`
119
+ Agent 的手和眼:
172
120
 
173
- ---
121
+ | 工具 | 能力 |
122
+ |------|------|
123
+ | `arxiv_search` / `openalex_search` | 搜索学术论文(arXiv + 跨学科) |
124
+ | `github_search` | 搜索开源代码实现 |
125
+ | `paper_browser` | 分页精读论文,避免上下文溢出 |
126
+ | `code_executor` | 在 `uv` 隔离环境中执行实验代码 |
174
127
 
175
- ### Scenario 9 — Paper-to-Reproducible-Experiment
128
+ > Scientify 运行在 [OpenClaw](https://github.com/openclaw/openclaw) 之上,天然可调用平台的 MCP 服务器(Slack / 飞书推送)、浏览器自动化(付费文献下载)、多会话并发(多方向并行研究)等能力。
176
129
 
177
- > *"Reproduce the results from Table 2 of this paper"*
130
+ ### 知识库
178
131
 
179
- End-to-end automation: understand paper → implement code → run experiment → compare results:
132
+ 所有积累持久化为 Markdown 文件,Git 管理,每一行变化都可追溯。你和 Agent 读写的是同一组文件:
180
133
 
181
- ```mermaid
182
- flowchart LR
183
- A["paper_browser\nDeep read paper"] --> B["research-plan\nExtract experiment design"]
184
- B --> C["research-implement\nWrite code"]
185
- C --> D["research-experiment\nRun experiment"]
186
- D --> E["Compare with\npaper's Table 2"]
187
134
  ```
188
-
189
- 1. `paper_browser` reads the method and experiment sections in detail
190
- 2. `research-plan` extracts experiment config (hyperparameters, datasets, metrics)
191
- 3. `research-implement` generates code and validates in a `uv`-isolated environment
192
- 4. `research-experiment` runs the full experiment
193
- 5. LLM automatically compares your results against the paper's reported numbers
135
+ knowledge_state/
136
+ ├── _index.md # 研究全局索引
137
+ ├── topic-*.md # 按主题组织的知识沉淀
138
+ ├── hypotheses/ # 假设演化记录
139
+ ├── experiments/ # 实验结果与分析
140
+ ├── paper_notes/ # 逐篇论文深读记录
141
+ └── logs/ # 每轮新陈代谢的运行日志
142
+ ```
194
143
 
195
144
  ---
196
145
 
197
- ## Prerequisites
146
+ ## 环境要求
198
147
 
199
148
  - **Node.js** >= 18
200
- - **Python 3** + **uv** (for ML code execution)
149
+ - **Python 3** + **uv**(用于 ML 代码执行)
201
150
  - **git**
202
151
 
203
152
  ---
204
153
 
205
- ## Install OpenClaw
154
+ ## 安装 OpenClaw
206
155
 
207
156
  ```bash
208
- # Install OpenClaw globally
209
- pnpm add -g openclaw # or: npm install -g openclaw
157
+ # 全局安装 OpenClaw
158
+ pnpm add -g openclaw # 或: npm install -g openclaw
210
159
 
211
- # Run onboarding wizard (configures model provider, API key, workspace)
160
+ # 运行引导向导(配置模型提供商、API Key、工作空间)
212
161
  openclaw onboard
213
162
 
214
- # Start the gateway (runs the WebUI server)
163
+ # 启动 Gateway(WebUI 服务器)
215
164
  openclaw gateway
216
165
  ```
217
166
 
218
- After `openclaw gateway`, the WebUI is available at **http://127.0.0.1:18789/** (default port).
167
+ 启动后,WebUI 地址为 **http://127.0.0.1:18789/**(默认端口)。
219
168
 
220
- > **Proxy users:** If you have `http_proxy` set, access the WebUI with `--noproxy 127.0.0.1` or configure your browser accordingly.
169
+ > **代理用户注意:** 如果你设置了 `http_proxy`,访问 WebUI 时需加 `--noproxy 127.0.0.1`,或在浏览器中配置代理例外。
221
170
 
222
171
  ---
223
172
 
224
- ## Install Scientify
173
+ ## 安装 Scientify
225
174
 
226
- ### From npm (recommended)
175
+ ### npm 安装(推荐)
227
176
 
228
177
  ```bash
229
178
  openclaw plugins install scientify
230
179
  ```
231
180
 
232
- The plugin installs to `~/.openclaw/extensions/scientify/` and is automatically enabled.
181
+ 插件安装到 `~/.openclaw/extensions/scientify/`,自动启用。
233
182
 
234
- ### From source (development)
183
+ ### 从源码安装(开发用)
235
184
 
236
185
  ```bash
237
186
  git clone https://github.com/user/scientify.git
238
187
  cd scientify && pnpm install && pnpm build
239
188
 
240
- # Link as dev plugin
189
+ # 链接为开发插件
241
190
  openclaw plugins install -l ./
242
191
  ```
243
192
 
244
- ### Verify installation
193
+ ### 验证安装
245
194
 
246
195
  ```bash
247
196
  openclaw plugins list
248
- # Should show: scientify (enabled)
197
+ # 应显示: scientify (enabled)
249
198
  ```
250
199
 
251
- After installation, **restart the gateway** to load the plugin:
200
+ 安装后需 **重启 Gateway** 以加载插件:
252
201
 
253
202
  ```bash
254
- # Stop the running gateway (Ctrl+C), then:
203
+ # 停止运行中的 Gateway(Ctrl+C),然后:
255
204
  openclaw gateway
256
205
  ```
257
206
 
258
207
  ---
259
208
 
260
- ## Usage via WebUI
209
+ ## 通过 WebUI 使用
261
210
 
262
- ### 1. Open the WebUI
211
+ ### 1. 打开 WebUI
263
212
 
264
- Navigate to **http://127.0.0.1:18789/** in your browser.
213
+ 浏览器访问 **http://127.0.0.1:18789/**。
265
214
 
266
- ### 2. Start a research task
215
+ ### 2. 开始研究任务
267
216
 
268
- Type a research prompt in the chat. Scientify skills are auto-matched by the LLM:
217
+ 在聊天框中输入研究提示,Scientify skill 会被 LLM 自动匹配:
269
218
 
270
219
  ```
271
- Research "transformer efficiency" and generate some innovative ideas
220
+ 研究 "transformer efficiency",分析论文并生成创新想法
272
221
  ```
273
222
 
274
- Or invoke a specific skill directly with a slash command:
223
+ 或者用斜杠命令直接调用特定 skill:
275
224
 
276
225
  ```
277
226
  /research-pipeline
@@ -279,211 +228,52 @@ Or invoke a specific skill directly with a slash command:
279
228
  /idea-generation
280
229
  ```
281
230
 
282
- ### 3. Monitor sub-agent progress
231
+ ### 3. 监控子 agent 进度
283
232
 
284
- When the orchestrator spawns sub-agents, you'll see:
285
- - **Spawn notification** — "Phase 1: Literature Survey started"
286
- **Completion announcement** — automatic message when the sub-agent finishes
287
- **Progress updates** — the orchestrator verifies outputs and advances to the next phase
233
+ 编排器 spawn 子 agent 后,你会看到:
234
+ - **启动通知** — "Phase 1: Literature Survey 已启动"
235
+ - **完成通知** — agent 完成后自动发送消息
236
+ - **进度推进** — 编排器验证产出后自动进入下一阶段
288
237
 
289
- You can also check status anytime with:
238
+ 随时查看状态:
290
239
 
291
240
  ```
292
241
  /research-status
293
242
  ```
294
243
 
295
- ### 4. Manage projects
244
+ ### 4. 管理项目
296
245
 
297
246
  ```
298
- /projects # List all projects
299
- /project-switch <id> # Switch to a different project
300
- /papers # List downloaded papers
301
- /ideas # List generated ideas
302
- ```
303
-
304
- ---
305
-
306
- ## Skills
307
-
308
- ### Pipeline Skills (LLM-powered)
309
-
310
- | Skill | Slash Command | Description |
311
- |-------|---------------|-------------|
312
- | **research-pipeline** | `/research-pipeline` | Orchestrator. Spawns sub-agents for each phase, verifies outputs between steps. |
313
- | **research-collect** | `/research-collect` | Search arXiv → filter → download .tex sources → cluster → generate survey report. |
314
- | **research-survey** | `/research-survey` | Deep analysis of papers: extract formulas, map to code, produce method comparison table. |
315
- | **research-plan** | `/research-plan` | Create 4-part implementation plan (Dataset/Model/Training/Testing) from survey results. |
316
- | **research-implement** | `/research-implement` | Implement ML code from plan, run 2-epoch validation with `uv` venv isolation. |
317
- | **research-review** | `/research-review` | Review implementation. Iterates fix → rerun → review up to 3 times. |
318
- | **research-experiment** | `/research-experiment` | Full training + ablation experiments. Requires review PASS. |
319
- | **idea-generation** | `/idea-generation` | Generate 5 innovative research ideas from a topic, select and enhance the best one. |
320
-
321
- ### Standalone Skills
322
-
323
- | Skill | Description |
324
- |-------|-------------|
325
- | **write-review-paper** | Draft a review/survey paper from project research outputs. |
326
- | **research-subscription** | Create/list/remove scheduled Scientify jobs via `scientify_cron_job` (research digests or plain reminders). |
327
-
328
- ### Tools (available to LLM)
329
-
330
- | Tool | Description |
331
- |------|-------------|
332
- | `arxiv_search` | Search arXiv papers. Returns metadata (title, authors, abstract, ID). Does not download files. Supports sorting by relevance/date and date filtering. |
333
- | `arxiv_download` | Batch download papers by arXiv ID. Prefers .tex source files (PDF fallback). Requires absolute output directory path. |
334
- | `openalex_search` | Search cross-disciplinary academic papers via OpenAlex API. Returns DOI, authors, citation count, OA status. Broader coverage than arXiv. |
335
- | `openreview_lookup` | Lookup OpenReview evidence by title/ID/forum. Returns decision (if available), review rating/confidence aggregates, and concise review summaries for venue-risk analysis. |
336
- | `unpaywall_download` | Download open access PDFs by DOI via Unpaywall API. Non-OA papers are silently skipped (no failure). |
337
- | `github_search` | Search GitHub repositories. Returns repo name, description, stars, URL. Supports language filtering and sorting. |
338
- | `paper_browser` | Paginated browsing of large paper files (.tex/.md) to avoid loading thousands of lines into context. Returns specified line range with navigation info. |
339
- | `scientify_cron_job` | Manage scheduled Scientify jobs from the model (`upsert`/`list`/`remove`). Main fields: `action`, `scope`, `schedule`, `topic`, `project`, `message`, `max_papers`, `recency_days`, `candidate_pool`, `score_weights`, `sources`, `channel`, `to`, `no_deliver`, `run_now`, `job_id`. `run_now=true` also returns a `status_json` snapshot for research tasks. |
340
- | `scientify_literature_state` | Persistent incremental state for subscriptions: `prepare` dedupe context (+ memory hints), `record` pushed papers + project `knowledge_state` artifacts (including `paper_notes` deep-reading fields and full-text cleanup run logs), `feedback` lightweight preference memory, and `status` inspection with traceable logs. |
341
-
342
- ### Commands (direct, no LLM)
343
-
344
- | Command | Description |
345
- |---------|-------------|
346
- | `/research-status` | Show workspace status and active project |
347
- | `/papers` | List downloaded papers with metadata |
348
- | `/ideas` | List generated ideas |
349
- | `/projects` | List all projects |
350
- | `/project-switch <id>` | Switch active project |
351
- | `/project-delete <id>` | Delete a project |
352
- | `/research-subscribe ...` | Create/update scheduled Scientify jobs (supports `daily`, `weekly`, `every`, `at`, `cron`; options: `--channel`, `--to`, `--topic`, `--project`, `--message`, `--max-papers`, `--recency-days`, `--candidate-pool`, `--score-weights`, `--sources`, `--no-deliver`) |
353
- | `/research-subscriptions` | Show your scheduled Scientify jobs |
354
- | `/research-unsubscribe [job-id]` | Remove your scheduled Scientify jobs (or a specific job) |
355
- | `/metabolism-status` | Compatibility alias view over project `knowledge_state` summary |
356
-
357
- `/research-subscribe` examples:
358
- - `/research-subscribe daily 09:00 Asia/Shanghai` (auto-deliver to current chat sender/channel when possible)
359
- - `/research-subscribe every 2h --channel feishu --to ou_xxx`
360
- - `/research-subscribe at 2m --channel feishu --to ou_xxx`
361
- - `/research-subscribe weekly mon 09:30 --channel telegram --to 123456789`
362
- - `/research-subscribe at 2m --channel webui` (`webui`/`tui` are aliases of `last`)
363
- - `/research-subscribe daily 08:00 --topic "LLM alignment"`
364
- - `/research-subscribe daily 08:00 --topic "LLM alignment" --project llm-alignment`
365
- - `/research-subscribe daily 08:00 --topic "LLM alignment" --max-papers 5 --recency-days 30 --sources arxiv,openalex`
366
- - `/research-subscribe daily 08:00 --topic "LLM alignment" --candidate-pool 12 --score-weights relevance:45,novelty:20,authority:25,actionability:10`
367
- - `/research-subscribe at 1m --message "Time to drink coffee."`
368
- - `/research-subscribe daily 09:00 --no-deliver` (background only, no push)
369
-
370
- Behavior notes:
371
- - Scoped upsert: per sender/channel scope, creating a new subscription replaces the previous one in that scope.
372
- - Delivery aliases: `--channel webui` and `--channel tui` map to `last`; they do not require `--to`.
373
- - Reminder-safe fallback: if `topic` looks like a plain reminder (for example "remind me to sleep"), Scientify auto-routes it as a reminder message instead of literature pipeline.
374
- - One-shot topic (`at ... --topic ...`) uses focused retrieval of representative papers; recurring schedules (`daily/weekly/every/cron`) use incremental tracking mode.
375
- - Recurring incremental mode uses candidate-pool ranking before Top-K selection; if no unseen paper is found, it runs one representative fallback pass before returning empty.
376
- - Default `max_papers` is 5 unless overridden by `--max-papers`.
377
- - Built-in quality gates use soft mode by default: non-fatal quality gaps are stored as warnings (`quality_gate.severity=warn`) while the run remains `ok`; only fatal issues are downgraded to `degraded_quality`.
378
- - Default strict recall behavior uses tiered retrieval (topic-related + broader method + adjacent cross-domain) to avoid sparse core selection on broad topics.
379
- - Each run executes at most one immediate reflection follow-up and writes trace/results back to `knowledge_state` for auditability.
380
- - Hypothesis-facing response policy is gate-driven:
381
- - if `knowledge_state_summary.hypothesis_gate.accepted == 0`, Scientify returns factual cycle status only (no speculative roadmap/deep-dive suggestions)
382
- - if `accepted > 0`, Scientify includes hypothesis details in the current message by default
383
- - when channel/runtime clearly supports multi-send, it may optionally split into two consecutive pushes (brief alert + detailed hypothesis)
384
- - Lightweight preference memory is stored backend-only (keyword/source affinities) and used to rerank future pushes quietly.
385
- - Incremental dedupe + memory state is persisted under `~/.openclaw/workspace/scientify/` (`literature-state.json`, `literature-push-log.jsonl`).
386
- - Project-level research traceability is persisted under `~/.openclaw/workspace/projects/{project-id}/knowledge_state/`.
387
- - Full-text-first scheduled runs store per-paper deep-reading notes (domain/subdomains/cross-domain links/research goal/approach/design/contributions/practical insights/must-understand points/limitations/evidence anchors) under `knowledge_state/paper_notes/`.
388
- - Full-text files should be downloaded to a temporary directory and cleaned after each run; cleanup result is tracked in `knowledge_state` run logs.
389
- - Storage: subscription jobs are stored in OpenClaw cron storage; knowledge artifacts are stored in project workspace files.
390
- - Global inspect: `openclaw cron list --all --json`
391
- - Compatibility aliases: `openclaw research init/list/status/delete` and `/metabolism-status` are retained for migration, but internally use the same unified `projects/*/knowledge_state` architecture.
392
-
393
- ---
394
-
395
- ## Workspace Structure
396
-
397
- All research data is organized under `~/.openclaw/workspace/projects/`:
398
-
399
- ```
400
- projects/
401
- ├── .active # Current project ID
402
- ├── scaling-law-fashion-mnist/ # Example project
403
- │ ├── project.json # Metadata
404
- │ ├── task.json # Task definition
405
- │ ├── papers/
406
- │ │ ├── _meta/ # Paper metadata (*.json)
407
- │ │ └── _downloads/ # Raw .tex/.pdf files
408
- │ ├── survey/
409
- │ │ └── report.md # Literature survey report
410
- │ ├── notes/ # Per-paper deep analysis
411
- │ │ └── paper_{arxiv_id}.md
412
- │ ├── survey_res.md # Method comparison table
413
- │ ├── plan_res.md # Implementation plan
414
- │ ├── project/ # ML code
415
- │ │ ├── run.py
416
- │ │ └── requirements.txt
417
- │ ├── ml_res.md # Implementation results
418
- │ ├── iterations/ # Review iterations
419
- │ │ └── judge_v*.md
420
- │ ├── experiment_res.md # Final experiment results
421
- │ ├── ideas/ # Generated ideas
422
- │ ├── idea_*.md
423
- │ └── selected_idea.md
424
- │ └── knowledge_state/ # Scheduled research state artifacts
425
- │ ├── knowledge/
426
- │ ├── paper_notes/ # Per-paper deep-reading records
427
- │ ├── daily_changes/
428
- │ ├── hypotheses/
429
- │ ├── logs/
430
- │ ├── state.json
431
- │ └── events.jsonl
432
- └── another-project/
433
- ```
434
-
435
- ---
436
-
437
- ## Configuration
438
-
439
- Plugin settings in `~/.openclaw/openclaw.json`:
440
-
441
- ```json
442
- {
443
- "plugins": {
444
- "entries": {
445
- "scientify": {
446
- "enabled": true
447
- }
448
- }
449
- }
450
- }
451
- ```
452
-
453
- ### Plugin management
454
-
455
- ```bash
456
- openclaw plugins list # List installed plugins
457
- openclaw plugins enable scientify # Enable
458
- openclaw plugins disable scientify # Disable
459
- openclaw plugins update scientify # Update to latest
460
- openclaw plugins doctor # Diagnose issues
247
+ /projects # 列出所有项目
248
+ /project-switch <id> # 切换项目
249
+ /papers # 列出已下载论文
250
+ /ideas # 列出已生成想法
461
251
  ```
462
252
 
463
253
  ---
464
254
 
465
- ## Known Limitations
255
+ ## 已知限制
466
256
 
467
- - **Sub-agent timeout**: Each sub-agent has a 30-minute timeout (`runTimeoutSeconds: 1800`). Complex literature surveys with many papers may need longer.
468
- - **GPU/Sandbox**: Code execution runs on host by default. OpenClaw sandbox does not support GPU passthrough yet.
469
- - **Model dependency**: Research quality depends heavily on the LLM model used. Claude Opus 4.5+ or GPT-5+ recommended.
257
+ - **子 agent 超时**:每个子 agent 超时 30 分钟(`runTimeoutSeconds: 1800`)。复杂文献调研可能需要更长时间。
258
+ - **GPU/Sandbox**:代码默认在宿主机直接执行。OpenClaw sandbox 暂不支持 GPU 透传。
259
+ - **模型依赖**:研究质量与使用的 LLM 模型强相关。推荐 Claude Opus 4.5+ 或 GPT-5+。
470
260
 
471
261
  ---
472
262
 
473
- ## Development
263
+ ## 开发
474
264
 
475
265
  ```bash
476
266
  git clone https://github.com/user/scientify.git
477
267
  cd scientify
478
268
  pnpm install
479
- pnpm build # Build TypeScript
480
- pnpm dev # Watch mode
269
+ pnpm build # 构建 TypeScript
270
+ pnpm dev # 监听模式
481
271
 
482
- # Link to OpenClaw for testing
272
+ # 链接到 OpenClaw 测试
483
273
  openclaw plugins install -l ./
484
274
  ```
485
275
 
486
- See [CLAUDE.md](./CLAUDE.md) for version update SOP and contribution guide.
276
+ 参见 [CLAUDE.md](./CLAUDE.md) 了解版本更新流程和贡献指南。
487
277
 
488
278
  ---
489
279
 
package/dist/index.d.ts CHANGED
@@ -1,3 +1,9 @@
1
- import type { OpenClawPluginApi } from "openclaw";
2
- export default function register(api: OpenClawPluginApi): void;
1
+ declare const _default: {
2
+ id: string;
3
+ name: string;
4
+ description: string;
5
+ configSchema: import("openclaw/plugin-sdk/plugin-entry").OpenClawPluginConfigSchema;
6
+ register: NonNullable<import("openclaw/plugin-sdk/plugin-entry").OpenClawPluginDefinition["register"]>;
7
+ } & Pick<import("openclaw/plugin-sdk/plugin-entry").OpenClawPluginDefinition, "kind">;
8
+ export default _default;
3
9
  //# sourceMappingURL=index.d.ts.map