research-copilot 0.2.2 → 0.2.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/app/out/main/index.mjs +124 -49
- package/app/out/preload/index.js +6 -0
- package/app/out/renderer/assets/{MilkdownMarkdownEditor-jaF-aGPn.js → MilkdownMarkdownEditor-G1iOHwc_.js} +50 -50
- package/app/out/renderer/assets/{arc-C1kBmvvR.js → arc-4dzvPWwv.js} +1 -1
- package/app/out/renderer/assets/{blockDiagram-c4efeb88-Do93X2rs.js → blockDiagram-c4efeb88-Cj5lBciZ.js} +8 -8
- package/app/out/renderer/assets/{c4Diagram-c83219d4-DgxxcZWC.js → c4Diagram-c83219d4-D4wJF4Nk.js} +3 -3
- package/app/out/renderer/assets/{channel-Co_M0Svj.js → channel-DRHGMFu6.js} +1 -1
- package/app/out/renderer/assets/{classDiagram-beda092f-CQlHgE6H.js → classDiagram-beda092f-DrBIQ4Da.js} +6 -6
- package/app/out/renderer/assets/{classDiagram-v2-2358418a-CkGG3aI2.js → classDiagram-v2-2358418a-yBhKnZh4.js} +10 -10
- package/app/out/renderer/assets/{clone-C18Y6dgC.js → clone-ChFuWsyG.js} +1 -1
- package/app/out/renderer/assets/{createText-1719965b-DGRc6nys.js → createText-1719965b-dtk9hA3P.js} +2 -2
- package/app/out/renderer/assets/{edges-96097737-BXvJ4fAK.js → edges-96097737-D_e1x_h6.js} +3 -3
- package/app/out/renderer/assets/{erDiagram-0228fc6a-CXjPp0pt.js → erDiagram-0228fc6a-CrZBuvPZ.js} +5 -5
- package/app/out/renderer/assets/{flowDb-c6c81e3f-CNhpbtw_.js → flowDb-c6c81e3f-DtZC9Fq3.js} +1 -1
- package/app/out/renderer/assets/{flowDiagram-50d868cf-KZ_BUCPA.js → flowDiagram-50d868cf-VzlFN6Qf.js} +12 -12
- package/app/out/renderer/assets/{flowDiagram-v2-4f6560a1-IMv50KZP.js → flowDiagram-v2-4f6560a1-BWNPZGSS.js} +12 -12
- package/app/out/renderer/assets/{flowchart-elk-definition-6af322e1-BFwFiPvq.js → flowchart-elk-definition-6af322e1-BRS2deVY.js} +6 -6
- package/app/out/renderer/assets/{ganttDiagram-a2739b55-D0-ehN-T.js → ganttDiagram-a2739b55-DRChnjlR.js} +3 -3
- package/app/out/renderer/assets/{gitGraphDiagram-82fe8481-DUyIR0Dv.js → gitGraphDiagram-82fe8481-BVfXECHD.js} +2 -2
- package/app/out/renderer/assets/{graph-DnTq2_3F.js → graph-Dl3bczvk.js} +1 -1
- package/app/out/renderer/assets/{index-5325376f-CBwuFbRF.js → index-5325376f-DjPany76.js} +6 -6
- package/app/out/renderer/assets/{index-u0FZRZON.js → index-6EerDbdL.js} +4 -4
- package/app/out/renderer/assets/{index-BHcU72Rm.js → index-B9tygoLQ.js} +3 -3
- package/app/out/renderer/assets/{index-DWU4ia28.js → index-BNdWI-dg.js} +6 -6
- package/app/out/renderer/assets/{index-D6r8msaQ.js → index-BPwe457Y.js} +3 -3
- package/app/out/renderer/assets/{index-DuhageEr.js → index-BQHeBYdD.js} +3 -3
- package/app/out/renderer/assets/{index-C1oXjI4L.js → index-BUUZSVeh.js} +3 -3
- package/app/out/renderer/assets/{index-Diy30-34.js → index-Bg-eDX1p.js} +4 -4
- package/app/out/renderer/assets/{index-CKXwBmK7.js → index-BkB91HdR.js} +5 -5
- package/app/out/renderer/assets/{index-BB-a1ajC.js → index-BlpusrHE.js} +136 -19
- package/app/out/renderer/assets/{index-gH-w4EHk.js → index-C226G81f.js} +3 -3
- package/app/out/renderer/assets/{index-DZbrRR7w.js → index-C4rx4LuD.js} +6 -6
- package/app/out/renderer/assets/{index-BpKrXGYD.js → index-C8-8M1o_.js} +3 -3
- package/app/out/renderer/assets/{index-CjffvluT.js → index-CLSRcr1i.js} +6 -6
- package/app/out/renderer/assets/{index-7hDGClrI.js → index-CMSALyTS.js} +3 -3
- package/app/out/renderer/assets/{index-yanwpi6t.js → index-CU9ZRB66.js} +6 -6
- package/app/out/renderer/assets/{index-bMe3RSkw.js → index-CgEKFTyJ.js} +6 -6
- package/app/out/renderer/assets/{index-D6jljsup.js → index-CllWzrq7.js} +3 -3
- package/app/out/renderer/assets/{index-ESFHcvWy.js → index-DHmQz4fM.js} +3 -3
- package/app/out/renderer/assets/{index-h_fNksib.js → index-DxziAmqO.js} +3 -3
- package/app/out/renderer/assets/{index-COZSDrEw.js → index-HZn90B-L.js} +6 -6
- package/app/out/renderer/assets/{index-CT1HtzVp.css → index-dNBQ09OL.css} +60 -0
- package/app/out/renderer/assets/{index-BQ7qz1CD.js → index-dvpto11c.js} +3 -3
- package/app/out/renderer/assets/{index-BVYoMX5H.js → index-hVorRCxO.js} +3 -3
- package/app/out/renderer/assets/{index-JT8OCsRP.js → index-nk-me1QW.js} +1 -1
- package/app/out/renderer/assets/{infoDiagram-8eee0895-Qra4japr.js → infoDiagram-8eee0895-CRzw5OpC.js} +2 -2
- package/app/out/renderer/assets/{journeyDiagram-c64418c1-BTN9SgOL.js → journeyDiagram-c64418c1-CYFcrwy9.js} +4 -4
- package/app/out/renderer/assets/{layout-DGrHHJdN.js → layout-2qUs4rWy.js} +2 -2
- package/app/out/renderer/assets/{line-DXtxdS2B.js → line-zEaIEY7C.js} +1 -1
- package/app/out/renderer/assets/{linear-CexrSQK6.js → linear-B-lut2jS.js} +1 -1
- package/app/out/renderer/assets/{mindmap-definition-8da855dc-pvG2hzEB.js → mindmap-definition-8da855dc--G6w66OU.js} +3 -3
- package/app/out/renderer/assets/{pieDiagram-a8764435-D_neFVMq.js → pieDiagram-a8764435-B1aj6YO1.js} +3 -3
- package/app/out/renderer/assets/{quadrantDiagram-1e28029f-C47W3UMp.js → quadrantDiagram-1e28029f-WcQ-5B6J.js} +3 -3
- package/app/out/renderer/assets/{requirementDiagram-08caed73-DW4Bo_fu.js → requirementDiagram-08caed73-DVS_F7Ld.js} +5 -5
- package/app/out/renderer/assets/{sankeyDiagram-a04cb91d-D_3PD7JI.js → sankeyDiagram-a04cb91d-C9DI8I5c.js} +2 -2
- package/app/out/renderer/assets/{sequenceDiagram-c5b8d532-BW6nGtuQ.js → sequenceDiagram-c5b8d532-BbjlXU_R.js} +3 -3
- package/app/out/renderer/assets/{stateDiagram-1ecb1508-CDgBJ3-T.js → stateDiagram-1ecb1508-D-RBhWue.js} +6 -6
- package/app/out/renderer/assets/{stateDiagram-v2-c2b004d7-CBw5TtXo.js → stateDiagram-v2-c2b004d7-5WeKw5B9.js} +10 -10
- package/app/out/renderer/assets/{styles-b4e223ce-DeeiEsuW.js → styles-b4e223ce-FiETMPKg.js} +1 -1
- package/app/out/renderer/assets/{styles-ca3715f6-CMpiebrG.js → styles-ca3715f6-DOyTWqN-.js} +1 -1
- package/app/out/renderer/assets/{styles-d45a18b0-CZe9hU7H.js → styles-d45a18b0-D2yoeLkD.js} +4 -4
- package/app/out/renderer/assets/{svgDrawCommon-b86b1483-CmJZfZzJ.js → svgDrawCommon-b86b1483-7THlOYFk.js} +1 -1
- package/app/out/renderer/assets/{timeline-definition-faaaa080-Beo2kiiz.js → timeline-definition-faaaa080-BVp4ikHz.js} +3 -3
- package/app/out/renderer/assets/{xychartDiagram-f5964ef8-DYmo7moz.js → xychartDiagram-f5964ef8-Cdp51x2C.js} +5 -5
- package/app/out/renderer/index.html +2 -2
- package/lib/skills/builtin/paper-revision/SKILL.md +467 -0
- package/lib/skills/builtin/paper-revision/references/evidence-strengthening.md +101 -0
- package/lib/skills/builtin/paper-revision/references/framing-patterns.md +119 -0
- package/lib/skills/builtin/paper-revision/references/reviewer-attack-catalog.md +171 -0
- package/lib/skills/builtin/paper-revision/references/venue-strategies.md +114 -0
- package/lib/skills/builtin/paper-writing/SKILL.md +1 -0
- package/package.json +4 -4
|
@@ -0,0 +1,119 @@
|
|
|
1
|
+
# Framing Patterns for Paper Revision
|
|
2
|
+
|
|
3
|
+
Common reframing strategies that turn borderline papers into defensible submissions. Each pattern includes the problem it solves, the reframe, and an example.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Pattern 1: From "Pipeline of Components" to "Protocol / Contract"
|
|
8
|
+
|
|
9
|
+
### When to use
|
|
10
|
+
Paper has multiple components that each exist in prior work. Reviewers will say "this is just integration."
|
|
11
|
+
|
|
12
|
+
### The reframe
|
|
13
|
+
Define the contribution not as the components but as the **conditions under which they must operate together**. The novelty is the contract, not the parts.
|
|
14
|
+
|
|
15
|
+
### Template
|
|
16
|
+
> "Prior work addresses [A], [B], and [C] separately. But [the problem] requires all four conditions to hold simultaneously: [condition 1], [condition 2], [condition 3], [condition 4]. Our contribution is the protocol that enforces these conditions together."
|
|
17
|
+
|
|
18
|
+
### Example (ParameterRec / MATRIX-IO)
|
|
19
|
+
- Before: "We cluster traces, synthesize surrogates, and train a recommender."
|
|
20
|
+
- After: "We define when historical traces may become reusable tuning evidence, by enforcing semantic partition, executable anchoring, validated transfer, and bounded serving together."
|
|
21
|
+
|
|
22
|
+
---
|
|
23
|
+
|
|
24
|
+
## Pattern 2: From "Benchmark Paper" to "Benchmark-Backed Measurement Study"
|
|
25
|
+
|
|
26
|
+
### When to use
|
|
27
|
+
Paper introduces a benchmark but the benchmark alone may not be seen as sufficient novelty.
|
|
28
|
+
|
|
29
|
+
### The reframe
|
|
30
|
+
Position the benchmark as an **evaluation substrate** that enables a measurement study. The paper's value is the measurement findings, not the benchmark artifact.
|
|
31
|
+
|
|
32
|
+
### Template
|
|
33
|
+
> "We use [benchmark name] not as an end in itself, but as the evaluation substrate for a controlled measurement study of [phenomenon]. Our central finding is [mechanism / finding]."
|
|
34
|
+
|
|
35
|
+
### Example (COIDI)
|
|
36
|
+
- Before: "We introduce COIDI-Bench for evaluating LLM agents on HPC telemetry."
|
|
37
|
+
- After: "We use COIDI-Bench as an evaluation substrate to measure how reliability bottlenecks in LLM agents shift with task structure."
|
|
38
|
+
|
|
39
|
+
---
|
|
40
|
+
|
|
41
|
+
## Pattern 3: From "Parallel Observations" to "Unified Mechanism"
|
|
42
|
+
|
|
43
|
+
### When to use
|
|
44
|
+
Paper has 2-4 separate findings that are individually reasonable but collectively feel like "result dump."
|
|
45
|
+
|
|
46
|
+
### The reframe
|
|
47
|
+
Identify a single mechanism that explains all findings as special cases. The paper's thesis becomes the mechanism, not the individual observations.
|
|
48
|
+
|
|
49
|
+
### Template
|
|
50
|
+
> "Our central finding is that [mechanism] shifts with [variable]. Specifically, for [regime A], [bottleneck A] dominates; for [regime B], [bottleneck B] dominates; for [regime C], [bottleneck C] dominates."
|
|
51
|
+
|
|
52
|
+
### Example (COIDI)
|
|
53
|
+
- Before: "Context helps on extraction. Verification helps on analysis. Strong models help on hard tasks."
|
|
54
|
+
- After: "The dominant reliability bottleneck shifts with task structure: semantic grounding on extraction, verification tractability on analysis, upstream hypothesis formation on hard tasks."
|
|
55
|
+
|
|
56
|
+
---
|
|
57
|
+
|
|
58
|
+
## Pattern 4: From "We Did X" to "Gap Chain"
|
|
59
|
+
|
|
60
|
+
### When to use
|
|
61
|
+
Paper solves a problem but the connection between the problem and the solution feels hand-wavy.
|
|
62
|
+
|
|
63
|
+
### The reframe
|
|
64
|
+
Identify the specific gaps that prevent the obvious/naive approach from working. Structure the contribution as closing those gaps.
|
|
65
|
+
|
|
66
|
+
### Template
|
|
67
|
+
> "[Resource] cannot directly become [goal] because of N gaps: [gap 1], [gap 2], ..., [gap N]. We close these gaps by [solution 1], [solution 2], ..., [solution N]."
|
|
68
|
+
|
|
69
|
+
### Example (ParameterRec / MATRIX-IO)
|
|
70
|
+
- Before: "We use production traces for I/O tuning."
|
|
71
|
+
- After: "Production traces cannot directly become reusable tuning evidence because: (1) they don't define a semantically valid evidence space, (2) they're descriptive not executable, (3) trace-level resemblance doesn't imply transfer validity, (4) serving needs a stopping rule. We close each gap."
|
|
72
|
+
|
|
73
|
+
---
|
|
74
|
+
|
|
75
|
+
## Pattern 5: Three-Layer Claim Hierarchy
|
|
76
|
+
|
|
77
|
+
### When to use
|
|
78
|
+
Collaborators disagree about what the core contribution is (common when system builders care about practical impact and advisors care about intellectual novelty).
|
|
79
|
+
|
|
80
|
+
### The reframe
|
|
81
|
+
Separate value, problem, and mechanism into three explicit layers. All appear in the paper, but the mechanism is the defensible novelty.
|
|
82
|
+
|
|
83
|
+
### Template
|
|
84
|
+
> "[Value statement: what practical benefit this delivers.] This is possible because [problem statement: what problem this solves and using what resources]. The technical mechanism that makes it work is [mechanism statement: the specific novelty]."
|
|
85
|
+
|
|
86
|
+
### Example (ParameterRec / MATRIX-IO)
|
|
87
|
+
- Value: "Reduces hardware-backed tuning cost."
|
|
88
|
+
- Problem: "Uses large production trace corpora for tuning."
|
|
89
|
+
- Mechanism: "Execution-backed conservative evidence-reuse protocol."
|
|
90
|
+
- Combined: "MATRIX-IO uses large production I/O trace corpora to reduce hardware-backed tuning cost, by turning trace history into execution-backed, conservatively reusable tuning evidence."
|
|
91
|
+
|
|
92
|
+
---
|
|
93
|
+
|
|
94
|
+
## Pattern 6: From "Method Paper" to "Design Principles Paper"
|
|
95
|
+
|
|
96
|
+
### When to use
|
|
97
|
+
Paper proposes a system or method, but the specific design may not generalize. The insights behind the design are more valuable than the design itself.
|
|
98
|
+
|
|
99
|
+
### The reframe
|
|
100
|
+
Position the paper as extracting design principles or deployment guidance, with the system as the vehicle for discovering and validating those principles.
|
|
101
|
+
|
|
102
|
+
### Template
|
|
103
|
+
> "Our system demonstrates that [design principle]. This yields practical guidance: [when to do X], [when to do Y], [when not to do Z]."
|
|
104
|
+
|
|
105
|
+
---
|
|
106
|
+
|
|
107
|
+
## Pattern 7: Lowering Secondary Contributions Without Removing Them
|
|
108
|
+
|
|
109
|
+
### When to use
|
|
110
|
+
A secondary result matters to some reviewers but shouldn't overshadow the primary claim.
|
|
111
|
+
|
|
112
|
+
### The reframe
|
|
113
|
+
Include the secondary contribution in the value layer but not the mechanism layer. Give it one clear bullet, positioned after primary contributions.
|
|
114
|
+
|
|
115
|
+
### Rules
|
|
116
|
+
- Never mention the secondary result before the primary claim in abstract/intro
|
|
117
|
+
- In evaluation, show it as a consequence of the primary mechanism
|
|
118
|
+
- In contribution bullets, use phrasing like "as a practical consequence" or "this additionally reduces"
|
|
119
|
+
- Do not create a separate evaluation section for it; embed it in the primary evaluation flow
|
|
@@ -0,0 +1,171 @@
|
|
|
1
|
+
# Reviewer Attack Catalog
|
|
2
|
+
|
|
3
|
+
Common reviewer objections organized by category, with defense strategies and preemptive text templates. Use this to anticipate and address attacks before submission.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## Category 1: Novelty Attacks
|
|
8
|
+
|
|
9
|
+
### Attack 1.1: "This is just integration"
|
|
10
|
+
**When it comes:** Paper has multiple components that each exist in prior work.
|
|
11
|
+
|
|
12
|
+
**Defense strategy:** Reframe as protocol/contract (see framing-patterns.md Pattern 1). Add a capability comparison table showing no prior work satisfies all conditions simultaneously.
|
|
13
|
+
|
|
14
|
+
**Preemptive text template:**
|
|
15
|
+
> "The contribution is not any individual component in isolation — [X], [Y], and [Z] each have precedents. Rather, it is the protocol that couples them: [goal] becomes possible only when [conditions] are enforced together. No prior system enforces all of these simultaneously (Table N)."
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
### Attack 1.2: "This is incremental over [specific prior work]"
|
|
20
|
+
**When it comes:** A closely related system exists.
|
|
21
|
+
|
|
22
|
+
**Defense strategy:** Identify the specific capability delta. If the delta is small, acknowledge it and reframe around the unique combination. If the delta is large, make it more visible.
|
|
23
|
+
|
|
24
|
+
**Preemptive text template:**
|
|
25
|
+
> "[Prior work] addresses [X] but assumes [limitation]. In our setting, [limitation does not hold] because [reason]. [Our approach] removes this assumption by [mechanism]."
|
|
26
|
+
|
|
27
|
+
---
|
|
28
|
+
|
|
29
|
+
### Attack 1.3: "The findings are obvious / expected"
|
|
30
|
+
**When it comes:** Results align with intuition (e.g., "of course more context helps").
|
|
31
|
+
|
|
32
|
+
**Defense strategy:** Emphasize that the contribution is not the direction of the effect but its quantification, boundary conditions, and mechanism. Add quantitative deltas and identify where the effect does NOT hold.
|
|
33
|
+
|
|
34
|
+
**Preemptive text template:**
|
|
35
|
+
> "While the direction of this effect may be intuitive, our study provides the first controlled quantification: [effect size]. Importantly, the effect reverses under [condition], demonstrating that [nuance]."
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## Category 2: Scope and Generalization Attacks
|
|
40
|
+
|
|
41
|
+
### Attack 2.1: "Single site / single dataset / narrow domain"
|
|
42
|
+
**When it comes:** Experiments use data from one source.
|
|
43
|
+
|
|
44
|
+
**Defense strategy:** Do NOT claim broad universality. Instead, claim "controlled realism" and position the single site as a high-fidelity first step.
|
|
45
|
+
|
|
46
|
+
**Preemptive text template:**
|
|
47
|
+
> "We scope the validated path to [site/dataset] because it provides [specific advantages: production data, complete metadata, etc.]. We do not claim that quantitative results transfer unchanged to all [domains]. Rather, we provide a controlled, realistic substrate for studying [phenomenon], and we expect the qualitative patterns to generalize to settings with similar [characteristics]."
|
|
48
|
+
|
|
49
|
+
---
|
|
50
|
+
|
|
51
|
+
### Attack 2.2: "Would this work on a different [system/domain/scale]?"
|
|
52
|
+
**When it comes:** The approach seems tied to specific infrastructure.
|
|
53
|
+
|
|
54
|
+
**Defense strategy:** Identify what the approach actually depends on (e.g., "feature availability" not "specific hardware") and state that dependency explicitly.
|
|
55
|
+
|
|
56
|
+
**Preemptive text template:**
|
|
57
|
+
> "Extension to other [systems/domains] requires [specific conditions], not merely more data. We discuss this requirement in Section N and leave multi-[site/domain] validation to future work."
|
|
58
|
+
|
|
59
|
+
---
|
|
60
|
+
|
|
61
|
+
### Attack 2.3: "N is too small" (tasks, samples, runs, etc.)
|
|
62
|
+
**When it comes:** Dataset/benchmark size seems insufficient.
|
|
63
|
+
|
|
64
|
+
**Defense strategy:** Do not compete on scale. Compete on quality, realism, and control. Explain why N is sufficient for the claims being made.
|
|
65
|
+
|
|
66
|
+
**Preemptive text template:**
|
|
67
|
+
> "We emphasize [quality/realism/expert curation/control] over scale. Each of the N [items] involves [complexity]. Our claims are at the [regime/family/category] level, not at the level of fine-grained ranking between near-tied methods, and the observed effects are consistent across [groupings]."
|
|
68
|
+
|
|
69
|
+
---
|
|
70
|
+
|
|
71
|
+
## Category 3: Methodology Attacks
|
|
72
|
+
|
|
73
|
+
### Attack 3.1: "How were thresholds / hyperparameters chosen?"
|
|
74
|
+
**When it comes:** Paper uses thresholds or design choices without clear justification.
|
|
75
|
+
|
|
76
|
+
**Defense strategy:** State the thresholds explicitly. Explain the design rationale (conservative, validated on held-out data, or sensitivity-analyzed). If sensitivity analysis exists, reference it.
|
|
77
|
+
|
|
78
|
+
**Preemptive text template:**
|
|
79
|
+
> "We set [threshold] = [value] based on [rationale]. The system is intentionally [conservative/asymmetric]: [explanation of design choice]. We report sensitivity to this choice in [appendix/section]."
|
|
80
|
+
|
|
81
|
+
---
|
|
82
|
+
|
|
83
|
+
### Attack 3.2: "This relies on manual / hand-crafted rules"
|
|
84
|
+
**When it comes:** Paper has expert-defined categories, rules, or heuristics.
|
|
85
|
+
|
|
86
|
+
**Defense strategy:** Reframe rules as "operational constraints" or "semantic priors" rather than "heuristics." Emphasize that they define the scope of safe operation, not universal truth.
|
|
87
|
+
|
|
88
|
+
**Preemptive text template:**
|
|
89
|
+
> "[Rules/categories] encode operational constraints rather than universal taxonomy. They define the scope within which [reuse/transfer/recommendation] is safe. We prioritize [conservative correctness] over [full automation], and we expect these constraints to be refined as [domain knowledge / deployment experience] grows."
|
|
90
|
+
|
|
91
|
+
---
|
|
92
|
+
|
|
93
|
+
### Attack 3.3: "Baseline comparison is unfair"
|
|
94
|
+
**When it comes:** Baselines are weak, adapted from different settings, or cherry-picked.
|
|
95
|
+
|
|
96
|
+
**Defense strategy:** Explain why each baseline was chosen and what question it answers. If adaptation was necessary, explain what was preserved and what was changed.
|
|
97
|
+
|
|
98
|
+
**Preemptive text template:**
|
|
99
|
+
> "We compare against [baselines] not as straw men but as a progression: [baseline 1] tests whether [simple approach] suffices; [baseline 2] tests whether [moderate approach] closes the gap; [our method] tests whether the full [path/protocol] is necessary. This ladder structure isolates the contribution of each design element."
|
|
100
|
+
|
|
101
|
+
---
|
|
102
|
+
|
|
103
|
+
### Attack 3.4: "K / number of runs is too low for reliable conclusions"
|
|
104
|
+
**When it comes:** Stochastic experiments with limited repetitions.
|
|
105
|
+
|
|
106
|
+
**Defense strategy:** Acknowledge the budget constraint, explain what level of conclusion the data supports, and state what you do NOT claim.
|
|
107
|
+
|
|
108
|
+
**Preemptive text template:**
|
|
109
|
+
> "We run K=[value] trials per configuration, yielding [metric] as our primary reliability estimate. We do not claim fine-grained ranking between near-tied methods; rather, we focus on [regime-level / family-level] conclusions where the observed effects are consistent across [categories/tasks]. Larger K would sharpen uncertainty estimates but is unlikely to reverse the broader trends observed."
|
|
110
|
+
|
|
111
|
+
---
|
|
112
|
+
|
|
113
|
+
## Category 4: Presentation Attacks
|
|
114
|
+
|
|
115
|
+
### Attack 4.1: "The paper is hard to follow"
|
|
116
|
+
**When it comes:** Narrative is unclear, inconsistent terminology, or poor section transitions.
|
|
117
|
+
|
|
118
|
+
**Defense strategy:** Apply Phase 3 (Narrative Unification) systematically. Ensure every section opening answers "what is this section's job?"
|
|
119
|
+
|
|
120
|
+
---
|
|
121
|
+
|
|
122
|
+
### Attack 4.2: "Claims in abstract/intro don't match evaluation"
|
|
123
|
+
**When it comes:** Front matter was revised but evaluation wasn't updated (or vice versa).
|
|
124
|
+
|
|
125
|
+
**Defense strategy:** Do a claim-echo audit: for each claim in the abstract, find the exact evaluation paragraph that supports it.
|
|
126
|
+
|
|
127
|
+
---
|
|
128
|
+
|
|
129
|
+
### Attack 4.3: "Figure/table doesn't support the claimed conclusion"
|
|
130
|
+
**When it comes:** Captions are generic, or the text interprets the figure differently than what it shows.
|
|
131
|
+
|
|
132
|
+
**Defense strategy:** Rewrite captions to include the key takeaway. Ensure text references to figures state what to observe and why it matters.
|
|
133
|
+
|
|
134
|
+
---
|
|
135
|
+
|
|
136
|
+
## Category 5: Significance Attacks
|
|
137
|
+
|
|
138
|
+
### Attack 5.1: "So what? Why should [community] care?"
|
|
139
|
+
**When it comes:** Paper is technically sound but doesn't connect to community values.
|
|
140
|
+
|
|
141
|
+
**Defense strategy:** Add a "community significance" paragraph in the introduction that translates technical findings into operational/deployment/practical terms.
|
|
142
|
+
|
|
143
|
+
**SC-specific template:**
|
|
144
|
+
> "For the HPC [storage/computing] community, this means [practical implication]. The path to [goal] requires not [naive approach] but [principled approach that our work demonstrates]."
|
|
145
|
+
|
|
146
|
+
---
|
|
147
|
+
|
|
148
|
+
### Attack 5.2: "This is a nice dataset/tool but not a research contribution"
|
|
149
|
+
**When it comes:** Paper's primary artifact is a benchmark, tool, or dataset.
|
|
150
|
+
|
|
151
|
+
**Defense strategy:** Reframe as measurement study or design study (see framing-patterns.md Pattern 2). The artifact is the instrument; the findings are the contribution.
|
|
152
|
+
|
|
153
|
+
---
|
|
154
|
+
|
|
155
|
+
## Using This Catalog
|
|
156
|
+
|
|
157
|
+
### Before submission
|
|
158
|
+
1. Read through the catalog and identify 3-5 attacks most relevant to your paper
|
|
159
|
+
2. For each, check whether the paper already addresses it
|
|
160
|
+
3. If not, add preemptive defense using the templates above
|
|
161
|
+
4. Prioritize attacks by likelihood and severity
|
|
162
|
+
|
|
163
|
+
### During revision
|
|
164
|
+
- When a collaborator or internal reviewer raises a concern, check if it maps to a catalog entry
|
|
165
|
+
- If yes, apply the corresponding defense strategy
|
|
166
|
+
- If no, consider adding a new entry for future reference
|
|
167
|
+
|
|
168
|
+
### After reviews
|
|
169
|
+
- Map each reviewer comment to a catalog entry
|
|
170
|
+
- Use the defense strategy to draft rebuttal responses
|
|
171
|
+
- Strengthen the paper text for resubmission
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
# Venue-Specific Revision Strategies
|
|
2
|
+
|
|
3
|
+
Guidance for adapting revision strategy to the values, expectations, and reviewer culture of specific conference types.
|
|
4
|
+
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
## SC / HPDC (HPC Systems)
|
|
8
|
+
|
|
9
|
+
### What reviewers value most
|
|
10
|
+
- Real systems on real hardware with real workloads
|
|
11
|
+
- Operational relevance to HPC facilities
|
|
12
|
+
- Reproducibility and artifact descriptions
|
|
13
|
+
- Conservative, well-bounded claims
|
|
14
|
+
- Performance at scale
|
|
15
|
+
|
|
16
|
+
### What gets papers rejected
|
|
17
|
+
- "Demo paper" feel without clear intellectual contribution
|
|
18
|
+
- Overclaimed generalization from limited evaluation
|
|
19
|
+
- Missing limitations section or dishonest scope
|
|
20
|
+
- Ignoring prior HPC-specific work
|
|
21
|
+
- Benchmarks without explaining why the benchmark design matters
|
|
22
|
+
|
|
23
|
+
### Revision priorities for SC
|
|
24
|
+
1. **Operational framing:** Connect findings to what HPC facility operators / administrators would actually do differently
|
|
25
|
+
2. **Scope honesty:** Be explicit about single-site vs. multi-site, production vs. testbed, application-specific vs. general
|
|
26
|
+
3. **Reproducibility signals:** Mention artifact availability, exact configurations, hardware specs
|
|
27
|
+
4. **Performance context:** Always explain whether speedups are over reasonable baselines (not just defaults)
|
|
28
|
+
5. **Community significance paragraph:** Translate technical findings into HPC deployment implications
|
|
29
|
+
|
|
30
|
+
### Style notes
|
|
31
|
+
- Direct, technical prose; avoid promotional language
|
|
32
|
+
- Prefer concrete mechanisms over abstract frameworks
|
|
33
|
+
- Include compute/cost/resource details
|
|
34
|
+
- Limitations section is expected and respected
|
|
35
|
+
|
|
36
|
+
---
|
|
37
|
+
|
|
38
|
+
## OSDI / SOSP / NSDI (Systems)
|
|
39
|
+
|
|
40
|
+
### What reviewers value most
|
|
41
|
+
- Clean problem formulation with clear design invariants
|
|
42
|
+
- Systems that address a real deployment pain point
|
|
43
|
+
- Strong evaluation methodology (not just "it's faster")
|
|
44
|
+
- Safety boundaries and failure modes
|
|
45
|
+
- Design lessons that generalize beyond the specific system
|
|
46
|
+
|
|
47
|
+
### What gets papers rejected
|
|
48
|
+
- "Engineering effort" without clear intellectual contribution
|
|
49
|
+
- Evaluation that only shows the happy path
|
|
50
|
+
- Missing discussion of failure modes / edge cases
|
|
51
|
+
- Claims that don't match the evaluated scope
|
|
52
|
+
- Poor writing quality (OSDI/SOSP have very high presentation bar)
|
|
53
|
+
|
|
54
|
+
### Revision priorities for OSDI/SOSP
|
|
55
|
+
1. **Design invariants:** State what your system always guarantees, not just what it usually achieves
|
|
56
|
+
2. **Necessity evidence:** Prove that simpler approaches don't work (ablation, controlled comparisons)
|
|
57
|
+
3. **Failure mode analysis:** Show what happens when assumptions are violated
|
|
58
|
+
4. **Scalability and cost:** Include deployment overhead, not just throughput
|
|
59
|
+
5. **Design lessons section:** Extract 2-3 takeaways that other system builders can use
|
|
60
|
+
|
|
61
|
+
### Style notes
|
|
62
|
+
- Very strong problem formulation expected in first 2 pages
|
|
63
|
+
- "Rapid review" culture: reviewers decide quickly based on intro quality
|
|
64
|
+
- Design section should explain *why*, not just *what*
|
|
65
|
+
- Evaluation should answer specific research questions, not demonstrate features
|
|
66
|
+
|
|
67
|
+
---
|
|
68
|
+
|
|
69
|
+
## NeurIPS / ICML / ICLR (ML/AI)
|
|
70
|
+
|
|
71
|
+
### What reviewers value most
|
|
72
|
+
- Clear, novel contribution (method, theory, or empirical finding)
|
|
73
|
+
- Rigorous ablation studies
|
|
74
|
+
- Reproducible experimental setup
|
|
75
|
+
- Positioning relative to a well-defined problem
|
|
76
|
+
- Statistical rigor (error bars, significance tests, multiple seeds)
|
|
77
|
+
|
|
78
|
+
### What gets papers rejected
|
|
79
|
+
- "Bag of tricks" without clear insight
|
|
80
|
+
- Evaluation on only one dataset/setting
|
|
81
|
+
- Missing ablations for key design choices
|
|
82
|
+
- Overclaimed novelty that is actually incremental
|
|
83
|
+
- Poor related work coverage
|
|
84
|
+
|
|
85
|
+
### Revision priorities for NeurIPS/ICML
|
|
86
|
+
1. **One clear contribution:** Can you state it in one sentence? If not, the paper isn't ready
|
|
87
|
+
2. **Ablation rigor:** Every non-obvious design choice needs an ablation or justification
|
|
88
|
+
3. **Statistical reporting:** Error bars, number of seeds, significance tests where appropriate
|
|
89
|
+
4. **Related work density:** Cite generously; reviewers may have authored related papers
|
|
90
|
+
5. **Abstract formula:** Use the 5-sentence structure (what/why hard/how/evidence/best number)
|
|
91
|
+
|
|
92
|
+
### Style notes
|
|
93
|
+
- More theoretical framing expected than systems venues
|
|
94
|
+
- "Broader Impact" or similar statement may be required
|
|
95
|
+
- Appendix can be extensive but reviewers aren't required to read it
|
|
96
|
+
- Checklist compliance is mandatory at most venues
|
|
97
|
+
|
|
98
|
+
---
|
|
99
|
+
|
|
100
|
+
## Cross-Venue Patterns
|
|
101
|
+
|
|
102
|
+
### Universal high-ROI actions
|
|
103
|
+
1. Ensure title accurately signals the paper type
|
|
104
|
+
2. Ensure abstract contains at least 2 concrete numbers
|
|
105
|
+
3. Ensure introduction has clear contribution bullets
|
|
106
|
+
4. Ensure evaluation opening states what questions are being answered
|
|
107
|
+
5. Ensure limitations section exists and is honest
|
|
108
|
+
|
|
109
|
+
### Universal revision traps
|
|
110
|
+
1. Revising front matter but not evaluation (or vice versa)
|
|
111
|
+
2. Adding more results instead of fixing the narrative
|
|
112
|
+
3. Over-defending minor points while leaving major gaps unaddressed
|
|
113
|
+
4. Changing terminology mid-revision without propagating everywhere
|
|
114
|
+
5. Cutting limitations to save space (this always backfires)
|
|
@@ -26,6 +26,7 @@ This skill covers the full lifecycle of conference paper writing: from assemblin
|
|
|
26
26
|
- Research is still in progress — finish experiments and analysis first.
|
|
27
27
|
- You need a literature survey — use the `literature-search` tool.
|
|
28
28
|
- You need to brainstorm research directions — use `brainstorming-research-ideas` or `creative-thinking-for-research`.
|
|
29
|
+
- You already have a draft and want to **strategically revise** it (reframe the contribution, strengthen evidence, prepare reviewer defense, unify the narrative) — use `paper-revision` instead. That skill covers framing diagnosis, claim crystallization, and reviewer-adversarial revision, which are fundamentally different from first-draft writing.
|
|
29
30
|
- You are writing a **journal article** (Nature, Science, NEJM, etc.), **technical report**, or **research summary** — use `scientific-writing` instead, which covers IMRAD structure, journal-specific citation styles (APA/AMA/Vancouver), and reporting guidelines (CONSORT/STROBE/PRISMA).
|
|
30
31
|
|
|
31
32
|
### paper-writing vs scientific-writing: Which to Use
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "research-copilot",
|
|
3
|
-
"version": "0.2.2",
|
|
3
|
+
"version": "0.2.4",
|
|
4
4
|
"description": "AI-powered research assistant for scientists — literature search, data analysis, academic writing, and project management",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"bin": {
|
|
@@ -47,9 +47,9 @@
|
|
|
47
47
|
"node": ">=18"
|
|
48
48
|
},
|
|
49
49
|
"dependencies": {
|
|
50
|
-
"@mariozechner/pi-agent-core": "^0.
|
|
51
|
-
"@mariozechner/pi-ai": "^0.
|
|
52
|
-
"@mariozechner/pi-coding-agent": "^0.
|
|
50
|
+
"@mariozechner/pi-agent-core": "^0.66.1",
|
|
51
|
+
"@mariozechner/pi-ai": "^0.66.1",
|
|
52
|
+
"@mariozechner/pi-coding-agent": "^0.66.1",
|
|
53
53
|
"@sinclair/typebox": "^0.34.48",
|
|
54
54
|
"adm-zip": "^0.5.16",
|
|
55
55
|
"ajv": "^8.18.0",
|