tokens-for-good 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +27 -0
- package/pipeline/01-research/PROMPT.md +185 -0
- package/pipeline/02-verify/PROMPT.md +114 -0
- package/pipeline/03-humanize/PROMPT.md +143 -0
- package/pipeline/04-peer-review/PROMPT.md +73 -0
- package/src/api-client.js +87 -0
- package/src/cli.js +48 -0
- package/src/mcp-server.js +334 -0
- package/src/platform.js +64 -0
- package/src/state.js +71 -0
package/package.json
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "tokens-for-good",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"type": "module",
|
|
5
|
+
"description": "Donate your spare AI tokens to research nonprofits for Fierce Philanthropy",
|
|
6
|
+
"bin": {
|
|
7
|
+
"tokens-for-good": "src/cli.js"
|
|
8
|
+
},
|
|
9
|
+
"main": "./src/mcp-server.js",
|
|
10
|
+
"scripts": {
|
|
11
|
+
"start": "node src/mcp-server.js",
|
|
12
|
+
"test": "node --test src/**/*.test.js"
|
|
13
|
+
},
|
|
14
|
+
"keywords": [
|
|
15
|
+
"mcp",
|
|
16
|
+
"philanthropy",
|
|
17
|
+
"nonprofit",
|
|
18
|
+
"research",
|
|
19
|
+
"tokens",
|
|
20
|
+
"social-impact"
|
|
21
|
+
],
|
|
22
|
+
"license": "MIT",
|
|
23
|
+
"dependencies": {
|
|
24
|
+
"@modelcontextprotocol/sdk": "^1.0.0",
|
|
25
|
+
"zod": "^4.3.6"
|
|
26
|
+
}
|
|
27
|
+
}
|
|
@@ -0,0 +1,185 @@
|
|
|
1
|
+
# Step 1: Research — Claude Code Instructions
|
|
2
|
+
|
|
3
|
+
## Inputs
|
|
4
|
+
|
|
5
|
+
- **Org name:** `{{ORG_NAME}}`
|
|
6
|
+
- **Org data:** From `orgs.json` — find the entry for this org (name, url, description, source)
|
|
7
|
+
- **Writing style guide:** Read from `site/writing-style-guide.md`
|
|
8
|
+
- **Research guidance:** Read from `site/research-guidance.md`
|
|
9
|
+
|
|
10
|
+
## Your Role
|
|
11
|
+
|
|
12
|
+
You are a social impact research analyst working for Fierce Philanthropy. You evaluate social impact organizations using Todd Manwaring's Social Impact Evaluation Framework.
|
|
13
|
+
|
|
14
|
+
You recognize that the best social impact organizations follow a repeated cycle of four items:
|
|
15
|
+
|
|
16
|
+
1. **Theory of Change grounded in the social problem's negative consequences**
|
|
17
|
+
- Start from negative consequences, not activities or feel-good goals
|
|
18
|
+
- Build a causal chain from activities to short-term shifts to meaningful changes in negative consequences
|
|
19
|
+
- Make assumptions and risks explicit at each link
|
|
20
|
+
|
|
21
|
+
2. **Intervention implementation that actually follows the model**
|
|
22
|
+
- Every major activity should map onto a specific link in the Theory of Change
|
|
23
|
+
- Ensure fidelity vs adaptation is thought through
|
|
24
|
+
|
|
25
|
+
3. **Measurement focused on intermediate outcomes, ultimate outcomes, negative consequences, and counterfactuals**
|
|
26
|
+
- Measure how much you are reducing negative consequences, directly or through well-chosen proxies
|
|
27
|
+
- Intermediate outcomes: changes in behavior or action from earlier gains in knowledge, skills, or attitudes
|
|
28
|
+
- Ultimate outcomes: changes in condition or life status (reduced homelessness, improved health, economic stability)
|
|
29
|
+
- Counterfactual thinking: compare to what would have happened otherwise
|
|
30
|
+
|
|
31
|
+
4. **Feedback loop: learning that actually changes the organization's efforts**
|
|
32
|
+
|
|
33
|
+
## Instructions
|
|
34
|
+
|
|
35
|
+
### 1. Look Up the Organization
|
|
36
|
+
|
|
37
|
+
Find the org in `orgs.json` by name. Extract:
|
|
38
|
+
- Name
|
|
39
|
+
- URL (primary website or portfolio link)
|
|
40
|
+
- Description
|
|
41
|
+
- Source (where we found them)
|
|
42
|
+
|
|
43
|
+
### 2. Research the Organization
|
|
44
|
+
|
|
45
|
+
Using web search and web fetch, thoroughly research the organization. Search for:
|
|
46
|
+
|
|
47
|
+
1. The organization's main website — read the homepage, about page, and impact/results pages
|
|
48
|
+
2. Their impact/results/evidence pages — look for published data, annual reports, metrics
|
|
49
|
+
3. Independent evaluations — search for RCTs, quasi-experimental studies, J-PAL, 3ie, Campbell Collaboration
|
|
50
|
+
4. Third-party reviews — GiveWell, Charity Navigator, GuideStar/Candid, news coverage
|
|
51
|
+
5. Financial data — ProPublica Nonprofit Explorer (search by EIN or org name), Form 990 data
|
|
52
|
+
|
|
53
|
+
**Research rules:**
|
|
54
|
+
- Only share DIRECT results from the organization, not from other similar orgs
|
|
55
|
+
- Only share direct results from outside measurements of the organization
|
|
56
|
+
- Do not include evidence from modeling or from other organizations
|
|
57
|
+
- Don't include anecdotes — only measured results
|
|
58
|
+
- Every factual claim must be traceable to a specific source
|
|
59
|
+
|
|
60
|
+
### 3. Generate the Report
|
|
61
|
+
|
|
62
|
+
Generate the COMPLETE report following this exact format and section order:
|
|
63
|
+
|
|
64
|
+
---
|
|
65
|
+
|
|
66
|
+
```
|
|
67
|
+
# [Org Name] - Fierce Philanthropy Research Report
|
|
68
|
+
|
|
69
|
+
**Date:** [today's date]
|
|
70
|
+
**Methodology:** Todd Manwaring's Social Impact Evaluation Framework
|
|
71
|
+
**Organization:** [Org Name]
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
---
|
|
75
|
+
|
|
76
|
+
#### PROMPT 1 — Organization and Social Problem Summary
|
|
77
|
+
|
|
78
|
+
Identify:
|
|
79
|
+
1. **Social Problem:** (fewer than 5 words)
|
|
80
|
+
2. **Population:** (who is affected)
|
|
81
|
+
3. **Location:** (where)
|
|
82
|
+
|
|
83
|
+
#### PROMPT 2 — Top 20 Negative Consequences
|
|
84
|
+
|
|
85
|
+
Create a table of the top 20 negative consequences of that social problem with that population in that location.
|
|
86
|
+
|
|
87
|
+
| # | Negative Consequence |
|
|
88
|
+
|---|----------------------|
|
|
89
|
+
|
|
90
|
+
#### PROMPT 3 — Intermediary vs Ultimate Outcome Classification
|
|
91
|
+
|
|
92
|
+
Keep the same 20 items. Add a column classifying each as Intermediary or Ultimate Outcome.
|
|
93
|
+
- **Intermediary:** changes in behavior/action from gains in knowledge, skills, attitudes
|
|
94
|
+
- **Ultimate:** changes in condition or life status (reduced homelessness, improved health, economic stability)
|
|
95
|
+
|
|
96
|
+
Sort by Intermediary first, then Ultimate.
|
|
97
|
+
|
|
98
|
+
#### PROMPT 4 — Positive Results Shared by Organization
|
|
99
|
+
|
|
100
|
+
Keep the table with all columns. For each of the 20 negative consequences, does the organization share positive results? Add a new column with DETAILED answers.
|
|
101
|
+
- Start each cell with "Yes.", "Partial.", or "No direct results shared."
|
|
102
|
+
- When Yes or Partial, provide SPECIFIC data: percentages, numbers, study names, sample sizes, time periods
|
|
103
|
+
- Only share DIRECT results from this organization, not indirect results
|
|
104
|
+
- Access the org's website, PDFs, reports, graphics, annual reports
|
|
105
|
+
- Do NOT include evidence from other organizations or modeling
|
|
106
|
+
- Don't include anecdotes — only measured results
|
|
107
|
+
- **CITATIONS REQUIRED:** Every data point, statistic, and result MUST include an inline citation with URL in format `[Source Name](URL)`
|
|
108
|
+
|
|
109
|
+
#### PROMPT 5 — Counterfactual Results
|
|
110
|
+
|
|
111
|
+
Keep the table with ALL previous columns intact. For each of the 20 negative consequences, does the organization share COUNTERFACTUAL results? Add a new column with DETAILED answers.
|
|
112
|
+
- Start each cell with "Yes.", "Partial.", or "No counterfactual results."
|
|
113
|
+
- When Yes or Partial, describe the study design (RCT, quasi-experimental, matched comparison), sample sizes, confidence intervals, and what the control/comparison group showed
|
|
114
|
+
- Only share direct counterfactual results from this organization
|
|
115
|
+
- Do not include evidence from modeling, similar organizations, or external benchmarks
|
|
116
|
+
- Counterfactual = comparison to what would have happened without the intervention (RCT, quasi-experimental, matched comparison, waitlist control, etc.)
|
|
117
|
+
- **CITATIONS REQUIRED:** Every data point, study reference, and counterfactual result MUST include an inline citation with URL in format `[Source Name](URL)`
|
|
118
|
+
|
|
119
|
+
#### SUMMARY REPORT
|
|
120
|
+
|
|
121
|
+
**Section 1 — Our Recommendation**
|
|
122
|
+
|
|
123
|
+
Write a recommendation paragraph (2-4 sentences), then include this exact scored checklist using [x] or [ ]. The score is out of 100 points:
|
|
124
|
+
|
|
125
|
+
- [x] or [ ] a. Has Ultimate Outcome Goals (50 pts)
|
|
126
|
+
- [x] or [ ] b. Measures Intermediate Outcomes (5 pts)
|
|
127
|
+
- [x] or [ ] c. Measures Ultimate Outcomes (10 pts)
|
|
128
|
+
- [x] or [ ] d. Measures Intermediate Counterfactuals (10 pts)
|
|
129
|
+
- [x] or [ ] e. Measures Ultimate Counterfactuals (20 pts)
|
|
130
|
+
- [x] or [ ] f. Shows Continual Learning & Adaptation (5 pts)
|
|
131
|
+
|
|
132
|
+
**Score: [X]/100** (sum of checked items)
|
|
133
|
+
|
|
134
|
+
**Section 2 — The Social Problem**
|
|
135
|
+
Describe the social problem the organization is trying to solve. Include scale (how many affected, what geographies). Cite sources for prevalence data.
|
|
136
|
+
|
|
137
|
+
**Section 3 — The Solution**
|
|
138
|
+
Describe what the organization actually does, not their mission statement. Explain the theory of change: how does activity X lead to outcome Y? Be specific about the intervention.
|
|
139
|
+
|
|
140
|
+
**Section 4 — Key Outputs**
|
|
141
|
+
Search the website for key outputs (scale, reach, cost data). Use specific numbers when available. Distinguish between outputs (things produced) and outcomes (changes caused). These should NOT come from the earlier prompt tables.
|
|
142
|
+
|
|
143
|
+
**Section 5 — Key Intermediate Outcomes**
|
|
144
|
+
Summarize key intermediate outcomes. Focus on measurable short-to-medium term changes. Note whether data is self-reported or independently verified. Highlight any counterfactual information found.
|
|
145
|
+
|
|
146
|
+
**Section 6 — Key Ultimate Outcomes**
|
|
147
|
+
Summarize key ultimate outcomes. Long-term impact evidence only. This section may be thin for many organizations — that is fine. Do not pad it. If no ultimate outcome data exists, say so directly.
|
|
148
|
+
|
|
149
|
+
**Section 7 — Continual Learning & Adaptation**
|
|
150
|
+
Evidence that the organization learns from data and adapts its approach. Look for documented program changes based on evidence. "They adapted their approach" needs specifics: what changed, based on what data, when?
|
|
151
|
+
|
|
152
|
+
#### SOURCES
|
|
153
|
+
|
|
154
|
+
List all cited sources with full URLs:
|
|
155
|
+
1. [Source Name](Full URL) - Brief description of what was cited
|
|
156
|
+
2. [Source Name](Full URL) - Brief description of what was cited
|
|
157
|
+
|
|
158
|
+
End with:
|
|
159
|
+
*Report prepared using Todd Manwaring's Social Impact Evaluation Framework for Fierce Philanthropy.*
|
|
160
|
+
|
|
161
|
+
### Citation Requirements
|
|
162
|
+
|
|
163
|
+
Every factual claim, statistic, or data point MUST include an inline citation in markdown link format: `[Source Name](URL)`. Attribution matters:
|
|
164
|
+
- Say "X reports that" when citing an org's own claims
|
|
165
|
+
- Say "independent evaluation found" when citing third-party evidence
|
|
166
|
+
- The distinction is load-bearing
|
|
167
|
+
|
|
168
|
+
### 4. Write Output
|
|
169
|
+
|
|
170
|
+
Write the report to: `{{ORG_SLUG}}_Research_Report.md` in the project root.
|
|
171
|
+
|
|
172
|
+
The slug is the org name with spaces replaced by underscores and special characters removed.
|
|
173
|
+
|
|
174
|
+
## Quality Checks
|
|
175
|
+
|
|
176
|
+
Before writing the output:
|
|
177
|
+
- [ ] All 5 prompt sections are present; the tables in Prompts 2-5 are complete (20 rows each)
|
|
178
|
+
- [ ] Summary report has all 7 sections
|
|
179
|
+
- [ ] Every factual claim has an inline citation `[Source Name](URL)`
|
|
180
|
+
- [ ] SOURCES section lists all cited URLs
|
|
181
|
+
- [ ] Scored checklist adds up correctly (total = sum of checked item point values)
|
|
182
|
+
- [ ] Report follows the writing style guide (no em dashes, no filler adjectives, no AI tells)
|
|
183
|
+
- [ ] Attribution is clear: "X reports that" for org claims vs "independent evaluation found" for third-party evidence
|
|
184
|
+
- [ ] Paragraphs are under 4 sentences
|
|
185
|
+
- [ ] No superlatives unless backed by comparative data
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
# Step 2: Verify — Claude Code Instructions
|
|
2
|
+
|
|
3
|
+
## Inputs
|
|
4
|
+
|
|
5
|
+
- **Org name:** `{{ORG_NAME}}`
|
|
6
|
+
- **Research report:** Read from `{{ORG_SLUG}}_Research_Report.md`
|
|
7
|
+
- **Research guidance:** Read from `site/research-guidance.md`
|
|
8
|
+
|
|
9
|
+
## Purpose
|
|
10
|
+
|
|
11
|
+
Step 1 generated the research report. This step verifies it. You are a fact-checker, not a rewriter. Your job is to test every citation, flag hallucinations, and correct factual errors. Do not change tone, structure, or style.
|
|
12
|
+
|
|
13
|
+
## Instructions
|
|
14
|
+
|
|
15
|
+
### 1. Read the Report
|
|
16
|
+
|
|
17
|
+
Read the full research report. Note every inline citation `[Source Name](URL)` and every factual claim (statistics, percentages, study references, program details).
|
|
18
|
+
|
|
19
|
+
### 2. Test Every Citation
|
|
20
|
+
|
|
21
|
+
For each citation in the report, visit the URL using web fetch and verify:
|
|
22
|
+
|
|
23
|
+
- [ ] **URL loads** — Is it a real page (not 404, not a redirect to a homepage)?
|
|
24
|
+
- [ ] **Content matches** — Does the source actually say what the report claims? Quote the relevant passage from the source.
|
|
25
|
+
- [ ] **Data is accurate** — Do the numbers in the report match the numbers in the source?
|
|
26
|
+
|
|
27
|
+
Record each citation check in a table:
|
|
28
|
+
|
|
29
|
+
| # | Citation | URL Status | Content Match | Notes |
|
|
30
|
+
|---|----------|-----------|---------------|-------|
|
|
31
|
+
|
|
32
|
+
Status values:
|
|
33
|
+
- **VALID** — URL loads and content matches
|
|
34
|
+
- **BROKEN** — 404, domain not found, or page doesn't load
|
|
35
|
+
- **MISMATCH** — URL loads but doesn't support the claim made in the report
|
|
36
|
+
- **PARTIAL** — URL loads, some claims match, some don't
|
|
37
|
+
- **UNVERIFIABLE** — Paywalled, requires login, or content not accessible
|
|
38
|
+
|
|
39
|
+
### 3. Check for Hallucinations
|
|
40
|
+
|
|
41
|
+
Search the web to verify claims that seem suspicious or unusually specific:
|
|
42
|
+
|
|
43
|
+
- Statistics or percentages that don't appear in any source
|
|
44
|
+
- Named studies, RCTs, or evaluations that can't be found
|
|
45
|
+
- Program details (founding dates, staff names, locations) that contradict other sources
|
|
46
|
+
- Claims about independent evaluations when none exist
|
|
47
|
+
|
|
48
|
+
### 4. Flag Factual Issues
|
|
49
|
+
|
|
50
|
+
For each issue found, log it with severity:
|
|
51
|
+
|
|
52
|
+
- **[SEVERITY: HIGH]** — Wrong numbers, fabricated sources, broken citation URLs, claims contradicted by evidence
|
|
53
|
+
- **[SEVERITY: MEDIUM]** — Misleading framing, outdated data, partially supported claims
|
|
54
|
+
- **[SEVERITY: LOW]** — Minor inaccuracies, rounding differences, ambiguous wording
|
|
55
|
+
|
|
56
|
+
### 5. Write Corrections
|
|
57
|
+
|
|
58
|
+
For each HIGH or MEDIUM issue, write the exact correction:
|
|
59
|
+
|
|
60
|
+
```
|
|
61
|
+
### Correction [N]
|
|
62
|
+
**Location:** [First ~10 words of the problematic passage]
|
|
63
|
+
**Problem:** [What's wrong]
|
|
64
|
+
**Original:** [Exact text to replace]
|
|
65
|
+
**Corrected:** [Fixed text]
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
### 6. Apply Corrections and Write Output
|
|
69
|
+
|
|
70
|
+
Apply all corrections to produce a verified version of the report. Write to:
|
|
71
|
+
`{{ORG_SLUG}}_02_Verified.md`
|
|
72
|
+
|
|
73
|
+
Start the file with a verification log:
|
|
74
|
+
|
|
75
|
+
```markdown
|
|
76
|
+
<!-- Verified: {{ORG_NAME}} | Date: [date] -->
|
|
77
|
+
|
|
78
|
+
# Verification Log
|
|
79
|
+
|
|
80
|
+
## Citation Check Results
|
|
81
|
+
|
|
82
|
+
| # | Citation | URL Status | Content Match | Notes |
|
|
83
|
+
|---|----------|-----------|---------------|-------|
|
|
84
|
+
|
|
85
|
+
## Factual Issues Found
|
|
86
|
+
|
|
87
|
+
- [List each issue with severity]
|
|
88
|
+
|
|
89
|
+
## Corrections Applied
|
|
90
|
+
|
|
91
|
+
- [List each correction made]
|
|
92
|
+
|
|
93
|
+
## Summary
|
|
94
|
+
|
|
95
|
+
- Total citations checked: X
|
|
96
|
+
- Valid: X | Broken: X | Mismatch: X | Partial: X
|
|
97
|
+
- Factual issues: X (High: X, Medium: X, Low: X)
|
|
98
|
+
- Corrections applied: X
|
|
99
|
+
- Overall accuracy: HIGH / MEDIUM / LOW
|
|
100
|
+
|
|
101
|
+
---
|
|
102
|
+
|
|
103
|
+
[Full verified report below]
|
|
104
|
+
```
|
|
105
|
+
|
|
106
|
+
## Quality Checks
|
|
107
|
+
|
|
108
|
+
Before writing the output:
|
|
109
|
+
- [ ] Every citation URL was actually visited and checked
|
|
110
|
+
- [ ] The citation table is complete (no citations skipped)
|
|
111
|
+
- [ ] All HIGH and MEDIUM issues have written corrections
|
|
112
|
+
- [ ] Corrections were applied to the report text
|
|
113
|
+
- [ ] No new content was added (only corrections to existing content)
|
|
114
|
+
- [ ] The verification log accurately reflects all checks performed
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
# Step 3: Humanize — Claude Code Instructions
|
|
2
|
+
|
|
3
|
+
## Inputs
|
|
4
|
+
|
|
5
|
+
- **Org name:** `{{ORG_NAME}}`
|
|
6
|
+
- **Verified report:** Read from `{{ORG_SLUG}}_02_Verified.md`
|
|
7
|
+
- **Writing style guide:** Read from `site/writing-style-guide.md`
|
|
8
|
+
|
|
9
|
+
## Purpose
|
|
10
|
+
|
|
11
|
+
Step 2 verified the facts. This step makes the report sound human. You are an editor whose only job is to remove AI writing patterns and inject natural voice. Do not change the report structure, tables, checklist items, scores, or citations. Edit the prose only.
|
|
12
|
+
|
|
13
|
+
## Instructions
|
|
14
|
+
|
|
15
|
+
### 1. Read the Report and Style Guide
|
|
16
|
+
|
|
17
|
+
Read the verified report (skip the verification log header, work on the content below the `---`).
|
|
18
|
+
|
|
19
|
+
Read the writing style guide. The "AI Decontamination Rules" section is your checklist.
|
|
20
|
+
|
|
21
|
+
### 2. Run Each Pass
|
|
22
|
+
|
|
23
|
+
Work through these checks in order. For each issue found, fix it and log the change.
|
|
24
|
+
|
|
25
|
+
#### Pass 1: Em Dash Removal
|
|
26
|
+
- Search for every `—` (em dash) in the content
|
|
27
|
+
- Replace each with a period (two sentences), comma, or parentheses
|
|
28
|
+
- Two short sentences almost always beat one em-dashed sentence
|
|
29
|
+
- Log count: "Removed X em dashes"
|
|
30
|
+
|
|
31
|
+
#### Pass 2: Sentence Rhythm
|
|
32
|
+
- Flag where 3+ consecutive sentences are roughly the same length (within ~5 words)
|
|
33
|
+
- Fix by splitting, combining, or varying structure
|
|
34
|
+
- Goal: rhythm should vary when read aloud. Short. Then longer. Then medium.
|
|
35
|
+
- Log: "Varied sentence rhythm in X sections"
|
|
36
|
+
|
|
37
|
+
#### Pass 3: Paragraph Cadence
|
|
38
|
+
- Flag sections where consecutive paragraphs follow the same structure (claim then explanation then example, repeated)
|
|
39
|
+
- Vary the pattern: lead with evidence sometimes, skip the explanation, open with a question
|
|
40
|
+
- Log: "Restructured X paragraphs for cadence variety"
|
|
41
|
+
|
|
42
|
+
#### Pass 4: Opening Word Diversity
|
|
43
|
+
- Scan every paragraph's first word. Flag 2+ consecutive paragraphs starting with the same word
|
|
44
|
+
- Common offenders: "The...", "This...", repeated org name, "Pawsperity..." three times in a row
|
|
45
|
+
- Rewrite at least one opener in each flagged group
|
|
46
|
+
- Log: "Diversified openings in X locations"
|
|
47
|
+
|
|
48
|
+
#### Pass 5: AI Pattern Scan
|
|
49
|
+
Check for and fix:
|
|
50
|
+
- [ ] "[Statement]. Not because X — because Y." dramatic structure
|
|
51
|
+
- [ ] "Not just X, but Y" emphasis pattern
|
|
52
|
+
- [ ] "Whether X or Y" parallel constructions
|
|
53
|
+
- [ ] "From X to Y" range statements
|
|
54
|
+
- [ ] "Here's the thing" / "Let's dive in" / "In short" / "Put simply" / "The reality is"
|
|
55
|
+
- [ ] "At its core" / "At the end of the day" / "Fundamentally" as intensifier
|
|
56
|
+
- [ ] "It's worth noting that" / "Importantly" at sentence start
|
|
57
|
+
- [ ] Overused dramatic colon reveals
|
|
58
|
+
- [ ] Overused semicolons
|
|
59
|
+
- Log each pattern found and fixed
|
|
60
|
+
|
|
61
|
+
#### Pass 6: Perfect Parallelism Breaker
|
|
62
|
+
- Find bullet lists where every bullet follows the exact same grammatical structure
|
|
63
|
+
- Vary at least one item's structure (not just words)
|
|
64
|
+
- Don't always group in threes
|
|
65
|
+
- Log: "Broke parallelism in X lists/sections"
|
|
66
|
+
|
|
67
|
+
#### Pass 7: Filler Adjective Sweep
|
|
68
|
+
Search for and remove/replace:
|
|
69
|
+
- "seamless," "robust," "comprehensive," "critical," "fundamental," "innovative," "powerful," "unique," "holistic," "cutting-edge," "game-changing," "revolutionary"
|
|
70
|
+
- "leverage" → "use", "utilize" → "use"
|
|
71
|
+
- Remove minimizers: "simply," "just," "easily"
|
|
72
|
+
- Usually the sentence is stronger without the adjective
|
|
73
|
+
- Log: "Removed X filler adjectives"
|
|
74
|
+
|
|
75
|
+
#### Pass 8: Read-Aloud Test
|
|
76
|
+
- For each Summary Report section (Sections 1-7), simulate reading aloud
|
|
77
|
+
- Flag anything that sounds stilted, overly formal, or robotically even
|
|
78
|
+
- Rewrite flagged sentences to sound like a thoughtful analyst explaining to a colleague
|
|
79
|
+
- Log: "Rewrote X sentences for natural voice"
|
|
80
|
+
|
|
81
|
+
#### Pass 9: Voice Injection
|
|
82
|
+
Add 2-3 human touches across the Summary Report sections:
|
|
83
|
+
- Brief asides showing evaluator judgment ("This is a stronger evidence base than most organizations in this space provide.")
|
|
84
|
+
- Concrete contextualization ("To put this in perspective, the WHO considers X to be the threshold for Y.")
|
|
85
|
+
- Honest assessments where evidence is ambiguous ("The data here is suggestive but not conclusive.")
|
|
86
|
+
- Do NOT overdo this. 2-3 per report max. They should feel like a thoughtful analyst's observations, not a personality transplant.
|
|
87
|
+
- Log each injection with location and what was added
|
|
88
|
+
|
|
89
|
+
### 3. Preserve Report Structure
|
|
90
|
+
|
|
91
|
+
After all passes, verify you did NOT change:
|
|
92
|
+
- [ ] Any markdown heading (##, ###)
|
|
93
|
+
- [ ] Any table structure or table data
|
|
94
|
+
- [ ] The scored checklist items or their checked/unchecked status
|
|
95
|
+
- [ ] The score (X/100)
|
|
96
|
+
- [ ] Citation URLs or citation text inside `[brackets](links)`
|
|
97
|
+
- [ ] The SOURCES section
|
|
98
|
+
- [ ] Section separators (`---`)
|
|
99
|
+
|
|
100
|
+
### 4. Write Output
|
|
101
|
+
|
|
102
|
+
Write to: `{{ORG_SLUG}}_03_Humanized.md`
|
|
103
|
+
|
|
104
|
+
Start with a change log:
|
|
105
|
+
|
|
106
|
+
```markdown
|
|
107
|
+
<!-- Humanized: {{ORG_NAME}} | Date: [date] -->
|
|
108
|
+
|
|
109
|
+
# Humanization Log
|
|
110
|
+
|
|
111
|
+
## Changes by Pass
|
|
112
|
+
- **Em dashes:** Removed [X] instances
|
|
113
|
+
- **Sentence rhythm:** Varied in [X] sections
|
|
114
|
+
- **Paragraph cadence:** Restructured [X] paragraphs
|
|
115
|
+
- **Opening diversity:** Fixed [X] locations
|
|
116
|
+
- **AI patterns:** Found and fixed: [list each pattern]
|
|
117
|
+
- **Parallelism:** Broke in [X] lists/sections
|
|
118
|
+
- **Filler adjectives:** Removed [X] ([list them])
|
|
119
|
+
- **Read-aloud fixes:** Rewrote [X] sentences
|
|
120
|
+
- **Voice injections:** Added [X] ([brief description of each])
|
|
121
|
+
|
|
122
|
+
## Structure Verification
|
|
123
|
+
- [ ] Headings unchanged
|
|
124
|
+
- [ ] Tables unchanged
|
|
125
|
+
- [ ] Checklist and score unchanged
|
|
126
|
+
- [ ] Citations unchanged
|
|
127
|
+
- [ ] Sources section unchanged
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
[Full humanized report below]
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
## Quality Checks
|
|
135
|
+
|
|
136
|
+
Before writing the output:
|
|
137
|
+
- [ ] Zero em dashes remain in the content
|
|
138
|
+
- [ ] No two consecutive paragraphs start with the same word
|
|
139
|
+
- [ ] No AI pattern from the tells list remains
|
|
140
|
+
- [ ] At least 2 voice injections added (but no more than 3)
|
|
141
|
+
- [ ] Report structure is identical to the input
|
|
142
|
+
- [ ] Content reads like a human analyst wrote it
|
|
143
|
+
- [ ] The change log accurately reflects all changes made
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
# Step 4: Peer Review -- Claude Code Instructions
|
|
2
|
+
|
|
3
|
+
## Inputs
|
|
4
|
+
|
|
5
|
+
- **Report to review:** Provided by the `get_peer_review` MCP tool
|
|
6
|
+
- **Research guidance:** The same methodology from step 1
|
|
7
|
+
- **Writing style guide:** The same decontamination rules from step 3
|
|
8
|
+
|
|
9
|
+
## Purpose
|
|
10
|
+
|
|
11
|
+
You are reviewing another contributor's research report. Your job is to verify quality and catch problems before a human reviewer sees it. You are NOT the original researcher -- you are a second pair of eyes.
|
|
12
|
+
|
|
13
|
+
## Instructions
|
|
14
|
+
|
|
15
|
+
### 1. Read the Full Report
|
|
16
|
+
|
|
17
|
+
Read the entire report carefully. Note the org name, the scored checklist, and the overall recommendation.
|
|
18
|
+
|
|
19
|
+
### 2. Spot-Check Citations (3-5)
|
|
20
|
+
|
|
21
|
+
Pick 3-5 citation URLs from the report. For each:
|
|
22
|
+
- Visit the URL using web fetch
|
|
23
|
+
- Verify the page exists (not 404)
|
|
24
|
+
- Check that the source says what the report claims
|
|
25
|
+
|
|
26
|
+
### 3. Check Report Structure
|
|
27
|
+
|
|
28
|
+
Verify:
|
|
29
|
+
- [ ] All 5 prompt sections present (PROMPT 1-5)
|
|
30
|
+
- [ ] All 7 summary sections present (Sections 1-7)
|
|
31
|
+
- [ ] SOURCES section exists with citations
|
|
32
|
+
- [ ] Tables in Prompts 2-5 have content
|
|
33
|
+
- [ ] Scored checklist is present with score calculated correctly
|
|
34
|
+
|
|
35
|
+
### 4. Evaluate Scoring
|
|
36
|
+
|
|
37
|
+
Compare the checklist against the evidence:
|
|
38
|
+
- Are checked items supported by evidence in the report?
|
|
39
|
+
- Are unchecked items correctly unchecked (no evidence was found)?
|
|
40
|
+
- Does the score math add up (checked items x weights = stated score)?
|
|
41
|
+
|
|
42
|
+
### 5. Look for Red Flags
|
|
43
|
+
|
|
44
|
+
- Suspiciously specific numbers with no citation
|
|
45
|
+
- Studies or evaluations that seem fabricated
|
|
46
|
+
- Copy-pasted content or generic filler
|
|
47
|
+
- Sections that are empty or trivially short
|
|
48
|
+
- Claims that contradict other parts of the report
|
|
49
|
+
|
|
50
|
+
### 6. Assign a Score
|
|
51
|
+
|
|
52
|
+
| Score | When to use |
|
|
53
|
+
|-------|------------|
|
|
54
|
+
| **4 -- Great** | Report is thorough, citations check out, scoring is correct. No changes needed. |
|
|
55
|
+
| **3 -- Good with fixes** | Minor issues you can fix: broken citation, wrong score math, awkward phrasing, a checklist item that should be toggled. **Fix the issues yourself** and submit the corrected report. |
|
|
56
|
+
| **2 -- Needs redo** | Major problems: thin evidence across multiple sections, significant hallucinations, missing sections, fundamentally wrong scoring. Not fixable with minor edits. |
|
|
57
|
+
| **1 -- Bad actor** | Garbage: copy-pasted nonsense, completely fabricated data, obvious gaming attempt. This flags the original author. Use sparingly and only when clearly warranted. |
|
|
58
|
+
|
|
59
|
+
### 7. Submit Your Review
|
|
60
|
+
|
|
61
|
+
Use `submit_peer_review` with:
|
|
62
|
+
- `claim_id`: The claim ID from `get_peer_review`
|
|
63
|
+
- `score`: Your score (1-4)
|
|
64
|
+
- `notes`: Brief explanation of your score
|
|
65
|
+
- `updated_report`: If score is 3, include the full fixed report
|
|
66
|
+
|
|
67
|
+
## Important Rules
|
|
68
|
+
|
|
69
|
+
- Be fair. Most reports should score 3 or 4.
|
|
70
|
+
- Score 2 is for genuinely bad reports, not minor style preferences.
|
|
71
|
+
- Score 1 is for abuse. If you're unsure, use 2 instead.
|
|
72
|
+
- If you spot-check a citation and it's broken, that alone is a 3 (fix it), not a 2.
|
|
73
|
+
- Don't rewrite the report to match your style. Fix factual errors, not opinions.
|
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
// HTTP client for the Fierce Philanthropy coordination API.

const BASE_URL = process.env.FIERCE_API_URL || 'https://fierce-philanthropy-directory.laravel.cloud/api';

export class ApiClient {
  /**
   * @param {string} apiKey - Contributor key, sent on every request as the
   *   X-TFG-Api-Key header.
   * @throws {Error} When no key is provided.
   */
  constructor(apiKey) {
    this.apiKey = apiKey;
    if (!apiKey) {
      throw new Error('TFG_API_KEY environment variable is required. Get your key at https://fierce-philanthropy-directory.laravel.cloud/contribute');
    }
  }

  /**
   * Perform an authenticated JSON request against the API.
   *
   * @param {string} method - HTTP method ('GET', 'POST', ...).
   * @param {string} path - Path appended verbatim to BASE_URL.
   * @param {object|null} [body] - JSON-serializable request body, if any.
   * @returns {Promise<any>} Parsed JSON response body.
   * @throws {Error} On a non-2xx response; the error carries `.status`
   *   (HTTP status code) and `.data` (parsed body, or null if not JSON).
   */
  async request(method, path, body = null) {
    const url = `${BASE_URL}${path}`;
    const options = {
      method,
      headers: {
        'X-TFG-Api-Key': this.apiKey,
        'Content-Type': 'application/json',
        'Accept': 'application/json',
      },
    };

    if (body) {
      options.body = JSON.stringify(body);
    }

    const response = await fetch(url, options);

    // Parse defensively: gateway/error pages may not be JSON, and parsing
    // them unconditionally would surface a SyntaxError that masks the real
    // HTTP status.
    let data = null;
    try {
      data = await response.json();
    } catch {
      // Non-JSON or empty body; fall through with data = null.
    }

    if (!response.ok) {
      const error = new Error(
        (data && (data.error || data.message)) || `API error ${response.status}`,
      );
      error.status = response.status;
      error.data = data;
      throw error;
    }

    return data;
  }

  /** Claim the next unresearched org; optionally report the agent platform. */
  async claimOrg(platform = null) {
    return this.request('POST', '/research/claim', { platform });
  }

  /** Submit a finished research report for a previously claimed org. */
  async submitReport(claimId, reportMarkdown, tokenUsage = null, metrics = null, modelUsed = null) {
    return this.request('POST', '/research/submit', {
      claim_id: claimId,
      report_markdown: reportMarkdown,
      token_usage: tokenUsage,
      metrics: metrics,
      model_used: modelUsed,
    });
  }

  /** Release a claim without submitting, returning the org to the queue. */
  async releaseClaim(claimId) {
    return this.request('POST', '/research/release', { claim_id: claimId });
  }

  /** Fetch the next report awaiting peer review. */
  async getNextPeerReview() {
    return this.request('GET', '/research/review/next');
  }

  /**
   * Submit a peer review verdict.
   * @param {string} claimId - Claim ID from getNextPeerReview().
   * @param {number} score - Review score (1-4).
   * @param {string|null} [notes] - Brief justification for the score.
   * @param {string|null} [updatedReport] - Full corrected report (score 3).
   */
  async submitPeerReview(claimId, score, notes = null, updatedReport = null) {
    return this.request('POST', '/research/review/submit', {
      claim_id: claimId,
      score,
      notes,
      updated_report: updatedReport,
    });
  }

  /**
   * Fetch public project status. No auth header is sent, but failures are
   * still surfaced as errors rather than JSON parse crashes.
   */
  async getStatus() {
    // Status is public, no auth needed
    const response = await fetch(`${BASE_URL}/research/status`, {
      headers: { 'Accept': 'application/json' },
    });
    if (!response.ok) {
      const error = new Error(`API error ${response.status}`);
      error.status = response.status;
      throw error;
    }
    return response.json();
  }

  /** Fetch this contributor's impact stats (requires auth). */
  async getImpact() {
    return this.request('GET', '/research/impact');
  }

  /** Check whether scheduled/automated contribution is due. */
  async checkSchedule() {
    return this.request('GET', '/research/schedule-check');
  }
}
|
package/src/cli.js
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
#!/usr/bin/env node

// CLI entry point for tokens-for-good
// Usage:
//   npx tokens-for-good --mcp     Start as MCP server (default)
//   npx tokens-for-good --status  Show project status
//   npx tokens-for-good --impact  Show your contribution stats

const args = process.argv.slice(2);

if (args.includes('--status')) {
  // Project status is public; a placeholder key suffices when none is set.
  const { ApiClient } = await import('./api-client.js');
  try {
    const client = new ApiClient(process.env.TFG_API_KEY || 'public');
    const status = await client.getStatus();
    console.log('\nTokens for Good - Project Status\n');
    console.log(`Total orgs: ${status.total_orgs}`);
    console.log(`Pending research: ${status.pending_orgs}`);
    console.log(`Active contributors (7d): ${status.active_contributors_7d}`);
    console.log('\nQueue:');
    for (const [k, v] of Object.entries(status.queue || {})) {
      console.log(`  ${k}: ${v}`);
    }
    console.log('\nTop Contributors:');
    (status.top_contributors || []).forEach((c, i) => {
      console.log(`  ${i + 1}. @${c.github_handle} (${c.total_orgs} orgs, ${c.tier})`);
    });
  } catch (err) {
    console.error('Error:', err.message);
    process.exitCode = 1; // fix: signal failure to the shell instead of exiting 0
  }
} else if (args.includes('--impact')) {
  // Personal stats require TFG_API_KEY (ApiClient may throw when it is missing).
  const { ApiClient } = await import('./api-client.js');
  try {
    const client = new ApiClient(process.env.TFG_API_KEY);
    const result = await client.getImpact();
    const c = result.contributor;
    console.log(`\nYour Impact (@${c.github_handle})\n`);
    console.log(`Tier: ${c.tier}`);
    console.log(`Orgs researched: ${c.total_orgs}`);
    console.log(`Acceptance rate: ${c.acceptance_rate}%`);
    console.log(`Automation: ${c.has_schedule ? 'Active' : 'Not set up'}`);
  } catch (err) {
    console.error('Error:', err.message);
    process.exitCode = 1; // fix: signal failure to the shell instead of exiting 0
  }
} else {
  // Default: start MCP server
  await import('./mcp-server.js');
}
|
|
@@ -0,0 +1,334 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
|
|
3
|
+
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
|
|
4
|
+
import { z } from 'zod';
|
|
5
|
+
import { ApiClient } from './api-client.js';
|
|
6
|
+
import { detectPlatform, isSchedulable, getAutomationInstructions } from './platform.js';
|
|
7
|
+
import { loadState, updateState, isSnoozed, snoozeDays, hasContributedToday, markContributed } from './state.js';
|
|
8
|
+
import { readFileSync } from 'fs';
|
|
9
|
+
import { join, dirname } from 'path';
|
|
10
|
+
import { fileURLToPath } from 'url';
|
|
11
|
+
|
|
12
|
+
// Resolve the pipeline prompt directory relative to this module (ESM has no __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
const PIPELINE_DIR = join(__dirname, '..', 'pipeline');

// The API key comes from the environment. ApiClient construction is guarded:
// if it throws (e.g. missing key), `client` stays null and each tool handler
// reports the setup error instead of the whole server failing to start.
const apiKey = process.env.TFG_API_KEY;
let client;
try {
  client = new ApiClient(apiKey);
} catch {
  // Will fail on tool calls, but server can still start
  client = null;
}

// Detect the hosting AI tool once at startup and persist it in local state.
const platform = detectPlatform();
updateState({ platform });

const server = new McpServer({
  name: 'tokens-for-good',
  version: '0.1.0',
});
|
|
31
|
+
|
|
32
|
+
// --- No-key onboarding message ---
|
|
33
|
+
|
|
34
|
+
const NO_KEY_INSTRUCTIONS = `This user has installed the Tokens for Good MCP server but hasn't set up their API key yet.
|
|
35
|
+
|
|
36
|
+
Walk them through setup:
|
|
37
|
+
|
|
38
|
+
1. **Create an account:** Go to https://fierce-philanthropy-directory.laravel.cloud/contribute and click "Sign up with GitHub" (one click, free).
|
|
39
|
+
|
|
40
|
+
2. **Copy the API key:** After signing up, they'll see an API key starting with \`tfg_live_\`. Copy it immediately -- it's only shown once.
|
|
41
|
+
|
|
42
|
+
3. **Add the key to their MCP config:** Update their tokens-for-good MCP configuration to include the key as an environment variable:
|
|
43
|
+
|
|
44
|
+
For Claude Code (.mcp.json or settings.json):
|
|
45
|
+
\`\`\`json
|
|
46
|
+
{
|
|
47
|
+
"mcpServers": {
|
|
48
|
+
"tokens-for-good": {
|
|
49
|
+
"command": "npx",
|
|
50
|
+
"args": ["-y", "tokens-for-good", "--mcp"],
|
|
51
|
+
"env": { "TFG_API_KEY": "tfg_live_their_key_here" }
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
\`\`\`
|
|
56
|
+
|
|
57
|
+
For Opencode (opencode.json):
|
|
58
|
+
\`\`\`json
|
|
59
|
+
{
|
|
60
|
+
"mcp": {
|
|
61
|
+
"tokens-for-good": {
|
|
62
|
+
"type": "local",
|
|
63
|
+
"command": ["npx", "-y", "tokens-for-good", "--mcp"],
|
|
64
|
+
"environment": { "TFG_API_KEY": "tfg_live_their_key_here" }
|
|
65
|
+
}
|
|
66
|
+
}
|
|
67
|
+
}
|
|
68
|
+
\`\`\`
|
|
69
|
+
|
|
70
|
+
For Cursor (.cursor/mcp.json):
|
|
71
|
+
\`\`\`json
|
|
72
|
+
{
|
|
73
|
+
"mcpServers": {
|
|
74
|
+
"tokens-for-good": {
|
|
75
|
+
"command": "npx",
|
|
76
|
+
"args": ["-y", "tokens-for-good", "--mcp"],
|
|
77
|
+
"env": { "TFG_API_KEY": "tfg_live_their_key_here" }
|
|
78
|
+
}
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
\`\`\`
|
|
82
|
+
|
|
83
|
+
4. **Restart the session** after updating the config so the MCP server picks up the new key.
|
|
84
|
+
|
|
85
|
+
5. **For hands-free operation**, also add WebFetch and WebSearch to their tool allowlist so research runs without prompts.
|
|
86
|
+
|
|
87
|
+
Once set up, they can say "Research an org for Fierce Philanthropy" and the AI does the rest. Each org takes ~5 minutes and costs ~$0.20 in tokens.
|
|
88
|
+
|
|
89
|
+
**What is Tokens for Good?**
|
|
90
|
+
Tokens for Good lets developers donate their spare AI subscription tokens to research nonprofit organizations for Fierce Philanthropy's social impact directory. It's like Folding@Home but for AI tokens -- crowdsourced compute for social good. Contributors get credit on a public leaderboard and on the org pages they research.`;
|
|
91
|
+
|
|
92
|
+
// --- Resources ---
|
|
93
|
+
|
|
94
|
+
// Static "about" resource describing the project.
// Fix: the metadata argument was a bare string ('text/plain'); the MCP SDK
// expects a ResourceMetadata object, so pass { mimeType } and also declare
// the mimeType on the returned contents entry.
server.resource('about', 'tokens-for-good://about', { mimeType: 'text/plain' }, async () => ({
  contents: [{
    uri: 'tokens-for-good://about',
    mimeType: 'text/plain',
    text: `Tokens for Good - Donate Your Spare AI Tokens to Research Nonprofits

What: An MCP server that lets AI coding tool users (Claude Code, Opencode, Cursor, Windsurf, Devin) contribute their spare subscription tokens to research nonprofit organizations for Fierce Philanthropy's social impact directory.

How it works:
1. Sign up at https://fierce-philanthropy-directory.laravel.cloud/contribute (GitHub OAuth)
2. Get your API key, add it to your MCP config as TFG_API_KEY
3. Say "Research an org for Fierce Philanthropy"
4. Your AI claims an org, researches it (web search + analysis), verifies citations, humanizes the writing, and submits the report
5. Another contributor's AI peer-reviews your report
6. A human reviewer finalizes it for the directory

Research pipeline (3 steps per org, all done by your AI):
- Step 1: Research -- web search, 6-prompt methodology, scored checklist (100 pts)
- Step 2: Verify -- check every citation URL, flag hallucinations, correct errors
- Step 3: Humanize -- 9-pass AI decontamination (remove em dashes, filler adjectives, vary rhythm, inject analyst voice)

Contributor tiers:
- New: first 5 orgs, easy orgs only
- Bronze: 5+ orgs
- Silver: 25+ orgs, >80% acceptance rate
- Gold: 100+ orgs, >90% acceptance rate

Automation: On Claude Code, use /schedule to auto-contribute daily. On Opencode, set up a system cron. On Cursor/Windsurf, contribute manually when prompted.

Cost: ~$0.15-0.25 per org in tokens. Scale: 750K+ US nonprofits to research.`,
  }],
}));
|
|
125
|
+
|
|
126
|
+
// --- Tools ---
|
|
127
|
+
|
|
128
|
+
// Tool: claim_org -- reserve the next queued org for this contributor.
server.tool(
  'claim_org',
  'Claim the next available nonprofit org to research. Blocked if you have a pending peer review.',
  {
    platform: z.string().optional().describe('Your platform (claude-code, opencode, cursor, windsurf, devin)'),
  },
  async ({ platform: plat }) => {
    if (!client) {
      return { content: [{ type: 'text', text: 'Error: TFG_API_KEY not set. Get your key at https://fierce-philanthropy-directory.laravel.cloud/contribute' }] };
    }
    try {
      // Prefer the caller-supplied platform, else the one detected at startup.
      const result = await client.claimOrg(plat || platform);
      const lines = [
        `Claimed: ${result.org.name}`,
        `URL: ${result.org.url}`,
        `Description: ${result.org.description || 'N/A'}`,
        `Source: ${result.org.source || 'N/A'}`,
        `Claim ID: ${result.claim_id}`,
        `Expires: ${result.expires_at}`,
        '',
        'Now research this org following the methodology in get_methodology.',
      ];
      return { content: [{ type: 'text', text: lines.join('\n') }] };
    } catch (err) {
      return { content: [{ type: 'text', text: `Error: ${err.message}` }] };
    }
  },
);
|
|
142
|
+
|
|
143
|
+
// Tool: get_methodology -- serve one of the bundled pipeline prompt files.
server.tool(
  'get_methodology',
  'Get the full research methodology, verification instructions, or humanization instructions.',
  {
    step: z.enum(['research', 'verify', 'humanize', 'peer-review']).describe('Which pipeline step to get instructions for'),
  },
  async ({ step }) => {
    // Each pipeline step maps to a prompt file shipped with the package.
    const promptFiles = {
      research: '01-research/PROMPT.md',
      verify: '02-verify/PROMPT.md',
      humanize: '03-humanize/PROMPT.md',
      'peer-review': '04-peer-review/PROMPT.md',
    };
    try {
      const text = readFileSync(join(PIPELINE_DIR, promptFiles[step]), 'utf-8');
      return { content: [{ type: 'text', text }] };
    } catch {
      return { content: [{ type: 'text', text: `Error: Could not load ${step} methodology file.` }] };
    }
  },
);
|
|
160
|
+
|
|
161
|
+
// Tool: submit_report -- upload a finished report for a claimed org.
server.tool(
  'submit_report',
  'Submit a completed research report for an org you claimed.',
  {
    claim_id: z.number().describe('The claim ID from claim_org'),
    report_markdown: z.string().describe('The full research report in markdown'),
    model_used: z.string().optional().describe('The model that generated this report'),
  },
  async ({ claim_id, report_markdown, model_used }) => {
    if (!client) {
      return { content: [{ type: 'text', text: 'Error: TFG_API_KEY not set.' }] };
    }
    try {
      const result = await client.submitReport(claim_id, report_markdown, null, null, model_used);
      markContributed(); // record a local "contributed today" timestamp
      const stats = result.contributor_stats;
      const summary = `Report submitted for ${result.org_name}!\n\nYour stats:\n- Total orgs: ${stats.total_orgs}\n- Tier: ${stats.tier}\n- Orgs remaining: ${result.orgs_remaining}\n\nYour report will now go through peer review. Thank you for contributing!`;
      return { content: [{ type: 'text', text: summary }] };
    } catch (err) {
      const details = err.data?.validation_errors ? '\n' + err.data.validation_errors.join('\n') : '';
      return { content: [{ type: 'text', text: `Submit error: ${err.message}${details}` }] };
    }
  },
);
|
|
178
|
+
|
|
179
|
+
// Tool: get_peer_review -- fetch the draft report assigned for review.
server.tool(
  'get_peer_review',
  'Get a draft report assigned to you for peer review. You must complete peer reviews before claiming new orgs.',
  {},
  async () => {
    if (!client) {
      return { content: [{ type: 'text', text: 'Error: TFG_API_KEY not set.' }] };
    }
    try {
      const result = await client.getNextPeerReview();
      const header = `Peer review assigned:\nOrg: ${result.org.name}\nAuthor: @${result.author}\nClaim ID: ${result.claim_id}`;
      const rubric = 'Review this report. Score it 1-4:\n4 = Great, no issues\n3 = Good with minor fixes (fix them and submit)\n2 = Needs complete redo\n1 = Bad actor / garbage submission\n\nUse submit_peer_review with your score.';
      const text = `${header}\n\n---\n\n${result.report_markdown}\n\n---\n\n${rubric}`;
      return { content: [{ type: 'text', text }] };
    } catch (err) {
      // The API uses 404 to mean "nothing assigned right now".
      if (err.status === 404) {
        return { content: [{ type: 'text', text: 'No peer reviews assigned to you right now.' }] };
      }
      return { content: [{ type: 'text', text: `Error: ${err.message}` }] };
    }
  },
);
|
|
194
|
+
|
|
195
|
+
// Tool: submit_peer_review -- record a review verdict for a draft report.
server.tool(
  'submit_peer_review',
  'Submit your peer review score for a report.',
  {
    claim_id: z.number().describe('The claim ID of the report being reviewed'),
    score: z.number().min(1).max(4).describe('Score: 4=great, 3=good with fixes, 2=needs redo, 1=bad actor'),
    notes: z.string().optional().describe('Review notes explaining the score'),
    updated_report: z.string().optional().describe('If score is 3, the fixed version of the report'),
  },
  async ({ claim_id, score, notes, updated_report }) => {
    if (!client) {
      return { content: [{ type: 'text', text: 'Error: TFG_API_KEY not set.' }] };
    }
    try {
      const result = await client.submitPeerReview(claim_id, score, notes, updated_report);
      const text = `Peer review submitted for ${result.org_name}.\nScore: ${result.score}/4\n\nYou can now claim a new org to research.`;
      return { content: [{ type: 'text', text }] };
    } catch (err) {
      return { content: [{ type: 'text', text: `Error: ${err.message}` }] };
    }
  },
);
|
|
212
|
+
|
|
213
|
+
// Tool: research_status -- public progress snapshot and leaderboard.
server.tool(
  'research_status',
  'See the overall Tokens for Good project progress and leaderboard.',
  {},
  async () => {
    try {
      // The status endpoint is public, so a placeholder key works when none is set.
      const statusClient = client ?? new ApiClient('dummy');
      const result = await statusClient.getStatus();
      const leaderboard = result.top_contributors?.map(
        (c, i) => `${i + 1}. @${c.github_handle} (${c.total_orgs} orgs, ${c.tier})`,
      ).join('\n') || 'No contributors yet';
      const queueLines = Object.entries(result.queue || {})
        .map(([k, v]) => `  ${k}: ${v}`)
        .join('\n');
      const text = `Tokens for Good Progress:\n\nTotal orgs: ${result.total_orgs}\nPending research: ${result.pending_orgs}\nActive contributors (7d): ${result.active_contributors_7d}\n\nQueue:\n${queueLines}\n\nTop Contributors:\n${leaderboard}`;
      return { content: [{ type: 'text', text }] };
    } catch (err) {
      return { content: [{ type: 'text', text: `Error: ${err.message}` }] };
    }
  },
);
|
|
228
|
+
|
|
229
|
+
// Tool: my_impact -- the contributor's personal stats.
// Fix: guard total_tokens with ?? 0 so a missing field prints "~$0.00"
// instead of "~$NaN".
server.tool('my_impact', 'See your personal contribution stats, tier, and history.', {}, async () => {
  if (!client) return { content: [{ type: 'text', text: 'Error: TFG_API_KEY not set.' }] };

  try {
    const result = await client.getImpact();
    const c = result.contributor;
    // Rough $3-per-million-tokens estimate of the donated compute.
    const estimatedCost = (((c.total_tokens ?? 0) / 1_000_000) * 3).toFixed(2);

    return {
      content: [{ type: 'text', text: `Your Impact (@${c.github_handle}):\n\nTier: ${c.tier}\nOrgs researched: ${c.total_orgs}\nEstimated donation: ~$${estimatedCost}\nAcceptance rate: ${c.acceptance_rate}%\nAutomation: ${c.has_schedule ? 'Active' : 'Not set up'}\n\nRecent:\n${result.claims?.slice(0, 5).map(cl => `  ${cl.organization?.name || 'Unknown'} - ${cl.status}`).join('\n') || 'None'}` }],
    };
  } catch (err) {
    return { content: [{ type: 'text', text: `Error: ${err.message}` }] };
  }
});
|
|
244
|
+
|
|
245
|
+
// Tool: setup_guide -- return the onboarding walkthrough verbatim.
server.tool(
  'setup_guide',
  'Get setup instructions for Tokens for Good. Use this if the user needs help with installation, API keys, or configuration.',
  {},
  async () => ({ content: [{ type: 'text', text: NO_KEY_INSTRUCTIONS }] }),
);
|
|
248
|
+
|
|
249
|
+
// Tool: setup_automation -- platform-specific scheduling instructions.
server.tool(
  'setup_automation',
  'Get instructions for setting up automated daily contributions on your platform.',
  {
    frequency: z.enum(['hourly', 'daily', 'weekly']).optional().describe('How often to contribute'),
  },
  async ({ frequency }) => {
    const text = getAutomationInstructions(platform, frequency ?? 'daily');
    return { content: [{ type: 'text', text }] };
  },
);
|
|
255
|
+
|
|
256
|
+
// --- Prompts (session start) ---
|
|
257
|
+
|
|
258
|
+
// Prompt: session_start -- decide what (if anything) to nudge the user about.
// Order: missing key -> pending peer review -> snoozed -> auto-schedule ->
// already contributed today -> platform-appropriate invitation.
// Fixes vs. original: removed the redundant `if (client)` (always true after
// the early return above), the unused `review` binding, and the unnecessary
// `client?.` optional chaining.
server.prompt('session_start', 'Check if you should research an org or complete a peer review', {}, async () => {
  // No API key -- guide through setup
  if (!client) {
    return {
      messages: [{
        role: 'user',
        content: { type: 'text', text: NO_KEY_INSTRUCTIONS },
      }],
    };
  }

  const state = loadState();

  // Check for pending peer review first; the endpoint throws (404) when none.
  try {
    await client.getNextPeerReview();
    return {
      messages: [{
        role: 'user',
        content: { type: 'text', text: `You have a pending peer review to complete before you can claim a new org. Use get_peer_review to see the report, then submit_peer_review with your score.` },
      }],
    };
  } catch {
    // No pending review, continue
  }

  if (isSnoozed()) {
    return { messages: [{ role: 'user', content: { type: 'text', text: 'Tokens for Good is snoozed. No action needed.' } }] };
  }

  if (state.auto_schedule) {
    try {
      const impact = await client.getImpact();
      const c = impact?.contributor;
      return {
        messages: [{
          role: 'user',
          content: { type: 'text', text: `Tokens for Good: You're auto-contributing. ${c?.total_orgs || 0} orgs researched so far. Tier: ${c?.tier || 'new'}.` },
        }],
      };
    } catch {
      return { messages: [{ role: 'user', content: { type: 'text', text: 'Tokens for Good: Auto-contributions active.' } }] };
    }
  }

  if (hasContributedToday()) {
    return { messages: [{ role: 'user', content: { type: 'text', text: 'Tokens for Good: You already contributed today. Nice work!' } }] };
  }

  // Show the session start prompt
  if (isSchedulable(platform)) {
    return {
      messages: [{
        role: 'user',
        content: { type: 'text', text: `Tokens for Good: Would you like to donate your spare tokens to research a nonprofit today?\n\n1. Set up automatic daily contributions (recommended)\n2. Just run one now\n3. Ask me tomorrow\n4. Ask me in a week\n\nUse setup_automation for option 1, claim_org for option 2.` },
      }],
    };
  } else {
    return {
      messages: [{
        role: 'user',
        content: { type: 'text', text: `Tokens for Good: Would you like to research a nonprofit org today? It takes about 5 minutes and costs ~$0.20 in tokens.\n\n1. Research an org now\n2. Ask me tomorrow\n3. Ask me in a week\n\nUse claim_org for option 1.` },
      }],
    };
  }
});
|
|
326
|
+
|
|
327
|
+
// --- Start ---
|
|
328
|
+
|
|
329
|
+
/** Connect the MCP server to stdio and keep it running. */
async function main() {
  await server.connect(new StdioServerTransport());
}

main().catch(console.error);
|
package/src/platform.js
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
// Platform detection and automation guidance
|
|
2
|
+
|
|
3
|
+
/**
 * Best-effort detection of the AI coding tool hosting this MCP server.
 * Explicit env flags win; otherwise substring-match the parent process name.
 * @returns {string} 'claude-code' | 'opencode' | 'devin' | 'cursor' | 'windsurf' | 'unknown'
 */
export function detectPlatform() {
  // Explicit environment hints take priority, in this order.
  const envHints = [
    ['CLAUDE_CODE', 'claude-code'],
    ['OPENCODE', 'opencode'],
    ['DEVIN', 'devin'],
    ['CURSOR_SESSION', 'cursor'],
    ['WINDSURF_SESSION', 'windsurf'],
  ];
  for (const [envVar, name] of envHints) {
    if (process.env[envVar]) return name;
  }

  // Fall back to the parent process name (shell `_` variable or explicit override).
  const parentName = process.env._ || process.env.PARENT_PROCESS || '';
  const needles = [
    ['claude', 'claude-code'],
    ['opencode', 'opencode'],
    ['cursor', 'cursor'],
    ['windsurf', 'windsurf'],
  ];
  for (const [needle, name] of needles) {
    if (parentName.includes(needle)) return name;
  }

  return 'unknown';
}
|
|
20
|
+
|
|
21
|
+
/** Whether the given platform supports scheduled (automated) contributions. */
export function isSchedulable(platform) {
  const schedulable = new Set(['claude-code', 'opencode', 'devin']);
  return schedulable.has(platform);
}
|
|
24
|
+
|
|
25
|
+
/**
 * Human-readable instructions for automating contributions on a platform.
 * @param {string} platform - Detected platform identifier.
 * @param {string} [frequency='daily'] - 'hourly' | 'daily' | 'weekly'.
 * @returns {string} Instruction text (a "not available" note for other platforms).
 */
export function getAutomationInstructions(platform, frequency = 'daily') {
  if (platform === 'claude-code') {
    return `Set up automated contributions with Claude Code's built-in scheduling:

Run: /schedule ${frequency}

When prompted for the task, use:
"Research a nonprofit org for Fierce Philanthropy using the tokens-for-good MCP tools. Claim an org, research it, then submit the report."

This runs on Anthropic's cloud infrastructure. Your machine doesn't need to be on.`;
  }
  if (platform === 'opencode') {
    return `Set up automated contributions with a system cron job:

Add this to your crontab (crontab -e):
${getCronExpression(frequency)} cd /path/to/workspace && opencode run "Research a nonprofit org for Fierce Philanthropy using the tokens-for-good MCP tools. Claim an org, research it, then submit the report."

Your machine must be on for cron jobs to run.`;
  }
  if (platform === 'devin') {
    return `Set up a recurring Devin session to contribute automatically.
Configure a ${frequency} recurring session with the prompt:
"Research a nonprofit org for Fierce Philanthropy using the tokens-for-good MCP tools."

Devin runs in the cloud, fully autonomous.`;
  }
  return `Automated contributions are not available on this platform. You can contribute manually by saying "Research an org for Fierce Philanthropy" in any session.`;
}
|
|
56
|
+
|
|
57
|
+
// Map a contribution frequency to a crontab schedule expression.
// Unrecognized values fall back to the daily schedule (02:00).
function getCronExpression(frequency) {
  const schedules = new Map([
    ['hourly', '0 * * * *'],
    ['daily', '0 2 * * *'],
    ['weekly', '0 2 * * 1'],
  ]);
  return schedules.get(frequency) ?? '0 2 * * *';
}
|
package/src/state.js
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
// Local state management for contributor session tracking
|
|
2
|
+
import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs';
|
|
3
|
+
import { join } from 'path';
|
|
4
|
+
import { homedir } from 'os';
|
|
5
|
+
|
|
6
|
+
// All local state lives in ~/.tokens-for-good/state.json.
const STATE_DIR = join(homedir(), '.tokens-for-good');
const STATE_FILE = join(STATE_DIR, 'state.json');

// Baseline shape merged under whatever is read from disk (see loadState).
const DEFAULT_STATE = {
  last_contributed: null,          // ISO timestamp set by markContributed()
  snooze_until: null,              // ISO timestamp; prompts suppressed while in the future
  auto_schedule: false,            // checked by the session_start prompt
  platform: null,                  // detected host platform, written at server startup
  total_session_contributions: 0,  // local count bumped by markContributed()
};
|
|
16
|
+
|
|
17
|
+
/**
 * Read persisted state from disk, merged over DEFAULT_STATE so new fields
 * get sensible defaults. Falls back to pure defaults when the file is
 * missing or unparseable.
 */
export function loadState() {
  try {
    if (existsSync(STATE_FILE)) {
      const persisted = JSON.parse(readFileSync(STATE_FILE, 'utf-8'));
      return { ...DEFAULT_STATE, ...persisted };
    }
  } catch {
    // Corrupted state file -- fall through to defaults.
  }
  return { ...DEFAULT_STATE };
}
|
|
28
|
+
|
|
29
|
+
/** Persist state as pretty-printed JSON, creating the state dir on first use. */
export function saveState(state) {
  if (!existsSync(STATE_DIR)) {
    mkdirSync(STATE_DIR, { recursive: true });
  }
  const serialized = JSON.stringify(state, null, 2);
  writeFileSync(STATE_FILE, serialized, 'utf-8');
}
|
|
35
|
+
|
|
36
|
+
/**
 * Shallow-merge `updates` into the persisted state, save, and return the result.
 */
export function updateState(updates) {
  const merged = { ...loadState(), ...updates };
  saveState(merged);
  return merged;
}
|
|
42
|
+
|
|
43
|
+
/** True while the persisted snooze_until timestamp lies in the future. */
export function isSnoozed() {
  const { snooze_until } = loadState();
  return snooze_until ? new Date(snooze_until) > new Date() : false;
}
|
|
48
|
+
|
|
49
|
+
/** Suppress contribution prompts until the given Date. */
export function snoozeUntil(date) {
  const snooze_until = date.toISOString();
  updateState({ snooze_until });
}
|
|
52
|
+
|
|
53
|
+
/** Snooze contribution prompts for `days` days from now. */
export function snoozeDays(days) {
  const target = new Date();
  target.setDate(target.getDate() + days);
  snoozeUntil(target);
}
|
|
58
|
+
|
|
59
|
+
/** True if the last recorded contribution happened on today's local date. */
export function hasContributedToday() {
  const { last_contributed } = loadState();
  if (!last_contributed) return false;
  const today = new Date().toDateString();
  return new Date(last_contributed).toDateString() === today;
}
|
|
65
|
+
|
|
66
|
+
/**
 * Record a contribution: stamp last_contributed with the current time and
 * bump the local contribution counter.
 * Reads the state file once (the original called loadState() inside the
 * updateState() argument, re-reading the file a second time inside updateState).
 */
export function markContributed() {
  const state = loadState();
  state.last_contributed = new Date().toISOString();
  state.total_session_contributions += 1;
  saveState(state);
}
|