@beaconlabs-io/evidence 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. package/README.md +256 -0
  2. package/deployments/00.json +34 -0
  3. package/deployments/01.json +28 -0
  4. package/deployments/02.json +34 -0
  5. package/deployments/03.json +40 -0
  6. package/deployments/04.json +40 -0
  7. package/deployments/05.json +28 -0
  8. package/deployments/06.json +28 -0
  9. package/deployments/07.json +28 -0
  10. package/deployments/08.json +28 -0
  11. package/deployments/09.json +28 -0
  12. package/deployments/10.json +40 -0
  13. package/deployments/11.json +28 -0
  14. package/deployments/12.json +22 -0
  15. package/deployments/13.json +22 -0
  16. package/deployments/14.json +34 -0
  17. package/deployments/15.json +22 -0
  18. package/deployments/16.json +34 -0
  19. package/deployments/17.json +22 -0
  20. package/deployments/18.json +40 -0
  21. package/deployments/19.json +40 -0
  22. package/deployments/20.json +40 -0
  23. package/dist/content/deployments.d.ts +5 -0
  24. package/dist/content/deployments.d.ts.map +1 -0
  25. package/dist/content/deployments.js +664 -0
  26. package/dist/content/deployments.js.map +1 -0
  27. package/dist/content/evidence.d.ts +5 -0
  28. package/dist/content/evidence.d.ts.map +1 -0
  29. package/dist/content/evidence.js +678 -0
  30. package/dist/content/evidence.js.map +1 -0
  31. package/dist/content/index.d.ts +31 -0
  32. package/dist/content/index.d.ts.map +1 -0
  33. package/dist/content/index.js +72 -0
  34. package/dist/content/index.js.map +1 -0
  35. package/dist/index.d.ts +3 -0
  36. package/dist/index.d.ts.map +1 -0
  37. package/dist/index.js +5 -0
  38. package/dist/index.js.map +1 -0
  39. package/dist/types.d.ts +130 -0
  40. package/dist/types.d.ts.map +1 -0
  41. package/dist/types.js +75 -0
  42. package/dist/types.js.map +1 -0
  43. package/evidence/00.mdx +78 -0
  44. package/evidence/01.mdx +115 -0
  45. package/evidence/02.mdx +72 -0
  46. package/evidence/03.mdx +74 -0
  47. package/evidence/04.mdx +81 -0
  48. package/evidence/05.mdx +83 -0
  49. package/evidence/06.mdx +61 -0
  50. package/evidence/07.mdx +61 -0
  51. package/evidence/08.mdx +57 -0
  52. package/evidence/09.mdx +63 -0
  53. package/evidence/10.mdx +60 -0
  54. package/evidence/11.mdx +58 -0
  55. package/evidence/12.mdx +62 -0
  56. package/evidence/13.mdx +53 -0
  57. package/evidence/14.mdx +52 -0
  58. package/evidence/15.mdx +53 -0
  59. package/evidence/16.mdx +52 -0
  60. package/evidence/17.mdx +53 -0
  61. package/evidence/18.mdx +52 -0
  62. package/evidence/19.mdx +54 -0
  63. package/evidence/20.mdx +53 -0
  64. package/package.json +75 -0
package/dist/content/evidence.js
@@ -0,0 +1,678 @@
+ export const evidence = {
+ "10": {
+ "frontmatter": {
+ "evidence_id": "10",
+ "title": "Effects of Work-Oriented vs. Social Offline Meetings on User Editing Activity",
+ "author": "BeaconLabs",
+ "date": "2024-11-05",
+ "citation": [
+ {
+ "name": "How offline meetings affect online activities: the case of Wikipedia",
+ "type": "link",
+ "src": "https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Type of offline meeting (work-oriented vs. social)",
+ "outcome_variable": "Increase in the volume of users’ editing activity (number of edits) and the probability of any editing",
+ "outcome": "+-"
+ }
+ ],
+ "strength": "3",
+ "methodologies": [
+ "DID, Covariate matching"
+ ],
+ "version": "1.0.0"
+ },
+ "content": "\n## Key Points\n\n- In the short term (1 week), work-related meetings showed a stronger positive effect than social meetings in terms of raising the likelihood of resuming editing.\n- However, this effect did not extend to the level (intensity) of editing, and in the medium term (1 month) and long term (1 year), no significant differences were observed between work-oriented and social meetings.\n- Overall, the value of social meetings for community building appears to match the effect of work meetings on long-term editor productivity.\n\n## Background\n\nOffline meetings include those primarily for social interaction and those that are work-oriented, directly focusing on improving Wikipedia. Theoretically, work-oriented meetings could yield larger increases in contributions by enhancing users’ capacity to contribute or heightening their awareness of needs.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n- Meetings are classified mainly into social (77%) and work-oriented (23%).\n\n### Intervention / Explanatory Variable\n\n- The intervention for this outcome is the type of offline meeting (work-oriented vs. social).\n- The models include indicators for meeting type and their interaction terms.\n\n### Dependent Variable\n\n- Outcome variables are the volume of users’ editing activity (number of edits) and the presence/absence of editing.\n- Analyses are conducted over short (1 week), medium (1 month), and long (1 year) horizons.\n\n### Identification Strategy\n\n- Quasi-experimental approach: We employ a difference-in-differences (DiD) design comparing meeting participants (treatment group) with comparable non-participants selected via matching (control group).\n- Covariate matching: From a pool of non-participants, we construct a control group most similar to participants based on five features (days since registration; cumulative activity in mainspace and outside mainspace from registration to the meeting; and recent activity in mainspace and outside mainspace over the 7-day, 1-month, 2-month, and 1-year periods prior to the meeting). This aims to minimize pre-existing differences between groups.\n- Statistical models: For the binary outcome of resuming activity, we use a multilevel linear probability model (LPM); for changes in activity volume, we use multilevel negative binomial models. Control variables (prior activity level, tenure, administrator status, and meeting year) are included.\n\n## Results\n\n- Short term (1 week): For the likelihood that previously inactive users resume editing, work-related meetings showed a stronger positive effect than social meetings. However, they did not affect the intensity of editing itself.\n- Medium term (1 month) and long term (1 year): No significant differences were observed between work-oriented and social meetings in either resuming editing or editing intensity.\n- These results provide only limited support for the hypothesis that work-related meetings increase contributions more than social meetings. While there is a short-term difference in re-engagement, there is no clear difference in long-term contribution growth.\n",
+ "raw": "---\nevidence_id: \"10\"\nresults:\n - intervention: \"Type of offline meeting (work-oriented vs. social)\"\n outcome_variable: \"Increase in the volume of users’ editing activity (number of edits) and the probability of any editing\"\n outcome: \"+-\"\nstrength: \"3\"\nmethodologies:\n - \"DID, Covariate matching\"\nversion: \"1.0.0\"\ntitle: \"Effects of Work-Oriented vs. Social Offline Meetings on User Editing Activity\"\ndate: \"2024-11-05\"\n\ncitation:\n - name: \"How offline meetings affect online activities: the case of Wikipedia\"\n src: \"https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\n- In the short term (1 week), work-related meetings showed a stronger positive effect than social meetings in terms of raising the likelihood of resuming editing.\n- However, this effect did not extend to the level (intensity) of editing, and in the medium term (1 month) and long term (1 year), no significant differences were observed between work-oriented and social meetings.\n- Overall, the value of social meetings for community building appears to match the effect of work meetings on long-term editor productivity.\n\n## Background\n\nOffline meetings include those primarily for social interaction and those that are work-oriented, directly focusing on improving Wikipedia. Theoretically, work-oriented meetings could yield larger increases in contributions by enhancing users’ capacity to contribute or heightening their awareness of needs.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n- Meetings are classified mainly into social (77%) and work-oriented (23%).\n\n### Intervention / Explanatory Variable\n\n- The intervention for this outcome is the type of offline meeting (work-oriented vs. social).\n- The models include indicators for meeting type and their interaction terms.\n\n### Dependent Variable\n\n- Outcome variables are the volume of users’ editing activity (number of edits) and the presence/absence of editing.\n- Analyses are conducted over short (1 week), medium (1 month), and long (1 year) horizons.\n\n### Identification Strategy\n\n- Quasi-experimental approach: We employ a difference-in-differences (DiD) design comparing meeting participants (treatment group) with comparable non-participants selected via matching (control group).\n- Covariate matching: From a pool of non-participants, we construct a control group most similar to participants based on five features (days since registration; cumulative activity in mainspace and outside mainspace from registration to the meeting; and recent activity in mainspace and outside mainspace over the 7-day, 1-month, 2-month, and 1-year periods prior to the meeting). This aims to minimize pre-existing differences between groups.\n- Statistical models: For the binary outcome of resuming activity, we use a multilevel linear probability model (LPM); for changes in activity volume, we use multilevel negative binomial models. Control variables (prior activity level, tenure, administrator status, and meeting year) are included.\n\n## Results\n\n- Short term (1 week): For the likelihood that previously inactive users resume editing, work-related meetings showed a stronger positive effect than social meetings. However, they did not affect the intensity of editing itself.\n- Medium term (1 month) and long term (1 year): No significant differences were observed between work-oriented and social meetings in either resuming editing or editing intensity.\n- These results provide only limited support for the hypothesis that work-related meetings increase contributions more than social meetings. While there is a short-term difference in re-engagement, there is no clear difference in long-term contribution growth.\n"
+ },
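The identification strategy in the entry above pairs covariate matching with a difference-in-differences model. The sketch below is a minimal illustration of that pipeline, not the authors' code: the column names, the nearest-neighbor matcher, and the clustered OLS linear probability model (standing in for the paper's multilevel LPM) are all assumptions.

```python
# Hedged sketch of a matched DiD design (illustrative only; column names and
# the matching features are assumptions, not the paper's implementation).
import pandas as pd
import statsmodels.formula.api as smf
from sklearn.neighbors import NearestNeighbors

# Hypothetical pre-meeting covariates, one row per user, with a `participant` flag.
FEATURES = ["tenure_days", "cum_edits_main", "cum_edits_other",
            "recent_edits_main", "recent_edits_other"]

def match_controls(users: pd.DataFrame) -> pd.DataFrame:
    """Pair each participant with its nearest non-participant on the covariates."""
    treated = users[users["participant"] == 1]
    pool = users[users["participant"] == 0]
    nn = NearestNeighbors(n_neighbors=1).fit(pool[FEATURES])
    _, idx = nn.kneighbors(treated[FEATURES])
    controls = pool.iloc[idx.ravel()]
    return pd.concat([treated, controls])

def did_lpm(panel: pd.DataFrame):
    """panel: two rows per matched user (pre/post) with a binary `edited` outcome.
    The DiD effect is the participant:post coefficient; SEs clustered by user."""
    m = smf.ols("edited ~ participant * post + cum_edits_main + tenure_days", data=panel)
    return m.fit(cov_type="cluster", cov_kwds={"groups": panel["user_id"]})
```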
+ "11": {
+ "frontmatter": {
+ "evidence_id": "11",
+ "title": "How Administrators’ Participation in Offline Meetings Influences Their Contributions",
+ "author": "BeaconLabs",
+ "date": "2024-11-05",
+ "citation": [
+ {
+ "name": "How offline meetings affect online activities: the case of Wikipedia",
+ "type": "link",
+ "src": "https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Administrators participating in offline meeting",
+ "outcome_variable": "Increase in the volume of users’ editing activity (number of edits) and the probability of any editing",
+ "outcome": "+-"
+ }
+ ],
+ "strength": "3",
+ "methodologies": [
+ "DID, Covariate matching"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ ""
+ ]
+ },
+ "content": "\n## Key Points\n\n- Administrators generally make more edits across all namespaces, but the post-meeting increase in activity is smaller than for other users.\n- Administrators who had been inactive are more likely to resume editing after meetings, but their subsequent increase in activity is not as large as that of other users.\n\n## Background\n\nPrior research suggests that meetings are essential to administrative decision-making and that users who become administrators may increase their activity after participating. This study examines how the administrator role influences contribution behavior following meetings.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n- Information on whether a user has ever become a Wikipedia administrator is also included.\n\n### Intervention / Explanatory Variable\n\n- In addition to participation in offline meetings, we consider whether the user is an administrator.\n- The models include an indicator for administrator status and its interaction terms.\n\n### Dependent Variable\n\n- Outcome variables are the volume of users’ editing activity (number of edits) and the presence/absence of editing, analyzed over short, medium, and long horizons.\n\n### Identification Strategy\n\n- We use a quasi-experimental design (DiD, matching, multilevel LPM, multilevel negative binomial models), incorporating triple interaction terms to assess how administrator status moderates meeting effects.\n\n## Results\n\n- Users who are administrators tend to make more edits across all namespaces regardless of meeting participation.\n- However, after attending a meeting, administrators show smaller increases in activity compared to other users. This likely reflects a ceiling effect: administrators already have high baseline activity, leaving less room for additional increases from meetings.\n- In the medium-term (1 month) models, inactive administrators are more likely to resume editing after meetings; yet, as in the short-term models, their subsequent increase in activity is smaller than for other users. A similar negative moderating effect of administrator status appears in the long-term (1 year) trends.\n",
+ "raw": "---\nevidence_id: \"11\"\nresults:\n - intervention: \"Administrators participating in offline meeting\"\n outcome_variable: \"Increase in the volume of users’ editing activity (number of edits) and the probability of any editing\"\n outcome: \"+-\"\nstrength: \"3\"\nmethodologies:\n - \"DID, Covariate matching\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"How Administrators’ Participation in Offline Meetings Influences Their Contributions\"\ndate: \"2024-11-05\"\n\ncitation:\n - name: \"How offline meetings affect online activities: the case of Wikipedia\"\n src: \"https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\n- Administrators generally make more edits across all namespaces, but the post-meeting increase in activity is smaller than for other users.\n- Administrators who had been inactive are more likely to resume editing after meetings, but their subsequent increase in activity is not as large as that of other users.\n\n## Background\n\nPrior research suggests that meetings are essential to administrative decision-making and that users who become administrators may increase their activity after participating. This study examines how the administrator role influences contribution behavior following meetings.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n- Information on whether a user has ever become a Wikipedia administrator is also included.\n\n### Intervention / Explanatory Variable\n\n- In addition to participation in offline meetings, we consider whether the user is an administrator.\n- The models include an indicator for administrator status and its interaction terms.\n\n### Dependent Variable\n\n- Outcome variables are the volume of users’ editing activity (number of edits) and the presence/absence of editing, analyzed over short, medium, and long horizons.\n\n### Identification Strategy\n\n- We use a quasi-experimental design (DiD, matching, multilevel LPM, multilevel negative binomial models), incorporating triple interaction terms to assess how administrator status moderates meeting effects.\n\n## Results\n\n- Users who are administrators tend to make more edits across all namespaces regardless of meeting participation.\n- However, after attending a meeting, administrators show smaller increases in activity compared to other users. This likely reflects a ceiling effect: administrators already have high baseline activity, leaving less room for additional increases from meetings.\n- In the medium-term (1 month) models, inactive administrators are more likely to resume editing after meetings; yet, as in the short-term models, their subsequent increase in activity is smaller than for other users. A similar negative moderating effect of administrator status appears in the long-term (1 year) trends.\n"
+ },
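Entry 11 tests whether administrator status moderates the meeting effect via triple interaction terms. Below is a sketch of that moderation check under the same hypothetical panel layout as before; plain clustered OLS again stands in for the paper's multilevel models.

```python
# Sketch of a moderation check via a triple interaction (hypothetical columns;
# the paper fits multilevel models, a plain OLS stands in here for brevity).
import pandas as pd
import statsmodels.formula.api as smf

def admin_moderation(panel: pd.DataFrame):
    """panel: user-period rows with edits, participant, post, is_admin, user_id."""
    m = smf.ols("edits ~ participant * post * is_admin + tenure_days", data=panel)
    fit = m.fit(cov_type="cluster", cov_kwds={"groups": panel["user_id"]})
    # participant:post:is_admin captures how the meeting effect differs for
    # administrators relative to other users (negative = smaller boost).
    return fit.params.filter(like="is_admin")
```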
+ "12": {
+ "frontmatter": {
+ "evidence_id": "12",
+ "title": "Continuity of OSS contributions after participating in the Google Summer of Code (GSoC) Program",
+ "author": "BeaconLabs",
+ "date": "2019-12-14",
+ "citation": [
+ {
+ "name": "Google summer of code: Student motivations and contributions",
+ "type": "link",
+ "src": "https://ctreude.ca/wp-content/uploads/2019/12/jss20.pdf"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Participation in the Google Summer of Code (GSoC) Program",
+ "outcome_variable": "Continuity of Students’ Contributions to GSoC Projects",
+ "outcome": "+-"
+ }
+ ],
+ "strength": "1",
+ "methodologies": [
+ "Interview"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ ""
+ ],
+ "tags": [
+ "oss",
+ "public goods funding"
+ ]
+ },
+ "content": "\n## Key Points\n\nMost GSoC students did not continue contributing to their projects after the program, but a minority became frequent contributors. Since students mainly participated for enriching (work) experiences rather than to become long-term OSS contributors, this also shaped contribution continuity.\n\n## Background\n\nOSS projects join Summer of Code programs hoping for retention of newcomers and increased code contributions. Previous research focused on students’ quantitative contributions during GSoC or their outcomes. However, the link between motivations for participation and post-program contributions had not been examined. This study investigates students’ motivations for joining GSoC and their intentions/continuation of contributions afterward. Prior research found that experience levels strongly influence retention, that GSoC fosters strong bonds between mentors and students, and that 18% of students later became mentors.\n\n## Analysis Method\n\n### Dataset\n\n- The study targeted students who participated in GSoC between 2010 and 2015. Out of 1,000 survey invitations, 141 students responded (14.1% response rate).\n- From these, 10 students volunteered for follow-up interviews.\n\n### Intervention / Explanatory Variable\n\n- Participation in GSoC, along with associated experiences and rewards.\n- GSoC is a three-month program that provides scholarships and mentorship to students who wish to contribute to open source software (OSS) projects.\n\n### Dependent Variable\n\n- Students’ intent to continue contributing and changes in contribution frequency before/after GSoC.\n\n### Identification Strategy\n\n- Data was collected via survey questions (OSS contributions before/after GSoC, general participation reasons).\n- Descriptive statistics were applied. Results were also compared with prior quantitative research (Silva et al., 2017).\n\n## Results\n\n- Pre-GSoC Contributions: 56.0% had “Never” contributed to their chosen project before GSoC, and 13.5% said “Rarely.” For OSS projects outside GSoC, 34.7% said “Never” and 32.6% “Rarely.”\n- Intent to Continue: About 57% intended to continue contributing (“Yes” or “Definitely yes”).\n- Actual Continuation: Post-GSoC contributions were reported as: “No” 17.0%, “Rarely” 21.3%, “Occasionally” 32.6%, “Frequently” 12.8%, “Core member” 16.3%.\n- Change in Contribution Frequency: About 53% (75 students) reported increased contribution frequency after GSoC. However, prior quantitative research (Silva et al., 2017) showed only ~16% continued contributing after several months, consistent with this study’s findings. This aligns with Roberts et al. (2006), suggesting initial motivations don’t always translate to long-term retention.\n- Frequent Contributors: Both this and prior studies indicated a small subset of students became frequent developers.\n\nLink to Motivations: While stipends were important, experienced developers saw them as essential. Less experienced students emphasized career building as the main reason for joining.\n",
+ "raw": "---\nevidence_id: \"12\"\nresults:\n - intervention: \"Participation in the Google Summer of Code (GSoC) Program\"\n outcome_variable: \"Continuity of Students’ Contributions to GSoC Projects\"\n outcome: \"+-\"\nstrength: \"1\"\nmethodologies:\n - \"Interview\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"Continuity of OSS contributions after participating in the Google Summer of Code (GSoC) Program\"\ndate: \"2019-12-14\"\ntags:\n - \"oss\"\n - \"public goods funding\"\ncitation:\n - name: \"Google summer of code: Student motivations and contributions\"\n src: \"https://ctreude.ca/wp-content/uploads/2019/12/jss20.pdf\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nMost GSoC students did not continue contributing to their projects after the program, but a minority became frequent contributors. Since students mainly participated for enriching (work) experiences rather than to become long-term OSS contributors, this also shaped contribution continuity.\n\n## Background\n\nOSS projects join Summer of Code programs hoping for retention of newcomers and increased code contributions. Previous research focused on students’ quantitative contributions during GSoC or their outcomes. However, the link between motivations for participation and post-program contributions had not been examined. This study investigates students’ motivations for joining GSoC and their intentions/continuation of contributions afterward. Prior research found that experience levels strongly influence retention, that GSoC fosters strong bonds between mentors and students, and that 18% of students later became mentors.\n\n## Analysis Method\n\n### Dataset\n\n- The study targeted students who participated in GSoC between 2010 and 2015. Out of 1,000 survey invitations, 141 students responded (14.1% response rate).\n- From these, 10 students volunteered for follow-up interviews.\n\n### Intervention / Explanatory Variable\n\n- Participation in GSoC, along with associated experiences and rewards.\n- GSoC is a three-month program that provides scholarships and mentorship to students who wish to contribute to open source software (OSS) projects.\n\n### Dependent Variable\n\n- Students’ intent to continue contributing and changes in contribution frequency before/after GSoC.\n\n### Identification Strategy\n\n- Data was collected via survey questions (OSS contributions before/after GSoC, general participation reasons).\n- Descriptive statistics were applied. Results were also compared with prior quantitative research (Silva et al., 2017).\n\n## Results\n\n- Pre-GSoC Contributions: 56.0% had “Never” contributed to their chosen project before GSoC, and 13.5% said “Rarely.” For OSS projects outside GSoC, 34.7% said “Never” and 32.6% “Rarely.”\n- Intent to Continue: About 57% intended to continue contributing (“Yes” or “Definitely yes”).\n- Actual Continuation: Post-GSoC contributions were reported as: “No” 17.0%, “Rarely” 21.3%, “Occasionally” 32.6%, “Frequently” 12.8%, “Core member” 16.3%.\n- Change in Contribution Frequency: About 53% (75 students) reported increased contribution frequency after GSoC. However, prior quantitative research (Silva et al., 2017) showed only ~16% continued contributing after several months, consistent with this study’s findings. This aligns with Roberts et al. (2006), suggesting initial motivations don’t always translate to long-term retention.\n- Frequent Contributors: Both this and prior studies indicated a small subset of students became frequent developers.\n\nLink to Motivations: While stipends were important, experienced developers saw them as essential. Less experienced students emphasized career building as the main reason for joining.\n"
+ },
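The GSoC analysis rests on descriptive tabulation of ordinal survey answers. A tiny sketch of that tabulation follows; the response vector here is illustrative, not the survey data.

```python
# Sketch: turning ordinal survey answers into the percentage breakdown reported
# above (the answers below are made up for illustration).
import pandas as pd

answers = pd.Series(["No", "Rarely", "Occasionally", "Frequently", "Core member",
                     "Occasionally", "Rarely", "No", "Occasionally", "Core member"])
order = ["No", "Rarely", "Occasionally", "Frequently", "Core member"]
# Share of each response level, ordered from least to most engaged
print(answers.value_counts(normalize=True).reindex(order).mul(100).round(1))
```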
+ "13": {
+ "frontmatter": {
+ "evidence_id": "13",
+ "title": "Analysis of “Hot” and “Cold” Trends in Badgeholders’ Voting Behavior",
+ "author": "BeaconLabs",
+ "date": "2024-05-22",
+ "citation": [
+ {
+ "name": "A deepdive into FIL-RetroPGF-1 results",
+ "type": "link",
+ "src": "https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-1-results-7e5a0bcdba08"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Classification of badgeholders’ voting behavior based on the concept of “temperature”",
+ "outcome_variable": "Funding allocation to projects.",
+ "outcome": "+-"
+ }
+ ],
+ "strength": "1",
+ "methodologies": [
+ "Counterfactual analysis"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ ""
+ ]
+ },
+ "content": "\n## Key Points\n\nA simulation analysis of how voting behavior trends (hot/cold) influence funding allocation. “Hot” voters are defined as those who concentrate votes on a small number of projects, while “cold” voters are those who spread votes across many projects. The analysis simulates funding allocation excluding these types of voters and evaluates the impact by comparing it to the original allocation.\n\n## Background\n\nIn FIL-RetroPGF-1, each badgeholder (voter) had 100 votes and cast them across multiple projects. This analysis classifies badgeholders’ voting behavior based on the concept of “temperature” and examines how such tendencies affected overall funding allocation. Specifically, it identifies “hot” voters who concentrate votes on a small number of projects and “cold” voters who spread votes across many projects, then simulates scenarios excluding these groups.\n\n## Analysis Method\n\n### Dataset\n\nAn anonymized dataset of all votes cast by badgeholders in the FIL-RetroPGF-1 round.\n\n### Intervention / Explanatory Variable\n\nExcluding the top 10% of “hot” badgeholders (those who concentrated votes on fewer projects) and the top 10% of “cold” badgeholders (those who spread votes across many projects) from the dataset.\n\n### Dependent Variable\n\nFunding allocation to projects\n\n### Identification Strategy\n\nA counterfactual analysis comparing three scenarios: the original funding allocation, allocation excluding “hot” badgeholders, and allocation excluding “cold” badgeholders.\n\n## Results\n\n- Excluding the top 10% of “hot” badgeholders had a very small impact on funding allocation.\n- In contrast, excluding the top 10% of “cold” badgeholders led to a disproportionately large reduction in the number of projects receiving funding.\n- This is likely because “cold” badgeholders helped more projects reach the quorum requirement, enabling funding to be distributed more widely. In addition, their low votes (e.g., 0, 1, 2) lowered the average scores of projects, further contributing to broader distribution of funds.\n",
+ "raw": "---\nevidence_id: \"13\"\nresults:\n - intervention: \"Classification of badgeholders’ voting behavior based on the concept of “temperature”\"\n outcome_variable: \"Funding allocation to projects.\"\n outcome: \"+-\"\nstrength: \"1\"\nmethodologies:\n - \"Counterfactual analysis\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"Analysis of “Hot” and “Cold” Trends in Badgeholders’ Voting Behavior\"\ndate: \"2024-05-22\"\n\ncitation:\n - name: \"A deepdive into FIL-RetroPGF-1 results\"\n src: \"https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-1-results-7e5a0bcdba08\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nA simulation analysis of how voting behavior trends (hot/cold) influence funding allocation. “Hot” voters are defined as those who concentrate votes on a small number of projects, while “cold” voters are those who spread votes across many projects. The analysis simulates funding allocation excluding these types of voters and evaluates the impact by comparing it to the original allocation.\n\n## Background\n\nIn FIL-RetroPGF-1, each badgeholder (voter) had 100 votes and cast them across multiple projects. This analysis classifies badgeholders’ voting behavior based on the concept of “temperature” and examines how such tendencies affected overall funding allocation. Specifically, it identifies “hot” voters who concentrate votes on a small number of projects and “cold” voters who spread votes across many projects, then simulates scenarios excluding these groups.\n\n## Analysis Method\n\n### Dataset\n\nAn anonymized dataset of all votes cast by badgeholders in the FIL-RetroPGF-1 round.\n\n### Intervention / Explanatory Variable\n\nExcluding the top 10% of “hot” badgeholders (those who concentrated votes on fewer projects) and the top 10% of “cold” badgeholders (those who spread votes across many projects) from the dataset.\n\n### Dependent Variable\n\nFunding allocation to projects\n\n### Identification Strategy\n\nA counterfactual analysis comparing three scenarios: the original funding allocation, allocation excluding “hot” badgeholders, and allocation excluding “cold” badgeholders.\n\n## Results\n\n- Excluding the top 10% of “hot” badgeholders had a very small impact on funding allocation.\n- In contrast, excluding the top 10% of “cold” badgeholders led to a disproportionately large reduction in the number of projects receiving funding.\n- This is likely because “cold” badgeholders helped more projects reach the quorum requirement, enabling funding to be distributed more widely. In addition, their low votes (e.g., 0, 1, 2) lowered the average scores of projects, further contributing to broader distribution of funds.\n"
+ },
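The hot/cold analysis above drops the most concentrated and the most dispersed voters and recomputes allocations. A rough sketch of that counterfactual on toy ballots follows; the Herfindahl-style concentration index is an assumed proxy for the post's "temperature" measure, and the quorum rule is omitted for brevity.

```python
# Sketch of the hot/cold counterfactual: exclude the top decile of concentrated
# ("hot") and dispersed ("cold") voters and compare allocations (toy ballots).
import numpy as np

rng = np.random.default_rng(0)
votes = rng.multinomial(100, np.ones(30) / 30, size=40).astype(float)  # 40 voters x 30 projects

def concentration(v: np.ndarray) -> np.ndarray:
    """Herfindahl index of each voter's allocation; higher = more concentrated."""
    shares = v / v.sum(axis=1, keepdims=True)
    return (shares ** 2).sum(axis=1)

def allocation(v: np.ndarray, pool: float = 100_000.0) -> np.ndarray:
    """Pool split proportionally to each project's total votes (quorum ignored)."""
    totals = v.sum(axis=0)
    return pool * totals / totals.sum()

temp = concentration(votes)
hot = temp >= np.quantile(temp, 0.9)    # top 10% most concentrated
cold = temp <= np.quantile(temp, 0.1)   # top 10% most spread out

base = allocation(votes)
print("shift w/o hot: ", np.abs(allocation(votes[~hot]) - base).sum())
print("shift w/o cold:", np.abs(allocation(votes[~cold]) - base).sum())
```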
+ "14": {
+ "frontmatter": {
+ "evidence_id": "14",
+ "title": "Analysis of the impact of project display order in the user interface (UI) on voting results",
+ "author": "BeaconLabs",
+ "date": "2024-05-22",
+ "citation": [
+ {
+ "name": "A deepdive into FIL-RetroPGF-1 results",
+ "type": "link",
+ "src": "https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-1-results-7e5a0bcdba08"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "The alphabetical order of the first letter of the project name",
+ "outcome_variable": "(a) The total number of votes received by each project, and (b) the average project score",
+ "outcome": "!"
+ }
+ ],
+ "strength": "1",
+ "methodologies": [
+ "Regression analysis, MCMC"
+ ],
+ "version": "1.0.0"
+ },
+ "content": "\n## Key Points\n\nA regression analysis of the bias introduced by the UI project display order on voting results. In the voting interface, projects were displayed in alphabetical order. We conducted a regression analysis of the correlation between this display order (the first letter of the project name) and both the number of votes received and the average score for each project, in order to evaluate the presence and impact of UI bias.\n\n## Background\n\nIn the FIL-RetroPGF-1 vote, the software “easy-retropgf” displayed projects in alphabetical order. We tested the hypothesis that this ordering biased badgeholders’ voting behavior and, as a result, influenced the allocation of funds.\n\n## Analysis Method\n\n### Dataset\n\nProject-level voting counts and average scores from the FIL-RetroPGF-1 round.\n\n### Intervention / Explanatory Variable\n\nThe alphabetical order of the first letter of the project name.\n\n### Dependent Variable\n\n(a) The total number of votes received by each project, and (b) the average project score.\n\n### Identification Strategy\n\nWe performed regression analysis to calculate the correlation between the first letter of the project name and both total votes and average scores. Furthermore, we applied MCMC (Markov Chain Monte Carlo) methods to test statistical significance.\n\n## Results\n\n- A negative correlation of -0.27 was found between the first letter of the project name and total votes, and a negative correlation of -0.20 with average scores. This suggests that projects whose names start with later letters of the alphabet tend to receive fewer votes and lower average scores.\n- This effect was confirmed to be statistically significant.\n- Specifically, it is estimated that for each subsequent letter in the alphabet, the average fund allocation decreases by approximately 40 FIL.\n- Based on these findings, organizers are considering improving the UI in future rounds to reduce such effects.\n",
+ "raw": "---\nevidence_id: \"14\"\nresults:\n - intervention: \"The alphabetical order of the first letter of the project name\"\n outcome_variable: \"(a) The total number of votes received by each project, and (b) the average project score\"\n outcome: \"!\"\nstrength: \"1\"\nmethodologies:\n - \"Regression analysis, MCMC\"\nversion: \"1.0.0\"\ntitle: \"Analysis of the impact of project display order in the user interface (UI) on voting results\"\ndate: \"2024-05-22\"\n\ncitation:\n - name: \"A deepdive into FIL-RetroPGF-1 results\"\n src: \"https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-1-results-7e5a0bcdba08\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nA regression analysis of the bias introduced by the UI project display order on voting results. In the voting interface, projects were displayed in alphabetical order. We conducted a regression analysis of the correlation between this display order (the first letter of the project name) and both the number of votes received and the average score for each project, in order to evaluate the presence and impact of UI bias.\n\n## Background\n\nIn the FIL-RetroPGF-1 vote, the software “easy-retropgf” displayed projects in alphabetical order. We tested the hypothesis that this ordering biased badgeholders’ voting behavior and, as a result, influenced the allocation of funds.\n\n## Analysis Method\n\n### Dataset\n\nProject-level voting counts and average scores from the FIL-RetroPGF-1 round.\n\n### Intervention / Explanatory Variable\n\nThe alphabetical order of the first letter of the project name.\n\n### Dependent Variable\n\n(a) The total number of votes received by each project, and (b) the average project score.\n\n### Identification Strategy\n\nWe performed regression analysis to calculate the correlation between the first letter of the project name and both total votes and average scores. Furthermore, we applied MCMC (Markov Chain Monte Carlo) methods to test statistical significance.\n\n## Results\n\n- A negative correlation of -0.27 was found between the first letter of the project name and total votes, and a negative correlation of -0.20 with average scores. This suggests that projects whose names start with later letters of the alphabet tend to receive fewer votes and lower average scores.\n- This effect was confirmed to be statistically significant.\n- Specifically, it is estimated that for each subsequent letter in the alphabet, the average fund allocation decreases by approximately 40 FIL.\n- Based on these findings, organizers are considering improving the UI in future rounds to reduce such effects.\n"
+ },
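Entry 14 correlates the first letter of each project name with vote totals. A sketch on toy data follows; the post inferred significance with MCMC, while a permutation test stands in here as a simpler stand-in check.

```python
# Sketch of the ordering-bias check: correlate first-letter position with vote
# totals and gauge significance (permutation test substituted for the post's MCMC).
import numpy as np

rng = np.random.default_rng(0)
names = ["Alpha", "Beacon", "Gamma", "Delta", "Kappa", "Nu", "Theta", "Zeta"]
letters = np.array([ord(n[0].upper()) - ord("A") for n in names], dtype=float)
votes = rng.poisson(50, size=len(names)).astype(float)  # toy vote totals

def perm_test_corr(x, y, n_perm=10_000):
    obs = np.corrcoef(x, y)[0, 1]
    null = np.array([np.corrcoef(rng.permutation(x), y)[0, 1] for _ in range(n_perm)])
    return obs, float((np.abs(null) >= abs(obs)).mean())  # two-sided p-value

r, p = perm_test_corr(letters, votes)
print(f"first-letter vs. votes: r={r:.2f}, p={p:.3f}")
```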
+ "15": {
+ "frontmatter": {
+ "evidence_id": "15",
+ "title": "Impact of Changes to Scoring Rules on Fund Allocation",
+ "author": "BeaconLabs",
+ "date": "2024-12-19",
+ "citation": [
+ {
+ "name": "A deepdive into FIL-RetroPGF-2 results",
+ "type": "link",
+ "src": "https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Scoring rules (sum, median, mean, etc.)",
+ "outcome_variable": "Distribution of funding amounts allocated to each project",
+ "outcome": "+"
+ }
+ ],
+ "strength": "3",
+ "methodologies": [
+ "Comparison of simulated counterfactual results"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ "Voting data from badgeholders (voters) in FIL-RetroPGF-2"
+ ]
+ },
+ "content": "\n## Key Points\n\nIn FIL-RetroPGF-2, the scoring rule was changed from the median in Round 1 to the sum. This change made the fund distribution closer to a power-law distribution, increasing the amount of funding received by top projects.\n\n## Background\n\nWithin the Filecoin ecosystem, a Retroactive Public Goods Funding (RetroPGF) program has been implemented to provide funding for public goods. FIL-RetroPGF-2 was the second iteration of this program. Based on the results of Round 1, the scoring rules were revised with two objectives: to make the voting mechanism easier to understand, and to ensure that while top projects received substantial funding, a wide range of projects—including those in the long tail—would also benefit.\n\n## Analysis Method\n\n### Dataset\n\nVoting data from badgeholders (voters) in FIL-RetroPGF-2\n\n### Intervention / Explanatory Variable\n\nScoring rules (sum, median, mean, etc.)\n\n### Dependent Variable\n\nDistribution of funding amounts allocated to each project\n\n### Identification Strategy\n\nThe actual fund allocation results using the sum scoring rule in FIL-RetroPGF-2 were compared with simulated counterfactual results under median or mean scoring. In addition, statistical comparisons were made between Round 1 (median) and Round 2 (sum).\n\n## Results\n\n- The sum scoring rule was observed to generate a funding distribution more closely resembling a power-law distribution than the median or mean rules.\n- As a result, compared to Round 1, a larger share of funding went to top projects in Round 2.\n- This change, together with the shift toward assigning absolute funding amounts rather than percentages by badgeholders, may have contributed to a more exponential distribution of funding.\n",
+ "raw": "---\nevidence_id: \"15\"\nresults:\n - intervention: \"Scoring rules (sum, median, mean, etc.)\"\n outcome_variable: \"Distribution of funding amounts allocated to each project\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"Comparison of simulated counterfactual results\"\nversion: \"1.0.0\"\ndatasets:\n - \"Voting data from badgeholders (voters) in FIL-RetroPGF-2\"\ntitle: \"Impact of Changes to Scoring Rules on Fund Allocation\"\ndate: \"2024-12-19\"\n\ncitation:\n - name: \"A deepdive into FIL-RetroPGF-2 results\"\n src: \"https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nIn FIL-RetroPGF-2, the scoring rule was changed from the median in Round 1 to the sum. This change made the fund distribution closer to a power-law distribution, increasing the amount of funding received by top projects.\n\n## Background\n\nWithin the Filecoin ecosystem, a Retroactive Public Goods Funding (RetroPGF) program has been implemented to provide funding for public goods. FIL-RetroPGF-2 was the second iteration of this program. Based on the results of Round 1, the scoring rules were revised with two objectives: to make the voting mechanism easier to understand, and to ensure that while top projects received substantial funding, a wide range of projects—including those in the long tail—would also benefit.\n\n## Analysis Method\n\n### Dataset\n\nVoting data from badgeholders (voters) in FIL-RetroPGF-2\n\n### Intervention / Explanatory Variable\n\nScoring rules (sum, median, mean, etc.)\n\n### Dependent Variable\n\nDistribution of funding amounts allocated to each project\n\n### Identification Strategy\n\nThe actual fund allocation results using the sum scoring rule in FIL-RetroPGF-2 were compared with simulated counterfactual results under median or mean scoring. In addition, statistical comparisons were made between Round 1 (median) and Round 2 (sum).\n\n## Results\n\n- The sum scoring rule was observed to generate a funding distribution more closely resembling a power-law distribution than the median or mean rules.\n- As a result, compared to Round 1, a larger share of funding went to top projects in Round 2.\n- This change, together with the shift toward assigning absolute funding amounts rather than percentages by badgeholders, may have contributed to a more exponential distribution of funding.\n"
+ },
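The counterfactual in entry 15 recomputes allocations under sum, median, and mean scoring. A compact sketch of that comparison on toy ballots follows; splitting the pool proportionally to the aggregate score is an assumption about the payout rule.

```python
# Sketch comparing scoring rules on a toy ballot matrix (badgeholders x projects),
# mirroring the sum/median/mean counterfactual described above.
import numpy as np

rng = np.random.default_rng(1)
scores = rng.integers(0, 10, size=(33, 50)).astype(float)  # toy ballots

def allocate(scores: np.ndarray, rule: str, pool: float = 100_000.0) -> np.ndarray:
    agg = {"sum": scores.sum(axis=0),
           "median": np.median(scores, axis=0),
           "mean": scores.mean(axis=0)}[rule]
    return pool * agg / agg.sum()  # proportional payout (assumed)

for rule in ("sum", "median", "mean"):
    alloc = np.sort(allocate(scores, rule))[::-1]
    # Share captured by the top 5 projects indicates how top-heavy the rule is
    print(rule, "top-5 share:", round(alloc[:5].sum() / alloc.sum(), 3))
```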
+ "16": {
+ "frontmatter": {
+ "evidence_id": "16",
+ "title": "Elimination of Alphabetical Order Bias through UI Randomization",
+ "author": "BeaconLabs",
+ "date": "2024-12-19",
+ "citation": [
+ {
+ "name": "A deepdive into FIL-RetroPGF-2 results",
+ "type": "link",
+ "src": "https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "The first letter of the project name (alphabetical order)",
+ "outcome_variable": "Number of votes each project received, and total funding allocated to each project",
+ "outcome": "+"
+ }
+ ],
+ "strength": "1",
+ "methodologies": [
+ "Markov Chain Monte Carlo (MCMC)"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ "Voting data from badgeholders (voters) in FIL-RetroPGF-2"
+ ]
+ },
+ "content": "\n## Key Points\n\nIn FIL-RetroPGF-2, the voting software (Gitcoin easy-retropgf) was updated so that the display order of projects was randomized for each badgeholder. As a result, it was confirmed that the bias caused by alphabetical ordering of project names on vote counts and funding allocations was effectively eliminated.\n\n## Background\n\nThere had been concerns that the order in which projects were displayed in the voting interface (UI) could influence voters’ decisions. In particular, to remove the “order effect,” where projects displayed earlier in the list had an advantage, UI improvements were implemented in Round 2.\n\n## Analysis Method\n\n### Dataset\n\nProject data from FIL-RetroPGF-2 (project names), along with the number of votes and amount of funding allocated to each project.\n\n### Intervention / Explanatory Variable\n\nThe first letter of the project name (alphabetical order)\n\n### Dependent Variable\n\n- Number of votes each project received.\n- Total funding allocated to each project.\n\n### Identification Strategy\n\nUsing the Markov Chain Monte Carlo (MCMC) method, the distribution of correlations between the first letter of project names and both “number of votes” and “funding allocations” was inferred. This was used to evaluate whether the correlation was statistically significant.\n\n## Results\n\n- The analysis revealed a very small negative correlation between the first letter of project names and the number of votes received, but no significant correlation with the amount of funding allocated.\n- From these results, it was concluded that the potential alphabetical ordering effect observed in Round 1 was effectively removed by the UI improvements introduced this time.\n",
+ "raw": "---\nevidence_id: \"16\"\nresults:\n - intervention: \"The first letter of the project name (alphabetical order)\"\n outcome_variable: \"Number of votes each project received, and total funding allocated to each project\"\n outcome: \"+\"\nstrength: \"1\"\nmethodologies:\n - \"Markov Chain Monte Carlo (MCMC)\"\nversion: \"1.0.0\"\ndatasets:\n - \"Voting data from badgeholders (voters) in FIL-RetroPGF-2\"\ntitle: \"Elimination of Alphabetical Order Bias through UI Randomization\"\ndate: \"2024-12-19\"\ncitation:\n - name: \"A deepdive into FIL-RetroPGF-2 results\"\n src: \"https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nIn FIL-RetroPGF-2, the voting software (Gitcoin easy-retropgf) was updated so that the display order of projects was randomized for each badgeholder. As a result, it was confirmed that the bias caused by alphabetical ordering of project names on vote counts and funding allocations was effectively eliminated.\n\n## Background\n\nThere had been concerns that the order in which projects were displayed in the voting interface (UI) could influence voters’ decisions. In particular, to remove the “order effect,” where projects displayed earlier in the list had an advantage, UI improvements were implemented in Round 2.\n\n## Analysis Method\n\n### Dataset\n\nProject data from FIL-RetroPGF-2 (project names), along with the number of votes and amount of funding allocated to each project.\n\n### Intervention / Explanatory Variable\n\nThe first letter of the project name (alphabetical order)\n\n### Dependent Variable\n\n- Number of votes each project received.\n- Total funding allocated to each project.\n\n### Identification Strategy\n\nUsing the Markov Chain Monte Carlo (MCMC) method, the distribution of correlations between the first letter of project names and both “number of votes” and “funding allocations” was inferred. This was used to evaluate whether the correlation was statistically significant.\n\n## Results\n\n- The analysis revealed a very small negative correlation between the first letter of project names and the number of votes received, but no significant correlation with the amount of funding allocated.\n- From these results, it was concluded that the potential alphabetical ordering effect observed in Round 1 was effectively removed by the UI improvements introduced this time.\n"
+ },
+ "17": {
+ "frontmatter": {
+ "evidence_id": "17",
+ "title": "Reliability Verification of Badgeholder Set Size",
+ "author": "BeaconLabs",
+ "date": "2024-12-19",
+ "citation": [
+ {
+ "name": "A deepdive into FIL-RetroPGF-2 results",
+ "type": "link",
+ "src": "https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4"
+ }
+ ],
+ "results": [
+ {
+ "intervention": "Subsets of badgeholders (randomly sampled, ranging from 28 to 33 members)",
+ "outcome_variable": "Distribution of funding allocations to projects",
+ "outcome": "N/A"
+ }
+ ],
+ "strength": "0",
+ "methodologies": [
+ "Bootstrap method"
+ ],
+ "version": "1.0.0",
+ "datasets": [
+ "All voting data cast by badgeholders in FIL-RetroPGF-2"
+ ]
+ },
+ "content": "\n## Key Points\n\nBootstrap analysis demonstrated that the number of badgeholders (33 participants in FIL-RetroPGF-2) is a reasonable scale for estimating the “true signal” of funding allocation.\n\n## Background\n\nDecision-making by a small number of voters carries the risk of significant variation depending on how the voter group is selected. Therefore, it was necessary to verify whether the funding allocation results by the 33 participating badgeholders remained stable, even if a different set of members had been chosen.\n\n## Analysis Method\n\n### Dataset\n\nAll voting data cast by badgeholders in FIL-RetroPGF-2\n\n### Intervention / Explanatory Variable\n\nSubsets of badgeholders (randomly sampled, ranging from 28 to 33 members)\n\n### Dependent Variable\n\nDistribution of funding allocations to projects\n\n### Identification Strategy\n\nAnalysis was conducted using the bootstrap method. Specifically, 1,000 subsets were randomly drawn from the actual voting badgeholders, and funding allocations were recalculated for each subset. This allowed for the estimation of confidence intervals for the funding distribution, which were then compared with the actual allocation results.\n\n## Results\n\n- The confidence intervals obtained through bootstrap analysis showed little dispersion.\n- This indicates that with 33 badgeholders, the overall funding allocation trends would not change significantly even if some voters were replaced, thereby confirming the reliability of the results.\n- The interquartile range (IQR) also supports the conclusion that this number of badgeholders is appropriate for capturing the “true signal.”\n",
+ "raw": "---\nevidence_id: \"17\"\nresults:\n - intervention: \"Subsets of badgeholders (randomly sampled, ranging from 28 to 33 members)\"\n outcome_variable: \"Distribution of funding allocations to projects\"\n outcome: \"N/A\"\nstrength: \"0\"\nmethodologies:\n - \"Bootstrap method\"\nversion: \"1.0.0\"\ndatasets:\n - \"All voting data cast by badgeholders in FIL-RetroPGF-2\"\ntitle: \"Reliability Verification of Badgeholder Set Size\"\ndate: \"2024-12-19\"\n\ncitation:\n - name: \"A deepdive into FIL-RetroPGF-2 results\"\n src: \"https://medium.com/cryptoeconlab/a-deepdive-into-fil-retropgf-2-results-880234699fe4\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nBootstrap analysis demonstrated that the number of badgeholders (33 participants in FIL-RetroPGF-2) is a reasonable scale for estimating the “true signal” of funding allocation.\n\n## Background\n\nDecision-making by a small number of voters carries the risk of significant variation depending on how the voter group is selected. Therefore, it was necessary to verify whether the funding allocation results by the 33 participating badgeholders remained stable, even if a different set of members had been chosen.\n\n## Analysis Method\n\n### Dataset\n\nAll voting data cast by badgeholders in FIL-RetroPGF-2\n\n### Intervention / Explanatory Variable\n\nSubsets of badgeholders (randomly sampled, ranging from 28 to 33 members)\n\n### Dependent Variable\n\nDistribution of funding allocations to projects\n\n### Identification Strategy\n\nAnalysis was conducted using the bootstrap method. Specifically, 1,000 subsets were randomly drawn from the actual voting badgeholders, and funding allocations were recalculated for each subset. This allowed for the estimation of confidence intervals for the funding distribution, which were then compared with the actual allocation results.\n\n## Results\n\n- The confidence intervals obtained through bootstrap analysis showed little dispersion.\n- This indicates that with 33 badgeholders, the overall funding allocation trends would not change significantly even if some voters were replaced, thereby confirming the reliability of the results.\n- The interquartile range (IQR) also supports the conclusion that this number of badgeholders is appropriate for capturing the “true signal.”\n"
+ },
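Entry 17's bootstrap draws repeated badgeholder subsets and recomputes the allocation each time. A sketch on toy ballots follows; subset sizes of 28 to 33 mirror the entry, while the simple-sum scoring rule and proportional payout are assumptions.

```python
# Sketch of the badgeholder bootstrap: resample voter subsets, recompute
# allocations, and summarize per-project dispersion (toy ballots, not the round's).
import numpy as np

rng = np.random.default_rng(2)
scores = rng.integers(0, 10, size=(33, 50)).astype(float)  # 33 voters, 50 projects

def allocate(s: np.ndarray, pool: float = 100_000.0) -> np.ndarray:
    totals = s.sum(axis=0)
    return pool * totals / totals.sum()

draws = []
for _ in range(1000):
    k = rng.integers(28, 34)                      # subset sizes 28..33
    idx = rng.choice(33, size=k, replace=False)   # drop some voters at random
    draws.append(allocate(scores[idx]))
draws = np.array(draws)

lo, hi = np.percentile(draws, [25, 75], axis=0)   # per-project IQR
print("median IQR width across projects:", np.median(hi - lo))
```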
256
+ "18": {
257
+ "frontmatter": {
258
+ "evidence_id": "18",
259
+ "title": "Relationship Between GitHub Links and Funding Allocation",
260
+ "author": "BeaconLabs",
261
+ "date": "2024-05-30",
262
+ "citation": [
263
+ {
264
+ "name": "Reflections on Filecoin's first round of RetroPGF",
265
+ "type": "link",
266
+ "src": "https://docs.oso.xyz/blog/fil-retropgf-1/"
267
+ }
268
+ ],
269
+ "results": [
270
+ {
271
+ "intervention": "Whether or not the project application included a GitHub link",
272
+ "outcome_variable": "The amount of FIL allocated to the project",
273
+ "outcome": "+"
274
+ }
275
+ ],
276
+ "strength": "1",
277
+ "methodologies": [
278
+ "Comparison Between Intervention Group (With GitHub Link) and Non-Intervention Group (Without GitHub Link)"
279
+ ],
280
+ "version": "2.0.0",
281
+ "datasets": [
282
+ "All voting data cast by badgeholders in FIL-RetroPGF-2"
283
+ ]
284
+ },
285
+ "content": "\n## Key Points\n\nProjects that included a GitHub link in their applications tended to receive more funding (FIL) compared to those that did not. This suggests that contributions to open-source software development may have influenced how badgeholders (evaluators) assessed project impact.\n\n## Background\n\nIn Filecoin’s first round of retroactive public goods funding (RetroPGF 1), projects could include links in their applications to document their contributions and impact on the ecosystem. Among these, GitHub links were the most common, making them an important data point in analyzing how contributions to open-source software were evaluated.\n\n## Analysis Method\n\n### Dataset\n\nApplication data from 106 projects that participated in Filecoin RetroPGF 1, verified by Open Source Observer (OSO). Specifically, the analysis examined whether GitHub links were included in applications and the amount of FIL allocated to each project.\n\n### Intervation / Explanatory Variable\n\nWhether or not the project application included a GitHub link\n\n### Dependent Variable\n\nThe amount of FIL allocated to the project\n\n### Identification Strategy\n\nProjects were divided into two groups: those with GitHub links (60 projects verified by OSO) and those without (the remaining 46 projects). The median FIL allocation between the two groups was compared.\n\n## Results\n\n- The 60 projects verified by OSO as having GitHub links received a median of 2135 FIL.\n- The remaining 46 projects, either without GitHub links or not verified by OSO, received a median of 1465 FIL.\n- These results show that projects including GitHub links performed better—that is, they received more funding allocations.\n",
286
+ "raw": "---\nevidence_id: \"18\"\nresults:\n - intervention: \"Whether or not the project application included a GitHub link\"\n outcome_variable: \"The amount of FIL allocated to the project\"\n outcome: \"+\"\nstrength: \"1\"\nmethodologies:\n - \"Comparison Between Intervention Group (With GitHub Link) and Non-Intervention Group (Without GitHub Link)\"\nversion: \"2.0.0\"\ndatasets:\n - \"All voting data cast by badgeholders in FIL-RetroPGF-2\"\ntitle: \"Relationship Between GitHub Links and Funding Allocation\"\ndate: \"2024-05-30\"\ncitation:\n - name: \"Reflections on Filecoin's first round of RetroPGF\"\n src: \"https://docs.oso.xyz/blog/fil-retropgf-1/\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nProjects that included a GitHub link in their applications tended to receive more funding (FIL) compared to those that did not. This suggests that contributions to open-source software development may have influenced how badgeholders (evaluators) assessed project impact.\n\n## Background\n\nIn Filecoin’s first round of retroactive public goods funding (RetroPGF 1), projects could include links in their applications to document their contributions and impact on the ecosystem. Among these, GitHub links were the most common, making them an important data point in analyzing how contributions to open-source software were evaluated.\n\n## Analysis Method\n\n### Dataset\n\nApplication data from 106 projects that participated in Filecoin RetroPGF 1, verified by Open Source Observer (OSO). Specifically, the analysis examined whether GitHub links were included in applications and the amount of FIL allocated to each project.\n\n### Intervation / Explanatory Variable\n\nWhether or not the project application included a GitHub link\n\n### Dependent Variable\n\nThe amount of FIL allocated to the project\n\n### Identification Strategy\n\nProjects were divided into two groups: those with GitHub links (60 projects verified by OSO) and those without (the remaining 46 projects). The median FIL allocation between the two groups was compared.\n\n## Results\n\n- The 60 projects verified by OSO as having GitHub links received a median of 2135 FIL.\n- The remaining 46 projects, either without GitHub links or not verified by OSO, received a median of 1465 FIL.\n- These results show that projects including GitHub links performed better—that is, they received more funding allocations.\n"
287
+ },
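The two-group median comparison this entry describes is simple enough to sketch directly in the package's own language. A minimal sketch, assuming a hypothetical `projects` array whose `hasGithubLink` and `fil` fields stand in for the OSO-verified application data:

```js
// Hypothetical stand-in for the 106 RetroPGF 1 applications.
const projects = [
  { hasGithubLink: true, fil: 2135 },
  { hasGithubLink: false, fil: 1465 },
  // ...
];

// Median of a numeric array.
const median = (xs) => {
  const s = [...xs].sort((a, b) => a - b);
  const mid = Math.floor(s.length / 2);
  return s.length % 2 ? s[mid] : (s[mid - 1] + s[mid]) / 2;
};

const withLink = projects.filter((p) => p.hasGithubLink).map((p) => p.fil);
const withoutLink = projects.filter((p) => !p.hasGithubLink).map((p) => p.fil);
console.log(median(withLink), median(withoutLink)); // reported: 2135 vs. 1465 FIL
```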
288
+ "19": {
289
+ "frontmatter": {
290
+ "evidence_id": "19",
291
+ "title": "No Correlation Between Development Effort and Funding Allocation",
292
+ "author": "BeaconLabs",
293
+ "date": "2024-05-30",
294
+ "citation": [
295
+ {
296
+ "name": "Reflections on Filecoin's first round of RetroPGF",
297
+ "type": "link",
298
+ "src": "https://docs.oso.xyz/blog/fil-retropgf-1/"
299
+ }
300
+ ],
301
+ "results": [
302
+ {
303
+ "intervention": "Developer months as a proxy for development effort",
304
+ "outcome_variable": "The amount of FIL allocated to each project",
305
+ "outcome": "-"
306
+ }
307
+ ],
308
+ "strength": "1",
309
+ "methodologies": [
310
+ "Visualized the relationship by using scatter plots"
311
+ ],
312
+ "version": "2.0.0",
313
+ "datasets": [
314
+ "Data from 60 projects with GitHub links verified by Open Source Observer"
315
+ ]
316
+ },
317
+ "content": "\n## Key Points\n\nThere was little correlation between the amount of development effort (measured in “developer months”) and the amount of funding (FIL) allocated through RetroPGF. Smaller projects tended to show a higher “ROI (Return on Investment)” in terms of FIL received relative to effort invested.\n\n## Background\n\nFor funding mechanisms like RetroPGF to serve as a meaningful source of income for developers, we would expect a proper correlation between inputs (effort), outputs (impact), and rewards. In this analysis, we used “developer months” as a proxy measure of development effort and examined its relationship with rewards.\n\n## Analysis Method\n\n### Dataset\n\n- Data from 60 projects with GitHub links verified by Open Source Observer (OSO).\n- The dataset included each project’s commit history and FIL allocation.\n\n### Intervation / Explanatory Variable\n\n- “Developer months” as a proxy for development effort.\n- A “developer month” was defined as a unique GitHub contributor who made three or more commits to the repository in a given month.\n\n### Dependent Variable\n\nThe amount of FIL allocated to each project\n\n### Identification Strategy\n\nWe visualized the relationship between “developer months” and allocated FIL for the 60 OSO-verified projects using scatter plots and analyzed correlations.\n\n## Results\n\n- There was little correlation between allocated FIL and “developer months.”\n- Even though the amount of input varied exponentially across projects, the differences in FIL rewards were relatively small. As a result, smaller projects tended to have higher FIL per developer month (higher ROI). For example, one project received 5,000 FIL for 72 developer months (about 70 FIL/month), while another received 4,000 FIL for 36 developer months (about 111 FIL/month; note: the original text stated 140 FIL/month, but the calculation does not match—although the trend remains the same).\n- This issue has also been observed in Optimism’s RetroPGF, where smaller teams tend to receive more rewards per contributor compared to larger teams.\n",
318
+ "raw": "---\nevidence_id: \"19\"\nresults:\n - intervention: \"Developer months as a proxy for development effort\"\n outcome_variable: \"The amount of FIL allocated to each project\"\n outcome: \"-\"\nstrength: \"1\"\nmethodologies:\n - \"Visualized the relationship by using scatter plots\"\nversion: \"2.0.0\"\ndatasets:\n - \"Data from 60 projects with GitHub links verified by Open Source Observer\"\ntitle: \"No Correlation Between Development Effort and Funding Allocation\"\ndate: \"2024-05-30\"\ncitation:\n - name: \"Reflections on Filecoin's first round of RetroPGF\"\n src: \"https://docs.oso.xyz/blog/fil-retropgf-1/\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThere was little correlation between the amount of development effort (measured in “developer months”) and the amount of funding (FIL) allocated through RetroPGF. Smaller projects tended to show a higher “ROI (Return on Investment)” in terms of FIL received relative to effort invested.\n\n## Background\n\nFor funding mechanisms like RetroPGF to serve as a meaningful source of income for developers, we would expect a proper correlation between inputs (effort), outputs (impact), and rewards. In this analysis, we used “developer months” as a proxy measure of development effort and examined its relationship with rewards.\n\n## Analysis Method\n\n### Dataset\n\n- Data from 60 projects with GitHub links verified by Open Source Observer (OSO).\n- The dataset included each project’s commit history and FIL allocation.\n\n### Intervation / Explanatory Variable\n\n- “Developer months” as a proxy for development effort.\n- A “developer month” was defined as a unique GitHub contributor who made three or more commits to the repository in a given month.\n\n### Dependent Variable\n\nThe amount of FIL allocated to each project\n\n### Identification Strategy\n\nWe visualized the relationship between “developer months” and allocated FIL for the 60 OSO-verified projects using scatter plots and analyzed correlations.\n\n## Results\n\n- There was little correlation between allocated FIL and “developer months.”\n- Even though the amount of input varied exponentially across projects, the differences in FIL rewards were relatively small. As a result, smaller projects tended to have higher FIL per developer month (higher ROI). For example, one project received 5,000 FIL for 72 developer months (about 70 FIL/month), while another received 4,000 FIL for 36 developer months (about 111 FIL/month; note: the original text stated 140 FIL/month, but the calculation does not match—although the trend remains the same).\n- This issue has also been observed in Optimism’s RetroPGF, where smaller teams tend to receive more rewards per contributor compared to larger teams.\n"
319
+ },
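As a rough illustration of this entry's method, the sketch below derives the "developer months" proxy (a unique contributor with three or more commits to a project in a month) and a plain Pearson correlation from assumed commit records; the record shape and field names are illustrative, not the study's actual schema:

```js
// commits: one record per commit, e.g. { project: "a", author: "bob", month: "2023-04" }
const developerMonths = (commits) => {
  const perKey = new Map(); // "project|author|month" -> commit count
  for (const c of commits) {
    const key = `${c.project}|${c.author}|${c.month}`;
    perKey.set(key, (perKey.get(key) ?? 0) + 1);
  }
  const perProject = new Map();
  for (const [key, n] of perKey) {
    if (n < 3) continue; // needs >= 3 commits in the month to count
    const project = key.split("|")[0];
    perProject.set(project, (perProject.get(project) ?? 0) + 1);
  }
  return perProject; // project -> developer months
};

// Pearson correlation between effort (developer months) and reward (FIL),
// assuming xs and ys are aligned, equal-length arrays.
const pearson = (xs, ys) => {
  const n = xs.length;
  const mean = (a) => a.reduce((s, v) => s + v, 0) / n;
  const mx = mean(xs), my = mean(ys);
  let sxy = 0, sxx = 0, syy = 0;
  for (let i = 0; i < n; i++) {
    sxy += (xs[i] - mx) * (ys[i] - my);
    sxx += (xs[i] - mx) ** 2;
    syy += (ys[i] - my) ** 2;
  }
  return sxy / Math.sqrt(sxx * syy);
};

// ROI arithmetic from the entry: 5000 / 72 ≈ 69 FIL/month vs. 4000 / 36 ≈ 111 FIL/month.
```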
320
+ "20": {
321
+ "frontmatter": {
322
+ "evidence_id": "20",
323
+ "title": "Comparison of Funding Allocation Distribution in Filecoin and Optimism’s RetroPGF",
324
+ "author": "BeaconLabs",
325
+ "date": "2024-05-30",
326
+ "citation": [
327
+ {
328
+ "name": "Reflections on Filecoin's first round of RetroPGF",
329
+ "type": "link",
330
+ "src": "https://docs.oso.xyz/blog/fil-retropgf-1/"
331
+ }
332
+ ],
333
+ "results": [
334
+ {
335
+ "intervention": "Type of funding program (Filecoin RetroPGF 1 vs. Optimism RetroPGF3)",
336
+ "outcome_variable": "Distribution of funding amounts across projects",
337
+ "outcome": "N/A"
338
+ }
339
+ ],
340
+ "strength": "1",
341
+ "methodologies": [
342
+ "Visualizing the distribution of funding amounts in both rounds and comparing their shapes (flatness)"
343
+ ],
344
+ "version": "2.0.0",
345
+ "datasets": [
346
+ "Allocation data for 99 projects from Filecoin RetroPGF 1 and allocation data from Optimism RetroPGF3"
347
+ ]
348
+ },
349
+ "content": "\n## Key Points\n\nThe funding allocation in Filecoin’s first RetroPGF round showed a very flat distribution compared to Optimism’s RetroPGF3. This means there was not a large gap in funding amounts between high-impact projects and mid-tier projects.\n\n## Background\n\nHow resources are allocated in retrospective public goods funding reflects the values and evaluation mechanisms of the ecosystem. By comparing the results of Filecoin’s first round with the more mature Optimism round, we can highlight their characteristics.\n\n## Analysis Method\n\n### Dataset\n\nAllocation data for 99 projects from Filecoin RetroPGF 1 and allocation data from Optimism RetroPGF3\n\n### Intervation / Explanatory Variable\n\nType of funding program (Filecoin RetroPGF 1 vs. Optimism RetroPGF3)\n\n### Dependent Variable\n\nDistribution of funding amounts across projects\n\n### Identification Strategy\n\nVisualizing the distribution of funding amounts in both rounds and comparing their shapes (flatness)\n\n## Results\n\n- In Filecoin, the top project (GLIF Nodes & RPC API) received 4,365 FIL, which was only about twice as much as the median project (Filemarket), which received 1,925 FIL.\n- This allocation is described as a relatively even “peanut butter spread” distribution.\n- This distribution was much flatter than Optimism’s RetroPGF3.\n- In Filecoin, there was also little difference in funding amounts across categories. Although the “Infrastructure” category was generally favored, the distribution within that category was just as flat as in other categories. In contrast, Optimism showed more pronounced differences by theme.\n",
350
+ "raw": "---\nevidence_id: \"20\"\nresults:\n - intervention: \"Type of funding program (Filecoin RetroPGF 1 vs. Optimism RetroPGF3)\"\n outcome_variable: \"Distribution of funding amounts across projects\"\n outcome: \"N/A\"\nstrength: \"1\"\nmethodologies:\n - \"Visualizing the distribution of funding amounts in both rounds and comparing their shapes (flatness)\"\nversion: \"2.0.0\"\ndatasets:\n - \"Allocation data for 99 projects from Filecoin RetroPGF 1 and allocation data from Optimism RetroPGF3\"\ntitle: \"Comparison of Funding Allocation Distribution in Filecoin and Optimism’s RetroPGF\"\ndate: \"2024-05-30\"\ncitation:\n - name: \"Reflections on Filecoin's first round of RetroPGF\"\n src: \"https://docs.oso.xyz/blog/fil-retropgf-1/\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThe funding allocation in Filecoin’s first RetroPGF round showed a very flat distribution compared to Optimism’s RetroPGF3. This means there was not a large gap in funding amounts between high-impact projects and mid-tier projects.\n\n## Background\n\nHow resources are allocated in retrospective public goods funding reflects the values and evaluation mechanisms of the ecosystem. By comparing the results of Filecoin’s first round with the more mature Optimism round, we can highlight their characteristics.\n\n## Analysis Method\n\n### Dataset\n\nAllocation data for 99 projects from Filecoin RetroPGF 1 and allocation data from Optimism RetroPGF3\n\n### Intervation / Explanatory Variable\n\nType of funding program (Filecoin RetroPGF 1 vs. Optimism RetroPGF3)\n\n### Dependent Variable\n\nDistribution of funding amounts across projects\n\n### Identification Strategy\n\nVisualizing the distribution of funding amounts in both rounds and comparing their shapes (flatness)\n\n## Results\n\n- In Filecoin, the top project (GLIF Nodes & RPC API) received 4,365 FIL, which was only about twice as much as the median project (Filemarket), which received 1,925 FIL.\n- This allocation is described as a relatively even “peanut butter spread” distribution.\n- This distribution was much flatter than Optimism’s RetroPGF3.\n- In Filecoin, there was also little difference in funding amounts across categories. Although the “Infrastructure” category was generally favored, the distribution within that category was just as flat as in other categories. In contrast, Optimism showed more pronounced differences by theme.\n"
351
+ },
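The "flatness" claim in this entry reduces to simple arithmetic on the two reported figures; a worked check:

```js
// Top project (GLIF Nodes & RPC API) vs. median project (Filemarket).
const topToMedian = (top, median) => top / median;
console.log(topToMedian(4365, 1925).toFixed(2)); // "2.27" — only ~2x, i.e. a flat spread
```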
352
+ "08": {
353
+ "frontmatter": {
354
+ "evidence_id": "08",
355
+ "title": "How Platform Owner Entry Affects OSS Contributions by Existing and New Contributors: An Experiment with AWS Elasticsearch",
356
+ "author": "BeaconLabs",
357
+ "date": "2024-06-20",
358
+ "citation": [
359
+ {
360
+ "name": "How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers",
361
+ "type": "link",
362
+ "src": "https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf"
363
+ }
364
+ ],
365
+ "results": [
366
+ {
367
+ "intervention": "The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.",
368
+ "outcome_variable": "Overall OSS contributions by combining both existing and new contributors(the number of commits, lines of code changed/added, and files of code changed/added)",
369
+ "outcome": "+"
370
+ }
371
+ ],
372
+ "strength": "3",
373
+ "methodologies": [
374
+ "DID"
375
+ ],
376
+ "version": "1.0.0",
377
+ "datasets": [
378
+ ""
379
+ ]
380
+ },
381
+ "content": "\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nGiven the opposing effects of reduced contributions by existing contributors and increased contributions by new contributors, it was an important question to assess how the platform owner’s entry affects total contributions to the open-source project.\n\n## Analysis Method\n\n### Dataset\n\nWeekly contributions from all cloud developers were aggregated at the country level and analyzed at the country-week level.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- Overall OSS contributions by combining both existing and new contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Overall contributions were measured as the natural logarithm of the total number of commits by all contributors.\n - Robustness checks also included the natural logarithm of lines of code and files changed/added.\n\n### Identification Strategy\n\n- After aggregating data from both existing and new contributors, stacked difference-in-differences analysis was applied at the country-week level.\n\n## Results\n\n- The analysis shows that the platform owner’s entry increases overall contributions to the open-source software project by cloud developers.\n- This result suggests that, contrary to concerns that platform owners' entry harms open-source startups or communities, such entry may actually enhance the complementary firm's ability to attract external knowledge contributions.\n",
382
+ "raw": "---\nevidence_id: \"08\"\nresults:\n - intervention: \"The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.\"\n outcome_variable: \"Overall OSS contributions by combining both existing and new contributors(the number of commits, lines of code changed/added, and files of code changed/added)\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"DID\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"How Platform Owner Entry Affects OSS Contributions by Existing and New Contributors: An Experiment with AWS Elasticsearch\"\ndate: \"2024-06-20\"\n\ncitation:\n - name: \"How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers\"\n src: \"https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nGiven the opposing effects of reduced contributions by existing contributors and increased contributions by new contributors, it was an important question to assess how the platform owner’s entry affects total contributions to the open-source project.\n\n## Analysis Method\n\n### Dataset\n\nWeekly contributions from all cloud developers were aggregated at the country level and analyzed at the country-week level.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- Overall OSS contributions by combining both existing and new contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Overall contributions were measured as the natural logarithm of the total number of commits by all contributors.\n - Robustness checks also included the natural logarithm of lines of code and files changed/added.\n\n### Identification Strategy\n\n- After aggregating data from both existing and new contributors, stacked difference-in-differences analysis was applied at the country-week level.\n\n## Results\n\n- The analysis shows that the platform owner’s entry increases overall contributions to the open-source software project by cloud developers.\n- This result suggests that, contrary to concerns that platform owners' entry harms open-source startups or communities, such entry may actually enhance the complementary firm's ability to attract 
external knowledge contributions.\n"
383
+ },
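To make the DID design in this entry concrete, here is a minimal sketch of how one country-week observation could be encoded. The row shape, the `entryDates` map, and the `log(1 + commits)` guard for zero-commit weeks are assumptions for illustration; the actual estimation would run in a statistics package, not plain JS:

```js
// entryDates: Map of country -> week index in which AWS entered that market.
const buildPanelRow = ({ country, week, commits }, entryDates) => {
  const entry = entryDates.has(country) ? 1 : 0; // treated country
  const after = entry && week >= entryDates.get(country) ? 1 : 0; // post-entry period
  return {
    country,
    week,
    entry,
    after,
    entryXAfter: entry * after, // DID treatment-effect term
    logCommits: Math.log(1 + commits), // ln of total commits (1 added here to guard zeros)
  };
};
```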
384
+ "00": {
385
+ "frontmatter": {
386
+ "evidence_id": "00",
387
+ "title": "Effect of Pull Request Submission by listing individual OSS contributors on GitHub Sponsors",
388
+ "author": "BeaconLabs",
389
+ "date": "2023-06-25",
390
+ "citation": [
391
+ {
392
+ "name": "Sponsorship Funding in Open-Source Software: Incentivize or Crowd-Out Motivations to Create, Maintain and Share?",
393
+ "type": "link",
394
+ "src": "https://download.ssrn.com/24/07/17/ssrn_id4484403_code2842526.pdf?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEL%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQCVjc%2FFHZ33w0i7HAmAtYnTEdpgdcQ6UZwOc4n67Tl60gIgZB6Uq5N%2FqP%2B4K20cAeIk52jv644E%2FeVlpwPPQ8dkJpgqxQUI2P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARAEGgwzMDg0NzUzMDEyNTciDGAtKXuhQgasRUApeiqZBcalab7rv%2BFmzkC0HZPY3mSLdk4vD%2BiU2er03hIvbf8eMj8igRna5QuCbZbCsW7GePAR2ZVjrE2Kc%2BLigLblL1QvKnpiptI5DA%2FyhM%2BHJU4DyLeNrGe2VQkFSesnc0SCfknLoxs1MSWAIyzOx7hahS2Gmuyv%2FIiauI5IHbchapMskP6TnQJtLubha63W9IKRj9g%2Fp%2B98q3UeIRELJgCGek8JumnFeoyQja%2BJ5zFpOOnzbyjzADk15wOizj801PVEQ4IpQ9L7YGkJXuTFkhsysipO80ALBSpNWT03ShqvBkhqvlR4IbhVE1i%2B4%2Fnr%2FakU%2BnDY%2BvlD%2FTklE%2B1AO3KgKzoi1nNH%2B7F%2FAVQEkyc%2FxvhuUYDQ%2BN10QRGgBM%2Fq5lwBoZ%2FUWR4mLzj8wGzETFkQ8%2F67wu%2FgIloV5BYMiIiEYfo%2FdW4iRD6PFjL4lz5kuSXKy1byMqgAzXOELDpLfMo0nDuok1JFOEzH%2Bh032MQ%2F3P167yeXaEPgHOJO9iH3oKRLkdyykjZRaC4GjV6X%2Fs6vIEkjsCdELkRmvhhDjdwsrraQWZVYrxHvvZuwtBnopzYM84WYnUxf7G51NxqB0%2B0YAv9M6ZlhTHsRrljf1rt8ePWRnPy00zvtjF8dkpW2%2FG3e9RlpxGM4BXPEjCzYBjPbuzQz3TtshMMHHSBj7MJuN0RtIpUw0Ss1%2F8LHgc%2B3LRGEMbQB0oJ8aniRd709PO6jU6cncM9s61L4l2V4c1f76j2CY2UmDMna5S7YCxvTmoucq%2BTmg97SU698%2Fh84OCuy2HKViOfiVAd6vHHnSyiflgNvKUR%2FVUzzEqY2vLPixekNtXum22Gi9LWo9xFTMKnsx1Ta69dwhU9F4qB8JOdizJEnldZbND42mVYzMLms%2BcMGOrEBwyMSRsDbPcBIm6qlkSdFxzJ4w6T1yjroN4xU9Z8dicm9Cge6JAztjPAJQthXMe90DbQp2xBGxeauGif2bWRyQ02t4XziAX7lXyBOPLJhpJdjc273%2BysmLZ68aJ1mOdRw62QKzqmNaN2%2BXKb8zEJ%2B%2BBMh%2BKVhAi5NObOFYE491zio5z1E8LsQTM%2Fv0MKn7X2bCUGoz%2BOjfCJZdI6OT3gqr%2BrbWWVBLtuWssXsBBB0RBJZ&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20250721T153800Z&X-Amz-SignedHeaders=host&X-Amz-Expires=300&X-Amz-Credential=ASIAUPUUPRWEUWLXFSAT%2F20250721%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=d5734119b1eab43ae1a33536df99bfee85c22702c6cb963d32946a325b79ce2f&abstractId=4484403"
395
+ }
396
+ ],
397
+ "results": [
398
+ {
399
+ "intervention": "Listing individual OSS contributors on GitHub Sponsors",
400
+ "outcome_variable": "Submitting Pull Requests (PRs)",
401
+ "outcome": "+-"
402
+ }
403
+ ],
404
+ "strength": "3",
405
+ "methodologies": [
406
+ "DID、Coarsened Exact Matching(CEM) technique、Robustness Tests"
407
+ ],
408
+ "version": "1.0.0",
409
+ "datasets": [
410
+ "A dataset based on the activities of contributors active on both the GitHub and Stack Overflow platforms."
411
+ ],
412
+ "tags": [
413
+ "oss"
414
+ ]
415
+ },
416
+ "content": "\n## Key Points\n\nThe submission of pull requests experienced a temporary short-term increase, but eventually returned to pre-treatment levels, with a crowding-out effect emerging over time. Among contributors with a high level of community engagement (e.g., frequent contributions to others’ projects, large follower count), a negative impact on knowledge creation activities (i.e., the crowding-out effect) was observed. For contributors who set higher active funding goals, sponsorship appeared to have a positive effect on their knowledge creation activities.\n\n## Background\n\nThe world’s digital infrastructure is built on open-source software (OSS) developed on platforms like GitHub (GH), and supported by complementary knowledge-sharing platforms like Stack Overflow (SO). On these platforms, numerous programmers, maintainers, and researchers devote their efforts to building and maintaining code, as well as supporting users. Contributors play a crucial role not only in knowledge creation activities, such as contributing code and adding new features, but also in knowledge maintenance activities, such as reviewing and integrating community-submitted code.\n\nHowever, it has been pointed out that OSS projects are increasingly burdened by the need to maintain existing code rather than create new code, and that many projects fail due to the lack of intrinsic motivation for maintenance work. Maintenance is often seen as “mundane but necessary,” and tends to lack intrinsic appeal. For example, the 2020 Linux Foundation OSS Contributor Survey highlighted that, although participants showed minimal interest in spending significant time on maintenance-related tasks, they emphasized that the most crucial support OSS projects need is for maintenance work—especially related to security improvements.\n\nIn this context, there is a growing trend to provide financial support to encourage and sustain volunteer contributors. In May 2019, GH launched GitHub Sponsors, a feature allowing individual OSS developers to receive donations from the community. Unlike traditional project- or activity-specific funding, GitHub Sponsors focuses on individual contributors, offering them the flexibility to reallocate their efforts not only within the host platform but also across other complementary platforms in which they participate. This study investigates the impact of such sponsorship-based funding on OSS contributors’ behavior—specifically, its effect on knowledge creation and maintenance activities on GH and the spillover effects on SO, a complementary knowledge-sharing platform.\n\n## Analytical Methods\n\n### Dataset\n\n- **Data Collection**: The dataset is based on the behavior of contributors active on both GitHub (GH) and Stack Overflow (SO), obtained via APIs provided by both platforms. The panel data spans four years, from January 2018 to December 2021.\n - During the initial data collection period, GH Sponsors was available in 30 countries that supported STRIPE payments. The sample is therefore limited to GH contributors from these countries.\n - To select GH contributors, the following criteria were applied: owning at least 5 repositories, having more than 10 followers, and having joined GH before 2018. This resulted in a sample of 20,841 GH contributors.\n - SO profiles were identified using names, email addresses, and login IDs listed on GH, and only included if a GH URL matched on the SO profile. 
Ultimately, 5,910 GH contributors who had joined both platforms prior to the observation period were identified, of whom 1,467 were listed for sponsorship during the observation period.\n - Activity-related data were aggregated on a monthly basis.\n\n### Intervation / Explanatory Variable\n\n- The intervention in this study is the contributor being listed on GitHub Sponsors (Sponsor listing).\n- The primary explanatory variable of interest in this study is a binary indicator that equals 1 starting from the month the contributor is listed for sponsorship (Sponsor Listed).\n- This variable is used to capture the effect starting from the point at which the contributor is listed on GitHub Sponsors.\n\n### Dependent Variables\n\n- **Knowledge Creation Activity on GitHub**:\n - **Number of Pull Requests (PRs) Submitted** per month.\n - Proposals for changes to the codebase, including new features, bug fixes, and improvements.\n\n### Identification Strategy\n\n- **A Difference-in-Differences (DID)** estimation was used to measure changes in pull request submission due to sponsorship.\n- **Coarsened Exact Matching (CEM)** was applied to match contributors based on their activity levels prior to being listed for sponsorship.\n\n### Robustness Test\n\n- The following supplemental analyses on PR submissions were conducted:\n - Use of **alternative dependent variables for PR submissions**.\n - Controlling for **PR characteristics (e.g., size, number of files, number of commits, time to merge)**.\n - Considering **self-merged PRs**.\n - Considering **PRs related to issues**.\n\n## Results\n\n- **No significant overall effect of sponsorship on knowledge creation activity (PR submissions) on GitHub.**。\n - A temporary increase is observed in the short term, but a **crowding-out effect** appears over time.\n - In the long term, the effect on PR submissions disappears due to a decline in intrinsic motivation.\n\n- For **contributors with high community engagement** or **those who set higher funding goals**, a stronger negative effect (reduction in PR submissions) was observed.\n- There is a tendency **to see an increase in PR submissions to less popular projects**.\n",
417
+ "raw": "---\nevidence_id: \"00\"\nresults:\n - intervention: \"Listing individual OSS contributors on GitHub Sponsors\"\n outcome_variable: \"Submitting Pull Requests (PRs)\"\n outcome: \"+-\"\nstrength: \"3\"\nmethodologies:\n - \"DID、Coarsened Exact Matching(CEM) technique、Robustness Tests\"\nversion: \"1.0.0\"\ndatasets:\n - \"A dataset based on the activities of contributors active on both the GitHub and Stack Overflow platforms.\"\ntitle: \"Effect of Pull Request Submission by listing individual OSS contributors on GitHub Sponsors\"\ndate: \"2023-06-25\"\ntags:\n - \"oss\"\ncitation:\n - type: \"link\"\n name: \"Sponsorship Funding in Open-Source Software: Incentivize or Crowd-Out Motivations to Create, Maintain and Share?\"\n src: \"https://download.ssrn.com/24/07/17/ssrn_id4484403_code2842526.pdf?response-content-disposition=inline&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEL%2F%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEaCXVzLWVhc3QtMSJHMEUCIQCVjc%2FFHZ33w0i7HAmAtYnTEdpgdcQ6UZwOc4n67Tl60gIgZB6Uq5N%2FqP%2B4K20cAeIk52jv644E%2FeVlpwPPQ8dkJpgqxQUI2P%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FARAEGgwzMDg0NzUzMDEyNTciDGAtKXuhQgasRUApeiqZBcalab7rv%2BFmzkC0HZPY3mSLdk4vD%2BiU2er03hIvbf8eMj8igRna5QuCbZbCsW7GePAR2ZVjrE2Kc%2BLigLblL1QvKnpiptI5DA%2FyhM%2BHJU4DyLeNrGe2VQkFSesnc0SCfknLoxs1MSWAIyzOx7hahS2Gmuyv%2FIiauI5IHbchapMskP6TnQJtLubha63W9IKRj9g%2Fp%2B98q3UeIRELJgCGek8JumnFeoyQja%2BJ5zFpOOnzbyjzADk15wOizj801PVEQ4IpQ9L7YGkJXuTFkhsysipO80ALBSpNWT03ShqvBkhqvlR4IbhVE1i%2B4%2Fnr%2FakU%2BnDY%2BvlD%2FTklE%2B1AO3KgKzoi1nNH%2B7F%2FAVQEkyc%2FxvhuUYDQ%2BN10QRGgBM%2Fq5lwBoZ%2FUWR4mLzj8wGzETFkQ8%2F67wu%2FgIloV5BYMiIiEYfo%2FdW4iRD6PFjL4lz5kuSXKy1byMqgAzXOELDpLfMo0nDuok1JFOEzH%2Bh032MQ%2F3P167yeXaEPgHOJO9iH3oKRLkdyykjZRaC4GjV6X%2Fs6vIEkjsCdELkRmvhhDjdwsrraQWZVYrxHvvZuwtBnopzYM84WYnUxf7G51NxqB0%2B0YAv9M6ZlhTHsRrljf1rt8ePWRnPy00zvtjF8dkpW2%2FG3e9RlpxGM4BXPEjCzYBjPbuzQz3TtshMMHHSBj7MJuN0RtIpUw0Ss1%2F8LHgc%2B3LRGEMbQB0oJ8aniRd709PO6jU6cncM9s61L4l2V4c1f76j2CY2UmDMna5S7YCxvTmoucq%2BTmg97SU698%2Fh84OCuy2HKViOfiVAd6vHHnSyiflgNvKUR%2FVUzzEqY2vLPixekNtXum22Gi9LWo9xFTMKnsx1Ta69dwhU9F4qB8JOdizJEnldZbND42mVYzMLms%2BcMGOrEBwyMSRsDbPcBIm6qlkSdFxzJ4w6T1yjroN4xU9Z8dicm9Cge6JAztjPAJQthXMe90DbQp2xBGxeauGif2bWRyQ02t4XziAX7lXyBOPLJhpJdjc273%2BysmLZ68aJ1mOdRw62QKzqmNaN2%2BXKb8zEJ%2B%2BBMh%2BKVhAi5NObOFYE491zio5z1E8LsQTM%2Fv0MKn7X2bCUGoz%2BOjfCJZdI6OT3gqr%2BrbWWVBLtuWssXsBBB0RBJZ&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Date=20250721T153800Z&X-Amz-SignedHeaders=host&X-Amz-Expires=300&X-Amz-Credential=ASIAUPUUPRWEUWLXFSAT%2F20250721%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Signature=d5734119b1eab43ae1a33536df99bfee85c22702c6cb963d32946a325b79ce2f&abstractId=4484403\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThe submission of pull requests experienced a temporary short-term increase, but eventually returned to pre-treatment levels, with a crowding-out effect emerging over time. Among contributors with a high level of community engagement (e.g., frequent contributions to others’ projects, large follower count), a negative impact on knowledge creation activities (i.e., the crowding-out effect) was observed. For contributors who set higher active funding goals, sponsorship appeared to have a positive effect on their knowledge creation activities.\n\n## Background\n\nThe world’s digital infrastructure is built on open-source software (OSS) developed on platforms like GitHub (GH), and supported by complementary knowledge-sharing platforms like Stack Overflow (SO). 
On these platforms, numerous programmers, maintainers, and researchers devote their efforts to building and maintaining code, as well as supporting users. Contributors play a crucial role not only in knowledge creation activities, such as contributing code and adding new features, but also in knowledge maintenance activities, such as reviewing and integrating community-submitted code.\n\nHowever, it has been pointed out that OSS projects are increasingly burdened by the need to maintain existing code rather than create new code, and that many projects fail due to the lack of intrinsic motivation for maintenance work. Maintenance is often seen as “mundane but necessary,” and tends to lack intrinsic appeal. For example, the 2020 Linux Foundation OSS Contributor Survey highlighted that, although participants showed minimal interest in spending significant time on maintenance-related tasks, they emphasized that the most crucial support OSS projects need is for maintenance work—especially related to security improvements.\n\nIn this context, there is a growing trend to provide financial support to encourage and sustain volunteer contributors. In May 2019, GH launched GitHub Sponsors, a feature allowing individual OSS developers to receive donations from the community. Unlike traditional project- or activity-specific funding, GitHub Sponsors focuses on individual contributors, offering them the flexibility to reallocate their efforts not only within the host platform but also across other complementary platforms in which they participate. This study investigates the impact of such sponsorship-based funding on OSS contributors’ behavior—specifically, its effect on knowledge creation and maintenance activities on GH and the spillover effects on SO, a complementary knowledge-sharing platform.\n\n## Analytical Methods\n\n### Dataset\n\n- **Data Collection**: The dataset is based on the behavior of contributors active on both GitHub (GH) and Stack Overflow (SO), obtained via APIs provided by both platforms. The panel data spans four years, from January 2018 to December 2021.\n - During the initial data collection period, GH Sponsors was available in 30 countries that supported STRIPE payments. The sample is therefore limited to GH contributors from these countries.\n - To select GH contributors, the following criteria were applied: owning at least 5 repositories, having more than 10 followers, and having joined GH before 2018. This resulted in a sample of 20,841 GH contributors.\n - SO profiles were identified using names, email addresses, and login IDs listed on GH, and only included if a GH URL matched on the SO profile. 
Ultimately, 5,910 GH contributors who had joined both platforms prior to the observation period were identified, of whom 1,467 were listed for sponsorship during the observation period.\n - Activity-related data were aggregated on a monthly basis.\n\n### Intervation / Explanatory Variable\n\n- The intervention in this study is the contributor being listed on GitHub Sponsors (Sponsor listing).\n- The primary explanatory variable of interest in this study is a binary indicator that equals 1 starting from the month the contributor is listed for sponsorship (Sponsor Listed).\n- This variable is used to capture the effect starting from the point at which the contributor is listed on GitHub Sponsors.\n\n### Dependent Variables\n\n- **Knowledge Creation Activity on GitHub**:\n - **Number of Pull Requests (PRs) Submitted** per month.\n - Proposals for changes to the codebase, including new features, bug fixes, and improvements.\n\n### Identification Strategy\n\n- **A Difference-in-Differences (DID)** estimation was used to measure changes in pull request submission due to sponsorship.\n- **Coarsened Exact Matching (CEM)** was applied to match contributors based on their activity levels prior to being listed for sponsorship.\n\n### Robustness Test\n\n- The following supplemental analyses on PR submissions were conducted:\n - Use of **alternative dependent variables for PR submissions**.\n - Controlling for **PR characteristics (e.g., size, number of files, number of commits, time to merge)**.\n - Considering **self-merged PRs**.\n - Considering **PRs related to issues**.\n\n## Results\n\n- **No significant overall effect of sponsorship on knowledge creation activity (PR submissions) on GitHub.**。\n - A temporary increase is observed in the short term, but a **crowding-out effect** appears over time.\n - In the long term, the effect on PR submissions disappears due to a decline in intrinsic motivation.\n\n- For **contributors with high community engagement** or **those who set higher funding goals**, a stronger negative effect (reduction in PR submissions) was observed.\n- There is a tendency **to see an increase in PR submissions to less popular projects**.\n"
418
+ },
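For readers unfamiliar with CEM, a rough sketch of the matching step this entry relies on: coarsen pre-listing covariates into bins, then keep only treated contributors who share a bin signature with at least one control. The covariate names and bin edges here are illustrative assumptions, not the paper's specification:

```js
const bin = (x, edges) => edges.findIndex((e) => x <= e); // coarsen one covariate

// Hypothetical pre-listing covariates: prior PRs and follower count.
const signature = (c) =>
  [bin(c.prePRs, [5, 20, 100, Infinity]), bin(c.followers, [10, 100, 1000, Infinity])].join("-");

const cemMatch = (treated, control) => {
  const strata = new Map(); // bin signature -> control units in that stratum
  for (const c of control) {
    const s = signature(c);
    if (!strata.has(s)) strata.set(s, []);
    strata.get(s).push(c);
  }
  // Keep only treated units with at least one control in the same stratum.
  return treated
    .map((t) => ({ treated: t, controls: strata.get(signature(t)) ?? [] }))
    .filter((m) => m.controls.length > 0);
};
```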
419
+ "05": {
420
+ "frontmatter": {
421
+ "evidence_id": "05",
422
+ "title": "Trading Volume and Utilization Outcomes of SuperStacks",
423
+ "author": "BeaconLabs",
424
+ "date": "2025-08-15",
425
+ "citation": [
426
+ {
427
+ "name": "SuperStacks Impact Analysis",
428
+ "type": "link",
429
+ "src": "https://gov.optimism.io/t/superstacks-impact-analysis/10225"
430
+ }
431
+ ],
432
+ "results": [
433
+ {
434
+ "intervention": "SuperStacks: A New Approach to Rewards on the Superchain",
435
+ "outcome_variable": "Trading volume and utilization",
436
+ "outcome": "+"
437
+ }
438
+ ],
439
+ "strength": "4",
440
+ "methodologies": [
441
+ "Pro-rata model, One-sided t-tests"
442
+ ],
443
+ "version": "1.0.0",
444
+ "datasets": [
445
+ ""
446
+ ]
447
+ },
448
+ "content": "\n## Key Points\n\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervation / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nTrading volume and utilization: Determining whether TVL growth led to higher demand-side trading volume and lending utilization, and whether these metrics remained at elevated equilibrium levels after incentives stopped.\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- DEX Trading Volume:\n - In DEX pools, trading volume per TVL was used as a measure of liquidity “productivity.”\n - CL100-USD₮0/kBTC on OP Mainnet: Alongside rapid TVL growth, trading volume increased by 126.7% during the program, and remained 42.7% above baseline after incentives ended (p < 0.01).\n - CL1-USD₮0/USDC on OP Mainnet: TVL rose, while trading volume saw a modest 2.5% increase during the program. However, after incentives ended, volume accelerated and stabilized 25.1% above baseline (p < 0.01).\n - BV-WETH/weETH on OP Mainnet: Beyond TVL growth, trading volume rose even more sharply, increasing 123.8% during the program and remaining 49.4% above baseline afterward (p < 0.01).\n- Lending Market Utilization:\n - In lending pools, utilization rate was used as the demand-side traction metric.\n - The lending pool with the highest performance at peak net TVL inflow showed a healthy 60.2% utilization rate, but the relationship between supply and demand momentum was less clear than in DEXs. Theoretically, competitive borrowing rates were expected to attract more borrowers, boost utilization, and deliver higher yields to lenders.\n",
449
+ "raw": "---\nevidence_id: \"05\"\nresults:\n - intervention: \"SuperStacks: A New Approach to Rewards on the Superchain\"\n outcome_variable: \"Trading volume and utilization\"\n outcome: \"+\"\nstrength: \"4\"\nmethodologies:\n - \"Pro-rata model, One-sided t-tests\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"Trading Volume and Utilization Outcomes of SuperStacks\"\ndate: \"2025-08-15\"\n\ncitation:\n - name: \"SuperStacks Impact Analysis\"\n src: \"https://gov.optimism.io/t/superstacks-impact-analysis/10225\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervation / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nTrading volume and utilization: Determining whether TVL growth led to higher demand-side trading volume and lending utilization, and whether these metrics remained at elevated equilibrium levels after incentives stopped.\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- DEX Trading Volume:\n - In DEX pools, trading volume per TVL was used as a measure of liquidity “productivity.”\n - CL100-USD₮0/kBTC on OP Mainnet: Alongside rapid TVL growth, trading volume increased by 126.7% during the program, and remained 42.7% above baseline after incentives ended (p < 0.01).\n - CL1-USD₮0/USDC on OP Mainnet: TVL rose, while trading volume saw a modest 2.5% increase during the program. However, after incentives ended, volume accelerated and stabilized 25.1% above baseline (p < 0.01).\n - BV-WETH/weETH on OP Mainnet: Beyond TVL growth, trading volume rose even more sharply, increasing 123.8% during the program and remaining 49.4% above baseline afterward (p < 0.01).\n- Lending Market Utilization:\n - In lending pools, utilization rate was used as the demand-side traction metric.\n - The lending pool with the highest performance at peak net TVL inflow showed a healthy 60.2% utilization rate, but the relationship between supply and demand momentum was less clear than in DEXs. Theoretically, competitive borrowing rates were expected to attract more borrowers, boost utilization, and deliver higher yields to lenders.\n"
450
+ },
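The pro-rata attribution rule used in this entry's identification strategy is straightforward to sketch; the input shape below is an assumption, not the analysis's actual data model:

```js
// Credit each program with a share of a pool's net TVL inflow proportional
// to its share of total USD incentives on that pool.
const proRataShare = (pool, program) => {
  const totalUsd = pool.incentives.reduce((sum, i) => sum + i.usd, 0);
  const own = pool.incentives.find((i) => i.program === program);
  return own ? (own.usd / totalUsd) * pool.netTvlInflow : 0;
};

// e.g. $100k SuperStacks + $50k co-incentives on a pool with $9M net inflow
// attributes $6M of that inflow to SuperStacks.
console.log(
  proRataShare(
    {
      netTvlInflow: 9e6,
      incentives: [
        { program: "SuperStacks", usd: 100e3 },
        { program: "Other", usd: 50e3 },
      ],
    },
    "SuperStacks"
  )
); // 6000000
```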
451
+ "07": {
452
+ "frontmatter": {
453
+ "evidence_id": "07",
454
+ "title": "How Platform Owner Entry Affects OSS Contributions by New Contributors: An Experiment with AWS Elasticsearch",
455
+ "author": "BeaconLabs",
456
+ "date": "2024-06-20",
457
+ "citation": [
458
+ {
459
+ "name": "How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers",
460
+ "type": "link",
461
+ "src": "https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf"
462
+ }
463
+ ],
464
+ "results": [
465
+ {
466
+ "intervention": "The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.",
467
+ "outcome_variable": "OSS contributions by new contributors (both the number of new entrants and the average contribution per new entrant).(the number of commits, lines of code changed/added, and files of code changed/added)",
468
+ "outcome": "+"
469
+ }
470
+ ],
471
+ "strength": "3",
472
+ "methodologies": [
473
+ "DID, PSE"
474
+ ],
475
+ "version": "1.0.0",
476
+ "datasets": [
477
+ ""
478
+ ]
479
+ },
480
+ "content": "\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nNew contributors are individuals who made their first contribution to the open-source project in a given period. Unlike existing contributors, they have no prior investments or ties to the project, and thus hold different reference points. From a prospect theory perspective, it was hypothesized that new contributors may perceive the platform owner’s entry as an opportunity for potential gains—such as enhanced professional reputation or career opportunities through increased visibility of the technology.\n\n## Analysis Method\n\n### Dataset\n\nData was collected from AWS and GitHub. Specifically, AWS official announcements provided information about the date and location of the Amazon Elasticsearch Service launch, while GitHub data provided developers’ contributions to Elasticsearch (contribution volume, personal GitHub experience, popularity, etc.). Commits by Elastic employees were excluded to focus on external developers’ contributions.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- OSS contributions by new contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Contributions by new contributors were measured as the natural logarithm of the total number of commits by all new contributors in each period.\n - Additionally, the natural logarithm of the number of new contributors (extensive margin) and the average contribution per new contributor (intensive margin) were measured.\n - As robustness checks, the natural logarithm of lines of code and files changed/added was also included.\n\n### Identification Strategy\n\n- AWS’s staggered market entry was treated as a natural experiment and stacked difference-in-differences analysis was applied.\n- The analysis was conducted at the country-week level with 5,635 observations across 16 treatment countries and 13 control countries.\n- Control countries were matched to treatment countries using PSM.\n\n## Results\n\n- The analysis strongly supports the hypothesis that the platform owner’s entry increases contributions by new contributors.\n- On average, the number of commits increased by about 7.27%.\n- This increase was driven by both a rise in the number of new contributors (awareness effect) and an increase in the average contribution per new contributor 
(reward effect). Moreover, after the platform owner's entry, new contributors tended to show stronger extrinsic motivations, suggesting a selection effect in the types of new contributors.\n",
481
+ "raw": "---\nevidence_id: \"07\"\nresults:\n - intervention: \"The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.\"\n outcome_variable: \"OSS contributions by new contributors (both the number of new entrants and the average contribution per new entrant).(the number of commits, lines of code changed/added, and files of code changed/added)\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"DID, PSE\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"How Platform Owner Entry Affects OSS Contributions by New Contributors: An Experiment with AWS Elasticsearch\"\ndate: \"2024-06-20\"\n\ncitation:\n - name: \"How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers\"\n src: \"https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nNew contributors are individuals who made their first contribution to the open-source project in a given period. Unlike existing contributors, they have no prior investments or ties to the project, and thus hold different reference points. From a prospect theory perspective, it was hypothesized that new contributors may perceive the platform owner’s entry as an opportunity for potential gains—such as enhanced professional reputation or career opportunities through increased visibility of the technology.\n\n## Analysis Method\n\n### Dataset\n\nData was collected from AWS and GitHub. Specifically, AWS official announcements provided information about the date and location of the Amazon Elasticsearch Service launch, while GitHub data provided developers’ contributions to Elasticsearch (contribution volume, personal GitHub experience, popularity, etc.). 
Commits by Elastic employees were excluded to focus on external developers’ contributions.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- OSS contributions by new contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Contributions by new contributors were measured as the natural logarithm of the total number of commits by all new contributors in each period.\n - Additionally, the natural logarithm of the number of new contributors (extensive margin) and the average contribution per new contributor (intensive margin) were measured.\n - As robustness checks, the natural logarithm of lines of code and files changed/added was also included.\n\n### Identification Strategy\n\n- AWS’s staggered market entry was treated as a natural experiment and stacked difference-in-differences analysis was applied.\n- The analysis was conducted at the country-week level with 5,635 observations across 16 treatment countries and 13 control countries.\n- Control countries were matched to treatment countries using PSM.\n\n## Results\n\n- The analysis strongly supports the hypothesis that the platform owner’s entry increases contributions by new contributors.\n- On average, the number of commits increased by about 7.27%.\n- This increase was driven by both a rise in the number of new contributors (awareness effect) and an increase in the average contribution per new contributor (reward effect). Moreover, after the platform owner's entry, new contributors tended to show stronger extrinsic motivations, suggesting a selection effect in the types of new contributors.\n"
482
+ },
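The Entry × After design described in the entry above (and in its companion entry on existing contributors further down this file) reduces, at its core, to a two-by-two difference-in-differences contrast on log-transformed commit counts. Below is a minimal sketch, assuming a hypothetical `panel` array of country-week rows with `entry`, `after`, and `commits` fields; the stacked event-specific datasets, fixed effects, and controls used in the paper are omitted.

```js
// Minimal 2x2 difference-in-differences sketch on ln(1 + commits).
// `panel` is a hypothetical array of rows: { entry: 0|1, after: 0|1, commits }.
const mean = (xs) => xs.reduce((a, b) => a + b, 0) / xs.length;

function didOnLogCommits(panel) {
  // Cell mean of ln(1 + commits) for a given (entry, after) combination.
  const cell = (entry, after) =>
    mean(
      panel
        .filter((r) => r.entry === entry && r.after === after)
        .map((r) => Math.log(1 + r.commits))
    );
  // (treated post - treated pre) - (control post - control pre),
  // i.e. the coefficient on Entry × After in a saturated 2x2 specification.
  return (cell(1, 1) - cell(1, 0)) - (cell(0, 1) - cell(0, 0));
}
```

With a log outcome, a coefficient b reads as roughly a 100·(exp(b) − 1)% change in commits, which is how headline figures such as the +7.27% for new contributors are expressed.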
483
+ "01": {
484
+ "frontmatter": {
485
+ "evidence_id": "01",
486
+ "title": "Effect of Code for America Brigade for momentum of OSS activity",
487
+ "author": "BeaconLabs",
488
+ "date": "2016-10-07",
489
+ "citation": [
490
+ {
491
+ "name": "Code for America Brigade: Connecting People, Places, and Apps within the Civic Hacker Community",
492
+ "type": "link",
493
+ "src": "https://www.ischool.berkeley.edu/sites/default/files/student_projects/karimcglynn_codeforamericabrigade_finalreport.pdf"
494
+ }
495
+ ],
496
+ "results": [
497
+ {
498
+ "intervention": "Construction and Deployment of 'Code for America Brigade'",
499
+ "outcome_variable": "Generation of 'momentum' in existing civic tech projects (e.g., number of page edits, map additions, and photo uploads on LocalWiki).",
500
+ "outcome": "+-"
501
+ }
502
+ ],
503
+ "strength": "0",
504
+ "methodologies": [
505
+ "Interview, Survey, Google Analitics"
506
+ ],
507
+ "version": "1.0.0",
508
+ "datasets": [
509
+ "Interview, Survey, Google Analitics"
510
+ ],
511
+ "tags": [
512
+ "oss"
513
+ ]
514
+ },
515
+ "content": "\n## Key Points\n\nThe LocalWiki project saw significant progress during a single-day event, with 633 pages edited, 100 maps added, and 138 new photos uploaded.\n\n## Background\n\nCode for America (CfA) is a nonprofit organization based in San Francisco, with a mission to make city governments more efficient, transparent, and responsive to resident needs through technology.\n\nEach year, CfA organizes a fellowship program in which small teams of selected programmers, designers, and other technologists partner with local governments to build web and mobile applications that address community issues.\n\nHowever, in Fall 2011, CfA faced two major challenges:\n\n1. There was a lack of infrastructure to coordinate and manage volunteer contributions from outside the organization. Despite receiving over 550 fellowship applications and having more than 10,000 fans and followers on social media—an “enthusiasm surplus”—CfA was unable to effectively leverage this support.\n\n2. While CfA planned to significantly expand the fellowship program over the next 3–5 years, the current program structure was seen as unsuitable for scaling to smaller cities and towns.\n\nTo address these challenges, the foundation for a new initiative, “CFA Everywhere,” was laid in Fall 2011 as a class project in UC Berkeley’s ISSD course. The proposal received funding in early 2012 and was renamed Code for America Brigade, inspired by fire brigades, with the goal of transforming isolated civic hacker efforts into a broader, integrated movement. It aimed to be an open-source platform connecting CfA’s activities with the wider civic hacker community.\n\n## Analysis Methods\n\n### Dataset\n\n- Interview Data:\n - Volunteers in open source development projects: Interviews were conducted with volunteers involved in both technical (open source development, hackathons) and non-technical (e.g., Habitat for Humanity) projects.\n - Code for America fellows and staff: Interviews were conducted during the project's early stages.\n\n- Social Network Data:\n - CfA fellowship applicants and Twitter followers: Analyzed to understand their skills and interests.\n - Twitter stream data: Live tweets using the #codeacross hashtag during the “Code Across America” events and post-event tweets were used to generate a world map of activity.\n\n- Event Participant Data:\n - Code Across America hackathon surveys: Surveys were developed and used to gather participant intentions and feedback.\n\n- Site Usage Data:\n - Google Analytics: Provided insights on Brigade site traffic, visitor count, time-on-site, page views, bounce rates, return visitor ratios, and referrers.\n\n- User Feedback Data:\n - Unofficial feedback: Gathered from users via the Brigade mailing list and other channels after launch.\n - Official surveys: A brief open-ended survey was conducted after the launch of Brigade v.1 to collect user experience feedback from registered users.\n\n### Preliminary Research and Insight Extraction:\n\n- **Open Source Development Interviews**: Found that the top motivator for volunteers was “becoming part of a community of people with shared interests,” leading to the insight that the platform should be designed not as a project management tool but as a community organizing platform.\n- **Social Network Analysis**: Revealed that many CfA followers and applicants were interested in non-coding skills such as graphic design, UX, project management, and community work, indicating the platform should be designed not only for developers but for 
a diverse range of volunteers.\n- **Fellow and Staff Interviews**: Highlighted that CfA staff lacked bandwidth to manage external projects, leading to the insight that CfA should act as a “catalyst for unleashing the potential of supporters nationwide,” focusing on enabling self-organization and resource sharing.\n\n### Shift to Lean Startup and Agile Development:\n\n- Transitioning from an academic proposal to an actual development project required defining the requirements of a Lean Startup-style Minimum Viable Product (MVP).\n- Agile Inception Event: Held at the CfA office in January 2012, where stakeholders gathered to define MVP “user stories” using index card brainstorming and dot-voting techniques.\n\n### Post-Launch Evaluation and Improvements:\n\n- Analysis of Unofficial Feedback: Post-launch feedback helped identify areas for improvement (e.g., lack of content, unclear app status, absence of editing tools).\n- User Survey Analysis: Open-ended responses from a five-question survey were manually parsed, categorized, and visualized in bar charts. Key findings included user excitement about connecting with others, the need for better support for collaboration, and demands for documentation, case studies, and tutorials.\n- Google Analytics Analysis: Tracked post-launch traffic and engagement, revealing that most traffic was direct and engagement was low (approx. 90% stayed less than five minutes, 85% had fewer than five return visits).\n\n## Result\n\n- **Platform Status at Launch**:\n - brigade.codeforamerica.org hosted 9 reusable civic apps and provided links and instructions for deploying local instances.\n - Over 250 registered users, more than 80 locations, and approximately 40 Brigades were formed.\n - MVP features included sign-up via email or GitHub, profile creation, joining/starting Brigades, viewing apps, Brigades, people, and location detail pages, submitting/viewing challenges, and sharing via social media.\n - Based on an open design strategy—“don’t reinvent the wheel” and “don’t monopolize user interactions”—the platform integrated with third-party APIs like Civic Commons and Gravatar and recommended using existing tools like Google Groups.\n - Brigades were designed as purely virtual associations, allowing users to freely form groups, though this information architecture proved confusing for new users.\n - The app deployment checklist was simplified from its original concept due to documentation gaps and varied deployment processes, leading to increased burden on administrators.\n\n- **Launch and Early Impact at SXSW**:\n - On March 14, 2012, the Brigade platform officially launched during the SXSW Interactive keynote.\n - The launch was a significant success, with over 120 users from 45 locations signing up within the first 24 hours.\n - The February “Code Across America” hackathon series spanned 16 cities, boosting existing civic tech projects. For example, in Raleigh, NC, the LocalWiki project saw ~50 volunteers contribute 633 page edits, 100 maps, and 138 new photos—nearly doubling six months’ worth of progress in a single day. 
The events also helped forge connections among civic technologists across cities and build a support community around open source software.\n\n- **Initial Feedback and Challenges**:\n - Unofficial user feedback pointed out content gaps, unclear distinctions between \"deployable\" and \"deployed\" apps, and a lack of editing functionality.\n - Most communication between Brigade staff and civic hackers continued through offline channels such as email, conference calls, and Google Hangouts—useful for forming new relationships, identifying partners, and addressing urgent issues. The Brigade director noted that during early platform development, staff were expected to bridge functionality gaps through direct interaction.\n - The site wasn’t always the main driver of community activity. Instead, it functioned more as a symbolic record of connections between people, places, and projects.\n - Notable challenges included the lack of task tracking (relying only on GitHub links) and inadequate support for outreach and organization by fledgling Brigades.\n\n- **Brigade v.1 Updates**:\n - A site design update was deployed in early April 2012.\n - Navigation was clarified and restructured around “Applications,” “Brigades,” and “People.”\n - Due to resource constraints, the “Challenges” feature was temporarily shelved.\n - Informational content was added to explain site terminology (apps, deploy, Brigade).\n - A new “status” column was added to the list of deployed apps to better indicate progress.\n\n- **Survey and Google Analytics Evaluation**:\n - Survey results showed that civic hackers were excited to connect via the platform but also desired more connection support (e.g., communication tools, wiki spaces, mailing list features).\n - Feature limitations were found to contribute to reduced engagement among some users.\n - High demand was expressed for content expansions like case studies, tutorials, and wiki/forum features.\n - Google Analytics showed a major spike in traffic during the SXSW launch, followed by a slowdown and relatively low user engagement (short visit times, low repeat visits). There was also room for improvement in social media referrals.\n\nBased on these findings, recommendations for Brigade v.2 included enabling user-generated content (e.g., wiki-style app “recipes,” technical updates), enhancing content discovery via location-based features (e.g., Brigade maps, filtering by people), increasing visibility of social sharing features, and launching a Brigade blog.\n",
516
+ "raw": "---\nevidence_id: \"01\"\nresults:\n - intervention: \"Construction and Deployment of 'Code for America Brigade'\"\n outcome_variable: \"Generation of 'momentum' in existing civic tech projects (e.g., number of page edits, map additions, and photo uploads on LocalWiki).\"\n outcome: \"+-\"\nstrength: \"0\"\nmethodologies:\n - \"Interview, Survey, Google Analitics\"\nversion: \"1.0.0\"\ndatasets:\n - \"Interview, Survey, Google Analitics\"\ntitle: \"Effect of Code for America Brigade for momentum of OSS activity\"\ndate: \"2016-10-07\"\ntags:\n - \"oss\"\ncitation:\n - name: \"Code for America Brigade: Connecting People, Places, and Apps within the Civic Hacker Community\"\n src: \"https://www.ischool.berkeley.edu/sites/default/files/student_projects/karimcglynn_codeforamericabrigade_finalreport.pdf\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThe LocalWiki project saw significant progress during a single-day event, with 633 pages edited, 100 maps added, and 138 new photos uploaded.\n\n## Background\n\nCode for America (CfA) is a nonprofit organization based in San Francisco, with a mission to make city governments more efficient, transparent, and responsive to resident needs through technology.\n\nEach year, CfA organizes a fellowship program in which small teams of selected programmers, designers, and other technologists partner with local governments to build web and mobile applications that address community issues.\n\nHowever, in Fall 2011, CfA faced two major challenges:\n\n1. There was a lack of infrastructure to coordinate and manage volunteer contributions from outside the organization. Despite receiving over 550 fellowship applications and having more than 10,000 fans and followers on social media—an “enthusiasm surplus”—CfA was unable to effectively leverage this support.\n\n2. While CfA planned to significantly expand the fellowship program over the next 3–5 years, the current program structure was seen as unsuitable for scaling to smaller cities and towns.\n\nTo address these challenges, the foundation for a new initiative, “CFA Everywhere,” was laid in Fall 2011 as a class project in UC Berkeley’s ISSD course. The proposal received funding in early 2012 and was renamed Code for America Brigade, inspired by fire brigades, with the goal of transforming isolated civic hacker efforts into a broader, integrated movement. 
It aimed to be an open-source platform connecting CfA’s activities with the wider civic hacker community.\n\n## Analysis Methods\n\n### Dataset\n\n- Interview Data:\n - Volunteers in open source development projects: Interviews were conducted with volunteers involved in both technical (open source development, hackathons) and non-technical (e.g., Habitat for Humanity) projects.\n - Code for America fellows and staff: Interviews were conducted during the project's early stages.\n\n- Social Network Data:\n - CfA fellowship applicants and Twitter followers: Analyzed to understand their skills and interests.\n - Twitter stream data: Live tweets using the #codeacross hashtag during the “Code Across America” events and post-event tweets were used to generate a world map of activity.\n\n- Event Participant Data:\n - Code Across America hackathon surveys: Surveys were developed and used to gather participant intentions and feedback.\n\n- Site Usage Data:\n - Google Analytics: Provided insights on Brigade site traffic, visitor count, time-on-site, page views, bounce rates, return visitor ratios, and referrers.\n\n- User Feedback Data:\n - Unofficial feedback: Gathered from users via the Brigade mailing list and other channels after launch.\n - Official surveys: A brief open-ended survey was conducted after the launch of Brigade v.1 to collect user experience feedback from registered users.\n\n### Preliminary Research and Insight Extraction:\n\n- **Open Source Development Interviews**: Found that the top motivator for volunteers was “becoming part of a community of people with shared interests,” leading to the insight that the platform should be designed not as a project management tool but as a community organizing platform.\n- **Social Network Analysis**: Revealed that many CfA followers and applicants were interested in non-coding skills such as graphic design, UX, project management, and community work, indicating the platform should be designed not only for developers but for a diverse range of volunteers.\n- **Fellow and Staff Interviews**: Highlighted that CfA staff lacked bandwidth to manage external projects, leading to the insight that CfA should act as a “catalyst for unleashing the potential of supporters nationwide,” focusing on enabling self-organization and resource sharing.\n\n### Shift to Lean Startup and Agile Development:\n\n- Transitioning from an academic proposal to an actual development project required defining the requirements of a Lean Startup-style Minimum Viable Product (MVP).\n- Agile Inception Event: Held at the CfA office in January 2012, where stakeholders gathered to define MVP “user stories” using index card brainstorming and dot-voting techniques.\n\n### Post-Launch Evaluation and Improvements:\n\n- Analysis of Unofficial Feedback: Post-launch feedback helped identify areas for improvement (e.g., lack of content, unclear app status, absence of editing tools).\n- User Survey Analysis: Open-ended responses from a five-question survey were manually parsed, categorized, and visualized in bar charts. Key findings included user excitement about connecting with others, the need for better support for collaboration, and demands for documentation, case studies, and tutorials.\n- Google Analytics Analysis: Tracked post-launch traffic and engagement, revealing that most traffic was direct and engagement was low (approx. 
90% stayed less than five minutes, 85% had fewer than five return visits).\n\n## Result\n\n- **Platform Status at Launch**:\n - brigade.codeforamerica.org hosted 9 reusable civic apps and provided links and instructions for deploying local instances.\n - Over 250 registered users, more than 80 locations, and approximately 40 Brigades were formed.\n - MVP features included sign-up via email or GitHub, profile creation, joining/starting Brigades, viewing apps, Brigades, people, and location detail pages, submitting/viewing challenges, and sharing via social media.\n - Based on an open design strategy—“don’t reinvent the wheel” and “don’t monopolize user interactions”—the platform integrated with third-party APIs like Civic Commons and Gravatar and recommended using existing tools like Google Groups.\n - Brigades were designed as purely virtual associations, allowing users to freely form groups, though this information architecture proved confusing for new users.\n - The app deployment checklist was simplified from its original concept due to documentation gaps and varied deployment processes, leading to increased burden on administrators.\n\n- **Launch and Early Impact at SXSW**:\n - On March 14, 2012, the Brigade platform officially launched during the SXSW Interactive keynote.\n - The launch was a significant success, with over 120 users from 45 locations signing up within the first 24 hours.\n - The February “Code Across America” hackathon series spanned 16 cities, boosting existing civic tech projects. For example, in Raleigh, NC, the LocalWiki project saw ~50 volunteers contribute 633 page edits, 100 maps, and 138 new photos—nearly doubling six months’ worth of progress in a single day. The events also helped forge connections among civic technologists across cities and build a support community around open source software.\n\n- **Initial Feedback and Challenges**:\n - Unofficial user feedback pointed out content gaps, unclear distinctions between \"deployable\" and \"deployed\" apps, and a lack of editing functionality.\n - Most communication between Brigade staff and civic hackers continued through offline channels such as email, conference calls, and Google Hangouts—useful for forming new relationships, identifying partners, and addressing urgent issues. The Brigade director noted that during early platform development, staff were expected to bridge functionality gaps through direct interaction.\n - The site wasn’t always the main driver of community activity. 
Instead, it functioned more as a symbolic record of connections between people, places, and projects.\n - Notable challenges included the lack of task tracking (relying only on GitHub links) and inadequate support for outreach and organization by fledgling Brigades.\n\n- **Brigade v.1 Updates**:\n - A site design update was deployed in early April 2012.\n - Navigation was clarified and restructured around “Applications,” “Brigades,” and “People.”\n - Due to resource constraints, the “Challenges” feature was temporarily shelved.\n - Informational content was added to explain site terminology (apps, deploy, Brigade).\n - A new “status” column was added to the list of deployed apps to better indicate progress.\n\n- **Survey and Google Analytics Evaluation**:\n - Survey results showed that civic hackers were excited to connect via the platform but also desired more connection support (e.g., communication tools, wiki spaces, mailing list features).\n - Feature limitations were found to contribute to reduced engagement among some users.\n - High demand was expressed for content expansions like case studies, tutorials, and wiki/forum features.\n - Google Analytics showed a major spike in traffic during the SXSW launch, followed by a slowdown and relatively low user engagement (short visit times, low repeat visits). There was also room for improvement in social media referrals.\n\nBased on these findings, recommendations for Brigade v.2 included enabling user-generated content (e.g., wiki-style app “recipes,” technical updates), enhancing content discovery via location-based features (e.g., Brigade maps, filtering by people), increasing visibility of social sharing features, and launching a Brigade blog.\n"
517
+ },
518
+ "06": {
519
+ "frontmatter": {
520
+ "evidence_id": "06",
521
+ "title": "How Platform Owner Entry Affects OSS Contributions by Existing Contributors: An Experiment with AWS Elasticsearch",
522
+ "author": "BeaconLabs",
523
+ "date": "2024-06-20",
524
+ "citation": [
525
+ {
526
+ "name": "How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers",
527
+ "type": "link",
528
+ "src": "https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf"
529
+ }
530
+ ],
531
+ "results": [
532
+ {
533
+ "intervention": "The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.",
534
+ "outcome_variable": "OSS contributions by existing contributors(the number of commits, lines of code changed/added, and files of code changed/added)",
535
+ "outcome": "-"
536
+ }
537
+ ],
538
+ "strength": "3",
539
+ "methodologies": [
540
+ "DID, PSE"
541
+ ],
542
+ "version": "1.0.0",
543
+ "datasets": [
544
+ ""
545
+ ]
546
+ },
547
+ "content": "\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nExisting contributors are individuals who had begun contributing to the complementary firm’s open-source project before the platform owner’s entry. Based on prospect theory, they are likely to perceive strong potential losses to their existing “endowments,” such as their commitment to open-source philosophy, community interactions, and established reputation, as a result of the platform owner’s entry. Thus, it was hypothesized that they might reduce their involvement due to concerns about their contributions being exploited by the platform owner and about threats to community sustainability.\n\n## Analysis Method\n\n### Dataset\n\nData was collected from AWS and GitHub. Specifically, AWS official announcements provided information about the date and location of the Amazon Elasticsearch Service launch, while GitHub data provided developers’ contributions to Elasticsearch (contribution volume, personal GitHub experience, popularity, etc.). Commits by Elastic employees were excluded to focus on external developers’ contributions.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- OSS contributions by existing contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Contributions by existing contributors were measured as the natural logarithm of the number of commits to Elasticsearch.\n - As robustness checks, the natural logarithm of lines of code changed/added and files changed/added were also used.\n\n### Identification Strategy\n\n- AWS’s staggered introduction of managed Elasticsearch services across countries/regions was treated as a natural experiment, and stacked difference-in-differences analysis was applied.\n- This method addresses the “forbidden comparison problem” in staggered interventions and strengthens analytical robustness.\n- The treatment group consisted of contributors in countries where AWS launched Elasticsearch services; the control group consisted of contributors in countries where AWS had not yet launched or never launched the service.\n- Propensity score matching (PSM) was used to align contributors across groups. 
The analysis was conducted at the contributor-week level, with 110,249 observations across 459 contributors.\n\n## Results\n\n- The analysis provides empirical evidence that the platform owner’s entry reduces contributions by existing contributors.\n- On average, the number of commits decreased by about 4.33%. Notably, contributors with stronger intrinsic motivations reduced their contributions more sharply after the platform owner’s entry, while contributors with stronger extrinsic motivations showed a significantly smaller decline.\n- These results support the prospect theory–based hypothesis that existing contributors decrease participation due to concerns over losses to their \"endowments,\" such as community attachment and identity.\n",
548
+ "raw": "---\nevidence_id: \"06\"\nresults:\n - intervention: \"The platform owner enters the market by introducing its proprietary product, which is based on the complementary firm’s OSS.\"\n outcome_variable: \"OSS contributions by existing contributors(the number of commits, lines of code changed/added, and files of code changed/added)\"\n outcome: \"-\"\nstrength: \"3\"\nmethodologies:\n - \"DID, PSE\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"How Platform Owner Entry Affects OSS Contributions by Existing Contributors: An Experiment with AWS Elasticsearch\"\ndate: \"2024-06-20\"\n\ncitation:\n - name: \"How Platform Owner Entry Affects Open Source Contribution? Evidence from GitHub Developers\"\n src: \"https://questromworld.bu.edu/platformstrategy/wp-content/uploads/sites/49/2024/06/PlatStrat2024_paper_100.pdf\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nThis study explores how a platform owner’s market entry—by leveraging a complementary firm’s open-source technology—affects the external knowledge sourcing of the complementary firm, focusing on the willingness of GitHub developers to contribute. Using the staggered rollout of Amazon AWS’s Elasticsearch as a natural experiment, the analysis shows that the platform owner’s entry reduces contributions from existing contributors but substantially increases contributions from new contributors, leading to an overall increase in contributions to the open-source technology. This finding provides a new perspective: contrary to common concerns that platform owners’ entry harms open-source startups, from a technology development standpoint, such entry may not necessarily be detrimental.\n\n## Background\n\nExisting contributors are individuals who had begun contributing to the complementary firm’s open-source project before the platform owner’s entry. Based on prospect theory, they are likely to perceive strong potential losses to their existing “endowments,” such as their commitment to open-source philosophy, community interactions, and established reputation, as a result of the platform owner’s entry. Thus, it was hypothesized that they might reduce their involvement due to concerns about their contributions being exploited by the platform owner and about threats to community sustainability.\n\n## Analysis Method\n\n### Dataset\n\nData was collected from AWS and GitHub. Specifically, AWS official announcements provided information about the date and location of the Amazon Elasticsearch Service launch, while GitHub data provided developers’ contributions to Elasticsearch (contribution volume, personal GitHub experience, popularity, etc.). 
Commits by Elastic employees were excluded to focus on external developers’ contributions.\n\n### Intervation / Explanatory Variable\n\n- AWS’s market entry with Amazon Elasticsearch Service.\n - Entry is a dummy variable equal to 1 if AWS entered the country where the contributor resides, and 0 otherwise.\n - After is a dummy variable equal to 1 if AWS had already entered the contributor’s country during or before a given period, and 0 otherwise.\n - The interaction term Entry × After was used to measure the treatment effect.\n\n### Dependent Variable\n\n- OSS contributions by existing contributors(the number of commits, lines of code changed/added, and files of code changed/added)\n - Contributions by existing contributors were measured as the natural logarithm of the number of commits to Elasticsearch.\n - As robustness checks, the natural logarithm of lines of code changed/added and files changed/added were also used.\n\n### Identification Strategy\n\n- AWS’s staggered introduction of managed Elasticsearch services across countries/regions was treated as a natural experiment, and stacked difference-in-differences analysis was applied.\n- This method addresses the “forbidden comparison problem” in staggered interventions and strengthens analytical robustness.\n- The treatment group consisted of contributors in countries where AWS launched Elasticsearch services; the control group consisted of contributors in countries where AWS had not yet launched or never launched the service.\n- Propensity score matching (PSM) was used to align contributors across groups. The analysis was conducted at the contributor-week level, with 110,249 observations across 459 contributors.\n\n## Results\n\n- The analysis provides empirical evidence that the platform owner’s entry reduces contributions by existing contributors.\n- On average, the number of commits decreased by about 4.33%. Notably, contributors with stronger intrinsic motivations reduced their contributions more sharply after the platform owner’s entry, while contributors with stronger extrinsic motivations showed a significantly smaller decline.\n- These results support the prospect theory–based hypothesis that existing contributors decrease participation due to concerns over losses to their \"endowments,\" such as community attachment and identity.\n"
549
+ },
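Both Elasticsearch entries note that treated and control contributors were aligned via propensity score matching before the stacked DiD was estimated. The sketch below shows one-to-one nearest-neighbour matching without replacement on a precomputed score; the `{ id, pscore }` shape is hypothetical, and the summaries above do not specify which matching variant (caliper, with or without replacement) the paper used.

```js
// Minimal nearest-neighbour propensity score matching, without replacement.
// `treated` and `pool` are hypothetical arrays of { id, pscore } rows; the
// score would come from a model of treatment status on contributor covariates
// (GitHub experience, popularity, prior activity, ...).
// Assumes pool.length >= treated.length.
function matchNearestNeighbour(treated, pool) {
  const available = [...pool];
  return treated.map((t) => {
    let best = 0;
    for (let i = 1; i < available.length; i++) {
      if (
        Math.abs(available[i].pscore - t.pscore) <
        Math.abs(available[best].pscore - t.pscore)
      ) {
        best = i;
      }
    }
    const [control] = available.splice(best, 1); // consume the matched control
    return { treatedId: t.id, controlId: control.id };
  });
}
```

Note that greedy matching without replacement is order-dependent; it is used here only to make the matching idea concrete.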
550
+ "04": {
551
+ "frontmatter": {
552
+ "evidence_id": "04",
553
+ "title": "SuperStacks increase TVL",
554
+ "author": "BeaconLabs",
555
+ "date": "2025-08-15",
556
+ "citation": [
557
+ {
558
+ "name": "SuperStacks Impact Analysis",
559
+ "type": "link",
560
+ "src": "https://gov.optimism.io/t/superstacks-impact-analysis/10225"
561
+ }
562
+ ],
563
+ "results": [
564
+ {
565
+ "intervention": "SuperStacks: A New Approach to Rewards on the Superchain",
566
+ "outcome_variable": "Growth and retention of TVL",
567
+ "outcome": "+"
568
+ }
569
+ ],
570
+ "strength": "3",
571
+ "methodologies": [
572
+ "Pro-rata model, One-sided t-tests"
573
+ ],
574
+ "version": "1.0.0"
575
+ },
576
+ "content": "\n## Key Points\n\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervention / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nGrowth and retention of TVL (Total Value Locked): Measuring both the increase in TVL through incentives and the extent to which that TVL was maintained after incentives ended. The analysis centered on “retained TVL inflows.”\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- Overall TVL Inflow and Retention:\n - During the program, the SuperStacks initiative achieved $58M in net TVL inflows, with $53.7M retained 30 days after the program ended.\n - Considering costs, this corresponds to $23.2 net TVL per 1 OP during the program, and $21.5 per 1 OP after the program.\n - Across all incentivized pools and vaults, $87.7M of TVL was retained 30 days after incentives ended.\n - Net TVL inflows peaked before the program ended, at $15.7M in DEXs and $70M in lending markets.\n- DEX vs. Lending Markets:\n - Of the net TVL inflows during the program, DEXs accounted for 31.6% and lending markets 68.4%.\n - Lending markets showed stronger liquidity retention compared to DEXs. Lending pools, especially deposit-only vaults such as Morpho’s, often showed growth.\n - DEX TVL retention appeared inflated by concurrent incentive programs run by the Uniswap Foundation and Gauntlet. Excluding the two DEX pools covered by these joint incentives, retained TVL inflows fell to $48.2M, revealing a notable divergence between lending and DEX outcomes.\n",
577
+ "raw": "---\nevidence_id: \"04\"\nresults:\n - intervention: \"SuperStacks: A New Approach to Rewards on the Superchain\"\n outcome_variable: \"Growth and retention of TVL\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"Pro-rata model, One-sided t-tests\"\nversion: \"1.0.0\"\ntitle: \"SuperStacks increase TVL\"\ndate: \"2025-08-15\"\ncitation:\n - name: \"SuperStacks Impact Analysis\"\n src: \"https://gov.optimism.io/t/superstacks-impact-analysis/10225\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervation / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nGrowth and retention of TVL (Total Value Locked): Measuring both the increase in TVL through incentives and the extent to which that TVL was maintained after incentives ended. The analysis centered on “retained TVL inflows.”\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- Overall TVL Inflow and Retention:\n - During the program, the SuperStacks initiative achieved $58M in net TVL inflows, with $53.7M retained 30 days after the program ended.\n - Considering costs, this corresponds to $23.2 net TVL per 1 OP during the program, and $21.5 per 1 OP after the program.\n - Across all incentivized pools and vaults, $87.7M of TVL was retained 30 days after incentives ended.\n - Net TVL inflows peaked before the program ended, at $15.7M in DEXs and $70M in lending markets.\n- DEX vs. Lending Markets:\n - Of the net TVL inflows during the program, DEXs accounted for 31.6% and lending markets 68.4%.\n - Lending markets showed stronger liquidity retention compared to DEXs. Lending pools, especially deposit-only vaults such as Morpho’s, often showed growth.\n - DEX TVL retention appeared inflated by concurrent incentive programs run by the Uniswap Foundation and Gauntlet. Excluding the two DEX pools covered by these joint incentives, retained TVL inflows fell to $48.2M, revealing a notable divergence between lending and DEX outcomes.\n"
578
+ },
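The pro-rata model named in this entry's identification strategy attributes a pool's net TVL inflow to each incentive program in proportion to that program's share of total USD incentives on the pool. A minimal sketch, with illustrative field names that are not taken from the underlying analysis:

```js
// Pro-rata attribution: credit SuperStacks with the pool's net TVL inflow in
// proportion to SuperStacks' share of all USD-denominated incentives active
// on that pool. `pool` is a hypothetical object; field names are illustrative.
function attributedNetInflow(pool) {
  const totalUsd = pool.incentives.reduce((sum, p) => sum + p.usd, 0);
  if (totalUsd === 0) return 0;
  const superStacksUsd = pool.incentives
    .filter((p) => p.program === "SuperStacks")
    .reduce((sum, p) => sum + p.usd, 0);
  return pool.netTvlInflowUsd * (superStacksUsd / totalUsd);
}

// Example: $1.0M of net inflows with $30k of SuperStacks incentives and $10k
// of co-incentives credits $750k to SuperStacks.
attributedNetInflow({
  netTvlInflowUsd: 1_000_000,
  incentives: [
    { program: "SuperStacks", usd: 30_000 },
    { program: "Gauntlet", usd: 10_000 },
  ],
}); // => 750000
```

Dividing the attributed inflow by the OP spent on the pool then yields the cost-efficiency figures ($/OP) quoted above.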
579
+ "02": {
580
+ "frontmatter": {
581
+ "evidence_id": "02",
582
+ "title": "Effect of a Grant Program on Developer Activity ",
583
+ "author": "BeaconLabs",
584
+ "date": "2024-11-18",
585
+ "citation": [
586
+ {
587
+ "name": "Early experiments with synthetic controls and causal inference",
588
+ "type": "link",
589
+ "src": "https://docs.oso.xyz/blog/synthetic-controls/"
590
+ }
591
+ ],
592
+ "results": [
593
+ {
594
+ "intervention": "Whether grants were received or token incentives were provided",
595
+ "outcome_variable": "Developer retention, user activity, and Total Value Locked (TVL) of the network",
596
+ "outcome": "+-"
597
+ }
598
+ ],
599
+ "strength": "3",
600
+ "methodologies": [
601
+ "Synthetic Control Method"
602
+ ],
603
+ "version": "2.0.0",
604
+ "datasets": [
605
+ "https://github.com/opensource-observer/insights/tree/main/analysis/optimism/syncon"
606
+ ],
607
+ "tags": [
608
+ "oss",
609
+ "public goods funding"
610
+ ]
611
+ },
612
+ "content": "\n## Key Points\n\nOn average, there was an observed increase of approximately 150 to 200 monthly active developers.\n\n## Background\n\n- Open Source Observer (OSO) is exploring advanced metrics to better measure the impact of certain types of interventions on public goods ecosystems.\n- For example, it aims to compare the performance of projects or users who received token incentives against those who did not.\n- However, in real-world economies, it is impossible to randomly assign treatment and control groups like in controlled A/B tests.\n- Therefore, advanced statistical techniques must be employed to estimate the causal effect of a treatment on a target cohort, while controlling for other factors such as market conditions, competing incentives, and geopolitical events.\n\n## Analysis Method\n\n### Dataset\n\n- The synthetic control work is part of a broader initiative to build a flexible analytics engine capable of analyzing virtually all metrics over time.\n- OSO is currently rolling out a suite of timeseries metrics. These models allow metrics to be computed for any cohort over any time period, enabling \"time travel\" to evaluate past performance.\n- Most timeseries metrics are computed using a rolling window with daily buckets. For example, rather than measuring monthly active developers as a static monthly count, OSO uses 30-day and 90-day rolling windows to provide a more detailed view of cohort performance.\n- Sample SQL queries show the use of tables such as `timeseries_metrics_by_collection_v0`, `metrics_v0`, and `collections_v1`.\n\n### Intervation / Explanatory Variable\n\n- OSO is interested in measuring the impact of specific types of interventions on public goods ecosystems.\n- Specifically, it seeks to assess how grants and incentives affect outcomes such as developer retention, user activity, and network TVL (Total Value Locked).\n- The initial experiment evaluates an intervention targeting a cohort of projects that received Optimism Retro Funding in January 2024.\n- The explanatory variable is `new_contributors_over_90_day` (new contributors over 90 days), `commits_over_90_day` (commits over 90 days), and `issues_opened_over_90_day` (issues opened over 90 days).\n\n### Dependent Variable\n\n- The dependent variable is `active_developers_over_90_day` (90-day active developers)\n\n### Identification Strategy\n\n- As an early experiment in crypto network economics, OSO is exploring methods such as synthetic controls and causal inference.\n- Synthetic control methods are widely used to assess the impact of interventions in complex systems.\n- In this approach, a synthetic control is a weighted average of several units, constructed to replicate the trajectory that the treated unit would have followed in the absence of the intervention.\n- Weights are selected in a data-driven way so that the resulting synthetic control closely resembles the treated unit with respect to key predictors of the outcome variable.\n- Unlike difference-in-differences approaches, this method allows for adjustments for time-varying confounders by weighting the control group to better match the treatment group in the pre-intervention period.\n- Economists frequently use synthetic controls to evaluate policy impacts in non-experimental settings (e.g., Abadie and Gardeazabal’s study on the economic impact of the Basque separatist conflict).\n- One key advantage of synthetic controls is the ability to systematically select comparison groups. 
In OSO’s case, this means comparing grant recipients to similar non-recipient projects.\n- Inspired by work from Counterfactual Labs, OSO uses the pysyncon package to estimate treatment effects across the range of timeseries metrics available within OSO.\n- Each analysis request includes a pre-period start and end date, an optimization period start and end date, a dependent variable, treatment identifier, control identifiers, and predictor variables.\n\n## Results\n\n- In early findings, OSO analyzed monthly active developers over a 90-day rolling window for a cohort of projects that received Optimism Retro Funding in January 2024.\n- The results indicate that the gap between the treated group and the synthetic control group reflects the treatment effect, with an average increase of approximately 150 to 200 monthly active developers.\n- OSO is in the early stages of applying advanced metrics like synthetic control to measure the impact of incentives in crypto networks and plans to share further insights in the future.\n",
613
+ "raw": "---\nevidence_id: \"02\"\nresults:\n - intervention: \"Whether grants were received or token incentives were provided\"\n outcome_variable: \"Developer retention, user activity, and Total Value Locked (TVL) of the network\"\n outcome: \"+-\"\nstrength: \"3\"\nmethodologies:\n - \"Synthetic Control Method\"\nversion: \"2.0.0\"\ndatasets:\n - \"https://github.com/opensource-observer/insights/tree/main/analysis/optimism/syncon\"\ntitle: \"Effect of a Grant Program on Developer Activity \"\ndate: \"2024-11-18\"\ntags:\n - \"oss\"\n - \"public goods funding\"\ncitation:\n - name: \"Early experiments with synthetic controls and causal inference\"\n src: \"https://docs.oso.xyz/blog/synthetic-controls/\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\nOn average, there was an observed increase of approximately 150 to 200 monthly active developers.\n\n## Background\n\n- Open Source Observer (OSO) is exploring advanced metrics to better measure the impact of certain types of interventions on public goods ecosystems.\n- For example, it aims to compare the performance of projects or users who received token incentives against those who did not.\n- However, in real-world economies, it is impossible to randomly assign treatment and control groups like in controlled A/B tests.\n- Therefore, advanced statistical techniques must be employed to estimate the causal effect of a treatment on a target cohort, while controlling for other factors such as market conditions, competing incentives, and geopolitical events.\n\n## Analysis Method\n\n### Dataset\n\n- The synthetic control work is part of a broader initiative to build a flexible analytics engine capable of analyzing virtually all metrics over time.\n- OSO is currently rolling out a suite of timeseries metrics. These models allow metrics to be computed for any cohort over any time period, enabling \"time travel\" to evaluate past performance.\n- Most timeseries metrics are computed using a rolling window with daily buckets. 
For example, rather than measuring monthly active developers as a static monthly count, OSO uses 30-day and 90-day rolling windows to provide a more detailed view of cohort performance.\n- Sample SQL queries show the use of tables such as `timeseries_metrics_by_collection_v0`, `metrics_v0`, and `collections_v1`.\n\n### Intervention / Explanatory Variable\n\n- OSO is interested in measuring the impact of specific types of interventions on public goods ecosystems.\n- Specifically, it seeks to assess how grants and incentives affect outcomes such as developer retention, user activity, and network TVL (Total Value Locked).\n- The initial experiment evaluates an intervention targeting a cohort of projects that received Optimism Retro Funding in January 2024.\n- The explanatory variables are `new_contributors_over_90_day` (new contributors over 90 days), `commits_over_90_day` (commits over 90 days), and `issues_opened_over_90_day` (issues opened over 90 days).\n\n### Dependent Variable\n\n- The dependent variable is `active_developers_over_90_day` (90-day active developers).\n\n### Identification Strategy\n\n- As an early experiment in crypto network economics, OSO is exploring methods such as synthetic controls and causal inference.\n- Synthetic control methods are widely used to assess the impact of interventions in complex systems.\n- In this approach, a synthetic control is a weighted average of several units, constructed to replicate the trajectory that the treated unit would have followed in the absence of the intervention.\n- Weights are selected in a data-driven way so that the resulting synthetic control closely resembles the treated unit with respect to key predictors of the outcome variable.\n- Unlike difference-in-differences approaches, this method allows for adjustments for time-varying confounders by weighting the control group to better match the treatment group in the pre-intervention period.\n- Economists frequently use synthetic controls to evaluate policy impacts in non-experimental settings (e.g., Abadie and Gardeazabal’s study on the economic impact of the Basque separatist conflict).\n- One key advantage of synthetic controls is the ability to systematically select comparison groups. In OSO’s case, this means comparing grant recipients to similar non-recipient projects.\n- Inspired by work from Counterfactual Labs, OSO uses the pysyncon package to estimate treatment effects across the range of timeseries metrics available within OSO.\n- Each analysis request includes a pre-period start and end date, an optimization period start and end date, a dependent variable, treatment identifier, control identifiers, and predictor variables.\n\n## Results\n\n- In early findings, OSO analyzed monthly active developers over a 90-day rolling window for a cohort of projects that received Optimism Retro Funding in January 2024.\n- The results indicate that the gap between the treated group and the synthetic control group reflects the treatment effect, with an average increase of approximately 150 to 200 monthly active developers.\n- OSO is in the early stages of applying advanced metrics like synthetic control to measure the impact of incentives in crypto networks and plans to share further insights in the future.\n"
614
+ },
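Once donor weights have been fitted on pre-period predictors (the entry above notes that OSO uses the pysyncon package for this step), the synthetic control itself is simply a weighted average of the donor series, and the estimated treatment effect is the treated-minus-synthetic gap. A minimal sketch with hypothetical inputs:

```js
// Synthetic-control gap computation. Weight fitting is the hard part and is
// taken as given here. `donors` is a hypothetical map { projectId: number[] }
// of metric time series; `weights` maps the same ids to non-negative weights
// summing to one.
function syntheticSeries(donors, weights) {
  const ids = Object.keys(weights);
  const length = donors[ids[0]].length;
  return Array.from({ length }, (_, t) =>
    ids.reduce((sum, id) => sum + weights[id] * donors[id][t], 0)
  );
}

// Treated-minus-synthetic gap per time bucket; the post-intervention portion
// of this gap is the estimated treatment effect.
function treatmentGap(treatedSeries, donors, weights) {
  const synth = syntheticSeries(donors, weights);
  return treatedSeries.map((y, t) => y - synth[t]);
}
```

If the synthetic control fits well, the gap hovers near zero throughout the pre-intervention window and opens up only after the intervention.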
615
+ "09": {
616
+ "frontmatter": {
617
+ "evidence_id": "09",
618
+ "title": "How Wikipedia Offline Meetings Shape Participants’ Editing Activity: An Empirical Analysis of the German-Language Community",
619
+ "author": "BeaconLabs",
620
+ "date": "2024-11-05",
621
+ "citation": [
622
+ {
623
+ "name": "How offline meetings affect online activities: the case of Wikipedia",
624
+ "type": "link",
625
+ "src": "https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w"
626
+ }
627
+ ],
628
+ "results": [
629
+ {
630
+ "intervention": "Participation in Wikipedia offline meetings",
631
+ "outcome_variable": "Increase in contributions to Wikipedia(overall change in editing activity)",
632
+ "outcome": "+"
633
+ }
634
+ ],
635
+ "strength": "3",
636
+ "methodologies": [
637
+ "DID, Covariate matching"
638
+ ],
639
+ "version": "1.0.0",
640
+ "datasets": [
641
+ ""
642
+ ]
643
+ },
644
+ "content": "\n## Key Points\n\n- Participation in offline meetings exerts a positive, statistically significant effect on users’ contribution behavior over the short term (1 week), medium term (1 month), and long term (1 year).\n- The magnitude of decline in editing activity among participants is significantly smaller than the decline observed among comparable non-participants.\n- Notably, users attending their first meeting were observed to increase their editing thereafter.\n\n## Background\n\nOpen-source communities and peer-production projects face challenges in long-term sustainability, and offline gatherings are increasingly recognized for promoting community resilience. Although Wikipedia struggles with declining activity and retention of new users, the German-language Wikipedia hosts regular offline meetings. These meetings provide opportunities to form personal ties, and face-to-face interaction is thought to strengthen commitment to the project and reinforce identity.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n\n### Intervation / Explanatory Variable\n\n- The intervention for this outcome is participation in offline meetings.\n- The analysis examines whether a user attended a meeting, and in particular whether it was their first meeting.\n\n### Dependent Variable\n\n- The outcome variable is the volume of a user’s editing activity on Wikipedia (number of edits).\n- This is measured separately as the total number of edits across all namespaces and edits in the article main namespace.\n- Activity is analyzed over windows of 1 week (7 days), 1 month (28 days), and 1 year (364 days) before and after the meeting.\n\n### Identification Strategy\n\n- Quasi-experimental approach: We employ a difference-in-differences (DiD) design comparing meeting participants (treatment group) with comparable non-participants selected via matching (control group).\n- Covariate matching: From a pool of non-participants, we construct a control group most similar to participants based on five features (days since registration; cumulative activity in mainspace and outside mainspace from registration to the meeting; and recent activity in mainspace and outside mainspace over the 7-day, 1-month, 2-month, and 1-year periods prior to the meeting). This aims to minimize pre-existing differences between groups.\n- Statistical models: For the binary outcome of resuming activity, we use a multilevel linear probability model (LPM); for changes in activity volume, we use multilevel negative binomial models. 
Control variables (prior activity level, tenure, administrator status, and meeting year) are included.\n\n## Results\n\n- Compared to the control group, participants’ contributions increased significantly over the short, medium, and long terms.\n- Among users inactive before the meeting, the probability of resuming editing after the meeting increased substantially relative to the control group (e.g., the probability of resuming edits in mainspace rose from 16.7% in the control group to 33.4% among participants).\n- While the control group tended to reduce their editing, the decline among participants was significantly smaller, suggesting that offline interaction helps mitigate the broader decline of online communities.\n- Attending a first meeting showed a particularly strong positive effect, increasing editing activity more than attending other meetings.\n",
645
+ "raw": "---\nevidence_id: \"09\"\nresults:\n - intervention: \"Participation in Wikipedia offline meetings\"\n outcome_variable: \"Increase in contributions to Wikipedia(overall change in editing activity)\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"DID, Covariate matching\"\nversion: \"1.0.0\"\ndatasets:\n - \"\"\ntitle: \"How Wikipedia Offline Meetings Shape Participants’ Editing Activity: An Empirical Analysis of the German-Language Community\"\ndate: \"2024-11-05\"\n\ncitation:\n - name: \"How offline meetings affect online activities: the case of Wikipedia\"\n src: \"https://epjdatascience.springeropen.com/articles/10.1140/epjds/s13688-024-00506-w\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n\n- Participation in offline meetings exerts a positive, statistically significant effect on users’ contribution behavior over the short term (1 week), medium term (1 month), and long term (1 year).\n- The magnitude of decline in editing activity among participants is significantly smaller than the decline observed among comparable non-participants.\n- Notably, users attending their first meeting were observed to increase their editing thereafter.\n\n## Background\n\nOpen-source communities and peer-production projects face challenges in long-term sustainability, and offline gatherings are increasingly recognized for promoting community resilience. Although Wikipedia struggles with declining activity and retention of new users, the German-language Wikipedia hosts regular offline meetings. These meetings provide opportunities to form personal ties, and face-to-face interaction is thought to strengthen commitment to the project and reinforce identity.\n\n## Analysis Method\n\n### Dataset\n\n- We combine a comprehensive dataset on informal offline meetings in the German-language Wikipedia community from 2001 to 2020 with large-scale online activity data.\n- The dataset includes information on 4,408 small-scale meetings and 4,013 participating users.\n- All online actions on Wikipedia are recorded, and users’ editing activities are measured from metadata dumps.\n\n### Intervation / Explanatory Variable\n\n- The intervention for this outcome is participation in offline meetings.\n- The analysis examines whether a user attended a meeting, and in particular whether it was their first meeting.\n\n### Dependent Variable\n\n- The outcome variable is the volume of a user’s editing activity on Wikipedia (number of edits).\n- This is measured separately as the total number of edits across all namespaces and edits in the article main namespace.\n- Activity is analyzed over windows of 1 week (7 days), 1 month (28 days), and 1 year (364 days) before and after the meeting.\n\n### Identification Strategy\n\n- Quasi-experimental approach: We employ a difference-in-differences (DiD) design comparing meeting participants (treatment group) with comparable non-participants selected via matching (control group).\n- Covariate matching: From a pool of non-participants, we construct a control group most similar to participants based on five features (days since registration; cumulative activity in mainspace and outside mainspace from registration to the meeting; and recent activity in mainspace and outside mainspace over the 7-day, 1-month, 2-month, and 1-year periods prior to the meeting). 
This aims to minimize pre-existing differences between groups.\n- Statistical models: For the binary outcome of resuming activity, we use a multilevel linear probability model (LPM); for changes in activity volume, we use multilevel negative binomial models. Control variables (prior activity level, tenure, administrator status, and meeting year) are included.\n\n## Results\n\n- Compared to the control group, participants’ contributions increased significantly over the short, medium, and long terms.\n- Among users inactive before the meeting, the probability of resuming editing after the meeting increased substantially relative to the control group (e.g., the probability of resuming edits in mainspace rose from 16.7% in the control group to 33.4% among participants).\n- While the control group tended to reduce their editing, the decline among participants was significantly smaller, suggesting that offline interaction helps mitigate the broader decline of online communities.\n- Attending a first meeting showed a particularly strong positive effect, increasing editing activity more than attending other meetings.\n"
646
+ },
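The identification strategy summarized in the entry above (nearest-neighbour covariate matching followed by a difference-in-differences contrast) can be illustrated with a minimal TypeScript sketch. All names below (`UserActivity`, `matchControls`, `didEstimate`) are hypothetical and are not exported by this package; the study itself fits multilevel LPM and negative binomial models on the matched sample, which this simple two-period contrast only approximates.

```ts
// Minimal sketch: covariate matching plus a two-period DiD contrast.
// Hypothetical types and names, for illustration only.

interface UserActivity {
  id: string;
  features: number[]; // pre-meeting covariates (tenure, prior activity, ...)
  preEdits: number;   // edits in the window before the meeting
  postEdits: number;  // edits in the same-length window after the meeting
}

// Euclidean distance over the matching covariates.
function distance(a: number[], b: number[]): number {
  return Math.sqrt(a.reduce((s, v, i) => s + (v - b[i]) ** 2, 0));
}

// For each participant, pick the most similar non-participant.
// (Assumes a non-empty pool; matching is with replacement here.)
function matchControls(
  treated: UserActivity[],
  pool: UserActivity[],
): Array<[UserActivity, UserActivity]> {
  return treated.map((t) => {
    let best = pool[0];
    for (const c of pool) {
      if (distance(t.features, c.features) < distance(t.features, best.features)) {
        best = c;
      }
    }
    return [t, best];
  });
}

// DiD estimate: mean of (post - pre) for participants minus (post - pre)
// for their matched controls.
function didEstimate(pairs: Array<[UserActivity, UserActivity]>): number {
  const deltas = pairs.map(
    ([t, c]) => (t.postEdits - t.preEdits) - (c.postEdits - c.preEdits),
  );
  return deltas.reduce((s, d) => s + d, 0) / deltas.length;
}
```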
647
+ "03": {
648
+ "frontmatter": {
649
+ "evidence_id": "03",
650
+ "title": "SuperStacks for Sustainable DeFi Liquidity on the Superchain",
651
+ "author": "BeaconLabs",
652
+ "date": "2025-08-15",
653
+ "citation": [
654
+ {
655
+ "name": "SuperStacks Impact Analysis",
656
+ "type": "link",
657
+ "src": "https://gov.optimism.io/t/superstacks-impact-analysis/10225"
658
+ }
659
+ ],
660
+ "results": [
661
+ {
662
+ "intervention": "SuperStacks: A New Approach to Rewards on the Superchain",
663
+ "outcome_variable": "Establishing a sustainable DeFi growth loop",
664
+ "outcome": "+"
665
+ }
666
+ ],
667
+ "strength": "3",
668
+ "methodologies": [
669
+ "Pro-rata model, One-sided t-tests"
670
+ ],
671
+ "version": "1.0.0"
672
+ },
673
+ "content": "\n## Key Points\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervation / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nEstablishing a sustainable DeFi growth loop: Testing a new mechanism for interoperable assets to overcome the cold-start problem and achieve sustained growth.\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- Theoretical Support: The analysis confirmed the theory that strong demand-side activity raises the equilibrium level after incentives end. The program was designed to test new mechanisms by which interoperable assets on the Superchain could overcome the cold-start problem and establish a sustainable DeFi growth loop.\n- Mechanism Validation: The two-pronged approach targeting both supply-side and demand-side activities supported the theoretical framework that sustained liquidity growth of interoperable assets on the Superchain can be promoted. The hypothesis was that incentives would increase TVL (supply side), leading to higher trading volume and utilization (demand side), and ultimately these metrics would settle at higher equilibrium levels even after incentives stopped.\n- Challenges: However, more rigorous analysis of this theory is needed. While trading volume per TVL emerged as a potential predictor of net TVL inflows, its explanatory power was inconsistent, requiring further careful analysis.\n",
674
+ "raw": "---\nevidence_id: \"03\"\nresults:\n - intervention: \"SuperStacks: A New Approach to Rewards on the Superchain\"\n outcome_variable: \"Establishing a sustainable DeFi growth loop\"\n outcome: \"+\"\nstrength: \"3\"\nmethodologies:\n - \"Pro-rata model, One-sided t-tests\"\nversion: \"1.0.0\"\ntitle: \"SuperStacks for Sustainable DeFi Liquidity on the Superchain\"\ndate: \"2025-08-15\"\ncitation:\n - name: \"SuperStacks Impact Analysis\"\n src: \"https://gov.optimism.io/t/superstacks-impact-analysis/10225\"\n type: \"link\"\nauthor: \"BeaconLabs\"\n---\n\n## Key Points\n- The SuperStacks program achieved **$58.0M in net TVL inflows** during its implementation period, with **$53.7M retained 30 days after the program concluded**. This translates to **$23.2/OP and $21.5/OP** after costs, respectively.\n- Findings supported the theory that strong demand-side activity can lift post-incentive equilibrium levels, though a more rigorous analysis is still needed.\n- **Incentivized lending pools generally demonstrated higher liquidity retention**.\n- However, TVL retention in DEXs appeared **inflated by Uniswap's concurrent Gauntlet campaign**. Specifically, when the two co-incentivized DEX pools were excluded, retained TVL inflows dropped to $48.2M, revealing a significant divergence between the lending and DEX verticals.\n- Metrics for measuring demand-side traction included trading volume per TVL for DEX pools and utilization rate for lending pools. For DEX pools, a moderate correlation was observed between peak net TVL inflows and trading volume per TVL, hinting at possible synergistic effects.\n- A key limitation was that isolating the program's causal effect alone was extremely difficult due to numerous confounding variables, such as external co-incentives and broader market volatility.\n\n## Background\n\nSuperStacks was the **Optimism Foundation's first proactive DeFi incentive pilot program**. It was designed with the goal of **increasing liquidity for interoperable assets (e.g., USD₮0) across the Superchain**. At its core, the program aimed to help these assets overcome the cold start problem and **test new mechanisms for establishing sustainable DeFi growth loops**.\n\nThe program's design was based on a theoretical framework suggesting that a two-pronged approach, focusing on both supply-side and demand-side activity, could initiate a sustainable flywheel effect for interoperable assets on the Superchain, ultimately catalyzing lasting liquidity growth.\n\nThis theory is broken down into three phases:\n\n1. **Supply-Side Growth**: Available incentives attract deposits into DEX pools, which creates deeper liquidity and improves the execution quality for trades. On the lending side, an increase in lending supply reduces the borrow rate.\n2. **Demand-Side Growth**: Improved trading conditions draw in more trades that are routed through the incentivized DEX pools, which increases trading volumes and generates more fees for LPs. Similarly, more competitive borrow rates attract more borrowers, which raises utilization rates and generates a higher yield for lenders.\n3. 
**Sustainable Traction**: Once incentives are switched off, a portion of the TVL (Total Value Locked) and net liquidity leaves the incentivized pools, but due to higher fee and yield generation for LPs and lenders, liquidity settles at higher baseline levels compared to the pre-incentives period.\n\n## Analysis Method\n\n### Dataset\n\nThe datasets and methods used for the SuperStacks analysis were:\n\n- Period: 76-day incentive period (April 16 – June 30) and a 30-day retention evaluation period after program end (through July 30).\n- Data sources: 25 pools and vaults targeted by SuperStacks incentives.\n- Metrics:\n - TVL (Total Value Locked): Net TVL inflows during the program and retained inflows after program end.\n - Trading volume: Indicator of demand-side activity in DEX pools.\n - Utilization: Indicator of demand-side activity in lending pools.\n - Cost efficiency: Net TVL inflows per OP token ($/OP).\n\n### Intervation / Explanatory Variable\n\nSuperStacks Program:\n\nSuperStacks was the Optimism Foundation’s first attempt at a “proactive DeFi incentive,” designed as a pilot program to increase liquidity of interoperable assets across the Superchain. The program was built on a two-pronged approach targeting both supply-side and demand-side activities. Specifically, incentives were provided to encourage supply-side actions (deposits into DEX pools, increased lending supply), which in turn aimed to stimulate demand-side activities (higher trading volume, increased utilization).\n\n### Dependent Variable\n\nEstablishing a sustainable DeFi growth loop: Testing a new mechanism for interoperable assets to overcome the cold-start problem and achieve sustained growth.\n\n### Identification Strategy\n\n- A pro-rata model was applied to disentangle the complexity of overlapping incentive programs, attributing impact based on each program’s share of total USD incentives.\n- To evaluate pool-level performance, incentivized pools (treatment group) were paired with comparable non-incentivized pools (control group) on the same chain and protocol. One-sided t-tests were conducted on changes in TVL and trading volume.\n- The analysis focused on DEX pools (9 pairs) that passed statistical filtering.\n\n## Results\n\n- Theoretical Support: The analysis confirmed the theory that strong demand-side activity raises the equilibrium level after incentives end. The program was designed to test new mechanisms by which interoperable assets on the Superchain could overcome the cold-start problem and establish a sustainable DeFi growth loop.\n- Mechanism Validation: The two-pronged approach targeting both supply-side and demand-side activities supported the theoretical framework that sustained liquidity growth of interoperable assets on the Superchain can be promoted. The hypothesis was that incentives would increase TVL (supply side), leading to higher trading volume and utilization (demand side), and ultimately these metrics would settle at higher equilibrium levels even after incentives stopped.\n- Challenges: However, more rigorous analysis of this theory is needed. While trading volume per TVL emerged as a potential predictor of net TVL inflows, its explanatory power was inconsistent, requiring further careful analysis.\n"
675
+ }
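The pro-rata attribution and one-sided paired tests described in the SuperStacks entry's identification strategy can likewise be sketched in TypeScript. `attributeProRata` and `pairedTStat` are hypothetical names, a sketch under the stated assumptions (impact credited in proportion to each program's share of total USD incentives; a t statistic over per-pair treatment-minus-control differences), not the analysis's actual code.

```ts
// Sketch of pro-rata attribution across overlapping incentive programs:
// each program is credited with a share of a pool's net TVL inflow equal
// to its share of the total USD incentives spent on that pool.

interface ProgramSpend {
  program: string;
  usdIncentives: number;
}

function attributeProRata(
  netTvlInflow: number,
  spends: ProgramSpend[],
): Map<string, number> {
  const total = spends.reduce((sum, p) => sum + p.usdIncentives, 0);
  const credited = new Map<string, number>();
  for (const p of spends) {
    credited.set(p.program, total > 0 ? netTvlInflow * (p.usdIncentives / total) : 0);
  }
  return credited;
}

// One-sided paired t statistic over per-pair differences (e.g., change in
// TVL for each incentivized pool minus its matched non-incentivized pool);
// a large positive value supports the one-sided hypothesis of an uplift.
function pairedTStat(diffs: number[]): number {
  const n = diffs.length;
  const mean = diffs.reduce((s, d) => s + d, 0) / n;
  const variance = diffs.reduce((s, d) => s + (d - mean) ** 2, 0) / (n - 1);
  return mean / Math.sqrt(variance / n);
}

// Purely illustrative numbers: a pool with $10M net inflows co-incentivized
// by two programs splits 75% / 25% under a 3:1 incentive-spend ratio.
const shares = attributeProRata(10_000_000, [
  { program: "ProgramA", usdIncentives: 300_000 },
  { program: "ProgramB", usdIncentives: 100_000 },
]);
console.log(shares.get("ProgramA")); // 7500000
```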
676
+ };
677
+ export const evidenceSlugs = ["00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20"];
678
+ //# sourceMappingURL=evidence.js.map
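For consumers of the package, a minimal usage sketch of the exports above (assuming, as the `dist/index.js` entry in the file list suggests, that the package root re-exports `evidence` and `evidenceSlugs`):

```ts
import { evidence, evidenceSlugs } from "@beaconlabs-io/evidence";

// Look up an entry by slug; the record is keyed by the slugs listed in
// evidenceSlugs, each value holding { frontmatter, content, raw }.
const entry = (evidence as Record<string, any>)["03"];
console.log(entry.frontmatter.title);
// -> "SuperStacks for Sustainable DeFi Liquidity on the Superchain"
console.log(entry.frontmatter.results[0].outcome); // -> "+"
console.log(evidenceSlugs.length); // -> 21
```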