All Categories Blogs | GiveWell

Exploring how to get real change for your dollar.
URL: http://blog.givewell.org

Are GiveWell’s top charities the best option for every donor?
Wed, 06/21/2017 - 12:15

We’re sometimes asked whether we think GiveWell’s top charities are the “best,” in some absolute sense of the word, or whether we’d ever advise that a donor give to an opportunity outside of our recommendations. This post aims to clarify how GiveWell thinks about different giving options and their suitability for different types of donors.

 We believe that GiveWell’s top charities offer donors an outstanding opportunity to do a lot of good and are the best option for most donors. However, some donors—those with a very high degree of trust in a particular individual or organization to make this decision, donors with lots of time (in excess of 50 hours per year, and likely more) to consider their giving decision, or donors whose values point strongly toward a particular cause outside of the ones GiveWell covers—may find opportunities to have a greater impact per dollar than GiveWell’s top charities. Note that we think these characteristics are likely to be necessary, but not sufficient, for finding these types of opportunities; we still expect good giving to be hard, and spending, for example, 50 hours per year on research isn’t necessarily going to yield better opportunities.

 In this post, we describe relevant considerations for donors in greater detail. 

 Giving to GiveWell’s top charities

 GiveWell was founded to serve donors with limited amounts of time to make giving decisions. GiveWell’s co-founders, Elie Hassenfeld and Holden Karnofsky, were in this situation when they started GiveWell as a side project in 2006. They found that determining where to give effectively was a full-time project and quit their jobs to start GiveWell in 2007.

GiveWell’s top charity recommendations serve all donors. We rely on evidence and publicly detail our rationale for making a recommendation, so donors can vet our work; a strength of our recommendations is their falsifiability. We believe our top charity recommendations are particularly well suited to donors who want to give as effectively as possible, have only limited time to determine where to donate, and (prior to GiveWell) had no trusted person or entity to whom they could outsource their thinking. Our criteria and recommendations were designed with this type of donor in mind:

- Our top charities are largely uncontroversial and relatively straightforward ways to do a lot of good—for example, by providing direct aid such as insecticide-treated nets to prevent malaria and cash transfers to very poor households. There is room for debate on the evidence behind these interventions and their cost-effectiveness, but the basic case for them—and the fact that they are likely to do more good than harm—is subject to little debate, so a donor can feel fairly confident in these basics without needing to do their own research.
- GiveWell publishes the full details of our charity analyses so that donors can review and vet our work, and so that donors with very limited time can trust that any major problems would likely be caught by others (with more time). Because we lay out the entire case for the charities online, donors can spot-check any particular part of it to get a sense of whether we’re thinking reasonably about the issues that seem most salient to them.
- Our top charities have room for more funding. In other words, we believe additional marginal donations to these organizations enable them to do more good.

Our guess is that most donors that use GiveWell fit this profile (want to give as effectively as possible and have only limited time to determine where to donate, and no other trusted person or entity to outsource their thinking to).

 Below, we discuss alternative donor profiles:

 (1) Donors with limited time and a high amount of trust in a person or organization to inform their giving decisions

 This group of donors has limited time to spend on making a giving decision and has an organization or person (other than GiveWell or GiveWell staff) they personally trust to make or inform this decision. In this case, they may defer to that person or organization’s recommendations. 

 (2) Donors with lots of time

 Donors with a lot of time to spend on giving decisions (50+ hours per year) may be able to find opportunities that GiveWell hasn’t. For example, a donor might know someone who is starting a charity and feel, based on their research, that supporting their project at an early stage might be a particularly leveraged way to do good. A donor with lots of time may also be very familiar with a particular cause and feel highly confident in a particular organization and its need for funding. These donors may want to compare alternative opportunities to GiveWell’s top charities. They may also want to actively vet GiveWell’s recommendations as part of their research process.

 Donors with lots of time may also wish to apply a different strategy to their giving. GiveWell largely recommends charities where sufficient evidence exists to make a fairly robust estimate of the expected value of a donation. Donors with much more time to spend (maybe even significantly more than 50 hours per year) thinking about where to give may want to take a “hits-based giving” approach—having a high tolerance for philanthropic risk, so long as the overall expected value is sufficiently high. This is the approach the Open Philanthropy Project, which was incubated at GiveWell, has taken, and we believe doing this well requires a lot of work, as the Open Philanthropy Project discussed in a blog post last year (emphasis original):

 Aim for deep understanding of the key issues, literatures, organizations, and people around a cause, either by putting in a great deal of work or by forming a high-trust relationship with someone else who can. If we [the Open Philanthropy Project] support projects that seem exciting and high-impact based on superficial understanding, we’re at high risk of being redundant with other funders. If we support projects that seem superficially exciting and high-impact, but aren’t being supported by others, then we risk being systematically biased toward projects that others have chosen not to support for good reasons. By contrast, we generally aim to support projects based on the excitement of trusted people who are at a world-class level of being well-informed, well-connected, and thoughtful in relevant ways.

 Achieving this is challenging. It means finding people who are (or can be) maximally well-informed about issues we’ll never have the time to engage with fully, and finding ways to form high-trust relationships with them. As with many other philanthropists, our basic framework for doing this is to choose focus areas and hire staff around those focus areas. In some cases, rather than hiring someone to specialize in a particular cause, we try to ensure that we have a generalist who puts a great deal of time and thought into an area. Either way, our staff aim to become well-networked and form their own high-trust relationships with the best-informed people in the field.

 I [Open Philanthropy Project Executive Director Holden Karnofsky] believe that the payoff of all of this work is the ability to identify ideas that are exciting for reasons that require unusual amounts of thought and knowledge to truly appreciate.

 (3) Donors with values that differ from GiveWell staff

 Donors who hold different values than the majority of GiveWell staff, or who place more weight on a particular cause outside of the causes covered by GiveWell, may find other giving opportunities to be more attractive for reasons beyond the time/trust framework articulated earlier in this post. For example, individuals who place a very high value on farm animal welfare may wish to give a large proportion of their donation, if not all of their donation, to organizations working in that cause.

If you’re not sure which considerations apply to you, please reach out; we’re always happy to talk through giving decisions.

 The post Are GiveWell’s top charities the best option for every donor? appeared first on The GiveWell Blog.

June 2017 open thread
Thu, 06/15/2017 - 10:02

Our goal with hosting quarterly open threads is to give blog readers an opportunity to publicly raise comments or questions about GiveWell or related topics (in the comments section below). As always, you’re also welcome to email us at info@givewell.org or to request a call with GiveWell staff if you have feedback or questions you’d prefer to discuss privately. We’ll try to respond promptly to questions or comments.

 You can view our March 2017 open thread here.

 The post June 2017 open thread appeared first on The GiveWell Blog.

Separating GiveWell and the Open Philanthropy Project
Mon, 06/12/2017 - 13:07

GiveWell has been planning to separate the Open Philanthropy Project from GiveWell for over a year. We’re happy to announce that as of June 1, GiveWell and the Open Philanthropy Project are separate organizations. GiveWell sold assets and transferred staff to the Open Philanthropy Project LLC, an entity created for the purpose of potentially acquiring the Open Philanthropy Project’s assets and continuing its operations. (Read more about the Open Philanthropy Project LLC here.) The transaction was unanimously approved by GiveWell’s non-conflicted Board members.

 We do not expect this change to impact most of GiveWell’s donors. We’re proud to have incubated the Open Philanthropy Project as part of GiveWell and are excited to see what it achieves as an independent organization.

 This post will discuss:

- Brief context for the sale. The Open Philanthropy Project was incubated at GiveWell; GiveWell and the Open Philanthropy Project developed different teams and approaches over the years. We (GiveWell) think the separation of GiveWell and the Open Philanthropy Project has benefits for GiveWell in preserving our mission and clarifying our brand of finding and recommending outstanding evidence-backed, cost-effective charities.
- What this means for donors who use GiveWell’s research. We do not expect the experience for most GiveWell donors to change. GiveWell remains dedicated to publishing a shortlist of top-recommended charities, along with the full details of our analysis, to help donors decide where to give. We will continue to operate as a 501(c)(3) organization.
- Organizational changes at GiveWell. Holden Karnofsky stepped down as Co-Executive Director of GiveWell and Elie Hassenfeld became the sole Executive Director of GiveWell.
- The steps we took to make the decision to sell assets related to the Open Philanthropy Project, including intellectual property, to the Open Philanthropy Project LLC. The details of the transaction are discussed here.
- The relationship between GiveWell and the Open Philanthropy Project LLC. We will continue to share office space and some staff in the near term with the Open Philanthropy Project LLC. Holden Karnofsky stayed on the GiveWell Board of Directors. Elie Hassenfeld remains on the GiveWell Board and now also serves on the Open Philanthropy Project LLC’s Board of Managers.

GiveWell and the Open Philanthropy Project

The Open Philanthropy Project originated in 2011 as GiveWell Labs, a division of GiveWell that would consider giving opportunities in any form or sector, beyond the strict criteria GiveWell applies to potential top charities. GiveWell Labs entered into a funding partnership with Good Ventures, a foundation co-founded by Dustin Moskovitz and Cari Tuna, with whom GiveWell felt deeply aligned on values and mission, for this work. GiveWell Labs eventually rebranded as the Open Philanthropy Project. Good Ventures has funded most of the operational expenses of and giving opportunities identified by GiveWell Labs, later the Open Philanthropy Project, throughout its history. Prior to the creation of GiveWell Labs, Cari Tuna joined GiveWell’s Board of Directors, and Good Ventures has been a major supporter of GiveWell and our recommended charities since 2011.

 Over time, the Open Philanthropy Project and GiveWell developed separate teams and approaches. GiveWell decided more than a year ago to explore options for separating the Open Philanthropy Project from GiveWell. Eventually, the Open Philanthropy Project LLC was formed as a potential acquirer of the Open Philanthropy Project. The LLC has a Board of Managers consisting of Dustin Moskovitz, Cari Tuna, Holden Karnofsky, Elie Hassenfeld, and Alexander Berger. This Open Philanthropy Project LLC blog post discusses the separation from their perspective.

 

What this change means for donors who rely on GiveWell’s research

GiveWell’s approach to finding and recommending charities is not changing as a result of the transaction. We remain driven by our mission to find evidence-backed, cost-effective giving opportunities and to publish the full details of our analysis to help donors decide where to give. We’ll continue to publish an annual list of recommendations of the best giving opportunities we can find. We see the separation from the Open Philanthropy Project as beneficial in providing clarity around our focus and our brand.

 GiveWell views the separation as a formal reflection of how GiveWell and the Open Philanthropy Project have been operating over the past few years. The separation does not reflect any planned reduction in support for GiveWell or our top charities from Good Ventures, which anticipates continuing to be a major supporter of both. We do expect to have a continued need for funding for GiveWell’s operations in the future, and hope that donors who value our research will continue to support us.

 

Organizational changes at GiveWell

There will be some internal organizational changes at GiveWell as a result of the sale, which we don’t expect to have an impact on donors who use our research:

   Holden Karnofsky, who served as Co-Executive Director of GiveWell, stepped down. Holden had been spending the vast majority of his time on the Open Philanthropy Project, and will now exclusively be an employee of the LLC (although he will remain on GiveWell’s Board). We do not expect the amount of capacity Holden dedicates to GiveWell to change as a result of the transaction.

 Elie Hassenfeld is now the sole Executive Director of GiveWell. Elie will remain a full-time GiveWell employee and will continue to spend approximately 10-20% of his time on Open Philanthropy Project work, consistent with his time allocation over the past few years.

 Approximately 9 other GiveWell employees will also continue to work for GiveWell but will provide some services to the Open Philanthropy Project LLC. All employees who split time will track their time and the costs will be allocated, along with associated overhead, to the relevant entity, with some protections built in to make sure that GiveWell is receiving fair value for any services it provides to the LLC.

  15 employees, including Holden Karnofsky, left GiveWell and joined the Open Philanthropy Project LLC.

 GiveWell and the Open Philanthropy Project LLC do not expect these changes to impact the staff capacity dedicated to either organization relative to before the separation (in other words, employees’ work after the separation will largely be the same as before the separation).

  

The process we followed to sell the assets and transfer the staff of the Open Philanthropy Project to the LLC

GiveWell took the following steps to evaluate the proposal from the Open Philanthropy Project LLC to purchase assets primarily related to the Open Philanthropy Project, including intellectual property:

- GiveWell set up a sub-committee of our Board of Directors dedicated to sale-related decisions; this sub-committee excluded members of the Board with potential conflicts of interest from discussions and votes related to the sale. Board members Tim Ogden, Rob Reich, Jake Gibson, and Phil Steinmeyer served on the sub-committee. The Board members who were excluded due to conflicts of interest were Cari Tuna, who was expected to be a manager and funder of the LLC, Holden Karnofsky, who was expected to become a manager and employee of the LLC, and Elie Hassenfeld, who was expected to become a manager of and consultant for the LLC.
- GiveWell hired external legal counsel with relevant expertise to advise us on the separation and sale of assets and an experienced valuation firm to estimate the fair market value of the assets we expected to transfer as part of the transaction.
- The GiveWell Board sub-committee negotiated a term sheet for the sale of GiveWell assets primarily related to the Open Philanthropy Project and the transfer of employees to the LLC, which it recommended the full Board approve.
- The major terms of the transaction were unanimously approved by the non-conflicted members of the GiveWell Board on April 25. The sale price for the assets transferred from GiveWell to Open Philanthropy Project LLC was $2.65 million, which the Board determined represented a fair market value of the assets.
- The Board appointed Tim Ogden and Sarah Ward to negotiate final agreements consistent with the term sheet, and the sale of assets closed on May 31, 2017.

GiveWell and the Open Philanthropy Project LLC’s relationship

There will be substantial connections between GiveWell and the Open Philanthropy Project LLC:

- The two organizations will continue to share an office, at least through the life of the current lease (March 2019). The Open Philanthropy Project LLC plans to donate office space to GiveWell during this time.
- As indicated above, some staff members will split their time between the two organizations.
- Both Cari Tuna and Holden Karnofsky will be staying on the GiveWell Board of Directors, and they will have a governance role at the LLC. GiveWell Executive Director and Board member Elie Hassenfeld will serve on the Open Philanthropy Project’s Board of Managers.

Questions? If you have questions, please contact us.

 The post Separating GiveWell and the Open Philanthropy Project appeared first on The GiveWell Blog.

How GiveWell uses cost-effectiveness analyses
Thu, 06/01/2017 - 12:19

Our cost-effectiveness analysis plays a critical role in the recommendations we make to donors. For example, as a direct result of our cost-effectiveness calculations, we place a higher priority on filling funding gaps at the charities we recommend that work on deworming programs and malaria net distribution than we do on directing funding to GiveDirectly, a GiveWell top charity that distributes direct cash transfers. We believe that GiveDirectly is the strongest organization we’ve ever seen, but according to our analysis, cash transfers are less cost-effective in terms of impact per dollar donated than deworming treatments and malaria nets.

Accordingly, cost-effectiveness analysis is a major part of GiveWell’s research process. We dedicate a large part of one full-time staff member’s (Christian Smith) capacity to this work, and others involved with GiveWell research spend a considerable amount of time engaging with our cost-effectiveness model throughout the year. We consider this analysis a key part of our output and publish our model online so that anyone can check our calculations, enter their own inputs, and see if they agree with our approach and outputs.

 This post will provide some basic information about how our cost-effectiveness analyses inform our charity recommendations. 

 Summary

- We don’t believe our cost-effectiveness estimates should be taken literally, because they involve (1) subjective judgment calls; (2) educated guesses; and (3) simplifications to make them understandable and able to be vetted internally and externally.
- When comparing charities’ relative cost-effectiveness, we look for differences of 2-3x or more. If we find a difference of less than 2-3x, we feel unsure whether such a difference truly exists, due to the above-mentioned uncertainty. Donors’ intuitions on the relevant difference in cost-effectiveness may vary.
- Beyond prioritizing funding gaps, our cost-effectiveness analyses help us think through major questions related to charities’ work.

We don’t view our cost-effectiveness estimates as literally true.

 Cost-effectiveness is arguably the single most important input into GiveWell’s charity recommendations. GiveWell is looking for charities that have the greatest impact per dollar donated, and this is the metric upon which we base our funding recommendations. Within GiveWell’s list of top charities, we further parse the value of funding according to its cost-effectiveness and what it would enable a charity to do. 

 However, we think it would be a mistake to take our cost-effectiveness estimates as a high-confidence precise estimate of the actual value a charity accomplishes:

- We may miss factors or make errors in our model. For example, in the past, we did not adjust the cost-effectiveness of malaria nets to account for the possibility that in some cases where the Against Malaria Foundation does not distribute nets, other funders would take its place. We have since added this adjustment to our cost-effectiveness model (see cell A58).
- We rely on a number of subjective inputs. GiveWell’s top charities implement a number of different interventions, with different expected benefits. We recommend organizations that distribute malaria nets and implement seasonal malaria chemoprevention due to strong evidence suggesting these interventions reduce child mortality due to malaria. We recommend deworming because we think there is a possibility that children who receive deworming treatments have higher incomes later in life. We recommend cash transfers due to their impact on consumption. In order to compare the relative cost-effectiveness of these organizations’ work, we use a highly subjective conversion factor that enables us to compare years of healthy life with increases in income.

 We use subjective value judgments to make this comparison, about which people may reasonably disagree. There are also large disagreements among individuals who are involved with GiveWell research and fill out the cost-effectiveness model; you can see that in the “Personal Values” sheet here.

 We plan to write more about the subjective moral value judgments in our cost-effectiveness analyses in future blog posts.

- We make other adjustments based on educated guesses. For example, we make a “replicability adjustment” for deworming to account for the fact that the consumption increase in a major study we rely upon may not hold up if it were replicated (see cell A8). If you are skeptical that such a large income increase would occur, given the limited evidence for short-term health benefits and the generally unexpected nature of the findings, you may think that the effect the study measured wasn’t real, wasn’t driven by deworming, or relied on an atypical characteristic shared by the study population but not likely to be found among recipients of the intervention today, as one staff member pointed out in a comment (see cell E8). This adjustment is not well-grounded in data.
- We often rely on poor-quality data that may change significantly from year to year. For example, a key input into our cost-effectiveness analysis for anti-malaria interventions is malaria mortality data: How many people are dying of malaria each year? Two of the most well-respected global health groups disagree. The World Health Organization (WHO) estimates that 394,000 people died from malaria in sub-Saharan Africa in 2015 (see page 43); the Institute for Health Metrics and Evaluation says the figure is 629,945—or approximately 1.6 times as many. (Differences in their methodology for counting were discussed, with slightly older figures, in a 2012 blog post.)
- We aim to balance accuracy with developing a model that can be vetted, both internally and externally. We may, in our quest for simplicity, leave out some relevant factors, even though we’re trying to model the most significant ones. We would guess that the benefit of others being able to check our work outweighs the benefit of including a large number of additional but small factors in our model.
- We don’t model everything. For example, potential upside—we discuss this in our charity reviews, and in the past included it in a table listing our recommended charities—isn’t incorporated into our cost-effectiveness model. We also don’t model organizational strength; for example, we don’t explicitly model the effect that GiveDirectly’s organizational strength (one of the best we’ve ever seen) has on its program implementation, nor do we model the effect that the Schistosomiasis Control Initiative’s (in our opinion, weaker) organizational strength has on its. In general, we exclude flow-through effects from our model due to uncertainty over how best to account for them.

In practice, we look for significant differences in cost-effectiveness to guide our decisions.

 Due to the uncertainties and imprecision described above, we look for very large differences in modeled cost-effectiveness when making decisions about which charities to investigate or recommend. 

Historically, GiveWell has treated differences of 2-3x or more as significant, although this has varied from person to person working on our model. We typically won’t move forward with a charity in our process if it appears that it won’t meet the threshold of being at least 2-3x as cost-effective as cash transfers. We think cash transfers are a reasonable baseline to use due to the intuitive argument that if you’re going to help someone with Program X, Program X should be more cost-effective than just giving that person cash to buy what they need most.
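As a rough illustration of this kind of threshold check, here is a minimal sketch in Python. The charities, the multiples, and the 2.5x cutoff are hypothetical stand-ins, not GiveWell’s actual figures or model.

```python
# Illustrative sketch only, not GiveWell's actual model. It restates the idea above:
# express a charity's modeled cost-effectiveness as a multiple of cash transfers,
# and only treat differences of roughly 2-3x or more as meaningful.
# All numbers below are hypothetical.

THRESHOLD_VS_CASH = 2.5  # somewhere in the rough 2-3x band discussed above


def clears_bar(multiple_of_cash: float, threshold: float = THRESHOLD_VS_CASH) -> bool:
    """Return True if a modeled cost-effectiveness (as a multiple of cash) clears the bar."""
    return multiple_of_cash >= threshold


# Hypothetical modeled multiples of cash-transfer cost-effectiveness.
modeled_multiples = {
    "Hypothetical charity A": 4.0,  # large enough to treat as a real difference
    "Hypothetical charity B": 1.5,  # within the model's noise; not treated as a real difference
}

for name, multiple in modeled_multiples.items():
    print(f"{name}: {multiple:.1f}x cash -> clears bar: {clears_bar(multiple)}")
```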

Another benefit of doing cost-effectiveness analyses

We also believe that intensely modeling cost-effectiveness helps us by pushing us to ask, and to quantify the importance of, questions that could affect our view of a charity. We believe that time spent on cost-effectiveness analyses sharpens our thinking on our recommendations and our review process, and encourages internal debate and reflection.

 The post How GiveWell uses cost-effectiveness analyses appeared first on The GiveWell Blog.

Update on our views on cataract surgery
Thu, 05/11/2017 - 12:29

We’re often asked why GiveWell doesn’t recommend any organizations that focus on providing surgeries. This post will describe:

- Work we did previously to try to find surgery charities to recommend. In brief, our inability to identify organizations with room for more funding and high-quality monitoring data prevented us from recommending surgery charities in general.
- Our current (rough, preliminary) view that cataract surgery’s cost-effectiveness may be competitive with that of our priority programs, and some of the major open questions we have about our estimate.
- Organizations implementing cataract surgery programs that we’ve spoken with. They run a variety of programs, and our impression is that they do not yet have the type of high-quality monitoring information we’re interested in.
- Our plans to move forward with IDinsight to improve our understanding of cataract surgery as an intervention.

Background

Our impression is that surgical interventions are intuitively attractive to many donors because they seem to offer concrete, low-cost, and life-changing impacts. For example, some organizations performing cataract surgeries claim that each surgery costs approximately $25 (e.g. 1, 2). Fistula surgery organizations, which repair obstetric fistulas, cite costs around $450-$600 per surgery (e.g. 1, 2). These figures would likely be competitive with our cost-effectiveness estimates for our current recommended charities, and cost-effectiveness is a critical factor in GiveWell’s decision to recommend a charity. This suggests that we should look into recommending organizations working on these interventions.

 We briefly looked at developing-world corrective surgery as a potential priority program in 2010. However, we identified two major challenges to finding top charities working in this space:

- Room for more funding. We were unsure whether directing additional funding to a charity would cause more surgeries to happen, or if something other than funding—such as the availability of surgeons—was the bottleneck to further surgeries being performed.
- High-quality monitoring. Many charities, including surgery charities, don’t conduct the type of high-quality monitoring that we’d like to see. We’re particularly interested in closely monitoring surgical outcomes due to our view that surgical interventions are complex relative to, for example, the distribution of a mass commodity like a deworming pill or cash. Performing surgery requires skill and we think it’s likely that the quality of surgeries varies. In addition, some surgeries may require longer-term follow-up care, and we’re unsure what the impact is for patients who do not receive this care. Monitoring information thus feels particularly important in our analysis of whether to recommend charities working on surgery.

We deprioritized additional work on corrective surgeries at the time that report was published, although we maintained our interest in potentially recommending charities working in this area. In 2016, we completed an evidence review of cataract surgery and classified it as one of our “priority programs.”

 Cataract surgery as an intervention

 We believe there is evidence that cataract surgeries substantially improve vision. Very roughly, we estimate that the cost-effectiveness of cataract surgery is ~$1,000 per severe visual impairment reversed.[1]

 However, we have not completed an in-depth cost-effectiveness analysis of cataract surgery. We remain highly uncertain about the full costs involved because our current cost estimates are based on literature on the costs of performing surgeries. In our experience, charities’ own budgets (rather than academic literature) have given us the best information about how much an intervention costs.

In addition, we currently have limited information about the preoperative visual acuity of cataract surgery patients.[2] We also do not have a good understanding of the progression of cataracts to blindness—we have looked for this information but have not found it—and so we do not currently incorporate an estimate of the benefits of preventing future blindness in our cost-effectiveness estimate.

 Why we don’t focus on trachoma

Another sight-related intervention is surgery to treat trachoma, a bacterial infection commonly transmitted by flies that can result in low vision and eventually blindness from scarring due to eyelashes rubbing on the cornea. Our impression is that the evidence base on trachoma progression is weaker than that for cataracts, such that we have open questions about the likelihood of the infection progressing from its earlier stages (trachomatous trichiasis, where the eyelashes rub against the cornea) to blindness, and about the average age of onset of each. In addition, we have concerns about trachoma recurrence; because trachoma can recur, surgery to repair vision loss from trachoma may be less cost-effective than cataract surgery in the long run. Cataracts do not recur because the surgery replaces the natural lens, although individuals who have had cataract surgery may still experience vision loss due to other causes.

 Organizations implementing cataract surgery programs

The organizations we spoke with as part of our investigation into cataract surgery run a variety of programs. Many of them were not directly implementing additional surgeries, but rather were conducting activities such as supporting trainings for surgeons, providing general support to hospitals for eye care interventions, or encouraging more people to access available health services. We have not yet seen compelling monitoring and evaluation from these organizations to demonstrate their impact.

 Our plans with IDinsight

 We’re working closely with IDinsight as part of GiveWell’s Incubation Grants program to grow the pipeline of potential future top charities. IDinsight conducts impact evaluations with the goal of informing decisionmakers, such as governments or NGOs. (More on why we’re partnering with IDinsight in this post.) 

 We partnered with IDinsight to find a cataract surgery organization it can work with on monitoring and evaluation, as that remains one of the biggest obstacles we’re aware of to GiveWell recommending cataract surgery organizations. (As indicated above, we have spoken with a number of cataract surgery organizations but do not believe any have sufficient monitoring and evaluation information available to inform a GiveWell recommendation.) We expect IDinsight to consider a number of organizations by holding initial scoping calls, and to ultimately focus on working with a single organization that appears most likely to become a GiveWell top charity, although that organization may still not become a top charity. We expect IDinsight to focus on:

- Key monitoring questions, such as measuring pre-operative visual acuity as well as post-operative visual acuity.
- Conducting an evaluation of programs’ causal impact. Does the organization cause more surgeries to happen?

We’re also planning to ask other cataract surgery organizations for more detailed information about the costs of their programs. We hope that between IDinsight’s work and receiving additional cost information, we will be better able to assess whether cataract surgery should continue to be a GiveWell priority program.

Notes

[1] This estimate is on the higher end of the range we calculated, because it assumes additional costs due to demand generation activities, or identifying patients who would not otherwise have known about surgery. We use this figure because we expect that GiveWell is more likely to recommend an organization that can demonstrate, through its demand generation activities, that it is causing additional surgeries to happen. The $1,000 figure also reflects our sense that cost-effectiveness in general tends to worsen (become more expensive) as we spend more time building our model of any intervention. Finally, it is a round figure that communicates our uncertainty about this estimate overall.

 [2] Visual acuity is reported as the ratio of the distance at which someone can distinguish a fixed detail relative to a person with “normal” vision. A ratio of 6/6 refers to “normal” vision; a ratio of 6/60 means that someone with impaired vision sees at 6 meters what someone with “normal” vision sees at 60 meters. The World Health Organization (WHO) defines binocular blindness as visual acuity worse than 3/60 in both eyes.

 Visual acuity thresholds for surgical eligibility vary. Our understanding is that some portion of cataract surgery is done on individuals whose visual acuity is worse than 6/6 vision, but better than 3/60, and that this proportion likely varies by program and context. For the purposes of our cost-effectiveness estimate, we’ve assumed treatment of patients whose visual acuity is 6/60 or worse.
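As a rough illustration of the acuity arithmetic in this footnote, here is a minimal sketch in Python. The 3/60 and 6/60 cutoffs are the figures quoted above; the helper functions themselves are illustrative and not part of any GiveWell tool.

```python
# Minimal sketch of the visual-acuity arithmetic described in this footnote.
# The 3/60 blindness cutoff and the 6/60-or-worse treatment assumption are the
# figures quoted above; the function names are illustrative only.

def acuity_value(ratio: str) -> float:
    """Convert a ratio like '6/60' to a decimal acuity (6/6 = 1.0, 6/60 = 0.1)."""
    numerator, denominator = ratio.split("/")
    return float(numerator) / float(denominator)


def is_binocularly_blind(ratio: str) -> bool:
    # WHO definition cited above: binocular blindness is acuity worse than 3/60.
    return acuity_value(ratio) < acuity_value("3/60")


def assumed_treated(ratio: str) -> bool:
    # Assumption used in the cost-effectiveness estimate: acuity of 6/60 or worse.
    return acuity_value(ratio) <= acuity_value("6/60")


print(acuity_value("6/60"))          # 0.1
print(is_binocularly_blind("2/60"))  # True (worse than 3/60)
print(assumed_treated("6/60"))       # True
```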

 The post Update on our views on cataract surgery appeared first on The GiveWell Blog.

Why GiveWell is partnering with IDinsight
Thu, 05/04/2017 - 12:28

This post will highlight GiveWell’s work with IDinsight, part of our Incubation Grants program to help grow the pipeline of potential future top charities and improve the quality of GiveWell’s recommendations. We previously highlighted the work of No Lean Season and Zusha!, Incubation Grant recipients and potential 2017 GiveWell top charities. Unlike these organizations, we don’t expect IDinsight to itself become a top charity. Instead, we hope it will help GiveWell support the development of more top charities and increase our understanding of the organizations we recommend.

 IDinsight is an international NGO that aims to help its clients develop and use rigorous evidence to improve social impact. GiveWell is partnering with IDinsight to support organizations’ development of monitoring and evaluation information of the type we’re interested in. This is the first partnership of this kind for GiveWell.

 Summary

 Working with IDinsight is a major part of our GiveWell Incubation Grants program. We hope that IDinsight can (a) conduct randomized controlled trials (RCTs) of interventions that seem promising but have little evidence to back them up, (b) work with promising organizations or existing top charities to assess and improve their monitoring systems, and (c) conduct additional research work that could inform our recommendations (e.g., additional site visits or surveying beneficiaries about their preferences).

 We don’t expect IDinsight’s work to influence our top-charity recommendations in 2017. We may have information from this partnership to inform our 2018 recommendations, and we expect it to influence our 2019 recommendations.

 Background

 We believe IDinsight fills a unique role in the development sphere. Unlike academics, who may be incentivized to focus on advancing the academic literature, IDinsight focuses on providing decision-relevant data and assessments; it serves nonprofits and policymakers who have to decide whether to implement intervention A or B, by, e.g., producing quick, low-cost, randomized controlled trial evidence. In addition to these types of “decision-focused evaluations,” IDinsight sets up “embedded learning partnerships” within governments and NGOs to answer priority policy questions using a broad suite of data and evidence tools. We think this kind of work is highly valuable and relevant to GiveWell’s mission of finding and identifying cost-effective, evidence-backed programs and charities.

 We came across IDinsight early on in our GiveWell Incubation Grants program (then known as “GiveWell’s experimental work”) when we searched for organizations that could help us “bridge the gap between research and implementation”. GiveWell made its first Incubation Grant to IDinsight in September 2014. Since then, we have provided additional support to IDinsight in June 2016 to expand their general operations (in the hopes this would lead to more evidence and organizations of the type GiveWell is interested in recommending) and in October 2016. The latter grant is to support the creation of an IDinsight “embedded team” at GiveWell, and is the focus of this post. We recently recommended an additional grant to support the scale-up of the embedded team’s work. 

 Funding for IDinsight’s work with GiveWell comes from Good Ventures, a large foundation with which GiveWell works closely, and which has supported our Incubation Grants program.

 Goals of our partnership with IDinsight

 We hope IDinsight can help fill a gap in GiveWell’s evaluation process. One of our core criteria for recommending charities is high-quality monitoring and evaluation information to demonstrate a charity is having impact. Our impression is that many organizations—likely the majority of charitable organizations—do not have this type of information; in some cases, we think it’s likely that even excellent organizations may not be collecting this type of information due to the high cost, both monetary and in staff-hours, it requires.

 We hope IDinsight will help fill this gap by working with promising potential top charities to develop these kinds of monitoring systems and/or to complete an impact evaluation of their work. We’re also interested in IDinsight working to strengthen monitoring systems for some of GiveWell’s current top charities, to help us improve our understanding of their impact. 

 Initial plans for the IDinsight “embedded GiveWell team”

 We’re still early in exploring all of the possible ways in which GiveWell and IDinsight may work together. Two projects are most likely in the near term:

   Helping GiveWell identify and develop a potential top charity working on cataract surgery. To do this, we think IDinsight will most likely build and implement a monitoring and evaluation system for a promising cataract charity. In August 2016, we published an intervention report announcing our view that cataract surgery was a GiveWell “priority program.” In other words, we believe that the strength of the evidence for and potential cost-effectiveness of the program is competitive with the other interventions we recommend, like distributing nets to prevent malaria and providing direct cash transfers to very poor households, and that we’d be interested in considering charities implementing cataract surgery programs for a top-charity recommendation.

 However, in our early conversations with organizations working on cataract surgery programs, our impression was that they didn’t yet have the type of monitoring and evaluation information on surgical outcomes and their impact on the number of surgeries carried out that we’d like to see as part of our charity review process. That’s why we’re working with IDinsight to support the identification or development of a GiveWell top charity working in this space.

 IDinsight is also undertaking a similar project focused on organizations providing surgeries to correct obstetric fistulas.

  Conducting an impact evaluation of New Incentives’ immunization incentives program. We may ask IDinsight to work with New Incentives, a GiveWell Incubation Grant recipient and potential future top charity, to run a randomized controlled trial of its pilot program to provide conditional cash transfers to incentivize routine immunizations of infants.

 In February 2017, IDinsight traveled to Nigeria to visit New Incentives and observe their immunization incentives program as part of this project. IDinsight’s notes from the trip are available here.

Other potential projects with IDinsight include, but are not limited to:

- Improving the Against Malaria Foundation (AMF)’s monitoring. AMF is currently our top recommendation to donors, although in 2016 we wrote about weaknesses in its monitoring. IDinsight may work with AMF to improve its monitoring standards.
- Conducting an RCT on GiveWell Incubation Grant recipient Charity Science: Health’s work. Charity Science: Health is a young organization that provides SMS reminders for vaccinations in India. We do not yet have a view on this intervention and are interested in potentially working with IDinsight to better understand Charity Science: Health’s work in this space and its impact on immunization rates.
- Informing subjective value judgments in GiveWell’s cost-effectiveness model. Cost-effectiveness plays a major role in GiveWell’s top-charity recommendations, and we rely on a number of highly uncertain and subjective tradeoffs between increasing income and various health outcomes in order to compare charities that work on different programs. Better understanding the preferences of individuals impacted by the work of the charities we recommend would improve our cost-effectiveness outputs. IDinsight may design and pilot a survey of beneficiaries on how they compare certain health and non-health interventions.

We’re looking forward to seeing what we learn from this partnership.

 The post Why GiveWell is partnering with IDinsight appeared first on The GiveWell Blog.

Allocation of discretionary funds and new recommendation for donors
Mon, 04/03/2017 - 20:07

Since we released our 2016 recommendations in November, we have received about $4.9 million in funding for making grants at our discretion. We noted at the time that we would use these funds to fill the next highest priority funding gaps among our top charities. We have now reassessed the funding gaps for our top charities and plan to allocate $4.4 million of this funding to the Against Malaria Foundation (AMF) and $0.5 million to the Deworm the World Initiative.

 Our updated recommendation for donors

 We continue to recommend all seven of our current recommended charities as top charities and think all offer outstanding opportunities for donors to accomplish significant good with their donations. 

 We have updated our bottom line recommendation for donors seeking to follow our recommended allocation. We now recommend that donors give 100% of their donation to AMF, which will continue to have a pressing need for funding after the grant from GiveWell’s discretionary funds and after accounting for expected fundraising.

 This is an update on the recommendation we made in November 2016 of giving 75% to AMF and 25% to the Schistosomiasis Control Initiative (SCI). We will update this recommendation again in November, and may do so sooner if we have new information that affects where we think additional donations would have the greatest impact.

 We have not completed any updates on our standout charities, and that list remains the same.

 Room for more funding reassessment

 For this analysis, we asked each of our top charities how much they raised in total through December 31 (because we were asking in early February, this is the most up-to-date information we expected to be available) and compared this information, along with how much funding we have received that was donated to GiveWell specifically for each charity (rather than for granting at our discretion), to our previous expectations. 

 We did not ask each organization for the full details of how they would use additional funding, given that only a few months have elapsed since we last requested this information, but we did have conversations with AMF, Deworm the World, and SCI about how they would use additional funding. 

(For additional explanation of what we mean by execution level gaps below, see this post. In short, an execution level 1 gap is the amount at which we believe the charity has a 50% chance of being bottlenecked by funding, and level 2 is 80%.)

 In short:

- Deworm the World raised $0.53 million less than expected and has an execution level 2 gap.
- AMF raised slightly more than expected, but continues to have a large execution level 1 gap.
- SCI received $4.6 million less than the total we projected, which reflects both that we expected to allocate a portion of the discretionary funds we received to SCI and that other sources of revenue came in under our projections.
- GiveDirectly raised slightly more than projected, but continues to be constrained by funding (it has a large execution level 1 gap) and told us that it has laid off some field staff as a result. GiveDirectly is also still in the process of raising funding for its basic income guarantee study, which begins this summer; it had projected raising the full amount by the end of February 2017.
- We had less information with which to project revenues for the END Fund and Sightsavers’ deworming programs and Malaria Consortium’s seasonal malaria chemoprevention (SMC) program last year. We estimate that Malaria Consortium and the END Fund continue to have execution level 1 gaps and that Sightsavers does not have an execution level 1 or 2 gap for these programs.

More detail in this spreadsheet.

 Why we selected this allocation

 The decision to fill the remainder of Deworm the World’s execution level 2 gap was an easy one. This was a fairly high-priority gap for us in November and Deworm the World continues to be the strongest opportunity we’ve found, when weighing all factors other than room for more funding. It is the strongest on cost-effectiveness among our top charities and it is strong on monitoring and communication. Execution level 2 means that marginal funds are fairly unlikely (~20-30% chance) to be used or make a difference for planning this year, but we believe it is worthwhile to further decrease the chances that Deworm the World is bottlenecked by funding. If the funds aren’t instrumental this year, they will be used in future years.

 AMF has a large remaining execution level 1 gap. From conversations with AMF early this year, our understanding is that AMF continues to have high-value opportunities that exceed its available funding. Below, we detail why we prefer further funding to AMF over further funding to other top charities.

 Other possibilities that we decided against

 SCI

 SCI raised less funding than we projected in November and recently told us that $2.4 million in additional funding could be used to provide deworming treatments to more children in its next budget year (April 2017 to March 2018). 

 At the same time, SCI already expects to grow rapidly this year. SCI is allocating a higher portion of funding on hand to its next budget year (as opposed to holding more for future years) than we projected in November. Merck KGaA has recently increased the amount of praziquantel (the drug used to treat schistosomiasis) it donates annually, and SCI has decided to do all it can to deliver treatments in the next year, in order to demonstrate to Merck that deworming programs are capable of reaching the treatment targets. 

Out of about $16 million SCI has received since late last year, both as a result of GiveWell’s recommendation and from other sources, $1.6 million is unallocated; our understanding is that SCI expects to use the remainder in the following year. We had projected, based on conversations with SCI, that it would spend about 60% of the funds it received from a GiveWell recommendation in the next budget year and hold the remainder to ensure that programs could be sustained in the following year. In other words, our expectation for $16 million in funding directed to SCI was that $9.6 million would be used in the next budget year, rather than the $14.4 million SCI has allocated. As a result, SCI’s budget for direct implementation (excluding central costs) is expected to double in the coming year. We believe that if we gave SCI additional funding now, it would allocate the additional funding to expanding its budget further this year and we are concerned that with such rapid growth, program quality may suffer.
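A back-of-the-envelope restatement of that arithmetic, using only the figures quoted above (the 60% spend-down projection, the roughly $16 million received, and the $1.6 million unallocated), might look like the sketch below; it is illustrative only, not GiveWell’s actual model.

```python
# Back-of-the-envelope restatement of the figures in the paragraph above (USD millions).
# These are the numbers quoted in the text, not GiveWell's actual model.

funds_received = 16.0   # roughly what SCI has received since late last year
unallocated = 1.6       # amount SCI has not yet allocated

projected_share_next_year = 0.60                                   # our prior projection
projected_next_year = projected_share_next_year * funds_received   # 9.6
actual_next_year = funds_received - unallocated                    # 14.4

print(f"Projected allocation to next budget year: ${projected_next_year:.1f} million")
print(f"Actual allocation to next budget year:    ${actual_next_year:.1f} million")
```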

 We are not opposed to taking risky bets when (a) the expected value is high; and (b) we expect to learn, after the fact, whether the results were in line with expectations. In SCI’s case, the expected value of additional funding, according to our cost-effectiveness analysis, is ~2x that of donations to AMF. Given the lack of precision in our model, ~2x is only a modest difference. And we believe that we are less likely to learn about significant problems in SCI programs than we are for AMF, Deworm the World, or GiveDirectly programs. This is because SCI has not conducted coverage surveys—the main tool it uses to monitor the quality of its programs—in a representative portion of the distributions it has funded, nor do we expect it to in the future. We worry that results may be systematically missing from the lowest quality programs: i.e., programs that struggle to implement distributions may also struggle to implement surveys. There are also methodological limitations to these surveys, particularly that they rely on children’s recall of the distribution several (~3) months after it has occurred.

 The END Fund

The key differences in our assessments of the END Fund and SCI are that (1) we have seen monitoring results from SCI but not from the END Fund; (2) we do not yet have an estimate of the END Fund’s cost per deworming treatment, and therefore have not modeled its cost-effectiveness; and (3) the END Fund’s budget is set to increase more slowly in the coming year than SCI’s. On (1), as discussed above, we don’t find SCI’s monitoring results to be a major point in its favor. The main reason we decided against additional funding to the END Fund in this round is (2). There are reasons to expect that the END Fund’s cost per deworming treatment may be higher than SCI’s: donations to the END Fund may be partially fungible with the other neglected tropical disease programs it implements (unlike SCI, which is focused exclusively on deworming), and the END Fund is itself a grantmaker to organizations including SCI and Deworm the World, which may raise its cost per treatment relative to SCI’s, since following the work of the charities it supports may require additional funding. A higher cost per treatment could lead us to conclude that the END Fund’s cost-effectiveness is roughly on par with AMF’s, and worse than that of the other deworming organizations we recommend. We regret not recognizing earlier that the cost per treatment analysis could affect this allocation decision; it’s possible that we and the END Fund would have decided to accelerate the process of creating this analysis.

 GiveDirectly

 GiveDirectly is an outstanding organization and is currently very constrained by funding. We think GiveDirectly’s monitoring is stronger than AMF’s and that GiveDirectly would deploy funding more quickly, but we prefer to allocate the available funding to AMF because we believe that AMF’s work is significantly more cost-effective than GiveDirectly’s.

 Malaria Consortium

 We estimate that AMF and Malaria Consortium’s SMC programs are similarly cost-effective. We currently know considerably more about AMF’s track record and plans than we do about Malaria Consortium’s—we have followed AMF for about 8 years and Malaria Consortium for less than a year. We expect to learn significantly more about Malaria Consortium this year. 

 Sightsavers

Earlier this year, on GiveWell’s recommendation, Good Ventures made a grant to Sightsavers to fully fill its execution level 1 gap. We now believe that Sightsavers has limited room for more funding for its deworming program. We had previously estimated a small execution level 2 gap, but no longer believe this gap exists. (The execution level 2 funding was for committing to three rather than two years of work in one country; however, the work in that country is going forward with two years of funding on hand, so we believe it is of limited value to provide a third year of funding currently.) We have not funded execution level 3 for any deworming groups, preferring to fund AMF at that margin.


GiveWell as an organization: progress in 2016 and plans for 2017
Thu, 03/30/2017 - 12:38

This is the third of four posts that form our annual review and plan for the following year. This post reviews and evaluates GiveWell’s progress last year as an organization and sketches out some high level goals for the current year. The first two posts covered GiveWell’s progress and plans on research. The last post in the series will look at metrics on our influence on donations in 2016.

 First, a point of clarification. GiveWell as a legal entity currently employs both (a) staff whose work is described on givewell.org (finding outstanding evidence-backed, cost-effective programs) and (b) staff who work on the Open Philanthropy Project. We expect Open Philanthropy to become a separate organization this year (more below), pending board approval. The scope of this post is limited to (a) – the parts of the organization that will not become part of Open Philanthropy. Open Philanthropy has written about its progress and plans in this post.

 Below, we first note three high-level points about where GiveWell is as an organization today. We then reflect on four questions that are important for thinking about our performance as an organization: 

- Do we have sufficient staff capacity?
- Does our impact justify our operating expenses?
- Does GiveWell have a positive and accurate public image?
- Are we in a stable financial position?

Major organizational developments

 Separation of the Open Philanthropy Project

 We had aimed to complete the transition of Open Philanthropy staff to a new entity by the end of 2016 and did not accomplish this goal, though we are now effectively operating as two separate teams. We now expect, pending board approval, to complete the legal split by mid-2017. After the split, there will continue to be some shared staff between the organizations (GiveWell staff will track the time they spend on work for Open Philanthropy and GiveWell will bill Open Philanthropy for the time). We will continue to share office space. 

 GiveWell as an entity currently employs 35 staff members. After the split, we anticipate that GiveWell will continue to employ 15-20 of the current employees and that Elie Hassenfeld will remain as Executive Director of GiveWell. Holden Karnofsky, Co-Founder of GiveWell, currently spends very little time on GiveWell and will work full time for Open Philanthropy. 

 Outreach is now more of a limiting factor than research

 We’ve gone from feeling that we had more funding available than we had good giving opportunities to a situation where we believe that strong giving opportunities have surpassed available funding. We estimate that we left over $100 million worth of very strong opportunities (top charity execution level 1 or 2 gaps, excluding GiveDirectly) unfilled last year.

 This is due to increased research output (we added three new top charities and two new standouts) in 2016, an expectation of increased research output in the future (from our standard process and Incubation Grants), and decreased expectations of funding from Good Ventures. In a change from the previous year, Open Philanthropy’s tentative guess is currently that the “last dollar” it will give (from the pool of currently available capital) has higher expected value than gifts to GiveWell’s top charities today, leading it to recommend that Good Ventures cap its giving to GiveWell’s top charities at $50 million in 2016.

 We expect to put more emphasis on expanding our outreach to potential donors interested in following our recommendations in 2017 than we have in past years. We are at early stages of thinking through what that might involve. 

 Organizational maturity

 GiveWell will be 10 years old this year and we feel that we’ve reached a relatively stable place in our development. We are now making a major effort to strengthen our organizational infrastructure through filling specialized roles, particularly in operations (finance, donations management, technology, etc.); formalizing policies and procedures; and creating contingency plans for replacing senior staff.

 Four key questions

 Below we pose and respond to four questions about how we are doing as an organization.

 Do we have sufficient staff capacity?

 Operations: To date we have not had sufficient capacity for operations and have been slower to make improvements to our systems than we would have liked. In the last year, we have begun to make major changes to GiveWell’s operations team to try to correct for this. Sarah Ward was named Director of Operations, a new role, and we are pursuing a strategy of (a) hiring specialized firms to handle more of the HR and IT work that generalist staff have done in the past; (b) replacing our external accountants and auditors with firms that specialize in non-profits; and (c) moving current staff into and hiring for specialized roles, such as a donations manager, donor relations assistant, controller, and office manager. Our number of generalist operations staff has decreased; we expect to continue to have a need for a small number of generalist staff to manage relationships with external firms and fill gaps between specialist domains. 

 Our current operations team includes a Director of Operations, two operations generalists (who work on the website, accounting, recruiting, personnel management, donation processing, and IT), an Office Manager, an Administrative Assistant, a Donations Manager, a Donations Assistant, and a Donor Relations Assistant. We are hiring for an Operations and Legal Program Manager and expect to hire for additional roles in the coming months. After the expected spinoff of Open Philanthropy into a separate organization, the office manager, administrative assistant and one of the operations generalists will divide their time between the two organizations and Sarah will manage operations for both organizations temporarily; Open Philanthropy will begin building a separate operations team this year. 

Research: Seven staff work on GiveWell’s research full time or close to full time. Elie Hassenfeld, GiveWell’s Executive Director, spends about half his time on GiveWell research. Elie spends the other half of his time on a combination of the Open Philanthropy Project (about 20% of his time currently) and overseeing outreach, recruiting, and operations for GiveWell.

 Josh Rosenberg and I have taken over much of the research work that Elie and Holden, co-founders of GiveWell, used to do, including all updates on current top charities, reviewing top charity contenders, managing research staff, and some intervention assessments. Holden now spends almost no time on GiveWell research.

 We feel that we have sufficient capacity to follow up with our current top charities, consider promising contenders for top charity recommendations, and make decisions about Incubation Grants. We do not yet have sufficient capacity for reviewing the evidence for and modeling cost-effectiveness of interventions. We aim to make at least one hire for this work in the next few months. More on this in our post about our research plans for the year.

 Outreach: As noted above, we feel we’ve reached the point where we are identifying outstanding giving opportunities more quickly than we can expand our reach to donors to fill the opportunities. Throughout most of our history, we felt that the opposite was true, that the amount of funding we could influence surpassed the opportunities we had identified, so this represents a significant shift for us. We don’t yet have concrete plans for future outreach work, but expect to give outreach significantly more attention than we have in the past.

 We currently have one staff member, Catherine Hollander, who works on outreach full-time. Our outreach priorities in 2016 were to speak or meet with all major donors who were interested in talking to us, take any opportunities that came up to discuss our work with the media, and continue posting regularly to our blog. We feel that we accomplished our goals for connecting with major donors and keeping up with media requests, and fell short on blogging.

 Catherine is leading the search for a Research Analyst, Outreach Focus to do more of the types of outreach we’ve focused on in the past, namely connecting with more media and major donors, and increasing the frequency of blog posts. 

 Does our impact justify our operating expenses?

 GiveWell’s impact on donations (or “money moved”) to our recommended charities likely decreased somewhat in 2016. We are in the process of gathering and analyzing data on our influence on donations, but expect it to be in the range of $80-90 million to recommended charities and $9.2 million for Incubation Grants. Money moved to top charities in 2015 was $110 million.

 Good Ventures’ giving to top charities fell from about $70 million to $50 million, due to changes in the way it is allocating funding across priorities and to a large one-off grant to GiveDirectly in 2015. Based on GiveWell’s recommendations, Good Ventures also funded $9.2 million in Incubation Grants, up from about $400,000 to $500,000 in each of 2014 and 2015. 

Over the same period, we spent approximately $2 million on our operations. In total, GiveWell as an entity spent about $5.5 million on operational expenses, of which $3.5 million was spent on the Open Philanthropy Project.

 We previously wrote that we believe that expenses that are 15% of money moved are well within the range of normal, so we feel comfortable with the relative size of our operating expenses at this point.
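As a rough back-of-the-envelope check using the figures above (GiveWell’s ~$2 million in operating expenses against $80-90 million in expected money moved to recommended charities):

$$\frac{\$2 \text{ million}}{\$80\text{-}90 \text{ million}} \approx 2\text{-}2.5\%,$$

which is far below the 15% benchmark.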

 Does GiveWell have a positive and accurate public image?

 We believe that GiveWell’s public image is largely positive and reasonably accurate. This is true for all or nearly all of the major media coverage we have received. See, for example, coverage on NPR and in The Atlantic, Esquire and Vox.

 There are two aspects of our public image that we would like to change. First, media has sometimes portrayed our top charities as having guaranteed impact and as being the “best” charities—for example, a 2015 article in The Atlantic said, “If what you want is to save lives with certainty, several people said, you have to go to GiveWell.” We believe that our top charities offer the highest expected value among evidence-backed opportunities that we have found to date, but are not risk-free and may not be the best giving opportunities for donors with different values or unique expertise, connections, or resources. Second, charities may have an inaccurate view of the costs and benefits of engaging with us—more in this post. 

 Our biggest public image project in the last year was launching a redesigned website. This project took much longer than expected. The original launch date was April 2015, but due to unexpected problems and lack of staff capacity, it didn’t go live until September 2016. Our previous website had an outdated look and confusing architecture. We think the new one is a large improvement, though we aim to make some further improvements in the future. 

 Are we in a stable financial position?

 The short answer is yes.

 In 2016, we raised about $3 million in revenue available for funding our operations that was not specifically for funding Open Philanthropy Project expenses (Open Philanthropy has, recently, been fully funded by Good Ventures). We have roughly projected GiveWell’s expenses (excluding pre-split Open Philanthropy expenses) at $2.7 million in 2017 and $3.2 million in 2018. Given our money moved to top charities and our experiences with fundraising in the past, it seems reasonable to expect that we will be able to raise this funding, though we expect to do a more detailed analysis of our financial situation once the details of the split with Open Philanthropy have been fully worked out.

 We do not expect revenue available for operations to decrease as a result of splitting with Open Philanthropy because most major donors have told us that they support GiveWell due to our work identifying top charities. We think it is likely that Good Ventures will continue to support 20% of GiveWell’s operational budget, as it has for the last several years. 


GiveWell’s research plans for 2017
Wed, 03/29/2017 - 12:10

This is the second of four posts that form our annual review and plan for the following year. The first post reviewed our progress in 2016. The following two posts will cover GiveWell’s progress and plans as an organization and metrics on our influence on donations in 2016.

 Our primary research goals for 2017 are to:

- Speed up our output of new intervention assessments, by hiring a Senior Fellow and by improving our process for reviewing interventions at a shallow level.
- Increase the number of promising charities that apply for our recommendation. Alternatively, we may learn why we have relatively few strong applicants and decide whether to change our process as a result. Research Analyst Chelsea Tabart will spend most of her time on this project.
- Through GiveWell Incubation Grants, fund projects that may lead to more top charity contenders in the future and consider grantees No Lean Season and Zusha! as potential 2017 top charities.
- Further improve the robustness and usability of our cost-effectiveness model.
- Improve our process for following the progress of current top charities to reduce staff time, while maintaining quality. We also have some specific goals (discussed below) with respect to answering open questions about current top charities.

We discuss each of these goals in more depth below.

 Intervention assessments

Intervention assessments are key to our research process. We generally only consider recommending funding for charities that are implementing one of our priority programs (an exception is if an organization has done rigorous evaluation of its own program, though in practice we have found this to be very rare). In recent years, we have completed few intervention reports, which has limited our ability to consider new potential top charities. We plan to increase the rate at which we form views on interventions this year by:

- Hiring a Senior Fellow (or possibly more than one). We expect a Senior Fellow to have a Ph.D. in economics, public health, or statistics or equivalent experience and to focus on in-depth evidence reviews and cost-effectiveness assessments of interventions that appear promising after a shallower investigation. In addition, Open Philanthropy Project Senior Advisor David Roodman may spend some more time on intervention-related work.
- Doing low-intensity research on a large number of promising interventions. We generally start with a two- to four-hour “quick intervention assessment,” and then prioritize interventions for a 20-30 hour “interim intervention report” (example). We don’t yet have a good sense of how many of these we will complete this year, because we’re unsure both about how much capacity we will have for this work and about how many promising interventions there will be at each step in the process.
- Continuing to improve our systems for ensuring that we become aware of promising interventions and new relevant research as it becomes available. We expect to learn about additional interventions by tracking new research, particularly randomized controlled trials, in global health and development and by talking to select organizations about programs they run that they think we should look into.

Charity applications

 In the past few years, we have been surprised by how little interest there has been from charities in applying for a GiveWell recommendation. Our impression is that for global health and development charities there are relatively few funders of our size: in 2015, we tracked $110 million given due to our research; we are in the process of compiling the data for 2016, but expect it to be in the range of $80-90 million to recommended charities. We would like to better understand whether we have failed to get the word out about the potential value we offer or communicate well about our process and charities’ likelihood of success, or, alternatively, whether charities are making well-informed decisions about their fit with our criteria. (More on why we think more charities should consider applying for a GiveWell recommendation in this post.)

 This year, we have designated GiveWell Research Analyst Chelsea Tabart as charity liaison. Her role is to increase and improve our pipeline of top charity contenders by answering charities’ questions about our process and which program(s) they should apply with, encouraging promising organizations to apply, and, through these conversations, understanding what the barriers are to more charities applying. 

 We aim by the end of the year to have a stronger pipeline of charities applying, have confidence that we are not missing strong contenders, or understand how we should adjust our process in the future.

 Incubation Grants

 We made significant progress on Incubation Grants in 2016 and plan in 2017 to largely continue with ongoing engagements, while being open to new grantmaking opportunities that are brought to our attention. 

 Among early-to-mid stage grants, we plan to spend the most time on working with IDinsight and New Incentives (where our feedback is needed to move the projects forward), and a smaller amount of time on Results for Development and Charity Science: Health (where we are only following along with ongoing projects). 

 Another major priority will be following up on two later-stage grantees, No Lean Season and Zusha!, groups that are contenders for a top charity recommendation in 2017. For No Lean Season, a program run by Evidence Action, our main outstanding questions are whether the program will have room for more funding in 2018 and whether monitoring will be high quality as the program scales. We have similar questions about Zusha! and in addition are awaiting randomized controlled trial results that are expected later this year.

 Cost-effectiveness model

 We plan to continue making improvements to our cost-effectiveness model and the data it draws on (separate from adding new interventions to the model, which is part of the intervention report work discussed above). Projects we are currently prioritizing include:

- Making it more straightforward to see how personal values are incorporated into the model and what the implications of those values are.
- Revisiting the prevalence and intensity adjustment that we use to compare the average per-person impact of deworming in places that our top charities work to the locations where the studies that found long-term impact of deworming were conducted. More in this post.
- Improving the insecticide-treated nets model by revisiting how it incorporates effects on adult mortality and adjustments for regions with different malaria burdens and changes in malaria burden over time.

Current top charities

Our goal this year is to maintain the quality of top charity updates while decreasing the amount of time that we spend, and that we ask top charities to spend, on this work. Below, we detail our plans for following up with each charity.

 Deworm the World Initiative, GiveDirectly, and Schistosomiasis Control Initiative (SCI)

 We have now followed these groups for several years and do not have major outstanding questions about them. We plan to ask for updates on financial information, monitoring results, and room for more funding and have regular phone calls with them to learn about operational changes that might lead us to ask additional questions.

 Against Malaria Foundation (AMF)

 We have two major outstanding questions about AMF that we hope to make progress on this year:

- Will AMF’s monitoring processes be high quality? We wrote about our concerns about AMF’s past monitoring last year and expect new information to be available this year.
- Going forward, AMF aims to fund larger distributions and commit funding further ahead of when a distribution is scheduled to occur than it has, for the most part, done in the past. Will this increase the extent to which AMF funds displace funds from other sources, or will there continue to be evidence that AMF’s funds are largely adding to the total number of nets distributed? More on this question in our review of AMF.

To help us make progress on these questions, we and AMF have agreed to have monthly calls to discuss questions we have about the monitoring AMF is producing and what AMF is learning about distributions it is considering funding. We will likely also seek out calls with AMF’s partner organizations to discuss these questions.

 In order to estimate AMF’s room for more funding, we will seek out information on the location and size of funding gaps for mass net distribution campaigns from AMF, the African Leaders Malaria Alliance, and possibly other funders of nets. As we have in the past, we will use this information in conjunction with conversations with AMF about non-funding bottlenecks to its ability to fill various gaps.

 The END Fund – deworming program

 Compared with the charities we have recommended for several years, we have more open questions about the END Fund. The main questions we plan to seek more information on this year are:

- We have not yet seen monitoring from the END Fund on par with that from our other top charities. We expect results from coverage surveys of END Fund programs this year. Will these surveys be high quality and demonstrate that the END Fund is funding successful programs?
- We have not yet tried to compare the cost-effectiveness of the END Fund to our other top charities in our cost-effectiveness model. We will be seeking additional information from the END Fund about cost per treatment and baseline infection rates.
- Questions around room for more funding: the extent to which funding due to GiveWell’s recommendation increases the amount that the END Fund spends on deworming versus other programs, actual and projected revenue from other sources, and what deworming grantmaking opportunities the END Fund expects to have.

We visited the END Fund’s programs in Rwanda and on Idjwi island, DRC, in January 2017 and will publish notes and photos from our visit shortly.

 Malaria Consortium – seasonal malaria chemoprevention program

 As with the END Fund, we have more open questions about Malaria Consortium than we do for the charities we have recommended for several years. Our main priorities are:

- Further research on the evidence of effectiveness, cost-effectiveness, and potential downsides of seasonal malaria chemoprevention (SMC) (due to time constraints we have not yet completed a full intervention report, though we felt sufficiently confident in the intervention to recommend Malaria Consortium).
- Getting a better understanding of the methodology Malaria Consortium uses for estimating coverage rates.
- Completing a more in-depth room for more funding analysis for the program for 2018 than we did for 2017.

Malaria Consortium expects to have several new studies of its SMC programs to share in April 2017 (details).

 We may visit a Malaria Consortium seasonal malaria chemoprevention program in summer 2017.

 Sightsavers – deworming program 

 As with the END Fund and Malaria Consortium, we have more open questions about Sightsavers than we do for the charities we have recommended for several years. We expect to make limited progress this year because the first deworming mass drug administration funded with GiveWell-influenced funds is not expected to take place until September at the earliest and monitoring results aren’t expected until early 2018. Because Sightsavers has done fairly little deworming in the past year, we don’t expect to be able to learn much from its ongoing programs. Our main priorities for the year are:

- Getting more information from Sightsavers about baseline prevalence and intensity of worm infections in the areas where it is working, to inform our cost-effectiveness analysis.
- Using Sightsavers’ budget for the projects and planned treatment numbers to improve our estimate of the cost per treatment – another input into our cost-effectiveness analysis. Our current cost per treatment estimate is very rough.
- Completing a room for more funding analysis for 2018.

Standout charities

 Standout charities are groups that we have a large amount of information about and that meet some but not all of our criteria. Because we have not followed them closely over time, it is possible that they may now be a stronger fit (or that they no longer focus on the program we reviewed). We plan to have at least one phone call with each of these groups to discuss whether anything has changed that might lead us to reopen consideration of the organization as a potential top charity. Due to our focus on organizations that are most likely to become top charities, we don’t expect to make this work a priority beyond that.


GiveWell’s progress on research in 2016
Mon, 03/27/2017 - 10:49

This is the first of four posts that form our annual review and plan for the following year. This post reviews and evaluates last year’s progress on our work of finding and recommending evidence-based, thoroughly vetted charities that serve the global poor. The following three posts will cover our plans for GiveWell’s research in 2017, GiveWell’s progress and plans as an organization, and metrics on our influence on donations in 2016.

 Summary

We feel that 2016 was a highly successful year for GiveWell’s research. We accomplished or made significant progress on all of our top priorities and accomplished some of the goals we didn’t know if we would have time for. Our research output was greater than in any past year. We added three new top charities and two new standout charities. We made important progress on building the pipeline of future GiveWell top charities through our work on GiveWell Incubation Grants. The research team’s staff capacity has continued to increase and we expect output to continue to grow.

 More subjectively, we feel that the quality of our research has continued to improve. Of particular note are our improved understanding of room for more funding for insecticide-treated nets and of the evidence for deworming. 

 We compare our annual output for 2012-2016 in this spreadsheet.

 Our progress in 2016 relative to our plans

 In early 2016, we laid out our goals for research in 2016. Below we discuss each goal in two of the categories we set out last year, “top priorities” and “other research we will undertake if we have the time to do so,” and what progress we made on each. 

 “Top priorities”

 Supporting the development of potential future GiveWell top charities: making grants to organizations that could become top charity contenders in the future or supporting research that could lead to more organizations that are a strong fit with our criteria. […]

 We investigated and recommended that Good Ventures make grants to five early stage projects: No Lean Season (migration assistance), New Incentives (conditional cash transfers), Zusha! (road safety), Charity Science: Health (immunizations), and Results for Development (childhood pneumonia treatment). We have published grant descriptions for most of these grants; three are forthcoming. We have also begun working closely with IDinsight to partner with charities that work on priority programs to strengthen their monitoring and evaluation to increase the chances that the charities meet our criteria in the future.

 Considering additional funding for insecticide-treated nets [beyond the Against Malaria Foundation] […]

 We spent relatively little time on this priority because (a) the Against Malaria Foundation (AMF) succeeded in signing agreements for several major distributions early in the year, increasing our estimate of its ability to absorb additional funds; and (b) our initial conversations with large funders of nets seemed unlikely to result in a top charity recommendation. At the end of the year, we found that the Against Malaria Foundation was able to absorb considerably more funding than we directed to it. This was in part due to Good Ventures’s shift away from continued growth in funding for GiveWell top charities. Separately, we also gained a stronger understanding of the size and nature of funding gaps for nets globally through country case studies and conversations to understand the global funding landscape.

 Intervention prioritization: quick investigations on a large number of interventions with the goal of finding more priority programs. […]

This was a major priority for us and we made some progress, but not as much as we wanted to. The main things we did were (a) quick reviews (3-10 hours spent reviewing the evidence base) for ~30 programs, which ultimately led us to prioritize seasonal malaria chemoprevention (SMC) and recommend Malaria Consortium’s work on SMC; (b) publishing three “interim intervention reports” (on SMC, integrated community case management, and severe acute malnutrition) which have provided a template for mid-level assessments and enabled more staff to produce such reports in 2017 (one of which, on Sayana Press, has been published); and (c) writing a full intervention assessment of voluntary medical male circumcision, which is now one of our priority programs.

 Current top charities: continuing to follow our current top charities and trying to answer our highest priority unanswered questions about these groups. […]

 We answered our most important questions about the top charities we recommended in 2015. In particular, we had stronger answers at the end of the year on AMF’s progress at signing agreements, the quality of AMF’s monitoring, Schistosomiasis Control Initiative’s past spending and financial position, and had an overall much stronger understanding of Deworm the World Initiative, particularly its work in Kenya.

 However, we feel that we spent too much time on this work in 2016. Three staff spent the majority of their time on top charity updates and, while following the progress and plans of our top charities is a crucial piece of GiveWell’s work, the value we got from this work felt out of proportion with the time spent. In 2017, we plan to have a single staff member do most of this work and expect it to take a half to two-thirds of a full-time job. Three other staff will spend a small portion of their time, totaling approximately the equivalent of one full-time job, on this work. 

 New evidence on deworming and bednets. The next round of follow up on a key deworming study is expected to be available later this year and could make a big difference to our view of deworming. We’re also looking more into the degree to which insecticide resistance may be reducing the impact of bednets. […]

We completed an evidence review on the impact of insecticide resistance on malaria control. David Roodman, Senior Advisor at the Open Philanthropy Project, spent several months revisiting the evidence for deworming and summarizing his findings in two blog posts.

 We wrote last year that we expected to see a new round of follow-up data on a key deworming study that could significantly affect our view of deworming. We have seen preliminary results and hope to get more complete results later this year and write about them at that time. We also decided to recommend a ~$1 million grant to support a more intensive 20-year follow-up to the Worms at Work study (writeup forthcoming).

 “Other research we will undertake if we have the time to do so”

 Micronutrient fortification charities. […]

 We completed interim reviews of Food Fortification Initiative and Project Healthy Children and added them to our list of standout charities.

 Neglected tropical disease (NTD) charities [and…] other organizations – if organizations apply for a recommendation and seem sufficiently promising, we will aim to review them.

 Perhaps the most important development in 2016 came out of two secondary goals for 2016: continuing investigations of deworming programs (which led to recommending Sightsavers and the END Fund for their work on deworming) and continuing to be open to applications from charities (which led to recommending Malaria Consortium for its work on seasonal malaria chemoprevention).

At the same time, in the past few years, we have been surprised by how little interest there has been from charities in applying for a GiveWell recommendation. We have come to believe that charities may have misconceptions about our process or lack information about whether they would be a fit for our criteria, and that this could be improved by our reaching out to more promising organizations and taking the time to understand the reasons why they have not applied in the past. We discuss what we are doing this year on charity outreach in the next post in this series.

 Surgery charities. We have had several conversations with organizations that work on cataract surgery and we may reach out to organizations that work on obstetric fistula surgery. […]

In addition to the conversations with organizations that work on cataract surgery, we also spoke to several groups that work on obstetric fistula. We have not identified a group we would like to invite to apply. This work is now moving ahead primarily through our engagement with IDinsight to work with charities to strengthen their monitoring and evaluation.

 Publishing research we largely completed in 2015: updates on standout charities (GAIN, IGN, and Living Goods), interim reviews of charities we began investigating in 2015 (Sightsavers, END Fund, and Project Healthy Children), and intervention reports (folic acid fortification, surgery for cataracts, trachoma and fistula, measles immunization campaigns, mass drug administration for lymphatic filariasis, and “Targeting the Ultra Poor”).

 We made some progress on this goal. We published updates on standout charities (GAIN, Iodine Global Network, and Living Goods), reviews of charities we began investigating in 2015 (Sightsavers, the END Fund, and Project Healthy Children), and one of the intervention reports we hoped to publish in 2016 (cataract surgery). We did not publish the other intervention reports we hoped to (folic acid fortification, surgery for trachoma and fistula, measles immunization campaigns, mass drug administration for lymphatic filariasis, and “Targeting the Ultra Poor”).

 Other work

 Compared to previous years, we made a lot of progress on improving the usability of and getting staff engagement with our cost-effectiveness model. We designated a staff member, currently Chris Smith, to work on this close to full-time. Chris re-formatted the file to make it easier for staff and others to input uncertain and subjective values, integrated the seasonal malaria chemoprevention analysis into the model, and restructured the bed nets model to take into account country-level variation in malaria rates. 


March 2017 open thread
Tue, 03/21/2017 - 13:36

Our goal with hosting quarterly open threads is to give blog readers an opportunity to publicly raise comments or questions about GiveWell or related topics (in the comments section below). As always, you’re also welcome to email us at info@givewell.org or to request a call with GiveWell staff if you have feedback or questions you’d prefer to discuss privately. We’ll try to respond promptly to questions or comments.

 If you have questions related to the Open Philanthropy Project, you can post those in the Open Philanthropy Project’s most recent open thread.

 You can view our December 2016 open thread here.


Why we’re considering Zusha! as a potential 2017 top charity
Tue, 02/28/2017 - 12:33

This post will discuss Zusha!, a 2017 GiveWell top charity contender and GiveWell Incubation Grant recipient. We previously highlighted No Lean Season as a potential 2017 top charity originating from our Incubation Grants work.

 GiveWell first learned about Zusha! in 2013 following our publication of a shallow investigation into road safety. This month, Good Ventures made a GiveWell Incubation Grant of $900,000 to support the Georgetown University Initiative on Innovation, Development and Evaluation (gui2de) for work on Zusha!. Also this month, two GiveWell staff members visited Zusha! in Nairobi to learn more about its work. We plan to share additional details from their site visit in the future; this post is meant to provide a higher-level overview of Zusha! as a potential GiveWell recommendation.

 Road safety campaign

 Car accidents are a major cause of preventable death and disability around the globe, killing approximately 1.25 million people each year and injuring an additional 20 to 50 million. On the current trajectory, the World Health Organization (WHO) projects that road traffic crashes will be the 7th leading cause of death globally in 2030. The problem is particularly pronounced in low- and middle-income countries, which account for 90% of all traffic deaths, despite having ~50% of the world’s vehicles, according to the WHO.

 Zusha! is a road safety campaign that targets unsafe drivers of public service vehicles. The campaign distributes stickers for buses with messages encouraging passengers to speak up and urge drivers to drive more safely—”Zusha” means “protest” in Swahili. Drivers are incentivized to keep the stickers in their vehicles via enrollment in a weekly lottery with cash prizes. The goal is to reduce traffic deaths and injuries. gui2de has primarily worked in Kenya.

 Zusha! is part of gui2de. Professors James Habyarimana and William Jack have conducted two randomized controlled trials (RCTs) of the program in Kenya: first, a small pilot study of ~2,400 vehicles followed by a larger study of ~12,500 vehicles. The researchers found large, statistically significant effects of the program in reducing the number of accidents for vehicles in the treatment group.[1] With support from a Development Innovation Ventures (DIV) grant from USAID, Zusha! scaled up in Kenya following the second RCT. As of March 2016, Professors Habyarimana and Jack estimated that the campaign was reaching 25,000 minibuses and larger buses, out of roughly 40,000 in the country.

 gui2de is running three additional RCTs on this program in Rwanda, Tanzania, and Uganda. 

 A note on terminology in this blog post

In this blog post, we generally refer to the program we’re interested in as Zusha! to distinguish it from gui2de’s other programs (not related to road safety). However, Zusha! is only the name of the road safety campaign in Kenya; the road safety campaigns in Rwanda, Tanzania, and Uganda have other names. Although the Kenya campaign is the one we’re most knowledgeable about, it’s possible that a GiveWell top charity recommendation would include gui2de’s road safety work in other countries. We’ve used the term Zusha! in this post for simplicity.

 Potential future top charity

 We’re interested in Zusha! as a potential future top charity due to the potential strength of the evidence base and cost-effectiveness. 

 We believe the evidence for Zusha! is compelling. The pilot study finds that driving accidents decreased by a half to two-thirds and the larger Kenya RCT finds that driving accidents decreased by between one-quarter and one-third. These effects seem surprisingly large to us, and we are interested to see whether the intervention will find similar effects in future RCTs. In our most up-to-date cost-effectiveness calculation, we estimate a cost of ~$13,000 per road accident death averted (including injuries and incorporating discounts to account for whether the studies would be likely to replicate and questions around external validity).

 GiveWell’s current estimate is that the cost-effectiveness of Zusha! is comparable to the Against Malaria Foundation, one of our top charities, and about 3-4x as cost-effective as direct cash transfers, a baseline we use for comparing interventions, although this may change as we incorporate additional inputs. We incorporated age weights into this estimate that reflect the older average age of passengers on vehicles, relative to average age of people whose deaths are averted by AMF-distributed nets, and approximate GiveWell median staff values for averting an adult death.

 GiveWell Incubation Grant

 Good Ventures’ recent grant to gui2de is intended to:

- Allow gui2de to continue operating at scale in Kenya and collect higher-quality monitoring data of that work. Strong monitoring data, such as information to demonstrate the stickers are being distributed to the intended vehicles or that the stickers remain in use over time, is a necessary component for a top charity recommendation and is one of our biggest open questions about Zusha!.
- Increase the sample size of the RCT in Uganda by ~50%, improving the study’s power and making it more likely the results will inform our views.
- Potentially improve the quality of data collection for the RCT in Tanzania.
- Provide funding to enable gui2de to continue its ongoing work through the end of 2017, when GiveWell might potentially name Zusha! a top charity, in which case we would expect to direct it substantial funding. Enabling gui2de to continue operating in Rwanda, Tanzania, and Uganda for six additional months would also allow faster scale-up if the RCT results are positive.

A write-up on the February 2017 grant is forthcoming. It will be published here.

 Our open questions

We have several open questions about Zusha!’s work that will be key in helping us decide whether to recommend Zusha! as a top charity:

- We expect that results from the three pending RCTs in Rwanda, Tanzania, and Uganda will substantially affect our view of the likely impact of the program, although we don’t expect to have full results from all three RCTs by the end of 2017.
- Zusha! researchers found a nearly statistically significant impact for the placebo intervention (stickers that had messages like “Travel well”) in the second Kenya study. This finding casts uncertainty on the mechanism by which the intervention works and whether the intervention is having an impact. Additional RCTs may help fill out our understanding.
- Our cost-effectiveness analysis suggests that Zusha! is competitive with—but not far better than—our current top charities, at ~3-4x as cost-effective as cash transfers. GiveWell’s cost-effectiveness analyses tend to become worse (less cost-effective) as we add new inputs and adjustments. Our estimate of Zusha!’s cost-effectiveness already became significantly worse when a GiveWell Research Analyst, Leon Zhang, identified a mathematical error in one of the studies published on Zusha!’s program. It’s possible we will conclude that Zusha! is not as cost-effective as our other top charities after spending additional time on this.
- Provision of high-quality monitoring information to demonstrate that the stickers are being used in buses over time. We understand from our recent site visit that Zusha! tentatively plans to do three types of monitoring in Kenya going forward: at National Transport and Safety Authority (NTSA) inspection centers, at bus parks where passengers are picked up, and via the lottery. We have questions about the implementation of these processes, but our impression is that Zusha! is working to significantly improve its monitoring, and we expect to have more information by the end of the year.
- Zusha! is a behavioral intervention. Over time, people may get used to seeing the stickers, causing the effect to diminish. We currently have limited information on the extent to which this has occurred or may occur in the future. We hope additional information about long-term impacts of the program will enable us to assess this question over time.

Path to GiveWell top charity

 We publish our updated top charities list in November. By then, we expect to have new monitoring information from Kenya as well as preliminary RCT results from Tanzania. We also expect to have partial results from the Uganda RCT. (We do not expect to have results from the Rwanda RCT.) We guess that the information we will have by late 2017 should be sufficient to assess Zusha! for a potential GiveWell recommendation.

 Notes [1] 

- Results from the pilot study (published 2010): “Our results indicate that insurance claims fell by a half to two-thirds, from an annual rate of about 10 percent without the intervention, and that claims involving injury or death fell by 60%.” Habyarimana and Jack 2010, p. 1
- Results from the larger study (published 2015): “Overall, the stickers reduce insurance claims of matatus assigned to treatment groups by between one-quarter and one-third on an intent-to-treat (ITT) basis. Among the roughly 8,000 vehicles in the treatment groups, the reduction was 25%, and we estimate that about 140 accidents were avoided per year, and about 55 lives were saved annually.” Habyarimana and Jack 2015, p. 1

What does it mean when a charity declines to participate in GiveWell’s review process?
Tue, 02/21/2017 - 12:37

We noted in a recent blog post that 10 out of 23 organizations that we invited to apply for a recommendation were named top charities or standout charities, and that nine of the remaining charities declined to participate in our process. What does it mean when a charity declines to participate?

 Charities can withdraw from GiveWell’s review process  at any time and for any reason. Typically, when a charity withdraws from our process, we publish documents that we have permission to publish, such as notes from previous conversations. We also publish a page indicating that the charity declined to fully participate in our application process and run this page by the charity before publishing it. Some past ‘decline’ pages appear here, under “Organizations that declined to fully participate in our process.” 

 A charity may decline to participate at any stage of the GiveWell review. All of the following could lead to GiveWell publishing a page indicating that a charity declined to participate:

- A charity doesn’t respond to our invitation to apply.
- A charity has one call with GiveWell research staff and decides not to participate.
- GiveWell writes up an interim review (example of a published interim review, from a charity that did not decline to participate), the charity reviews it, and then declines to participate. The review is not published in this case.

GiveWell generally doesn’t publish the reason a charity decided not to participate in order to preserve this option for charities who are concerned that engaging with GiveWell could potentially harm them if GiveWell publishes a negative review. We discuss this in greater detail here:

 While we want to be open, we don’t want to create a dynamic in which working with us creates significant risks for grantees. (This could lead good organizations to avoid working with us.) So we’ve had to find ways of balancing the goal of openness with the goal of making it “safe” for an organization to work with us.

 For this reason, a typical ‘decline’ page reads: “Organization X declined to participate in our process,” with no further context, so that organizations can engage with GiveWell without worrying that we’ll publish a harmful review of their work. (This post discusses some of the pros and cons of this approach for donors who rely on our research and charities we review.)

We hope to minimize GiveWell staff time spent with groups that ultimately decline to participate so that we can focus our capacity on organizations that could become top charities. We also hope to minimize the number of groups that decline to participate due to misunderstanding GiveWell’s process, expectations around transparency and review publishing, or the value-add of a GiveWell recommendation. We now have a staff member, Chelsea Tabart, who works closely with charities we might review so they know what to expect.

 We hope that concerns about a negative review will not be a barrier to organizations working with us, and recently published a blog post on why more charities should consider applying for a GiveWell recommendation.


Why more charities should consider applying for a GiveWell recommendation
Tue, 02/14/2017 - 12:00

This post will highlight major changes to GiveWell and our charity review process over the past few years, with hopes of encouraging certain organizations working in global health and development to apply for a top charity recommendation.

 We believe that GiveWell may now be a better fit for a number of organizations than we had been in the past. However, we still do not expect to fund the vast majority of organizations. GiveWell remains focused on international aid, and our criteria will likely rule out many organizations.

 In brief,

- GiveWell’s top charities receive a substantial amount of funding (millions of dollars each).
- Charities quickly learn whether we think they might be a potential top charity, before putting in lots of time.
- Charities we don’t recommend may receive a $100,000 participation grant.
- We’re open to funding large, multi-program organizations as well as small, single-program organizations.

Why charities should consider applying

Our recommended charities receive a lot of funding. In 2015, GiveWell’s recommendation resulted in charities receiving an estimated $110 million; the majority of this funding was divided between our four top charities. That’s money we directly track from individual donors who attribute their gifts to our recommended charities. The total has grown significantly in recent years. We have not yet completed our metrics report for 2016; we expect money moved last year to be similar to or slightly lower than money moved in 2015.

 Our top charities and standout charities have received annual “incentive” funding for earning these designations in recent years, with the aim of encouraging other organizations to seek to meet our criteria. These grants have been made by Good Ventures, with GiveWell’s recommendation. In 2016, we increased the annual incentive amount that each of our top charities receives from $1 million to $2.5 million.

 Last year, we recommended that Good Ventures make $250,000 grants to each standout, and Good Ventures followed our recommendation. We’re not sure what we’ll recommend in the future, but we’re tentatively planning to recommend $100,000 grants for standout charities.

  We provide early-stage support to promising organizations and programs through our Incubation Grant program, and recommended more than $10 million in Incubation Grants in 2016. The goal of GiveWell Incubation Grants is to support the development of future top charities by providing funding to charities or programs that don’t yet meet GiveWell’s criteria but may develop into future top charities or priority programs with additional funding.

  We’ve streamlined our charity review process so that we ask for relatively little time, compared to our understanding of a typical grant application, from an applicant before we tell them whether they are a promising candidate. Generally, the first phase of our application involves one or two 1-2 hour phone calls between GiveWell and program staff, after which we ask the staff to provide feedback on notes from that conversation for us to publish on our website. We also ask for internal documents to demonstrate how the organization uses funds and tracks impact. If a charity doesn’t seem likely to become a top charity after this initial review, we do not ask for more of its staff’s time.

 After GiveWell completes the first phase of our review, we ask charities to sign off on GiveWell publishing an interim review of that organization. GiveWell will then make a $100,000 participation grant to the group, regardless of whether it is ultimately named one of GiveWell’s recommended charities.

For organizations we explicitly invite to continue in the application process after our initial review, the odds of being named a top charity are good. Since 2013, 10 out of 23 organizations that we invited to apply have been named a top charity or standout; nine of the remaining organizations declined to participate, three are still being considered, and we decided not to recommend one.

 We’re open to assessing large, multi-program organizations as well as small, single-program groups. In 2016, we recommended restricted donations to individual programs run by large, multi-program organizations: in particular, Sightsavers’ deworming program and Malaria Consortium’s seasonal malaria chemoprevention (SMC) program. We previously almost exclusively recommended unrestricted donations to smaller organizations (for reasons discussed in this blog post).

We have a staff member, Chelsea Tabart, dedicated to helping charities understand us, our process, and the types of funding GiveWell offers. We created this position in response to our impression that not all charities working on programs we are interested in are aware of our interest in funding their work, what is involved in our review process, or how much funding we’re directing to recommended charities. We hope that having a staff member serve as a charity liaison will make it more likely that charities who might be a good fit for GiveWell funding apply.

  The above list highlights changes to GiveWell’s process that have occurred over time and may make GiveWell a better fit for some organizations than it was previously. Other core elements of our review process and criteria have not changed. We remain interested in international health and development, for example, and committed to supporting interventions for which a strong evidence base exists or may be developed with additional funding. In addition:

We prefer to provide unrestricted funding. Within program areas, or when recommending funding to organizations that run only one program, we offer unrestricted funding and have no formal reporting requirements. Instead, we require check-in conversations approximately every quarter, as well as ongoing budgets, monitoring and evaluation results, or similar materials to keep us up to date.
We prefer to publish as much information as we can, although we keep all non-public information confidential until we have explicit permission to publish it. Conversations with GiveWell are ‘off the record’ until we have approval to share non-public information.

What makes a top charity?

GiveWell recommends charities that are evidence-backed, cost-effective, and underfunded. We’re looking for charities that are implementing a program with a strong independent evidence base, such as multiple randomized controlled trials (we list programs that meet these criteria—our “priority programs”—here). We’re looking for charities whose work is in the same range of cost-effectiveness as our current top charities and that have a significant need for additional funding. Our top charities are transparent about their work: they can share detailed monitoring and evaluation information, financials, and future plans, and they are comfortable with GiveWell discussing their work publicly and in detail.

Applying for a GiveWell recommendation

We hope the above will encourage additional charities to consider applying for a GiveWell recommendation or reach out with questions about our process. If you work at or know of an organization that might be a good fit based on the above information, please contact us. You can email us for more details about our application process at applications@givewell.org.


Open Philanthropy Project post on giving suggestions pertaining to recent executive actions

Thu, 02/09/2017 - 14:12

GiveWell has recently received a number of questions about where to donate in response to recent executive actions in the United States. The Open Philanthropy Project published a blog post today with its suggestions. Read it here.


Why we’re considering No Lean Season as a potential 2017 top charity

Fri, 02/03/2017 - 14:17

In recent years, we’ve added a new source for potential GiveWell top charity recommendations: GiveWell Incubation Grants. This post will highlight a GiveWell Incubation Grant recipient, No Lean Season, that we see as a top charity contender for 2017.

 

 GiveWell has traditionally identified our top charities through our standard process, during which we examine a charity’s track record and funding needs. Our goal with GiveWell Incubation Grants, outlined in an earlier blog post, is to grow the pipeline of potential future top charities, in part by supporting organizations at an earlier stage than we would traditionally consider them for a top charity recommendation. We generally expect there to be a lag of a few years between receiving an Incubation Grant and being considered for a top charity recommendation. 

Good Ventures, a large foundation with which we work closely, funds GiveWell Incubation Grants. Good Ventures made its first Incubation Grant to No Lean Season in 2014, and we now believe No Lean Season will be a top charity contender when we update our recommendations at the end of 2017. We’re planning to highlight another 2017 contender, Zusha!, in a future post.

 Seasonal income support program

No Lean Season offers subsidies to low-income agricultural workers in Bangladesh to incentivize them to temporarily migrate from rural areas to urban areas, where they may earn higher seasonal wages. These subsidies (which may be made as grants or loans) are around $8-19 and cover travel costs and a couple of days of food. Follow-up studies found that individuals who once received an incentive to migrate chose to do so again—without a subsidy—at a higher rate than would otherwise be expected, suggesting they found migrating to be useful.

 The below infographic from No Lean Season shows a high-level overview of the intervention (click for detail):

 

 Evidence Action, the parent organization of GiveWell top charity Deworm the World Initiative, started No Lean Season as part of Evidence Action Beta, its program to test interventions that could be significantly scaled up.

 How we decided to support No Lean Season

 We approached Evidence Action in late 2013 to express our interest in supporting the creation of new GiveWell top charities.

 In March 2014, Good Ventures made a $250,000 grant to Evidence Action to support the investigation and scale-up of promising programs. Since then, Good Ventures has made three additional grants totaling approximately $2.7 million to support the program’s scale-up; the write-up for the most recent grant, made in December 2016, is forthcoming.

 No Lean Season as a GiveWell top charity contender

 We assess potential GiveWell top charities along four criteria: evidence of effectiveness, cost-effectiveness, transparency, and room for more funding. No Lean Season appears as a plausible contender when reviewed along these dimensions. We plan to spend significantly more time reviewing No Lean Season this year as we move forward in our top charity review process and update our views on its work.

Evidence of effectiveness. A number of randomized controlled trials (RCTs) have studied the effects of seasonal income support in northern Bangladesh, where Evidence Action is scaling up the program. Trials conducted in 2008 and 2014 found significant effects on household expenditures and income, respectively, during the relevant season. In addition, later follow-ups of households that were incentivized to migrate found that they did so again at higher rates, even in the absence of a continued incentive; Mushfiq Mobarak, a Yale economics professor and a lead researcher on No Lean Season, estimates that the effects persist for three additional years. However, the 2008 and 2014 studies did not measure directly comparable outcomes, so we cannot combine their results. We take these RCTs as strong evidence that No Lean Season’s seasonal migration subsidies lead to improved economic outcomes in northern Bangladesh.

Analysis is not yet complete for a separate RCT, conducted in 2013, a year when labor unrest was unusually high (see Figure 1). Given the possibility that these unusual circumstances affected the results, we’re unsure how informative the 2013 RCT will be for predicting the program’s future success.

Potential risks of the program could include negative impacts on the destination labor market (e.g., on job availability or food prices) or vulnerability of migrants or of family members left at home after migration. As of January 2016, our impression was that Evidence Action planned to monitor possible negative effects on the destination labor market; Evidence Action said it had not found indications in surveys that migrants or their families were less secure (see p. 4).

Evidence Action is planning to run an RCT at scale during the 2017 lean season. Given our current best estimate of the program’s cost-effectiveness and our expectation that No Lean Season will collect and share high-quality monitoring data from its 2016 work (discussed below), we think the evidence base may be sufficient for the organization to qualify as a top charity at the end of 2017, before results from this RCT are available.

Cost-effectiveness. We currently estimate that No Lean Season is between 5 and 14 times as cost-effective as direct cash transfers, a baseline we use for comparison among global health and development interventions. At scale, we estimate that individuals will experience a consumption benefit of $15 for every $3 No Lean Season spends. These benefits and costs are averages over the population that is eligible for and offered the program; we believe that the benefits are actually larger for households that send a migrant and smaller for households that don’t. We’re very uncertain about the baseline per capita lean season consumption in this population, but for comparison purposes, we estimate it at roughly $116 for the entire 5-month lean season. We also expect, based on previous studies, remigration without further incentive for about two future years, as well as at least one additional migration during the lesser lean season.

 Our estimate of No Lean Season’s cost-effectiveness is in the range of our current top charities. We believe that the four deworming charities we recommend are ~4-10x as cost-effective as cash transfers, and the two charities we recommend for their work to prevent malaria, the Against Malaria Foundation and Malaria Consortium, are ~4x as cost-effective as cash transfers. However, our cost-effectiveness estimates typically become worse—the cost-effectiveness decreases—as we spend more time on our analysis and incorporate additional inputs and discounts. We expect this is likely to occur with our current estimate of No Lean Season’s cost-effectiveness, as well.

Monitoring. We have not yet reviewed No Lean Season’s monitoring. However, we are quite familiar with its parent organization, Evidence Action, as a result of having recommended the Deworm the World Initiative as a top charity since 2013 and of our previous reviews of its work. Based on that track record, we feel confident that Evidence Action will share its monitoring of No Lean Season, and we expect it to be of high quality.

 Organizational strength and transparency. We have a positive view of Evidence Action as an organization, based on our significant experience communicating with its staff. Our impression is that Karen Levy, Evidence Action’s Director of Global Innovation and Beta, played a key role in scaling the Deworm the World Initiative, suggesting the organizational capacity exists to similarly scale a program like No Lean Season. We also believe that Evidence Action, based on its track record and our experience, will operate No Lean Season transparently.

 Room for more funding. Evidence Action currently estimates that No Lean Season could productively use approximately $16 million over five years in Bangladesh and Indonesia, and additional funding to expand to India and Ghana; this estimate may be adjusted in the future.

 We remain unsure whether this program will be successful in locations beyond Bangladesh. If it isn’t a good fit in other locations, No Lean Season’s overall room for more funding could be quite limited.

 Progress to date and future plans

 As of early 2016, Evidence Action planned to scale up its program in Bangladesh to offer a total of 16,000 subsidies and reach 9,000 households with its implementing partner, RDRS Bangladesh, in 2016, the first of a four-year scale-up. By 2019, No Lean Season provisionally plans to offer ~295,000 subsidies and reach ~165,000 households in Bangladesh (see p. 5 here; No Lean Season staff plan to update these figures going forward).

 Evidence Action is also exploring the possibility of working in locations beyond Bangladesh. It visited Zambia and Malawi in 2014 to assess whether the program might help alleviate seasonal hunger in those locations. Our understanding is that Evidence Action is not planning to expand in Malawi based on its findings. We believe Evidence Action is also not planning to scale up in Zambia.

 Mobarak, the No Lean Season researcher, has conducted two research studies in Indonesia, which he says suggest the country may have similar underlying conditions to Bangladesh. As of August 2016, Evidence Action was also considering expanding into India, particularly states close to Bangladesh, although it had not yet done any research there. Evidence Action also considered Ghana as a potential future location, although we are not aware of concrete plans to begin implementation there.[1]

 Path to GiveWell top charity

By November 2017, we expect to see results from No Lean Season’s first year of a four-year scaling effort (the September-December 2016 seasonal effort to reach 9,000 households described above). This, combined with the multiple past rounds of randomized controlled trials and a large forthcoming RCT at scale, may be sufficient for No Lean Season to qualify as a 2017 top charity.

 Notes [1] “Indonesia 

 Mushfiq Mubarak has completed two research studies related to No Lean Season in Indonesia: 1. An exploratory study, similar to those done in Zambia and Malawi. 2. A small-scale pilot in West Timor conducted by the Southeast Asian office of the Abdul Latif Jameel Poverty Action Lab (J-PAL). This study was similar to a previous trial in Bangladesh, but was not a randomized evaluation. 

 The studies’ initial results are promising. Indonesia appears to have similar underlying conditions to Bangladesh, including: lean season migration; availability of jobs in urban areas; and migration from rural to urban areas and between islands. 

 India 

 No Lean Season is also considering expanding into India but has not yet conducted research there. Its first activity in India would likely be a small-scale project, such as a pilot in one village. The underlying conditions in Indian states bordering the Rangpur region of Bangladesh are similar to those that exist in Rangpur. 

 Ghana 

 No Lean Season is considering expanding into Ghana but does not yet have concrete plans to initiate a project there. In general, it does not intend to expand into new countries until it has sufficient capacity to do so.” 

 From a conversation with Dr. Karen Levy and Guillaume Kroll, August 2, 2016, p 1.


GiveWell Incubation Grants

Thu, 01/19/2017 - 13:32

GiveWell Incubation Grants have become an increasingly substantial part of our work, and our impression is that not everyone who follows GiveWell is familiar with this program. This blog post is intended to (a) briefly explain our main goals and expectations for this work, and (b) share some updates on promising organizations that have been supported by Incubation Grants.

The goal of GiveWell Incubation Grants (previously known as GiveWell’s experimental work) is to support the development of future top charities and improve our understanding of our current top charities. We plan to do this in a few ways (not an exhaustive list):

Increasing the body of evidence around potential top charities and priority programs;
Providing early-stage support for new organizations;
Supporting improved monitoring and evaluation for potential or current top charities.

Good Ventures, a foundation with which we work closely, has funded the grants made as part of this work, which are listed here.

 Promising investigations

 Due to the nature of this support—early-stage funding, intended to allow an organization to develop a stronger track record or to collect more evidence on a promising program—we don’t expect Incubation Grants to produce new top charities over very short time horizons. We expect there will be, in many cases, a period of multiple years between a grant and an organization or intervention being considered a potential top charity or priority program.

 This post highlights grants that we don’t expect to lead to top charities before 2018. It should provide a reasonable overview of the type of grants we’re excited to recommend as part of this work. Future posts will highlight the organizations we’re closely tracking as potential 2017 top charities (No Lean Season and Zusha!).[1]

 This post will discuss Incubation Grants to:

IDinsight
New Incentives
Results for Development (R4D)
Charity Science: Health
Mindset engagement for cash transfers
Incentives for immunization studies

IDinsight

IDinsight supports and conducts rigorous evaluations of development interventions with an explicit focus on providing useful data to inform funders and policymakers. Good Ventures made a $1.985 million grant to IDinsight for general support in June 2016 as part of GiveWell Incubation Grants.

 In conversations with our network, we’ve often heard that IDinsight fills a unique gap in the development sector. There are other organizations that conduct research and advocate for evidence-based decision-making, but our impression is that IDinsight is currently the one most focused on research whose primary goal is to help decision-makers with specific decisions (in contrast to e.g. academic merit). We have seen some indications of other organizations moving in a similar direction, however. We hope that this grant allows IDinsight to grow its staff and take on more projects. IDinsight’s work has the potential to inform GiveWell’s list of top charities by increasing the body of evidence around potential priority programs and improving available monitoring and evaluation information around specific organizations.

 Recently, Good Ventures made an additional grant to IDinsight to support an “embedded IDinsight team” for GiveWell top charities, i.e., a small group of IDinsight staff explicitly focused on supporting the creation of high-quality monitoring and evidence for current and future GiveWell top charities. For example, IDinsight may work with New Incentives to run an impact study, and possibly a randomized controlled trial (RCT), on its pilot program to incentivize immunization. Another possible project for the embedded team is conducting monitoring and evaluation of cataract surgery programs, which could improve our understanding of the efficacy of the program and whether we should recommend charities that work on it. Additional possible projects for the IDinsight embedded team are discussed here. 

 We don’t expect a new GiveWell top charity to originate from this work in 2017, but hope that it will inform our future recommendations.

New Incentives

We made three Incubation Grants to New Incentives for its conditional cash transfer program aimed at preventing mother-to-child transmission (PMTCT) of HIV and encouraging pregnant women to deliver in health facilities rather than, e.g., at home. We decided not to recommend New Incentives’ PMTCT and facility delivery program as a 2016 top charity due to insufficient evidence supporting the program, although we were impressed by the organization’s staff. We wrote about this decision at length in this blog post.

 With our encouragement, New Incentives shifted its focus to a new program, conditional cash transfers to incentivize immunizations in Nigeria. We’re planning to follow its work on this program as a potential future top charity, although we do not consider it likely to become a GiveWell-recommended charity in 2017. 

Results for Development (R4D)

Pneumonia is one of the leading killers of children worldwide, and our impression is that there is no dedicated funding stream for its treatment (as there is for other major diseases like AIDS, tuberculosis, and malaria). R4D is implementing a program to increase use of amoxicillin, the World Health Organization-recommended first-line treatment, to treat childhood pneumonia in Tanzania. In May 2016, Good Ventures provided $6.4 million to support this program as part of GiveWell Incubation Grants.

 We have a positive view of R4D as an organization: its staff, evidence-driven approach, and transparency. We also believe that the use of amoxicillin to treat childhood pneumonia could be competitive with our current priority programs. Our key question around this program as a possible GiveWell top charity is monitoring and evaluation. We’re unsure whether R4D’s monitoring will lead us to feel confident that children sick with pneumonia actually receive treatment. This is due to the complex nature of the intervention, which may make it more challenging to collect high-quality monitoring data comparable with that of our current top charities. 

 We currently expect that R4D will have the data available to potentially qualify as a top charity in 2018 or 2019 and we hope to evaluate it then.

Charity Science: Health

Charity Science: Health was founded by members of the effective altruism community with the explicit goal of creating a GiveWell top charity. Charity Science: Health plans to send SMS text reminders for vaccinations because of the strong evidence base it sees for this program’s ability to increase immunization rates. Good Ventures made a grant of $200,000 to support the first year of the organization’s work in India.

 Because we have not yet vetted the relevant evidence closely, we remain unsure about whether we would recommend SMS reminders as a priority program. Charity Science: Health has been transparent and communicative with us, and we expect to learn from its work. Charity Science: Health is also a young organization with a very short track record, and we don’t anticipate evaluating it as a top charity until 2018 or 2019.

Mindset engagement for cash transfers

GiveDirectly, one of GiveWell’s top charities, provides unconditional cash transfers to very poor individuals in East Africa. In May 2016, Good Ventures made a $350,000 grant to Innovations for Poverty Action to support an RCT—in collaboration with GiveDirectly—testing whether “mindset engagement” approaches to cash transfers, such as watching an inspirational film or meeting with a counselor, affect the outcomes for cash transfer recipients by changing the framing of the transfer and thus how it is spent. The approaches are aimed at encouraging recipients to use the transfers to pursue their goals by increasing their sense of self-efficacy and understanding of their opportunities, which—according to the researchers’ theory—may have been adversely impacted by time spent in poverty. This study could influence the work of one of our current top charities (GiveDirectly) or our understanding of cash transfers as a priority program.

Incentives for immunization studies

In 2015, Good Ventures made two $100,000 grants to support further study of whether providing incentives for immunization could increase vaccination rates. These grants were made as part of our work to grow the body of evidence around promising programs that could become potential GiveWell priority programs.

 The Incubation Grants were made to the Abdul Latif Jameel Poverty Action Lab (J-PAL) at the Massachusetts Institute of Technology and Interactive Research and Development (IRD) to support high-quality replications of a promising study on the impact of providing non-cash incentives, such as grocery vouchers, for parents to vaccinate their children. The replication studies are being conducted in India and Pakistan. 

 We are unsure when the results of these studies will be available.

 Other work to support potential future top charities

 Evidence Action, the parent organization of GiveWell top charity Deworm the World Initiative as well as No Lean Season, a GiveWell Incubation Grant recipient, recently announced a call for results of RCTs and other rigorous empirical studies that demonstrated a positive impact of an intervention benefiting poor households, and is planning to fund 3-6 of these proposals for further research. We’re excited to see this announcement and expect the results may further our understanding of potential GiveWell priority programs.

 Full list of GiveWell Incubation Grants

 A full list of grants we’ve recommended is available at www.givewell.org/research/incubation-grants.

 If you know of a strong proposal for a potential GiveWell Incubation Grant, please email applications@givewell.org. We’d be particularly interested in new groups that work on promising programs for which we have not found charity implementers.

 Notes [1] In December, we recommended a grant of $900,000 to Zusha! to scale up its road-safety programs. This grant write-up is not yet public, but notes from our initial conversations with Zusha! are available here and here.


How thin the reed? Generalizing from “Worms at Work”

Wed, 01/04/2017 - 14:37

Hookworm (AJC1/flickr)

My last post explains why I largely trust the most famous school-based deworming experiment and the report, in Worms at Work, of its long-term benefits. That post also gives background on the deworming debate, so please read it first. In this post, I’ll talk about the problem of generalization. If deworming in southern Busia County, Kenya, in the late 1990s permanently improved the lives of some children, what does that tell us about the impact of deworming programs today, from sub-Saharan Africa to South Asia? How safely can we generalize from this study?

 I’ll take up three specific challenges to its generalizability:

That a larger evidence base appears to show little short-term benefit from mass deworming—and if it doesn’t help much in the short run, how can it make a big difference in the long run?
That where mass deworming is done today, typically fewer children need treatment than in the Busia experiment.
That impact heterogeneity within the Busia sample—the same treatment bringing different results for different children—might undercut expectations of benefits beyond. For example, if examination of the Busia data revealed long-term gains only among children with schistosomiasis, that would devalue treatment for the other three parasites tracked.

In my view, none of the specific challenges I’ll consider knocks Worms at Work off its GiveWell-constructed pedestal. GiveWell’s approach to evaluating mass deworming charities starts with the long-term earnings impacts estimated in Worms at Work. Then it discounts by roughly a factor of ten for lower worm burdens in other places, and by another factor of ten out of more subjective conservatism. As in the previous post, I conclude that the GiveWell approach is reasonable.

 But if I parry specific criticisms, I don’t dispel a more general one. Ideally, we wouldn’t be relying on just one study to judge a cause, no matter how compelling the study or how conservative our extrapolation therefrom. Nonprofits and governments are spending tens of millions per year on mass deworming. More research on whether and where the intervention is especially beneficial would cost only a small fraction of all those deworming campaigns, yet potentially multiply their value.

 Unfortunately, the benefits that dominate our cost-effectiveness calculations manifest over the long run, as treated children grow up. And long-term research tends to take a long time. So I close by suggesting two strategies that might improve our knowledge more quickly.

 

Here are Stata files for the quantitative assertions and graphs presented below.

Evidence suggests short-term benefits are modest

Researchers have performed several systematic reviews of the evidence on the impacts of deworming treatment. In my research, I focused on three of those reviews. Two come from institutions dedicated to producing such surveys, and find that mass deworming brings little benefit, most emphatically in the short run. But the third comes to a more optimistic answer.

 The three are:

The Cochrane review of 2015, which covers 45 trials of the drug albendazole for soil-transmitted worms (geohelminths). It concludes: “Treating children known to have worm infection may have some nutritional benefits for the individual. However, in mass treatment of all children in endemic areas, there is now substantial evidence that this does not improve average nutritional status, haemoglobin, cognition, school performance, or survival.”
The Campbell review of 2016, which extends to 56 randomized short-term studies, in part by adding trials of praziquantel for water-transmitted schistosomiasis. “Mass deworming for soil-transmitted helminths …had little effect. For schistosomiasis, mass deworming might be effective for weight but is probably ineffective for height, cognition, and attendance.”
The working paper by Kevin Croke, Eric Hsu, and authors of Worms at Work. The paper looks at impacts only on weight, as an indicator of recent nutrition. (Weight responds more quickly to nutrition than height.) While the paper lacks the elaborate, formal protocols of the Cochrane and Campbell reviews, it adds value in extracting more information from available studies in order to sharpen the impact estimates. It finds: “The average effect on child weight is 0.134 kg.”

Before confronting the contradiction between the first two reviews and the third, I will show you a style of reasoning in all of them. The figure below constitutes part of the Campbell review’s analysis of the impact of mass administration of albendazole (for soil-transmitted worms) on children’s weight (adapted from Figure 6 in the initial version):

 

Each row distills results from one experiment; the “Total” row at the bottom draws the results together. The first row, for instance, is read as follows. During a randomized trial in Uganda run by Harold Alderman and coauthors, the 14,940 children in the treatment group gained an average 2.413 kilograms while the 13,055 control kids gained 2.259 kg, for a difference in favor of the treatment group of 0.154 kg. For comparability with other studies, which report progress on weight in other ways, the difference is then re-expressed as 0.02 standard deviations, where a standard deviation is computed as a sort of average of the 7.42 and 8.01 kg figures shown for the treatment and control groups. The 95% confidence range surrounding the estimate of 0.02 is written as [–0.00, 0.04] and is in principle graphed as a horizontal black line to the right, but is too short to show up. Because of its large sample, the Alderman study receives more weight (in the statistical sense) than any other in the figure, at 21.6% of the total. The relatively large green square in the upper right signifies this influence.
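To make the arithmetic concrete, here is a minimal sketch in Python of how the Alderman row converts to a standardized mean difference. The pooled-SD formula is one common convention; actual meta-analysis software may apply small-sample corrections, so treat this as illustrative.

import math

def standardized_mean_difference(mean_t, sd_t, n_t, mean_c, sd_c, n_c):
    # Standardized mean difference using a pooled standard deviation.
    pooled_sd = math.sqrt(((n_t - 1) * sd_t ** 2 + (n_c - 1) * sd_c ** 2) / (n_t + n_c - 2))
    return (mean_t - mean_c) / pooled_sd

# Alderman trial figures quoted above: mean weight gain (kg), SD (kg), sample size
d = standardized_mean_difference(2.413, 7.42, 14940, 2.259, 8.01, 13055)
print(round(d, 2))  # about 0.02 standard deviations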

 In the lower-right of the figure, the bolded numbers and the black diamond present the meta-analytical bottom line: across these 13 trials, mass deworming increased weight by an average 0.05 standard deviations. The aggregate 95% confidence interval stretches from –0.02  to 0.11, just bracketing zero. The final version of the Campbell report expresses the result in physical units: an average gain of 0.09 kg, with a 95% confidence interval stretching from –0.09 kg to +0.28 kg. And so it concludes: “Mass deworming for soil-transmitted helminths with albendazole twice per year compared with controls probably leads to little to no improvement in weight over a period of about 12 months.”

 Applying similar methods to a similar pool of studies, the Cochrane review (Analysis 4.1) produces similar numbers: an average weight gain of 0.08 kg, with a 95% confidence interval of –0.11 to 0.27. This it expresses as “For weight, overall there was no evidence of an effect.”

 But Croke et al. incorporate more studies, as well as more data from the available studies, and obtain an average weight gain of 0.134 kg (95% confidence interval: 0.03 to 0.24), which they take as evidence of impact.

 How do we reconcile the contradiction between Croke et al. and the other two? We don’t. For no reconciliation is needed, as is made obvious by this depiction of the three estimates of the impact of mass treatment for soil-transmitted worms on children’s weight:

 

 Each band depicts one of the confidence intervals I just cited. The varied shading reminds us that within each band, confidence is highest near the center. The bands greatly overlap, meaning that the three reviews hardly disagree. Switching from graphs to numerical calculations, I find that the Cochrane results reject the central Croke et al. estimate of 0.134 kg at p = 0.58 (two-tailed Z-test), which is to say, they do not reject with any strength. For Croke et al. vs. Campbell, p = 0.64. So the Croke et al. estimate does not contradict the others; it is merely more precise. The three reviews are best seen as converging to a central impact estimate of about 0.1 kg of weight gain. Certainly 0.1 kg fits the evidence better than 0.0 kg.
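Here is a minimal sketch, in Python, of one way to reproduce those p-values: back out each review’s standard error from its reported 95% confidence interval, then run a two-tailed Z-test of the Croke et al. central estimate against the review’s estimate. This is an approximation; the exact inputs behind my Stata calculations may differ slightly.

from scipy.stats import norm

def p_value_against(point, estimate, ci_low, ci_high):
    # Recover the review's standard error from its 95% confidence interval,
    # then run a two-tailed Z-test of `point` against the review's estimate.
    se = (ci_high - ci_low) / (2 * 1.96)
    z = (point - estimate) / se
    return 2 * norm.sf(abs(z))

# Croke et al.'s central estimate (0.134 kg) tested against each review's result
print(round(p_value_against(0.134, 0.08, -0.11, 0.27), 2))  # Cochrane: ~0.58
print(round(p_value_against(0.134, 0.09, -0.09, 0.28), 2))  # Campbell: ~0.64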

If wide confidence intervals in the Cochrane and Campbell reviews are obscuring real impact on weight, perhaps the same is happening with other outcomes, including height, hemoglobin, cognition, and mortality. Discouragingly, when I scan the Cochrane review’s “Summary of findings for the main comparison” and Campbell’s corresponding tables, confidence intervals for outcomes other than weight look more firmly centered on zero. That in turn raises the worry that by looking only at weight, Croke et al. make a selective case on behalf of deworming.[1]

On the other hand, when we shift our attention from trials of mass deworming to trials restricted to children known to be infected—which have more power to detect impacts—it becomes clear that the boost to weight is not a one-off. The Cochrane review estimates that targeting treatment at kids with soil-transmitted worms increased weight by 0.75 kilograms, height by 0.25 centimeters, mid-upper arm circumference by 0.49 centimeters, and triceps skin fold thickness by 1.34 millimeters, all statistically significant at the 0.05 level. Treatment did not, however, increase hemoglobin (Cochrane review, “Data and Analyses,” Comparison 1).

 In this light, the simplest theory that is compatible with the evidence arrayed so far is that deworming does improve nutrition in infected children while leaving uninfected children unchanged; and that available studies of mass deworming tend to lack the statistical power to detect the diluted benefits of mass deworming, even when combined in a (random effects) meta-analysis. The compatibility of that theory with the evidence, by the way, exposes a logical fallacy in the Cochrane authors’ conclusion that “there is now substantial evidence” that mass treatment has zero effect on the outcomes of interest. Lack of compelling evidence is not compelling evidence of lack.

 Yet the Cochrane authors might be right in spirit. If the benefit of mass deworming is almost too small to detect, it might be almost too small to matter. Return to the case of weight: is ~0.1 kg a lot? Croke et al. contend that it is. They point out that “only between 2 and 16 percent of the population experience moderate to severe intensity infections in the studies in our sample that report this information,” so their central estimate of 0.134 could indicate, say, a tenth of children gaining 1.34 kg (3 pounds). This would cohere with Cochrane’s finding of an average 0.75 kilogram gain in trials that targeted infected children. In a separate line of argument, Croke et al. calculate that even at 0.134, deworming more cost-effectively raises children’s weight than school feeding programs do.

But neither defense gets at what matters most for GiveWell, which is whether small short-term benefits make big long-term earnings gains implausible. Is a 0.134 kg weight gain compatible with the 15% income gain 10 years later reported in Worms at Work?

 More so than it may at first appear, once we take account of two discrepancies embedded in that comparison. First, more kids had worms in Busia. I calculate that 27% of children in the Worms sample had moderate or serious infections, going by World Health Organization (WHO) guidelines, which can be viewed conservatively as double the 2–16% Croke et al. cite as average for the kids behind that 0.134 kg number.[2] So in a Worms-like setting, we should expect twice as many children to have benefited, doubling the average weight gain from 0.134 to 0.268 kg. Second, at 13.25 years, the Worms children were far older than most of the children in the studies surveyed by Croke et al. Subjects averaged 9 months of age in the Awasthi 2001 study, 12–18 months in Joseph 2015, 24 months in Ndibazza 2012, 36 months in Willett 1979, and 2–5 years in Sur 2005. 0.268 kg means more for such small people. As Croke et al. point out, an additional 0.268 kg nearly suffices to lift a toddler from the 25th to the 50th percentile for weight gain between months 18 and 24 of life (girls, boys).

 In sum, the statistical consensus on short-term impacts on nutritional status does not render implausible the long-term benefits reported out of Busia. The verdict of Garner, Taylor-Robinson, and Sachdev—“no effect for the main biomedical outcomes…, making the broader societal benefits on economic development barely credible”—overreaches.

In many places, fewer kids have worms than in Busia in 1998–99

If we accept the long-term impact estimates from Worms at Work, we can still question whether those results carry over to other settings. This is precisely why GiveWell deflates the earnings impact by two orders of magnitude in estimating the cost-effectiveness of deworming charities. One of those orders of magnitude arises from the fact that school-age children in Busia carried especially heavy parasite loads. Where loads are lighter, mass deworming will probably do less good. (The other order of magnitude reflects a more subjective worry that if Worms at Work were replicated in other places with similar parasite loads, it would fail to show any benefits there, a theme to which I will return at the end.)

GiveWell’s cost-effectiveness spreadsheet does adjust for differences in worm loads between Worms and places where recommended charities support mass deworming today. So I spent some time scrutinizing this discount—more precisely, the discounts of individual GiveWell staffers. I worried in particular that the ways we measure worm loads could lead my colleagues to overestimate the need for and benefit from mass deworming.

 As a starting point, I selected a few data points from one of the metrics GiveWell has gathered, the fraction of kids who test positive for worms. This table shows the prevalence of worm infection, by type, in Busia, 1998–99, before treatment, and in program areas of two GiveWell-recommended charities:

  The first row, computed from the public Worms data set, reports that before receiving any treatment from the experiment, 81% of tested children in Busia were positive for hookworm, 51% for roundworm, 62% for whipworm, and 36% for schistosomiasis. 94% tested positive for at least one of those parasites. On average, each child carried 2.3 distinct types of worm. Then, from the GiveWell cost-effectiveness spreadsheet, come corresponding numbers for areas served by programs linked to the Schistosomiasis Control Initiative (SCI) and Deworm the World. Though approximate, the numbers suffice to demonstrate that far fewer children served by these charities have worms than in the Worms experiment. For example, the hookworm rate for Deworm the World is estimated at 24%, which is 30% of the rate of Busia in 1998–99. Facing less need, we should expect these charities’ activities to do less good than is found in Worms at Work.

 But that comparison would misrepresent the value of deworming today if the proportion of serious infections is even lower today relative to Busia. To get at the possibility, I made a second table for the other indicator available to GiveWell, which is the intensity of infection, measured in eggs per gram of stool:

 

Indeed, this comparison widens the apparent gap between Busia of 1998–99 and the charities of today. For example, hookworm prevalence in Deworm the World service areas was 30% of the Busia rate (24 vs. 81 out of every 100 kids), while intensity was only 20% (115 vs. 568 eggs/gram).

 After viewing these sorts of numbers, the median GiveWell staffer multiplies the Worms at Work impact estimate by 14%—that is, divides it by seven. In aggregate, I think my coworkers blend the discounts implied by the prevalence and intensity perspectives.[3]

 To determine the best discount, we’d need to know precisely what characterized the children within the Worms experiment who most benefited over the long term—perhaps lower weight, or greater infection with a particular parasite species. As I will discuss below, such insight is easier imagined than attained. Then, if we had it, we would need to know the number of children in today’s deworming program areas with similar profiles. Obtaining that data could be a tall order in itself.

 To think more systematically about how to discount for differences in worm loads, within the limits of the evidence, I looked to some recent research that models how deworming affects parasite populations. Nathan Lo and Jason Andrews led the work (2015, 2016). With Lo’s help, I copied their approach in order to estimate how the prevalence of serious infection varies with the two indicators at GiveWell’s fingertips.[4]

For my purposes, the approach introduces two key ideas. First, data gathered from many locales shows how, for each worm type, the average intensity of infection tends to rise as prevalence increases. Not surprisingly, where worm infection is more common, average severity tends to be higher too—and Lo and colleagues estimate how much so. Second is the use of a particular mathematical family of curves to represent the distribution of children by intensity levels—how many have no infection, how many have 1-100 eggs/gram, how many are above 100 eggs/gram, etc. (The family, the negative binomial, is an accepted model for the prevalence of infectious diseases.) If we know two things about the pattern of infection, such as the fraction of kids who have it and their average intensity, we can mathematically identify a unique member of the family. And once a member is chosen, we can estimate the share of children with, for example, hookworm infections exceeding 2,000 eggs/gram, which is the WHO’s suggested minimum for moderate or heavy infection.
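As an illustration of the mechanics, here is a minimal Python sketch under simplified assumptions (the Lo and Andrews models are more elaborate than this): given a prevalence and an average intensity, solve for the negative binomial dispersion parameter, then compute the share of children above a WHO moderate/heavy threshold. The Busia hookworm inputs come from the tables in this post; treat the output as illustrative only.

from scipy import optimize, stats

def fit_dispersion(prevalence, mean_epg):
    # Find the negative binomial dispersion k such that, with mean `mean_epg`,
    # the probability of a zero egg count equals 1 - prevalence.
    def gap(k):
        return (k / (k + mean_epg)) ** k - (1 - prevalence)
    return optimize.brentq(gap, 1e-6, 50)

def share_moderate_or_heavy(prevalence, mean_epg, threshold):
    # Share of children whose modeled egg count exceeds `threshold` eggs/gram.
    k = fit_dispersion(prevalence, mean_epg)
    p = k / (k + mean_epg)  # scipy's nbinom parameterization: mean = k * (1 - p) / p
    return stats.nbinom(k, p).sf(threshold)

# Busia hookworm, 1998-99: ~81% prevalence and ~568 eggs/gram average intensity;
# 2,000 eggs/gram is the WHO cutoff for moderate or heavy hookworm infection.
print(share_moderate_or_heavy(0.81, 568, 2000))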

 The next two graphs examine how, under these modeling assumptions, the fraction of children with moderate/heavy infections varies in tandem with the two indicators at GiveWell’s disposal, which are prevalence of infection and average infection intensity:

 

 

 The important thing to notice is that the curves are much curvier in the first graph. There, for example, as the orange hookworm curve descends, it converges to the left edge just below 40%. This suggests that if a community has half as many kids with hookworm as in Busia—40% instead of about 80%—then it could have far less than half as many kids with serious infections—indeed, almost none. But the straighter lines in the second graph mean that a 50% drop in intensity (eggs/gram) corresponds to a 50% drop in the number of children with serious disease.

 While we don’t know exactly what defines a serious infection, in the sense of offering hope that treatment could permanently lift children’s trajectories, these simulations imply that it is reasonable for GiveWell to extrapolate from Worms at Work on the basis of intensity (eggs/gram).

Returning to the intensity table above, I find that the Deworm the World egg counts, by worm type, average 16% of those in Busia. For the Schistosomiasis Control Initiative, the average ratio is 7% (and is 6% just for SCI’s namesake disease). These numbers say—as far as this sort of analysis can take us—that GiveWell’s 14% discounts are about right for Deworm the World, and perhaps ought to be halved for SCI. Halving is not as big a change as it may seem; GiveWell has no illusions about the precision of its estimates, and performs them only to sense the order of magnitude of expected impact.

Impact heterogeneity in the Worms experiment

Having confronted two challenges to the generalizability of Worms at Work—that short-term non-impacts make long-term impacts implausible, and that worm loads are lower in most places today than they were in Busia in 1998–99—I turned to one more. Might there be patterns within the Worms at Work data that would douse hopes for impact beyond? For example, if only children with schistosomiasis experienced those big benefits, that would call into question the value of treating geohelminths (hookworm, roundworm, whipworm).

Returning to the Worms at Work data, I searched for—and perhaps found—signs of heterogeneity in impact. I gained two insights thereby. The first, as it happens, is more evidence that is more easily explained if we assume that the Worms experiment largely worked, the theme of the last post. The second is a keener sense that there is no such thing as “the” impact of an intervention, since it varies by person, time, and place. That heightened my nervousness about extrapolating from a single study. Beyond that general concern, I did not find specific evidence that would cast grave doubt on whole deworming campaigns.

 My hunt for heterogeneity went through two phases. In the first, motivated by a particular theory, I brought a narrow set of hypotheses to the data. In the second, I threw about 20 hypotheses at the data and watched what stuck: Did impact vary by sex or age? By proximity to Lake Victoria, where live the snails that carry Schistosoma mansoni?  As statisticians put it, I mined the data. The problem with that is that since I tested about 20 hypotheses, I should expect about one to manifest as statistically significant just by chance (at p = 0.05). So the pattern I unearthed in the second phase should perhaps not be viewed as proof of anything, but as the basis for a hypothesis that, for a proper test, requires fresh data from another setting.

Introducing elevation

My search began this way. In my previous post, I entertained an alternative theory for Owen Ozier’s finding that deworming indirectly benefited babies born right around the time of the original Worms experiment. Maybe, I thought, the 1997–98 El Nino, which brought heavy flooding to Kenya, exacerbated the conditions for the spread of worms, especially at low elevations. And perhaps by chance the treatment schools were situated disproportionately at high elevations, so their kids fared better. This could explain all the results in Worms and its follow-ups, including Ozier’s paper. But the second link in that theory proved weak, especially when defining the treatment group as groups 1 and 2 together, as done in Worms at Work. (Group 1 received treatment starting in 1998, group 2 in 1999, and group 3 in 2001, after the experiment ended.) Average elevation was essentially indistinguishable between the Worms at Work treatment and control groups.

Nevertheless, my investigation of the first link in the theory led me to some interesting discoveries. To start, I directly tested the hypothesis that elevation mattered for impact by “interacting” elevation with the treatment indicator in a key Worms at Work regression. In the original regression, deworming is found to increase the logarithm of wage earnings by 0.269, meaning that deworming increased wage earnings by 30.8% (since e^0.269 − 1 ≈ 0.308). In the modified regression, the impact could vary with elevation in a straight-line way, as shown in this graph of the impact of deworming in childhood on log wage earnings in early adulthood as a function of school elevation:

 

 The grey bands around the central line show confidence intervals rather as in the earlier graph on weight gains. The black dots along the bottom show the distribution of schools by elevation.

 I was struck to find the impact confined to low schools. Yet it could be explained. Low schools are closer to Lake Victoria and the rivers that feed it; and their children therefore were more afflicted by schistosomiasis. In addition, geohelminths (soil-transmitted worms) might have spread more easily in the low, flat lands, especially after El Nino–driven floods. So lower schools may have had higher worm loads.[5]

To fit the data more flexibly, I estimated the relationship semi-parametrically, with locally weighted regressions[6]. This involved analyzing whether, among schools around 1140 meters, deworming raised wages; then doing the same around 1150 meters, and so on. That produced this Lowess-smoothed graph of the impact of deworming on log wage earnings:

 

This version suggests that the big earnings impact occurred in schools below about 1180 meters, and possibly among schools at around 1250 meters. (For legibility, I truncated the fit at 1270 meters, beyond which the confidence intervals explode for lack of much data.)
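For the curious, here is a minimal sketch in Python of the kind of locally weighted estimation described above. My actual analysis was done in Stata with the full Worms at Work controls and clustered standard errors, so take the variable names, the Gaussian kernel, and the bandwidth here as assumptions.

import numpy as np
import statsmodels.api as sm

def local_treatment_effects(grid, elevation, treated, log_wage, bandwidth=50.0):
    # At each grid elevation, run a kernel-weighted regression of log wage
    # earnings on the treatment dummy and keep the treatment coefficient.
    X = sm.add_constant(treated)
    effects = []
    for e0 in grid:
        weights = np.exp(-0.5 * ((elevation - e0) / bandwidth) ** 2)  # Gaussian kernel
        effects.append(sm.WLS(log_wage, X, weights=weights).fit().params[1])
    return np.array(effects)

# Usage sketch: grid = np.arange(1140, 1280, 10), with `elevation`, `treated`, and
# `log_wage` as aligned arrays, one entry per respondent.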

 Motivated by the theory that elevation mattered for impact because of differences in pre-experiment infection rates, I then graphed how those infections varied with elevation, among the subset of schools with the needed data.[7] Miguel and Kremer measure worm burdens in three ways: prevalence of any infection, prevalence of moderate or heavy infection, and intensity (eggs/gram). So I did as well. First, this graph shows infection prevalence versus school elevation, again in a locally smoothed way:

 

 Like the first table in this post, this graph shows that hookworms lived in nearly all the children, while roundworm and whipworm were each in about half. Not evident before is that schistosomiasis was common at low elevations, but faded higher up. Roundworm and whipworm also appear to fall as one scans from left to right, but then rebound around 1260 meters.

 The next graph is the same except that it only counts infections that are moderate or heavy according to WHO definitions[8]:

 

 Interestingly, restricting to serious cases enhances the similarity between the infection curves, just above, and the earlier semi-parametric graph of earnings impact versus elevation. The “Total” curve starts high, declines until 1200 meters or so, then peaks again around 1260. Last, I graphed Miguel and Kremer’s third measure of worm burden, intensity, against elevation. Those images resemble the graph above, and I relegate them to a footnote for concision.[9]

These elevation-stratified plots teach three lessons. First, the similarity between the prevalence contours and the earnings impact contour shown earlier—high at the low elevations and then again around 1260 meters—constitutes circumstantial evidence for a sensible theory: children with the greatest worm burdens benefited most from treatment. Second, measuring worm load in a way that reflects intensity—moving from the previous graph to the one just above—strengthens this resemblance and reinforces the notion of extrapolating from Worms at Work on the basis of intensity (average eggs/gram, not how many kids have any infection).

 Finally, these patterns buttress the conclusion of my last post, that the Worms experiment mostly worked. If we grant that deworming probably boosted long-term earnings of children in Busia, then it becomes unsurprising that it did so more where children had more worms. But if we doubt the Worms experiments, then these results become more coincidental. For example, if we hypothesize that flawed randomization put schools whose children were destined to earn more in adulthood disproportionately in the treatment group, then we need another story to explain why this asymmetry only occurred among the schools with the heaviest worm loads. And all else equal, per Occam’s razor, more-complicated theories are less credible.

 As I say, the evidence is circumstantial: two quantities of primary interest—initial worm burden and subsequent impact—relate to elevation in about the same way. Unfortunately, it is almost impossible to directly assess the relationship between those two quantities, to ask whether impact covaried with need. The Worms team did not test kids until their schools were about to receive deworming treatment “since it was not considered ethical to collect detailed health information from pupils who were not scheduled to receive medical treatment in that year.” My infection graphs are based on data collected at treatment-group schools only, just before they began receiving deworming in 1998 or 1999. Absent test results for control-group kids, I can’t run the needed comparison.

 Contemplating the exploration to this point, I was struck to appreciate that while elevation might not directly matter for the impacts of deworming, like a saw through a log, introducing it exposed the grain of the data. It gave me insight into a relationship that I could not access directly, between initial worm load and subsequent benefit.

Mining in space

After I confronted the impossibility of directly testing whether initial worm burden influenced impact, I thought of one more angle from which to attack the question, if obliquely. This led me, unplanned, to explore the data spatially.

 As we saw, nearly all children had geohelminths. So all schools were put on albendazole, whether during the experiment (for treatment groups) or after (control group). In addition, the pervasiveness of schistosomiasis in some areas called for a second drug, praziquantel. I sought to check whether the experiment raised earnings more for children in those areas. Such a finding could be read to say that schistosomiasis is an especially damaging parasite, making treatment for it especially valuable. Or, since the low-elevation schistosomiasis schools tended to have the highest overall worm burdens, it could be taken as a sign that higher parasite loads in general lead to higher benefit from deworming.

Performing the check first required some educated guesswork. The Worms data set documents which of the 50 schools in the treatment groups needed and received praziquantel, but not which of the 25 control group schools would have needed it in 1998–99. To fill in these blanks, I mapped the schools by treatment group and praziquantel status. Group 1 schools, treated starting in 1998, are green. Group 2 schools, treated starting in 1999, are yellow. And group 3 schools, not treated until 2001, are red. The white 0’s and 1’s next to the group 1 and 2 markers show which were deemed to need praziquantel, with 1 indicating need:

 

 Most of the 1’s appear in the southern delta and along the shore of Lake Victoria. By eyeballing the map, I could largely determine which group 3 schools also needed praziquantel. For example, those in the delta to the extreme southwest probably needed it since all their neighbors did. I was least certain about the pair to the southeast, which lived in a mixed neighborhood, as it were; I arbitrarily marked one for praziquantel and one not.[10]

 Returning to the Worms at Work wage earnings regression and interacting treatment with this new dummy for praziquantel need revealed no difference in impact between schools where only albendazole was deemed needed and given, and schools where both drugs were needed and given:

 

 Evidently, treatment for geohelminths and schistosomiasis, where both were needed, did not help future earnings much more or less than treatment for geohelminths, where only that was warranted. So the comparison generates no strong distinction between the worm types.
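In regression terms, this check amounts to adding an interaction term. Here is a minimal sketch with synthetic stand-in data and hypothetical column names; the real specification includes controls and clustered standard errors.

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Synthetic stand-in data; the real check uses one row per Worms respondent with
# log wage earnings, a treatment dummy, and the inferred praziquantel-need dummy.
rng = np.random.default_rng(0)
df = pd.DataFrame({
    "treated": rng.integers(0, 2, 500),
    "praziquantel_need": rng.integers(0, 2, 500),
})
df["log_wage"] = 7 + 0.27 * df["treated"] + rng.normal(0, 1, 500)

fit = smf.ols("log_wage ~ treated * praziquantel_need", data=df).fit()
print(fit.params["treated:praziquantel_need"])  # differential impact where praziquantel was needed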

 After I mapped the schools, it hit me: I could make two-dimensional versions of my earlier graphs, slicing the data not by elevation, but by longitude and latitude.

 To start, I fed the elevations of the 75 schools, marked below with white dots, into my statistics software, Stata, and had it estimate the topography that best fit. This produced a depiction of the contours of the land in southern Busia County, with the brightest reds indicating the highest areas:

 

(Click image for a larger version.) I next graphed the impact of deworming on log wage earnings. Where before I ran the Worms at Work wage earnings regression centering on 1140 meters, then 1150, etc., now I ran the regression repeatedly across a grid, each time giving the most weight to the nearest schools[11]:

 

 Two valleys of low impact dimly emerge, one toward the Lake in the south, one in the north where schools are higher up. Possibly these two troughs are linked to the undulations in my earlier, elevation-stratified graphs.

Next, I made graphs like these for all 21 baseline variables that Worms checks for balance—such as the fraction of students who are girls and average age. All the graphs are here. Now I wonder if this was a mistake. None of the graphs fit the one above like a key in a lock, so I found myself staring at blobs and wondering which vaguely resembled the pattern I sought. I had no formal, pre-specified measure of fit, which left more room for uncertainty and discretion. Perhaps it was just a self-administered Rorschach test. Yet the data mining had the power to dilute any p values from subsequent formal tests.

In the end, one variable caught my eye when mapped, and then appeared to be an important mediator of impact when entered into the wage earnings regression: a child’s initial weight-for-age Z-score (WAZ), which measures a child’s weight relative to his or her age peers.[12] Here is the WAZ spatial plot side by side with the impact plot I just showed you. To my eye, where WAZ was high, subsequent impact was generally lower:

[Image: spatial plot of baseline weight-for-age Z-score, beside the spatially smoothed impact plot]

(Since most children in this sample fell below the reference median, their weight-for-age Z-scores were negative; in the plot, the local average WAZ ranges between about –1.3 and –1.5.)

Going back to two dimensions, this graph more directly checks the relationship I glimpsed above, by showing how the impact of deworming on wage earnings varied with children’s pre-treatment weight-for-age Z-score:

[Image: impact of deworming on log wage earnings versus pre-treatment weight-for-age Z-score]

It appears that only children with a WAZ below –2, which is the standard threshold for “underweight,” benefited enough from deworming treatment that it permanently lifted their developmental trajectories.

 If the pattern is real, two dynamics could explain it. Children who were light for their age may have been so precisely because they carried more parasites, and were in deep need of treatment. Or perhaps other health problems made them small, which also rendered them less resilient to infection, and again more needful of treatment. The lack of baseline infection data for the control group prevents me from distinguishing between these theories.

 Struck by this suggestion that low initial weight predicted impact, and mindful of the meta-analytic consensus that deworming affects weight, I doubled back to the original Worms study to ask a final question. Were any short-term weight gains in Busia concentrated among kids who started out the most underweight? This could link short-term impacts on weight with long-term impacts on earnings, making both more credible. I made this graph of the one-year impact of deworming treatment on weight-for-age Z-score versus weight-for-age Z-score before treatment (1998)[13]:

[Image: one-year impact of deworming on weight-for-age Z-score versus pre-treatment WAZ]

The graph seems to support my hypothesis. Severely underweight children (at or below –3) improve by about 0.2 points in Z-score. Underweight children (at or below –2) gain perhaps 0.1 on average.

But there is a puzzling twist. While treatment raised weight among the most severely underweight children, it apparently reduced the weight of the heaviest children. (Bear in mind that in registering just above 0, the highest-WAZ children in Busia were merely surpassing the 50th percentile in the global reference population.) Conceivably, certain worm infections cause weight gain, which is reversed by treatment; but here I am speculating. Statisticians might wonder if this graph reveals regression toward the mean. Just as the temperature must rise after the coldest day of the year and fall after the hottest, we could expect that the children who started the experiment the most underweight would become less so, and vice versa. But since the graph compares treatment and control schools, regression toward the mean only works as a theory if it occurred more in the treatment group. That would require a failure of randomization. The previous post argued that the imperfections in the Worms randomization were probably not driving the main results; but possibly they are playing a larger role in these second-order findings about heterogeneity of impact.

Because of these doubts, and because I checked many hypotheses before gravitating to weight-for-age as a mediator of impact, I am not confident that physical health was a good predictor of the long-run impact of deworming on earnings. I view the implications of the last two graphs—that deworming increased weight in the short run and earnings in the long run only among the worst-off children—as merely intriguing. As an indicator of heavy worm burden or poor general health, low weight may have predicted impact. That hypothesis ought to be probed afresh in other data, this time with pre-registered transparency. The results from such replication could then sharpen our understanding of how to generalize from Worms at Work.

But I emphasize that I hold my earlier findings revolving around elevation with more confidence, because they came out of a small and theoretically motivated set of hypotheses. At elevations where worms were more prevalent, deworming did more long-term good.

Conclusions

I glean these facts:

- Treatment of children known to carry worms improves their nutritional status, as measured by weight and height.
- Typically, a minority of children in today’s deworming settings are infected, so impacts from mass deworming are smaller and harder to detect. In meta-analyses, 95% confidence intervals for the impacts of mass deworming tend to contain zero.
- In the case of weight—which is among the best-studied outcomes and more likely to respond to treatment in the short run—Croke et al. improve the precision of meta-analysis. Their results are compatible with others’ estimates, yet make it appear unlikely that the average short-term impact of mass deworming is zero or negative.
- Though the consensus estimate of about 0.1 kg for weight gain looks small, once one accounts for the youth and low infection rates of the children behind the number, it does not sit implausibly with the big long-term earnings benefit found in Worms at Work.
- Extrapolating the Worms at Work results to other settings in proportion to infection intensity (eggs/gram) looks reasonable. This will adjust for the likelihood that as prevalence of infection falls, prevalence of serious infection falls faster. Extrapolating this way might leave GiveWell’s cost-effectiveness rating for the Deworm the World Initiative unchanged while halving that for the Schistosomiasis Control Initiative (which is not a lot in calculations that already contain large margins of error).
- Within Busia in 1998–99, evidence suggests that the benefits of deworming were confined to children who were the worst off, e.g., who were more numerous at elevations with the most worm infections. To speak to the theme of the previous post, this hint of heterogeneity is harder to explain if we believe randomization failure caused the Worms at Work results.
- I did not find heterogeneity that could radically alter our appraisal of charities, such as signs that only treatment of schistosomiasis had long-term benefits.

This recitation of facts makes GiveWell’s estimate of the expected value of deworming charities look reasonable.

 Yet, it is also unsatisfying. It is entirely possible that today’s deworming programs do much less, or much more, good than implied by the most thoughtful extrapolation from Worms at Work. Worms, humans, institutions, and settings are diverse, so impacts probably are too. And given the stakes in wealth and health, we ideally would not be in the position of relying so much on one study, which could be flawed or unrepresentative, my defenses notwithstanding. Only more research can make us more sure. If donors and governments are willing to spend nine-figure sums on deworming, they ought to devote a small percentage of that flow to research that could inform how best to spend that money.

 Unfortunately, research on long-term impacts can take a long time. In the hope of bringing relevant knowledge to light faster, here are two suggestions. All reasonable effort should be made to:

- Gather and revisit underlying data (“microdata”) from existing high-quality trials, so that certain potential mediators of impact, such as initial worm load and weight, can be studied. This information could influence how we extrapolate from the studies we have to the contexts where mass deworming may be undertaken today. As a general matter, it cannot be optimal that only the original authors can test hypotheses against their data, as is so often the case. In practice, different authors test different outcomes measured in different ways, reducing comparability across studies and eroding the statistical power of meta-analysis. Opportunities for learning left unexploited are a waste potentially measured in the health of children.
- Turn past short-term studies into long-term ones by tracking down the subjects and resurveying them.[14] This is easier said than done, but that does not mean a priori that it would be a waste to push harder against this margin. Then, long-term research might not take quite so long.

Notes

[1] Croke et al. do motivate their focus on weight in a footnote. Only three outcomes are covered by more than three studies in the Cochrane review’s meta-analyses: weight, height, and hemoglobin. Height responds less to recent health changes than weight, so analysis of impacts on height should have lower power. Hemoglobin destruction occurs most with hookworm, yet only one of the hemoglobin studies in the Cochrane review took place in a setting with significant hookworm prevalence.

 [2] I thank Kevin Croke for pointing out the need for this adjustment.

 [3] Columns S–W of the Parameters tab suggest several choices based on prevalence, intensity, or a mix. Columns Y–AC provide explanations. GiveWell staff may then pick from suggested values or introduce their own.

[4] Lo et al. 2016 fit quadratic curves for the relationship between average infection intensity among the infected (in eggs/gram) and prevalence of any infection. The coefficients are in Table A2. If we then assume that the distribution of infection intensity is in the (two-parameter) negative binomial family, fixing two statistics—prevalence and average intensity as implied by its quadratic relationship with prevalence—suffices to determine the distribution. We can then compute the number of people whose infection intensity exceeds a given standard. In the usual conceptual framework of the negative binomial distribution, each egg per gram is considered a “success.” A fact about the negative binomial distribution that helps us determine the parameters is P = 1 – (1 + M/r)^(–r), where M is average eggs/gram for the entire population, including the uninfected; r is the dispersion parameter, i.e., the number of failures before the trials stop; and P is prevalence of any infection, i.e., the probability of at least one success before the requisite number of failures. One conceptual problem in this approach is that intensity in eggs/gram is not a natural count variable despite being modeled as such. Changing the unit of mass in the denominator, such as to 100 mg, will somewhat change the simulation results. In the graphs presented here, I count eggs per 1000/24 ≈ 41.7 milligrams of stool, since that is a typical mass on the slide of a Kato-Katz test, and 24 is thus the standard multiplier for converting a slide count to eggs per gram.
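To make the procedure concrete, here is a minimal sketch in Python of the calculation this footnote describes. The prevalence, mean intensity, and threshold values are made-up illustrations (the real exercise takes mean intensity from the Lo et al. quadratic fit and works in counts per Kato-Katz slide rather than per gram):

from scipy.optimize import brentq
from scipy.stats import nbinom

# Illustrative inputs, not real survey values.
P = 0.30                     # prevalence of any infection
mean_epg_infected = 400.0    # average eggs/gram among the infected
M = P * mean_epg_infected    # average eggs/gram over everyone, uninfected included

# Solve P = 1 - (1 + M/r)^(-r) for the dispersion parameter r.
r = brentq(lambda r: 1 - (1 + M / r) ** (-r) - P, 1e-6, 100.0)

# Negative binomial with mean M and dispersion r, in scipy's (n, p) parameterization.
dist = nbinom(r, r / (r + M))

# Share of the whole population whose intensity exceeds an illustrative
# moderate-infection threshold, in eggs/gram.
threshold = 2000
print(r, dist.sf(threshold))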

 [5] I also experimented with higher-order polynomials in elevation. This hardly changed the results.

[6] I rerun the Worms at Work regression repeatedly while introducing weights centered on elevations of 1140, 1150, 1160, … meters. Following the default in Stata’s lowess command, the kernel is Cleveland’s tricube. The bandwidth is 50% of the sample elevation span.
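As a rough sketch of that weighting scheme in Python (with randomly generated placeholder elevations standing in for the 75 schools, since this only illustrates the kernel, not the regression itself):

import numpy as np

# Placeholder elevations only; the real analysis uses the 75 school elevations.
rng = np.random.default_rng(0)
elevations = rng.uniform(1130, 1250, size=75)

bandwidth = 0.5 * (elevations.max() - elevations.min())  # 50% of the elevation span

def tricube_weights(center):
    # Cleveland's tricube kernel: weights fall to zero beyond the bandwidth.
    d = np.minimum(np.abs(elevations - center) / bandwidth, 1.0)
    return (1 - d ** 3) ** 3

# One weight vector per center elevation (1140 m, 1150 m, ...); each would be
# applied to the corresponding schools' pupils when re-running the regression.
weights_by_center = {c: tricube_weights(c) for c in range(1140, 1231, 10)}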

[7] The Worms research team tested random subsets of children at treatment schools just before they were treated, meaning that pre-treatment infection data are available for a third of schools (group 1) for early 1998 and another third (group 2) for early 1999. To maximize statistical power, I merge these pre-treatment samples. Ecological conditions changed between those two collection times, as the El Niño passed, which may well have affected worm loads. But pooling them should not cause bias if schools are reasonably well mixed in elevation, as they appear to be. Averages adjust for the stratification in the sampling of students for testing: 15 students were chosen for each school and grade.

 [8] Miguel and Kremer modify the World Health Organization’s suggested standards for moderate infection, stated with reference to eggs per gram of stool. To minimize my discretion, I follow the WHO standards exactly.

 [9] There are separate graphs for hookworm, roundworm, whipworm, and schistosomiasis. Here, the shades of grey do not signify levels of confidence about the true average value. Rather, they indicate the 10th, 20th, …, 90th percentiles in eggs per gram, while the black lines show medians (50th percentiles).

[10] Among the group 3 schools, I marked those with school identifiers 108, 218, 205, 202, 189, 167, 212, and 211 as warranting praziquantel.

[11] The spatially smoothed impact regressions, and the spatially smoothed averages of baseline variables graphed next, are plotted using the same bandwidth and kernel as before, except that now distance is measured in degrees, in two dimensions. Since Busia is very close to the equator, a degree of latitude and a degree of longitude correspond to about the same distance. Locally weighted averages are computed at a 21×21 grid of points within the latitude and longitude spans of the schools. Points more than 0.05 degrees from the nearest school are excluded. Stata’s thin-plate-spline interpolation then fills in the contours.
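Here is a corresponding sketch of that two-dimensional grid in Python. The school coordinates are random placeholders in roughly the Busia area, and the bandwidth is my assumption (mirroring the 50%-of-span rule above), so this only illustrates the grid construction, the tricube weighting in two dimensions, and the 0.05-degree exclusion rule:

import numpy as np

# Placeholder coordinates only, roughly the Busia area.
rng = np.random.default_rng(0)
lats = rng.uniform(0.20, 0.45, size=75)
lons = rng.uniform(34.05, 34.35, size=75)

# Assumed bandwidth: 50% of the wider coordinate span, in degrees.
bandwidth = 0.5 * max(lats.max() - lats.min(), lons.max() - lons.min())

lat_grid = np.linspace(lats.min(), lats.max(), 21)
lon_grid = np.linspace(lons.min(), lons.max(), 21)

weights_by_point = {}
for glat in lat_grid:
    for glon in lon_grid:
        dist = np.hypot(lats - glat, lons - glon)  # degrees; fine near the equator
        if dist.min() > 0.05:
            continue  # drop grid points far from every school
        d = np.minimum(dist / bandwidth, 1.0)
        weights_by_point[(glat, glon)] = (1 - d ** 3) ** 3  # tricube weights

# Each weight vector would feed a locally weighted regression or average at that
# grid point; interpolation would then fill in the contours.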

 [12] Weight-for-age z scores are expressed relative to the median of a reference distribution, which I believe comes from samples of American children from about 50 years ago. The WHO and CDC provide reference tables.

[13] The regressions behind the following two graphs incorporate all controls from the Baird et al. log wage earnings regression that are meaningful in this shorter-term context: all interactions of sex and standard (grade) dummies, zone dummies, and initial pupil population.

 [14] This idea is inspired by a paper by Kevin Croke, although that paper links a short-term deworming study to long-term outcomes at the parish level, not the individual level.

 The post How thin the reed? Generalizing from “Worms at Work” appeared first on The GiveWell Blog.

Just a few days left in 2016…
Wed, 12/28/2016 - 07:38

There are only a few days left to give to charity this calendar year.

 The majority of donors who support GiveWell’s recommendations choose to make their gifts in December, for tax reasons or due to the holiday season.

 This blog post contains quick tips and information about donating to GiveWell’s recommended charities.

 But first, to everyone who supported our charities or followed our work in 2016: Thank you!

 Donate Here  

 Will my donation be tax-deductible?

 Donors in many countries can make tax-deductible donations to GiveWell’s recommended charities.

Click here to view this information by country; scroll down to see this information listed by charity.

What’s the best way for me to donate?

 We discuss tips for giving efficiently (including tax considerations) on our website. More information about methods of donating is available here.

Please don’t hesitate to reach out to donations@givewell.org if you have any questions about donation logistics. We’re happy to answer questions about our research or recommendations, too.

 Thank you!

 The post Just a few days left in 2016… appeared first on The GiveWell Blog.

Front-loading my personal giving this year
Thu, 12/22/2016 - 17:12

I’ve decided to give a little more than double what I normally give to charity this year, and skip giving next year. I see many reasons to give a larger-than-normal gift this year, and no countervailing reasons. If it weren’t for some idiosyncratic factors in my situation, I would roll my next three years of giving into this year’s gift.

 I decided to write up my reasoning in the hopes of prompting others to consider whether they should be doing similarly. That said, everyone’s financial situation is different, and it may be a good idea to consult with a tax lawyer for personalized advice.

Tax policy

The issue that originally prompted me to consider a larger-than-usual gift was the prospect of changing tax policy due to the new administration, which could result in lower tax benefits for charitable giving in 2017 vs. 2016. A quick summary of my thinking follows; this should not be taken as tax advice, merely as my own personal guesswork and reasoning behind my own giving.

 President-elect Trump’s public tax plan has three important features that could affect tax benefits for charitable giving:

- Reducing tax rates “across-the-board.”* The proposal looks similar in this respect to the 2016 House Republican Tax Reform Plan. Depending on one’s tax bracket, this could mean that the benefit for charitable giving falls by a few percentage points, so giving this year could save more money on taxes than giving next year.
- Raising the standard deduction significantly (more than doubling it). The proposal looks similar in this respect to the 2016 House Republican Tax Reform Plan. Charitable deductions are only beneficial insofar as total itemized deductions exceed the standard deduction; depending on how else treatment of itemized deductions changes, and on a taxpayer’s specific situation, this could reduce the amount of charitable giving that is effectively deductible by several thousand dollars per year, or not at all. It could also strengthen the case for giving less frequently than once per year.
- Capping total itemized deductions at $100k for singles/$200k for couples. If this happened as stated, it could effectively eliminate the tax benefit of charitable giving for many people (most of them earning very high amounts, giving very high amounts, or both). The 2016 House Republican Tax Reform Plan does not have a similar provision, and I consider this change less likely than the above two.

Giving opportunities

GiveWell’s top charities look strong this year and have very large amounts of room for more funding. It’s reasonably likely that this will be true again in the next few years, but I don’t know that it will be, and it’s hard to imagine the giving opportunities on this front getting much better in the near term.

 I also see a fair amount of appeal in the option I mentioned in the staff personal giving post:

 I thought about reallocating my giving to another individual, someone who is quite value-aligned with me and quite knowledgeable, and thinks differently enough that they might see opportunities I don’t.

 Right now, I can think of more than one individual in this category, and some of the giving opportunities they’re interested in are not a fit for Good Ventures. In future years, I hope that the Open Philanthropy Project makes connections with more donors and effective philanthropy rises generally, and this could mean that more money flows to opportunities in this category (opportunities that I don’t see and/or that aren’t a good fit for Good Ventures). This is another case where it seems like giving opportunities may get weaker, but are unlikely to get stronger.

What I’m doing

I’m planning to give an amount equivalent to my next two years’ worth of charitable giving, taking the likely trajectory of my salary into account. If not for some idiosyncratic aspects of my situation, I would have gone with three years. I don’t want to plan beyond three years because I think there are a lot of difficult-to-anticipate changes that could take place in that time.

 Note that there are limits on the total proportion of income that can be deducted in a year, and one should check these before deciding to make a multi-year gift this year.

  * Though as written, the tax plan would appear to constitute a major tax increase for many single filers, based on this statement: “Brackets for single filers are ½ of these amounts.” I’ve chosen not to focus on this issue, partly because there is no similar change in the 2016 House Republican Tax Reform Plan.

 The post Front-loading my personal giving this year appeared first on The GiveWell Blog.
