May 2016 | Open Philanthropy                    @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/system/system.base.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/system/system.menus.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/system/system.messages.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/system/system.theme.css?q7b2rf");   @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/jquery_update/replace/ui/themes/base/minified/jquery.ui.core.min.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/jquery_update/replace/ui/themes/base/minified/jquery.ui.theme.min.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/jquery_update/replace/ui/themes/base/minified/jquery.ui.accordion.min.css?q7b2rf");   @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/comment_notify/comment_notify.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/comment/comment.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/date/date_api/date.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/date/date_popup/themes/datepicker.1.7.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/field/theme/field.css?q7b2rf"); @import 
url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/footnotes/footnotes.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/custom/grants_summary/grants_summary.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/node/node.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/search/search.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/modules/user/user.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/views/css/views.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/caption_filter/caption-filter.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/ckeditor/css/ckeditor.css?q7b2rf");   @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/ctools/css/ctools.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/typogrify/typogrify.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/modules/contrib/content_type_extras/css/content_type_extras.css?q7b2rf");   @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/normalize.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/op-fonts.css?q7b2rf"); @import 
url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/html.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/meanmenu.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/global.css?q7b2rf"); @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/pages/pages.css?q7b2rf");   @import url("https://web.archive.org./web/20200425191316cs_/https://www.openphilanthropy.org/sites/all/themes/op_basic/styles/css/global/admin.css?q7b2rf");                                      Jump to Navigation            Research & IdeasCause Selection Notable Lessons Cause Reports Conversations History of Philanthropy  Focus AreasU.S. Policy Criminal Justice Reform Farm Animal Welfare Macroeconomic Stabilization Policy Immigration Policy Land Use Reform Global Catastrophic Risks Biosecurity and Pandemic Preparedness Potential Risks from Advanced Artificial Intelligence Scientific Research Global Health & Development Other areas  GivingGrants Database Current Priorities Guide for Grant Seekers Grantmaking Stages  About UsWho We Are Vision & Values Openness Progress to Date Team Press Kit  Blog Get InvolvedContact Us Jobs Stay Updated          Search form  Search                     Home / Blog / Archive  May 2016              History of Philanthropy Case Study: The Founding of the Center on Budget and Policy Priorities   by Holden Karnofsky Published May 20, 2016       Suzanne Kahn, a consultant who has been working with us as part of our History of Philanthropy project, recently finished a case study on the founding and growth of the Center on Budget and Policy Priorities (CBPP), a well-regarded D.C. 
think tank that focuses on tax and budget policy with an aim of improving outcomes for low-income people.

 We were interested in learning more about the history and founding of CBPP because: [node:read-more:link]

      History of Philanthropy       Holden Karnofsky's blog 2 comments Add new comment          Some Background on Our Views Regarding Advanced Artificial Intelligence   by Holden Karnofsky Published May 06, 2016       We’re planning to make potential risks from advanced artificial intelligence a major priority in 2016. A future post will discuss why; this post gives some background.

 Summary: 

I first give our definition of “transformative artificial intelligence,” our term for a type of potential advanced artificial intelligence we find particularly relevant for our purposes. Roughly and conceptually, transformative AI refers to potential future AI that precipitates a transition comparable to (or more significant than) the agricultural or industrial revolution. I also provide (below) a more detailed definition. The concept of “transformative AI” has some overlap with concepts put forth by others, such as “superintelligence” and “artificial general intelligence.” However, “transformative AI” is intended to be a more inclusive term, leaving open the possibility of AI systems that count as “transformative” despite lacking many abilities humans have. I then discuss the question of whether, and when, we might expect transformative AI to be developed. This question has many properties (long timelines, relatively vague concepts, lack of detailed public analysis) I associate with developments that are nearly impossible to forecast, and I don’t think it is possible to make high-certainty forecasts on the matter. With that said, I am comfortable saying that I think there is a nontrivial likelihood (at least 10% with moderate robustness, and at least 1% with high robustness) of transformative AI within the next 20 years. I can’t feasibly share all of the information that goes into this view, but I try to outline the general process I have followed to reach it. Finally, I briefly discuss whether there are other potential future developments that seem to have similar potential for impact on similar timescales to transformative AI, in order to put our interest in AI in context. The ideas in this post overlap with some arguments made by others, but I think it is important to lay out the specific views on these issues that I endorse. 
Note that this post is confined in scope to the above topics; it does not, for example, discuss potential risks associated with AI or potential measures for reducing them. I will discuss the latter topics more in the future. [node:read-more:link]

      Artificial Intelligence       Holden Karnofsky's blog 2 comments Add new comment          Potential Risks from Advanced Artificial Intelligence: The Philanthropic Opportunity   by Holden Karnofsky Published May 06, 2016       We’re planning to make potential risks from artificial intelligence a major priority this year. We feel this cause presents an outstanding philanthropic opportunity — with extremely high importance, high neglectedness, and reasonable tractability (our three criteria for causes) — for someone in our position. We believe that the faster we can get fully up to speed on key issues and explore the opportunities we currently see, the faster we can lay the groundwork for informed, effective giving both this year and in the future.

 With all of this in mind, we’re placing a larger “bet” on this cause, this year, than we are placing even on other focus areas — not necessarily in terms of funding (we aren’t sure we’ll identify very large funding opportunities this year, and are more focused on laying the groundwork for future years), but in terms of senior staff time, which at this point is a scarcer resource for us. Consistent with our philosophy of hits-based giving, we are doing this not because we have confidence in how the future will play out and how we can impact it, but because we see a risk worth taking. In about a year, we’ll formally review our progress and reconsider how senior staff time is allocated.

 This post will first discuss why I consider this cause to be an outstanding philanthropic opportunity. (My views are fairly representative, but not perfectly representative, of those of other staff working on this cause.) It will then give a broad outline of our planned activities for the coming year, some of the key principles we hope to follow in this work, and some of the risks and reservations we have about prioritizing this cause as highly as we are. 

 In brief: 

It seems to me that artificial intelligence is currently on a very short list of the most dynamic, unpredictable, and potentially world-changing areas of science. I believe there’s a nontrivial probability that transformative AI will be developed within the next 20 years, with enormous global consequences. By and large, I expect the consequences of this progress — whether or not transformative AI is developed soon — to be positive. However, I also perceive risks. Transformative AI could be a very powerful technology, with potentially globally catastrophic consequences if it is misused or if there is a major accident involving it. Because of this, I see this cause as having extremely high importance (one of our key criteria), even while accounting for substantial uncertainty about the likelihood of developing transformative AI in the coming decades and about the size of the risks. I discuss the nature of potential risks below; note that I think they do not apply to today’s AI systems. I consider this cause to be highly neglected in important respects. There is a substantial and growing field around artificial intelligence and machine learning research, but most of it is not focused on reducing potential risks. We’ve put substantial work into trying to ensure that we have a thorough landscape of the researchers, funders, and key institutions whose work is relevant to potential risks from advanced AI. We believe that the amount of work being done is well short of what it productively could be (despite recent media attention); that philanthropy could be helpful; and that the activities we’re considering wouldn’t be redundant with those of other funders. I believe that there is useful work to be done today in order to mitigate future potential risks. 
In particular, (a) I think there are important technical problems that can be worked on today, that could prove relevant to reducing accident risks; (b) I preliminarily feel that there is also considerable scope for analysis of potential strategic and policy considerations. More broadly, the Open Philanthropy Project may be able to help support an increase in the number of people – particularly people with strong relevant technical backgrounds – thinking through how to reduce potential risks, which could be important in the future even if the work done in the short term does not prove essential. I believe that one of the things philanthropy is best-positioned to do is provide steady, long-term support as fields and institutions grow. I consider this a challenging cause. I think it would be easy to do harm while trying to do good. For example, trying to raise the profile of potential risks could contribute (and, I believe, has contributed to some degree) to non-nuanced or inaccurate portrayals of risk in the media, which in turn could raise the risks of premature and/or counterproductive regulation. I consider the Open Philanthropy Project relatively well-positioned to work in this cause while being attentive to pitfalls, and to deeply integrate people with strong technical expertise into our work. I see much room for debate in the decision to prioritize this cause as highly as we are. However, I think it is important that a philanthropist in our position be willing to take major risks, and prioritizing this cause is a risk that I see as very worth taking. My views on this cause have evolved considerably over time. I will discuss the evolution of my thinking in detail in a future post, but this post focuses on the case for prioritizing this cause today. [node:read-more:link]

      Artificial Intelligence       Holden Karnofsky's blog 13 comments Add new comment                 Stay Updated   Email me new blog posts Blog RSS feed More information     Recent blog posts        Forecasting the COVID-19 Pandemic    Suggestions for Individual Donors from Open Philanthropy Staff - 2019    2019 Allocation to GiveWell Top Charities    Co-funding Partnership with Ben Delo    How Feasible Is Long-range Forecasting?        Archives    All posts

   2020 March 2020    March 2020  2019 December 2019    December 2019    December 2019   November 2019    November 2019   October 2019    October 2019   August 2019    August 2019    August 2019   July 2019    July 2019   June 2019    June 2019   April 2019    April 2019    April 2019    April 2019   March 2019    March 2019   February 2019    February 2019  2018 December 2018    December 2018    December 2018    December 2018   October 2018    October 2018   September 2018    September 2018   May 2018    May 2018   April 2018    April 2018   March 2018    March 2018    March 2018    March 2018   February 2018    February 2018    February 2018   January 2018    January 2018    January 2018    January 2018  2017 December 2017    December 2017    December 2017    December 2017   November 2017    November 2017   October 2017    October 2017   September 2017    September 2017    September 2017    September 2017    September 2017    September 2017   June 2017    June 2017    June 2017    June 2017    June 2017   April 2017    April 2017    April 2017   March 2017    March 2017    March 2017    March 2017   February 2017    February 2017    February 2017  2016 December 2016    December 2016    December 2016    December 2016   October 2016    October 2016   September 2016    September 2016    September 2016    September 2016    September 2016   July 2016    July 2016   June 2016    June 2016    June 2016   May 2016    May 2016    May 2016    May 2016   April 2016    April 2016    April 2016   March 2016    March 2016    March 2016   February 2016    February 2016    February 2016    February 2016    February 2016  2015 December 2015    December 2015   November 2015    November 2015   October 2015    October 2015   September 2015    September 2015    September 2015    September 2015    September 2015    September 2015   August 2015    August 2015    August 2015    August 2015   July 2015    July 2015    July 2015    July 2015    July 2015    July 2015   June 2015    June 
2015    June 2015    June 2015   May 2015    May 2015    May 2015    May 2015   April 2015    April 2015    April 2015    April 2015    April 2015    April 2015   March 2015    March 2015    March 2015    March 2015    March 2015   February 2015    February 2015    February 2015    February 2015  2014 October 2014    October 2014    October 2014   September 2014    September 2014    September 2014   August 2014    August 2014   July 2014    July 2014    July 2014   June 2014    June 2014   May 2014    May 2014    May 2014    May 2014    May 2014   April 2014    April 2014    April 2014   March 2014    March 2014    March 2014   January 2014    January 2014    January 2014  2013 December 2013    December 2013   November 2013    November 2013    November 2013   October 2013    October 2013    October 2013    October 2013    October 2013   September 2013    September 2013   July 2013    July 2013    July 2013    July 2013   June 2013    June 2013    June 2013   May 2013    May 2013    May 2013   April 2013    April 2013    April 2013    April 2013   March 2013    March 2013   February 2013    February 2013  2012 September 2012    September 2012    September 2012   July 2012    July 2012   June 2012    June 2012    June 2012    June 2012   May 2012    May 2012    May 2012    May 2012   March 2012    March 2012   February 2012    February 2012    February 2012    February 2012   January 2012    January 2012  2011 October 2011    October 2011   September 2011    September 2011    September 2011    September 2011                contact us jobs press kit  facebook twitter rss © Open Philanthropy. Except as otherwise noted, this work is licensed under a Creative Commons Attribution-Noncommercial-ShareAlike 3.0 United States License. Some images may be copyrighted by others and not licensed for re-use: see image captions or footnotes. Privacy policy         try { clicky.init(100914494); }catch(e){}