jiehuihuang
No personal profile
21 Following · 2 Followers · 1 Topic · 0 Badges

Posts
jiehuihuang
2025-04-07
Haha, time passes fast.
jiehuihuang
2023-05-31
😎
"AI may exterminate mankind, comparable to nuclear war!" Over 350 bigwigs jointly spoke out
{"i18n":{"language":"en_US"},"userPageInfo":{"id":"3570972159518527","uuid":"3570972159518527","gmtCreate":1613374120837,"gmtModify":1635457682741,"name":"jiehuihuang","pinyin":"jiehuihuang","introduction":"","introductionEn":"","signature":"","avatar":"https://static.tigerbbs.com/92d0548599d8f5f02e773bccc069f734","hat":null,"hatId":null,"hatName":null,"vip":1,"status":2,"fanSize":2,"headSize":21,"tweetSize":21,"questionSize":0,"limitLevel":999,"accountStatus":4,"level":{"id":1,"name":"萌萌虎","nameTw":"萌萌虎","represent":"呱呱坠地","factor":"评论帖子3次或发布1条主帖(非转发)","iconColor":"3C9E83","bgColor":"A2F1D9"},"themeCounts":1,"badgeCounts":0,"badges":[],"moderator":false,"superModerator":false,"manageSymbols":null,"badgeLevel":null,"boolIsFan":false,"boolIsHead":false,"favoriteSize":2,"symbols":null,"coverImage":null,"realNameVerified":"success","userBadges":[{"badgeId":"1026c425416b44e0aac28c11a0848493-3","templateUuid":"1026c425416b44e0aac28c11a0848493","name":" Tiger Idol","description":"Join the tiger community for 1500 days","bigImgUrl":"https://static.tigerbbs.com/8b40ae7da5bf081a1c84df14bf9e6367","smallImgUrl":"https://static.tigerbbs.com/f160eceddd7c284a8e1136557615cfad","grayImgUrl":"https://static.tigerbbs.com/11792805c468334a9b31c39f95a41c6a","redirectLinkEnabled":0,"redirectLink":null,"hasAllocated":1,"isWearing":0,"stamp":null,"stampPosition":0,"hasStamp":0,"allocationCount":1,"allocatedDate":"2025.03.27","exceedPercentage":null,"individualDisplayEnabled":0,"backgroundColor":null,"fontColor":null,"individualDisplaySort":0,"categoryType":1001},{"badgeId":"972123088c9646f7b6091ae0662215be-1","templateUuid":"972123088c9646f7b6091ae0662215be","name":"Elite Trader","description":"Total number of securities or futures transactions reached 30","bigImgUrl":"https://static.tigerbbs.com/ab0f87127c854ce3191a752d57b46edc","smallImgUrl":"https://static.tigerbbs.com/c9835ce48b8c8743566d344ac7a7ba8c","grayImgUrl":"https://static.tigerbbs.com/76754b53ce7a90019f132c1d2fbc698f","redirectLinkEnabled":0,"redirectLink":null,"hasAllocated":1,"isWearing":0,"stamp":null,"stampPosition":0,"hasStamp":0,"allocationCount":1,"allocatedDate":"2023.08.02","exceedPercentage":"60.65%","individualDisplayEnabled":0,"backgroundColor":null,"fontColor":null,"individualDisplaySort":0,"categoryType":1100},{"badgeId":"7a9f168ff73447fe856ed6c938b61789-1","templateUuid":"7a9f168ff73447fe856ed6c938b61789","name":"Knowledgeable Investor","description":"Traded more than 10 stocks","bigImgUrl":"https://static.tigerbbs.com/e74cc24115c4fbae6154ec1b1041bf47","smallImgUrl":"https://static.tigerbbs.com/d48265cbfd97c57f9048db29f22227b0","grayImgUrl":"https://static.tigerbbs.com/76c6d6898b073c77e1c537ebe9ac1c57","redirectLinkEnabled":0,"redirectLink":null,"hasAllocated":1,"isWearing":0,"stamp":null,"stampPosition":0,"hasStamp":0,"allocationCount":1,"allocatedDate":"2023.03.25","exceedPercentage":null,"individualDisplayEnabled":0,"backgroundColor":null,"fontColor":null,"individualDisplaySort":0,"categoryType":1102},{"badgeId":"cbe1c45c584340f3bd9ae6c77e0e9981-1","templateUuid":"cbe1c45c584340f3bd9ae6c77e0e9981","name":"Academy Experiencer","description":"5 lessons 
learned","bigImgUrl":"https://community-static.tradeup.com/news/fb5ae275631fb96a92d475cdc85d2302","smallImgUrl":"https://community-static.tradeup.com/news/c2660a1935bd2105e97c9915619936c3","grayImgUrl":null,"redirectLinkEnabled":0,"redirectLink":null,"hasAllocated":1,"isWearing":0,"stamp":null,"stampPosition":0,"hasStamp":0,"allocationCount":1,"allocatedDate":"2022.08.06","exceedPercentage":null,"individualDisplayEnabled":0,"backgroundColor":null,"fontColor":null,"individualDisplaySort":0,"categoryType":2006},{"badgeId":"a83d7582f45846ffbccbce770ce65d84-1","templateUuid":"a83d7582f45846ffbccbce770ce65d84","name":"Real Trader","description":"Completed a transaction","bigImgUrl":"https://static.tigerbbs.com/2e08a1cc2087a1de93402c2c290fa65b","smallImgUrl":"https://static.tigerbbs.com/4504a6397ce1137932d56e5f4ce27166","grayImgUrl":"https://static.tigerbbs.com/4b22c79415b4cd6e3d8ebc4a0fa32604","redirectLinkEnabled":0,"redirectLink":null,"hasAllocated":1,"isWearing":0,"stamp":null,"stampPosition":0,"hasStamp":0,"allocationCount":1,"allocatedDate":"2021.12.28","exceedPercentage":null,"individualDisplayEnabled":0,"backgroundColor":null,"fontColor":null,"individualDisplaySort":0,"categoryType":1100}],"userBadgeCount":5,"currentWearingBadge":null,"individualDisplayBadges":null,"crmLevel":11,"crmLevelSwitch":1,"location":null,"starInvestorFollowerNum":0,"starInvestorFlag":false,"starInvestorOrderShareNum":0,"subscribeStarInvestorNum":3,"ror":null,"winRationPercentage":null,"showRor":false,"investmentPhilosophy":null,"starInvestorSubscribeFlag":false},"baikeInfo":{},"tab":"post","tweets":[{"id":421850501824712,"gmtCreate":1744011580656,"gmtModify":1744011585577,"author":{"id":"3570972159518527","authorId":"3570972159518527","name":"jiehuihuang","avatar":"https://static.tigerbbs.com/92d0548599d8f5f02e773bccc069f734","crmLevel":11,"crmLevelSwitch":1,"followedFlag":false,"authorIdStr":"3570972159518527","idStr":"3570972159518527"},"themes":[],"htmlText":"Haha time pass fast","listText":"Haha time pass fast","text":"Haha time pass fast","images":[{"img":"https://community-static.tradeup.com/news/4fd63d14e408d1d4151692a3b289dc42","width":"1125","height":"1476"}],"top":1,"highlighted":1,"essential":1,"paper":1,"likeSize":0,"commentSize":0,"repostSize":0,"link":"https://ttm.financial/post/421850501824712","isVote":1,"tweetType":1,"viewCount":960,"authorTweetTopStatus":1,"verified":2,"comments":[],"imageCount":1,"langContent":"EN","totalScore":0},{"id":182266852249744,"gmtCreate":1685508001249,"gmtModify":1685508007309,"author":{"id":"3570972159518527","authorId":"3570972159518527","name":"jiehuihuang","avatar":"https://static.tigerbbs.com/92d0548599d8f5f02e773bccc069f734","crmLevel":11,"crmLevelSwitch":1,"followedFlag":false,"authorIdStr":"3570972159518527","idStr":"3570972159518527"},"themes":[],"htmlText":"😎","listText":"😎","text":"😎","images":[],"top":1,"highlighted":1,"essential":1,"paper":1,"likeSize":0,"commentSize":0,"repostSize":0,"link":"https://ttm.financial/post/182266852249744","repostId":"2339231977","repostType":2,"repost":{"id":"2339231977","kind":"highlight","pubTimestamp":1685506671,"share":"https://ttm.financial/m/news/2339231977?lang=en_US&edition=fundamental","pubTime":"2023-05-31 12:17","market":"us","language":"zh","title":"\"AI may exterminate mankind, comparable to nuclear war!\" Over 350 bigwigs jointly spoke out","url":"https://stock-news.laohu8.com/highlight/detail?id=2339231977","media":"华尔街见闻","summary":"谷歌DeepMind和Anthropic 
CEO,以及图灵奖获得者都签署了这封联名信。英伟达市值一度超过万亿美元,AI风头正盛的背后,公众的担忧也越来越多,科技圈再次流出了一封重磅公开信。当地时间5月","content":"<p><html><head></head><body><strong>The CEOs of Google DeepMind and Anthropic, as well as Turing Award winners, all signed the joint letter.</strong><p style=\"text-align: justify;\">Nvidia's market value once exceeded one trillion US dollars. Behind the booming AI limelight, the public is also more and more worried, and a blockbuster open letter has once again flowed out of the technology circle.</p><p><p style=\"text-align: justify;\">On May 30, local time, the non-profit organization \"Center for AI Safety\" released a joint open letter on its official website,<strong>It is said that AI, a technology comparable to \"epidemic and nuclear war\", may pose an existential threat to mankind.<br/></strong></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/3919f297c6c4b115a1460004b90a8028\" title=\"\" tg-width=\"676\" tg-height=\"364\"/></p><p>The whole statement is only one sentence, 22 words:</p><p><strong>\"Mitigating the extinction risk posed by artificial intelligence should be a global priority along with other social-scale risk levels such as pandemics and nuclear wars.\"</strong></p><p>The Biden administration has said in recent months that artificial intelligence poses a threat to public safety, privacy and *, but the government has limited authority to regulate it.</p><p><strong>Led by the founder of OpenAI, signed by more than 350 AI giants</strong><strong>At present, more than 350 AI tycoons have signed the above-mentioned joint open letter</strong>, including OpenAI CEO Sam Altman, Google DeepMind CEO Demis Hassabis and Anthropic CEO Dario Amode, the heads of three major AI companies.</p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/45eeee0f689e413524634f98d287d2a7\" title=\"\" tg-width=\"554\" tg-height=\"1522\"/></p><p><strong>Turing Award winners Geoffrey Hinton and Yoshua Bengio, known as the \"godfathers\" of artificial intelligence, are also impressively listed.</strong></p><p><strong>There are also Chinese scholars on the list</strong>, including Zhang Yaqin, academician of the Chinese Academy of Engineering and president of the Intelligent Industry Research Institute (AIR) of Tsinghua University, Zeng Yi, director of the Artificial Intelligence Ethics and Governance Research Center of the Institute of Automation, Chinese Academy of Sciences, and Zhan Xianyuan, associate professor of Tsinghua University.</p><p>Other signatories include Microsoft CTO Kevin Scott, former UN High Representative for Disarmament Affairs Angela Kane, Skype co-founder Jaan Tallinn, and Quora CEO Adam D 'Angelo.</p><p>While the public has been acknowledging the risks of AI, there are more challenges to discuss, they said. They also want to \"create common knowledge for a growing number of experts and public figures who also take seriously some of the most serious risks that exist in the field of advanced AI.\"</p><p>It is worth mentioning that at present,<strong>Microsoft founder Bill Gates did not sign this joint letter.</strong></p><p><strong>\"Danger! 
Stop all large-scale AI research immediately\"</strong>Since the release of ChatGPT last year, the technology industry has set off a fiercely competitive AI race, and it has also aroused public fear of this unknown technology.</p><p>In March this year, technology giants such as Bengio, Musk, Apple co-founder Steve Wozniak, and Stability AI founder Emad Mostaque issued a joint open letter.<strong>Call for a suspension of training on artificial intelligence systems more powerful than GPT-4 for at least 6 months.</strong></p><p>The open letter emphasizes the rapid development of AI, but it is not planned and managed accordingly:</p><p><strong>\"In recent months, there has been an AI frenzy in artificial intelligence labs. They are frantically racing AI to develop and deploy increasingly powerful AI.</strong></p><p><strong>Unfortunately, so far, no one can understand, predict or reliably control AI systems, and there is no corresponding level of planning and management. \"</strong></p><p>The letter mentioned that now that artificial intelligence has become as competitive as humans in general tasks, we must ask ourselves:</p><p>\"Should non-human brains be developed so that they ultimately exceed human numbers, outperform human intelligence, eliminate and replace humans?</p><p><strong>Should we risk losing control of human civilization?</strong>”</p><p>The answer is no, the open letter states,<strong>A powerful AI system should only be developed if we are confident that its effects are positive and its risks are manageable.</strong></p><p>Therefore, the open letter calls on all artificial intelligence laboratories to immediately suspend training on artificial intelligence systems more powerful than GPT-4 for at least 6 months.</p><p>\"We should step back from the dangerous race … to let the most powerful model of today<strong>More accurate and safe</strong>。</p><p>Let's enjoy a long summer of AI instead of rushing into autumn unprepared. \"</p><p><strong>The Godfather of AI: Regretting his life's work, unable to stop the human AI war</strong>At the beginning of May, Geoffrey Hinton, a master of deep learning and godfather of artificial intelligence, suddenly announced his resignation from Google, and he was finally able to talk freely about the risks of AI.<br/></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/acc3dc261cb6d19c118db0dec4afd246\" title=\"\" tg-width=\"1080\" tg-height=\"274\"/></p><p>It is the deep concern about the risks of artificial intelligence that makes this deep learning giant bluntly say:<strong>\"I regret my life's work very much.\"</strong></p><p>In media reports, Hinton directly warned the world:<strong>Attention, there is danger ahead, there is danger ahead, there is danger ahead!</strong></p><p>In an interview, Hinton said, \"We have almost learned how computers improve themselves,<strong>This is dangerous, and we must seriously consider how to control it.</strong>”</p><p>He once pointed out that,<strong>The competition between Google and Microsoft, among others, will escalate into a global race. 
Without some sort of global regulation, this race will not stop!</strong></p><p><strong>He said he was worried that GPT-4 iterations would pose a threat to humans, and strong AI could learn unexpected behaviors from large amounts of data.</strong></p><p><strong>He worried that one day real autonomous weapons-those killing robots-would become a reality.</strong></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/4ed375d339a014b2b25985d64d12cce0\" title=\"图片来自《西部世界》\" tg-width=\"1080\" tg-height=\"607\"/><span>Image from Westworld</span></p><p>In the 1980s, Hinton was a professor of computer science at Carnegie Mellon University, but because he was unwilling to accept funding from the Pentagon, he chose to leave CMU and go to Canada.<strong>At the time, Hinton strongly opposed the use of AI on the battlefield, which he called robot soldiers.</strong></p><p>From artificial intelligence pioneer to doomsday prophet, Hinton's transformation may mark<strong>The technology industry is at its most important inflection point in decades.</strong></p><p></body></html></p>","source":"wallstreetcn_api","collect":0,"html":"<!DOCTYPE html>\n<html>\n<head>\n<meta http-equiv=\"Content-Type\" content=\"text/html; charset=utf-8\" />\n<meta name=\"viewport\" content=\"width=device-width,initial-scale=1.0,minimum-scale=1.0,maximum-scale=1.0,user-scalable=no\"/>\n<meta name=\"format-detection\" content=\"telephone=no,email=no,address=no\" />\n<title>\"AI may exterminate mankind, comparable to nuclear war!\" Over 350 bigwigs jointly spoke out</title>\n<style type=\"text/css\">\na,abbr,acronym,address,applet,article,aside,audio,b,big,blockquote,body,canvas,caption,center,cite,code,dd,del,details,dfn,div,dl,dt,\nem,embed,fieldset,figcaption,figure,footer,form,h1,h2,h3,h4,h5,h6,header,hgroup,html,i,iframe,img,ins,kbd,label,legend,li,mark,menu,nav,\nobject,ol,output,p,pre,q,ruby,s,samp,section,small,span,strike,strong,sub,summary,sup,table,tbody,td,tfoot,th,thead,time,tr,tt,u,ul,var,video{ font:inherit;margin:0;padding:0;vertical-align:baseline;border:0 }\nbody{ font-size:16px; line-height:1.5; color:#999; background:transparent; }\n.wrapper{ overflow:hidden;word-break:break-all;padding:10px; }\nh1,h2{ font-weight:normal; line-height:1.35; margin-bottom:.6em; }\nh3,h4,h5,h6{ line-height:1.35; margin-bottom:1em; }\nh1{ font-size:24px; }\nh2{ font-size:20px; }\nh3{ font-size:18px; }\nh4{ font-size:16px; }\nh5{ font-size:14px; }\nh6{ font-size:12px; }\np,ul,ol,blockquote,dl,table{ margin:1.2em 0; }\nul,ol{ margin-left:2em; }\nul{ list-style:disc; }\nol{ list-style:decimal; }\nli,li p{ margin:10px 0;}\nimg{ max-width:100%;display:block;margin:0 auto 1em; }\nblockquote{ color:#B5B2B1; border-left:3px solid #aaa; padding:1em; }\nstrong,b{font-weight:bold;}\nem,i{font-style:italic;}\ntable{ width:100%;border-collapse:collapse;border-spacing:1px;margin:1em 0;font-size:.9em; }\nth,td{ padding:5px;text-align:left;border:1px solid #aaa; }\nth{ font-weight:bold;background:#5d5d5d; }\n.symbol-link{font-weight:bold;}\n/* header{ border-bottom:1px solid #494756; } */\n.title{ margin:0 0 8px;line-height:1.3;color:#ddd; }\n.meta {color:#5e5c6d;font-size:13px;margin:0 0 .5em; }\na{text-decoration:none; color:#2a4b87;}\n.meta .head { display: inline-block; overflow: hidden}\n.head .h-thumb { width: 30px; height: 30px; margin: 0; padding: 0; border-radius: 50%; float: left;}\n.head .h-content { margin: 0; padding: 0 0 0 9px; float: left;}\n.head .h-name {font-size: 13px; color: #eee; margin: 0;}\n.head .h-time 
{font-size: 12.5px; color: #7E829C; margin: 0;}\n.small {font-size: 12.5px; display: inline-block; transform: scale(0.9); -webkit-transform: scale(0.9); transform-origin: left; -webkit-transform-origin: left;}\n.smaller {font-size: 12.5px; display: inline-block; transform: scale(0.8); -webkit-transform: scale(0.8); transform-origin: left; -webkit-transform-origin: left;}\n.bt-text {font-size: 12px;margin: 1.5em 0 0 0}\n.bt-text p {margin: 0}\n</style>\n</head>\n<body>\n<div class=\"wrapper\">\n<header>\n<h2 class=\"title\">\n\"AI may exterminate mankind, comparable to nuclear war!\" Over 350 bigwigs jointly spoke out\n</h2>\n<h4 class=\"meta\">\n<p class=\"head\">\n<strong class=\"h-name small\">华尔街见闻</strong><span class=\"h-time small\">2023-05-31 12:17</span>\n</p>\n</h4>\n</header>\n<article>\n<p><html><head></head><body><strong>The CEOs of Google DeepMind and Anthropic, as well as Turing Award winners, all signed the joint letter.</strong><p style=\"text-align: justify;\">Nvidia's market value once exceeded one trillion US dollars. Behind the booming AI limelight, the public is also more and more worried, and a blockbuster open letter has once again flowed out of the technology circle.</p><p><p style=\"text-align: justify;\">On May 30, local time, the non-profit organization \"Center for AI Safety\" released a joint open letter on its official website,<strong>It is said that AI, a technology comparable to \"epidemic and nuclear war\", may pose an existential threat to mankind.<br/></strong></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/3919f297c6c4b115a1460004b90a8028\" title=\"\" tg-width=\"676\" tg-height=\"364\"/></p><p>The whole statement is only one sentence, 22 words:</p><p><strong>\"Mitigating the extinction risk posed by artificial intelligence should be a global priority along with other social-scale risk levels such as pandemics and nuclear wars.\"</strong></p><p>The Biden administration has said in recent months that artificial intelligence poses a threat to public safety, privacy and *, but the government has limited authority to regulate it.</p><p><strong>Led by the founder of OpenAI, signed by more than 350 AI giants</strong><strong>At present, more than 350 AI tycoons have signed the above-mentioned joint open letter</strong>, including OpenAI CEO Sam Altman, Google DeepMind CEO Demis Hassabis and Anthropic CEO Dario Amode, the heads of three major AI companies.</p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/45eeee0f689e413524634f98d287d2a7\" title=\"\" tg-width=\"554\" tg-height=\"1522\"/></p><p><strong>Turing Award winners Geoffrey Hinton and Yoshua Bengio, known as the \"godfathers\" of artificial intelligence, are also impressively listed.</strong></p><p><strong>There are also Chinese scholars on the list</strong>, including Zhang Yaqin, academician of the Chinese Academy of Engineering and president of the Intelligent Industry Research Institute (AIR) of Tsinghua University, Zeng Yi, director of the Artificial Intelligence Ethics and Governance Research Center of the Institute of Automation, Chinese Academy of Sciences, and Zhan Xianyuan, associate professor of Tsinghua University.</p><p>Other signatories include Microsoft CTO Kevin Scott, former UN High Representative for Disarmament Affairs Angela Kane, Skype co-founder Jaan Tallinn, and Quora CEO Adam D 'Angelo.</p><p>While the public has been acknowledging the risks of AI, there are more challenges to discuss, they said. 
They also want to \"create common knowledge for a growing number of experts and public figures who also take seriously some of the most serious risks that exist in the field of advanced AI.\"</p><p>It is worth mentioning that at present,<strong>Microsoft founder Bill Gates did not sign this joint letter.</strong></p><p><strong>\"Danger! Stop all large-scale AI research immediately\"</strong>Since the release of ChatGPT last year, the technology industry has set off a fiercely competitive AI race, and it has also aroused public fear of this unknown technology.</p><p>In March this year, technology giants such as Bengio, Musk, Apple co-founder Steve Wozniak, and Stability AI founder Emad Mostaque issued a joint open letter.<strong>Call for a suspension of training on artificial intelligence systems more powerful than GPT-4 for at least 6 months.</strong></p><p>The open letter emphasizes the rapid development of AI, but it is not planned and managed accordingly:</p><p><strong>\"In recent months, there has been an AI frenzy in artificial intelligence labs. They are frantically racing AI to develop and deploy increasingly powerful AI.</strong></p><p><strong>Unfortunately, so far, no one can understand, predict or reliably control AI systems, and there is no corresponding level of planning and management. \"</strong></p><p>The letter mentioned that now that artificial intelligence has become as competitive as humans in general tasks, we must ask ourselves:</p><p>\"Should non-human brains be developed so that they ultimately exceed human numbers, outperform human intelligence, eliminate and replace humans?</p><p><strong>Should we risk losing control of human civilization?</strong>”</p><p>The answer is no, the open letter states,<strong>A powerful AI system should only be developed if we are confident that its effects are positive and its risks are manageable.</strong></p><p>Therefore, the open letter calls on all artificial intelligence laboratories to immediately suspend training on artificial intelligence systems more powerful than GPT-4 for at least 6 months.</p><p>\"We should step back from the dangerous race … to let the most powerful model of today<strong>More accurate and safe</strong>。</p><p>Let's enjoy a long summer of AI instead of rushing into autumn unprepared. \"</p><p><strong>The Godfather of AI: Regretting his life's work, unable to stop the human AI war</strong>At the beginning of May, Geoffrey Hinton, a master of deep learning and godfather of artificial intelligence, suddenly announced his resignation from Google, and he was finally able to talk freely about the risks of AI.<br/></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/acc3dc261cb6d19c118db0dec4afd246\" title=\"\" tg-width=\"1080\" tg-height=\"274\"/></p><p>It is the deep concern about the risks of artificial intelligence that makes this deep learning giant bluntly say:<strong>\"I regret my life's work very much.\"</strong></p><p>In media reports, Hinton directly warned the world:<strong>Attention, there is danger ahead, there is danger ahead, there is danger ahead!</strong></p><p>In an interview, Hinton said, \"We have almost learned how computers improve themselves,<strong>This is dangerous, and we must seriously consider how to control it.</strong>”</p><p>He once pointed out that,<strong>The competition between Google and Microsoft, among others, will escalate into a global race. 
Without some sort of global regulation, this race will not stop!</strong></p><p><strong>He said he was worried that GPT-4 iterations would pose a threat to humans, and strong AI could learn unexpected behaviors from large amounts of data.</strong></p><p><strong>He worried that one day real autonomous weapons-those killing robots-would become a reality.</strong></p><p><p class=\"t-img-caption\"><img src=\"https://static.tigerbbs.com/4ed375d339a014b2b25985d64d12cce0\" title=\"图片来自《西部世界》\" tg-width=\"1080\" tg-height=\"607\"/><span>Image from Westworld</span></p><p>In the 1980s, Hinton was a professor of computer science at Carnegie Mellon University, but because he was unwilling to accept funding from the Pentagon, he chose to leave CMU and go to Canada.<strong>At the time, Hinton strongly opposed the use of AI on the battlefield, which he called robot soldiers.</strong></p><p>From artificial intelligence pioneer to doomsday prophet, Hinton's transformation may mark<strong>The technology industry is at its most important inflection point in decades.</strong></p><p></body></html></p>\n<div class=\"bt-text\">\n\n\n<p> source:<a href=\"https://mp.weixin.qq.com/s/9E5214u8xFF621wqbiUEdg\">华尔街见闻</a></p>\n\n\n</div>\n</article>\n</div>\n</body>\n</html>\n","type":0,"thumbnail":"https://static.tigerbbs.com/8e1f8714fd568c2c469a3deade788e7d","relate_stocks":{"IE00B7SZL793.SGD":"Legg Mason Royce - US Small Cap Opportunity A Acc SGD-H","IE00B19Z4B17.USD":"LEGG MASON ROYCE US SMALL CAP OPPORTUNITY \"A\" (USD) ACC","MSFT":"微软","BK4572":"航空租赁","IE00B66KJ199.SGD":"LEGG MASON ROYCE US SMALL CAP OPPORTUNITY \" A\" (SGD) ACC","BK4187":"航天航空与国防","IE0031619046.USD":"LEGG MASON ROYCE US SMALL CAP OPPORTUNITY \"A\" (USD) INC"},"source_url":"https://mp.weixin.qq.com/s/9E5214u8xFF621wqbiUEdg","is_english":false,"share_image_url":"https://static.laohu8.com/e9f99090a1c2ed51c021029395664489","article_id":"2339231977","content_text":"谷歌DeepMind和Anthropic CEO,以及图灵奖获得者都签署了这封联名信。英伟达市值一度超过万亿美元,AI风头正盛的背后,公众的担忧也越来越多,科技圈再次流出了一封重磅公开信。当地时间5月30日,非营利组织“人工智能安全中心”(Center for AI Safety)在其官网发布一封联名公开信,称AI这一堪比“疫情和核战争”的技术或对人类构成生存威胁。整个声明只有一句话,22个单词:“减轻人工智能带来的灭绝风险,应该与大流行病和核战争等其他社会规模的风险级别一样,成为一个全球性的优先事项。”拜登政府近几个月表示,人工智能对公共安全、隐私和民主构成威胁,但政府对其监管的权力有限。OpenAI创始人领衔,超350位AI大佬签名目前已有超350位AI大佬签署了上述联名公开信,包括OpenAI首席执行官Sam Altman、谷歌DeepMind首席执行官Demis Hassabis和Anthropic首席执行官Dario Amode三大AI公司掌门人等。图灵奖获得者、被誉为人工智能“教父”的Geoffrey Hinton和Yoshua Bengio也赫然在列。名单里也不乏中国学者的身影,包括中国工程院院士、清华大学智能产业研究院(AIR)院长张亚勤,中国科学院自动化研究所人工智能伦理与治理研究中心主任曾毅,清华大学副教授詹仙园。其他签署人还包括微软首席技术官Kevin Scott,前联合国裁军事务高级代表Angela Kane,Skype联合创始人Jaan Tallinn,Quora首席执行官Adam D'Angelo。他们表示,虽然公众一直在承认人工智能的风险,但还有更多挑战需要讨论。他们还想“为越来越多的专家和公众人物创造共同知识,他们也认真对待先进人工智能领域存在的一些最严重的风险”。值得一提的是,目前,微软创始人比尔·盖茨没有签署这一联名信。“危险!立刻停下所有大型AI研究”自去年ChatGPT发布以来,科技行业掀起了一场竞争激烈的AI竞赛,同时也引发公众对于这项未知技术的恐惧。今年3月份,Bengio、马斯克、苹果联合创始人Steve Wozniak、Stability AI创始人Emad Mostaque等科技大佬发表联名公开信,呼吁暂停比GPT-4更强大的人工智能系统的训练,暂停时间至少为6个月。公开信强调了AI在迅猛发展,但却没有对其进行相应的规划和管理:“最近几个月人工智能实验室掀起AI狂潮,它们疯狂地开展AI竞赛,开发和部署越来越强大的AI。不幸的是,目前为止,没有任何人能理解、预测或可靠地控制AI系统,也没有相应水平的规划和管理。”信中提到,如今人工智能在一般任务上变得与人类一样有竞争力,我们必须问自己:“是否应该开发非人类的大脑,使其最终超过人类数量,胜过人类的智慧,淘汰并取代人类?是否应该冒着失去对人类文明控制的风险?”答案是否定的,公开信指出,只有当我们确信强大的AI系统的效果是积极的,其风险是可控的,才应该开发。因此,公开信呼吁所有人工智能实验室立即暂停比GPT-4更强大的人工智能系统的训练,时间至少持续6个月。“我们应该从危险的竞赛中退后一步,……让如今最强大的模型更加准确、安全。让我们享受一个漫长的人工智能夏天,而不是毫无准备地匆忙进入秋天。”AI教父:痛悔毕生工作,无法阻止人类AI大战5月初,深度学习泰斗、人工智能教父Geoffrey 
Hinton突然宣布离职谷歌,终于可以畅所欲言地谈论AI的风险了。正是对人工智能风险深深地担忧,让这位深度学习巨佬直言:“我对自己的毕生工作,感到非常后悔。”在媒体报道中,Hinton直接警示世人:注意,前方有危险,前方有危险,前方有危险!在一次采访中,Hinton表示“我们几乎已经让学会计算机如何自我改进了,这很危险,我们必须认真考虑,如何控制它。”他曾指出,谷歌和微软,以及其他公司之间的竞争,将升级为一场全球性竞赛。如果没有某种全球性的监管,这场竞赛将不会停止!他表示自己很担忧,GPT-4迭代会对人类构成威胁,强AI从大量数据中能够学到意想不到的行为。他担心有一天真正的自主武器ーー那些杀人机器人ーー会成为现实。图片来自《西部世界》上世纪80年代,Hinton曾是卡内基梅隆大学的计算机科学教授,但因为不愿接受五角大楼的资助,他选择了离开CMU,前往加拿大。当时,Hinton强烈反对在战场上使用AI,他将其称为「机器人士兵」。从人工智能的开创者到末日预言者,Hinton的转变,也许标志着科技行业正处于几十年来最重要的一个拐点。","news_type":1,"symbols_score_info":{"MSFT":0.9}},"isVote":1,"tweetType":1,"viewCount":2311,"authorTweetTopStatus":1,"verified":2,"comments":[],"imageCount":0,"langContent":"EN","totalScore":0}],"hots":[{"id":421850501824712,"gmtCreate":1744011580656,"gmtModify":1744011585577,"author":{"id":"3570972159518527","authorId":"3570972159518527","name":"jiehuihuang","avatar":"https://static.tigerbbs.com/92d0548599d8f5f02e773bccc069f734","crmLevel":11,"crmLevelSwitch":1,"followedFlag":false,"authorIdStr":"3570972159518527","idStr":"3570972159518527"},"themes":[],"htmlText":"Haha time pass fast","listText":"Haha time pass fast","text":"Haha time pass fast","images":[{"img":"https://community-static.tradeup.com/news/4fd63d14e408d1d4151692a3b289dc42","width":"1125","height":"1476"}],"top":1,"highlighted":1,"essential":1,"paper":1,"likeSize":0,"commentSize":0,"repostSize":0,"link":"https://ttm.financial/post/421850501824712","isVote":1,"tweetType":1,"viewCount":960,"authorTweetTopStatus":1,"verified":2,"comments":[],"imageCount":1,"langContent":"EN","totalScore":0},{"id":182266852249744,"gmtCreate":1685508001249,"gmtModify":1685508007309,"author":{"id":"3570972159518527","authorId":"3570972159518527","name":"jiehuihuang","avatar":"https://static.tigerbbs.com/92d0548599d8f5f02e773bccc069f734","crmLevel":11,"crmLevelSwitch":1,"followedFlag":false,"authorIdStr":"3570972159518527","idStr":"3570972159518527"},"themes":[],"htmlText":"😎","listText":"😎","text":"😎","images":[],"top":1,"highlighted":1,"essential":1,"paper":1,"likeSize":0,"commentSize":0,"repostSize":0,"link":"https://ttm.financial/post/182266852249744","repostId":"2339231977","repostType":2,"isVote":1,"tweetType":1,"viewCount":2311,"authorTweetTopStatus":1,"verified":2,"comments":[],"imageCount":0,"langContent":"EN","totalScore":0}],"lives":[]}