A GPU vs. CPU comparison program based on OpenCV (PSNR and SSIM similarity checks); the code comes from the OpenCV documentation tutorial.

 

Original tutorial link:

http://www.opencv.org.cn/opencvdoc/2.3.2/html/doc/tutorials/gpu/gpu-basics-similarity/gpu-basics-similarity.html

 

The code has problems: OpenCV does not overload the arithmetic operators for GpuMat, so the expressions that use them fail to compile. GpuMat arithmetic has to go through the corresponding gpu:: functions (gpu::add, gpu::subtract, gpu::multiply, gpu::divide) instead. Rewriting all of it got tedious, so I left the later parts commented out rather than converting them.
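For example, an expression such as dst = 2*a.mul(b) + C has to be spelled out as a sequence of gpu:: calls. The snippet below is a minimal sketch of that pattern (my own illustration, not part of the tutorial). It assumes single-channel CV_32F GpuMats, like the per-channel images produced by gpu::split in the SSIM code, and it relies on the Scalar overloads of gpu::multiply / gpu::add documented for the 2.x gpu module; the helper name is made up.

#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>

using namespace cv;

// Sketch only: dst = 2 * a.mul(b) + C expressed with gpu:: functions,
// since GpuMat provides no +, -, * operators. Assumes single-channel
// CV_32F inputs and the Scalar overloads of gpu::multiply / gpu::add.
void twoABplusC(const gpu::GpuMat& a, const gpu::GpuMat& b,
                float C, gpu::GpuMat& dst)
{
    gpu::multiply(a, b, dst);                  // dst = a .* b
    gpu::multiply(dst, Scalar::all(2), dst);   // dst = 2 * (a .* b)
    gpu::add(dst, Scalar::all(C), dst);        // dst = 2 * (a .* b) + C
}

The getMSSIM_GPU_optimized function at the bottom of the listing uses exactly this style, with an extra gpu::Stream argument so the calls are queued asynchronously.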

 

 

 

<span style="font-size:18px;">// PSNR.cpp : 定义控制台应用程序的入口点。
//#include "stdafx.h"#include <iostream>                   // Console I/O
#include <sstream>                    // String to number conversion#include <opencv2/core/core.hpp>      // Basic OpenCV structures
#include <opencv2/imgproc/imgproc.hpp>// Image processing methods for the CPU
#include <opencv2/highgui/highgui.hpp>// Read images
#include <opencv2/gpu/gpu.hpp>        // GPU structures and methodsusing namespace std;
using namespace cv;

double getPSNR(const Mat& I1, const Mat& I2);       // CPU versions
Scalar getMSSIM(const Mat& I1, const Mat& I2);

double getPSNR_GPU(const Mat& I1, const Mat& I2);   // Basic GPU versions
Scalar getMSSIM_GPU(const Mat& I1, const Mat& I2);

struct BufferPSNR                                   // Optimized GPU versions
{   // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once, reuse later.
    gpu::GpuMat gI1, gI2, gs, t1, t2;
    gpu::GpuMat buf;
};
double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b);

struct BufferMSSIM                                  // Optimized GPU versions
{   // Data allocations are very expensive on GPU. Use a buffer to solve: allocate once, reuse later.
    gpu::GpuMat gI1, gI2, gs, t1, t2;
    gpu::GpuMat I1_2, I2_2, I1_I2;
    vector<gpu::GpuMat> vI1, vI2;
    gpu::GpuMat mu1, mu2;
    gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
    gpu::GpuMat sigma1_2, sigma2_2, sigma12;
    gpu::GpuMat t3;
    gpu::GpuMat ssim_map;
    gpu::GpuMat buf;
};
Scalar getMSSIM_GPU_optimized(const Mat& i1, const Mat& i2, BufferMSSIM& b);

void help()
{
    cout
        << "\n--------------------------------------------------------------------------" << endl
        << "This program shows how to port your CPU code to GPU or write that from scratch." << endl
        << "You can see the performance improvement for the similarity check methods (PSNR and SSIM)." << endl
        << "Usage:" << endl
        << "./gpu-basics-similarity referenceImage comparedImage numberOfTimesToRunTest(like 10)." << endl
        << "--------------------------------------------------------------------------" << endl
        << endl;
}
int main(int argc, char *argv[])
{
    help();

    Mat I1 = imread("swan1.jpg", 1);          // Read the two images
    Mat I2 = imread("swan2.jpg", 1);

    if (!I1.data || !I2.data)                 // Check for success
    {
        cout << "Couldn't read the image";
        return 0;
    }

    BufferPSNR  bufferPSNR;
    BufferMSSIM bufferMSSIM;

    int TIMES;
    stringstream sstr("500");
    sstr >> TIMES;
    double time, result;

    //------------------------------- PSNR CPU ----------------------------------------------------
    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        result = getPSNR(I1, I2);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of PSNR CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
         << " With result of: " << result << endl;

    //------------------------------- PSNR GPU ----------------------------------------------------
    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        result = getPSNR_GPU(I1, I2);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of PSNR GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
         << " With result of: " << result << endl;

    /*
    //------------------------------- PSNR GPU Optimized--------------------------------------------
    time = (double)getTickCount();                                  // Initial call
    result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    cout << "Initial call GPU optimized:              " << time << " milliseconds."
         << " With result of: " << result << endl;

    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        result = getPSNR_GPU_optimized(I1, I2, bufferPSNR);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of PSNR GPU OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
         << " With result of: " << result << endl << endl;

    //------------------------------- SSIM CPU -----------------------------------------------------
    Scalar x;
    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        x = getMSSIM(I1, I2);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of MSSIM CPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
         << " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;

    //------------------------------- SSIM GPU -----------------------------------------------------
    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        x = getMSSIM_GPU(I1, I2);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of MSSIM GPU (averaged for " << TIMES << " runs): " << time << " milliseconds."
         << " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;

    //------------------------------- SSIM GPU Optimized--------------------------------------------
    time = (double)getTickCount();
    x = getMSSIM_GPU_optimized(I1, I2, bufferMSSIM);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    cout << "Time of MSSIM GPU Initial Call            " << time << " milliseconds."
         << " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl;

    time = (double)getTickCount();
    for (int i = 0; i < TIMES; ++i)
        x = getMSSIM_GPU_optimized(I1, I2, bufferMSSIM);
    time = 1000 * ((double)getTickCount() - time) / getTickFrequency();
    time /= TIMES;
    cout << "Time of MSSIM GPU OPTIMIZED ( / " << TIMES << " runs): " << time << " milliseconds."
         << " With result of B" << x.val[0] << " G" << x.val[1] << " R" << x.val[2] << endl << endl;

    return 0;
    */

    getchar();
}

double getPSNR(const Mat& I1, const Mat& I2)
{
    Mat s1;
    absdiff(I1, I2, s1);         // |I1 - I2|
    s1.convertTo(s1, CV_32F);    // cannot make a square on 8 bits
    s1 = s1.mul(s1);             // |I1 - I2|^2

    Scalar s = sum(s1);          // sum elements per channel
    double sse = s.val[0] + s.val[1] + s.val[2]; // sum channels

    if (sse <= 1e-10)            // for small values return zero
        return 0;
    else
    {
        double mse  = sse / (double)(I1.channels() * I1.total());
        double psnr = 10.0 * log10((255 * 255) / mse);
        return psnr;
    }
}

double getPSNR_GPU_optimized(const Mat& I1, const Mat& I2, BufferPSNR& b)
{
    b.gI1.upload(I1);
    b.gI2.upload(I2);

    b.gI1.convertTo(b.t1, CV_32F);
    b.gI2.convertTo(b.t2, CV_32F);

    gpu::absdiff(b.t1.reshape(1), b.t2.reshape(1), b.gs);
    gpu::multiply(b.gs, b.gs, b.gs);

    double sse = gpu::sum(b.gs, b.buf)[0];

    if (sse <= 1e-10)            // for small values return zero
        return 0;
    else
    {
        double mse  = sse / (double)(I1.channels() * I1.total());
        double psnr = 10.0 * log10((255 * 255) / mse);
        return psnr;
    }
}

double getPSNR_GPU(const Mat& I1, const Mat& I2)
{
    gpu::GpuMat gI1, gI2, gs, t1, t2;

    gI1.upload(I1);
    gI2.upload(I2);

    gI1.convertTo(t1, CV_32F);
    gI2.convertTo(t2, CV_32F);

    gpu::absdiff(t1.reshape(1), t2.reshape(1), gs);
    gpu::multiply(gs, gs, gs);

    Scalar s = gpu::sum(gs);
    double sse = s.val[0] + s.val[1] + s.val[2];

    if (sse <= 1e-10)            // for small values return zero
        return 0;
    else
    {
        double mse  = sse / (double)(gI1.channels() * I1.total());
        double psnr = 10.0 * log10((255 * 255) / mse);
        return psnr;
    }
}

Scalar getMSSIM(const Mat& i1, const Mat& i2)
{
    const double C1 = 6.5025, C2 = 58.5225;
    /***************************** INITS **********************************/
    int d = CV_32F;

    Mat I1, I2;
    i1.convertTo(I1, d);            // cannot calculate on one byte large values
    i2.convertTo(I2, d);

    Mat I2_2  = I2.mul(I2);         // I2^2
    Mat I1_2  = I1.mul(I1);         // I1^2
    Mat I1_I2 = I1.mul(I2);         // I1 * I2
    /*************************** END INITS **********************************/

    Mat mu1, mu2;                   // PRELIMINARY COMPUTING
    GaussianBlur(I1, mu1, Size(11, 11), 1.5);
    GaussianBlur(I2, mu2, Size(11, 11), 1.5);

    Mat mu1_2   = mu1.mul(mu1);
    Mat mu2_2   = mu2.mul(mu2);
    Mat mu1_mu2 = mu1.mul(mu2);

    Mat sigma1_2, sigma2_2, sigma12;

    GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
    sigma1_2 -= mu1_2;

    GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
    sigma2_2 -= mu2_2;

    GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
    sigma12 -= mu1_mu2;

    ///////////////////////////////// FORMULA ////////////////////////////////
    Mat t1, t2, t3;

    t1 = 2 * mu1_mu2 + C1;
    t2 = 2 * sigma12 + C2;
    t3 = t1.mul(t2);                // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))

    t1 = mu1_2 + mu2_2 + C1;
    t2 = sigma1_2 + sigma2_2 + C2;
    t1 = t1.mul(t2);                // t1 = ((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))

    Mat ssim_map;
    divide(t3, t1, ssim_map);       // ssim_map = t3./t1;

    Scalar mssim = mean(ssim_map);  // mssim = average of ssim map
    return mssim;
}

Scalar getMSSIM_GPU(const Mat& i1, const Mat& i2)
{
    const float C1 = 6.5025f, C2 = 58.5225f;
    /***************************** INITS **********************************/
    gpu::GpuMat gI1, gI2, gs1, t1, t2;

    gI1.upload(i1);
    gI2.upload(i2);

    gI1.convertTo(t1, CV_MAKE_TYPE(CV_32F, gI1.channels()));
    gI2.convertTo(t2, CV_MAKE_TYPE(CV_32F, gI2.channels()));

    vector<gpu::GpuMat> vI1, vI2;
    gpu::split(t1, vI1);
    gpu::split(t2, vI2);
    Scalar mssim;

    for (int i = 0; i < gI1.channels(); ++i)
    {
        gpu::GpuMat I2_2, I1_2, I1_I2;

        gpu::multiply(vI2[i], vI2[i], I2_2);        // I2^2
        gpu::multiply(vI1[i], vI1[i], I1_2);        // I1^2
        gpu::multiply(vI1[i], vI2[i], I1_I2);       // I1 * I2
        /*************************** END INITS **********************************/

        gpu::GpuMat mu1, mu2;                       // PRELIMINARY COMPUTING
        gpu::GaussianBlur(vI1[i], mu1, Size(11, 11), 1.5);
        gpu::GaussianBlur(vI2[i], mu2, Size(11, 11), 1.5);

        gpu::GpuMat mu1_2, mu2_2, mu1_mu2;
        gpu::multiply(mu1, mu1, mu1_2);
        gpu::multiply(mu2, mu2, mu2_2);
        gpu::multiply(mu1, mu2, mu1_mu2);

        gpu::GpuMat sigma1_2, sigma2_2, sigma12;

        gpu::GaussianBlur(I1_2, sigma1_2, Size(11, 11), 1.5);
        //sigma1_2 = sigma1_2 - mu1_2;
        gpu::subtract(sigma1_2, mu1_2, sigma1_2);

        gpu::GaussianBlur(I2_2, sigma2_2, Size(11, 11), 1.5);
        //sigma2_2 = sigma2_2 - mu2_2;

        gpu::GaussianBlur(I1_I2, sigma12, Size(11, 11), 1.5);
        (Mat)sigma12 = (Mat)sigma12 - (Mat)mu1_mu2;  //sigma12 = sigma12 - mu1_mu2

        ///////////////////////////////// FORMULA ////////////////////////////////
        gpu::GpuMat t1, t2, t3;

//      t1 = 2 * mu1_mu2 + C1;
//      t2 = 2 * sigma12 + C2;
//      gpu::multiply(t1, t2, t3);     // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
//
//      t1 = mu1_2 + mu2_2 + C1;
//      t2 = sigma1_2 + sigma2_2 + C2;
//      gpu::multiply(t1, t2, t1);     // t1 = ((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))

        gpu::GpuMat ssim_map;
        gpu::divide(t3, t1, ssim_map);              // ssim_map = t3./t1;

        Scalar s = gpu::sum(ssim_map);
        mssim.val[i] = s.val[0] / (ssim_map.rows * ssim_map.cols);
    }
    return mssim;
}

Scalar getMSSIM_GPU_optimized(const Mat& i1, const Mat& i2, BufferMSSIM& b)
{
    int cn = i1.channels();
    const float C1 = 6.5025f, C2 = 58.5225f;
    /***************************** INITS **********************************/

    b.gI1.upload(i1);
    b.gI2.upload(i2);

    gpu::Stream stream;

    stream.enqueueConvert(b.gI1, b.t1, CV_32F);
    stream.enqueueConvert(b.gI2, b.t2, CV_32F);

    gpu::split(b.t1, b.vI1, stream);
    gpu::split(b.t2, b.vI2, stream);
    Scalar mssim;

    for (int i = 0; i < b.gI1.channels(); ++i)
    {
        gpu::multiply(b.vI2[i], b.vI2[i], b.I2_2, stream);        // I2^2
        gpu::multiply(b.vI1[i], b.vI1[i], b.I1_2, stream);        // I1^2
        gpu::multiply(b.vI1[i], b.vI2[i], b.I1_I2, stream);       // I1 * I2

        //gpu::GaussianBlur(b.vI1[i], b.mu1, Size(11, 11), 1.5, 0, BORDER_DEFAULT, -1, stream);
        //gpu::GaussianBlur(b.vI2[i], b.mu2, Size(11, 11), 1.5, 0, BORDER_DEFAULT, -1, stream);

        gpu::multiply(b.mu1, b.mu1, b.mu1_2, stream);
        gpu::multiply(b.mu2, b.mu2, b.mu2_2, stream);
        gpu::multiply(b.mu1, b.mu2, b.mu1_mu2, stream);

        //gpu::GaussianBlur(b.I1_2, b.sigma1_2, Size(11, 11), 1.5, 0, BORDER_DEFAULT, -1, stream);
        //gpu::subtract(b.sigma1_2, b.mu1_2, b.sigma1_2, stream);
        //b.sigma1_2 -= b.mu1_2;  - This would result in an extra data transfer operation

        //gpu::GaussianBlur(b.I2_2, b.sigma2_2, Size(11, 11), 1.5, 0, BORDER_DEFAULT, -1, stream);
        //gpu::subtract(b.sigma2_2, b.mu2_2, b.sigma2_2, stream);
        //b.sigma2_2 -= b.mu2_2;

        //gpu::GaussianBlur(b.I1_I2, b.sigma12, Size(11, 11), 1.5, 0, BORDER_DEFAULT, -1, stream);
        //gpu::subtract(b.sigma12, b.mu1_mu2, b.sigma12, stream);
        //b.sigma12 -= b.mu1_mu2;

        //here too it would be an extra data transfer due to call of operator*(Scalar, Mat)
        gpu::multiply(b.mu1_mu2, 2, b.t1, stream);   //b.t1 = 2 * b.mu1_mu2 + C1;
        //gpu::add(b.t1, C1, b.t1, stream);
        gpu::multiply(b.sigma12, 2, b.t2, stream);   //b.t2 = 2 * b.sigma12 + C2;
        //gpu::add(b.t2, C2, b.t2, stream);

        gpu::multiply(b.t1, b.t2, b.t3, stream);     // t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))

        //gpu::add(b.mu1_2, b.mu2_2, b.t1, stream);
        //gpu::add(b.t1, C1, b.t1, stream);
        //gpu::add(b.sigma1_2, b.sigma2_2, b.t2, stream);
        //gpu::add(b.t2, C2, b.t2, stream);

        gpu::multiply(b.t1, b.t2, b.t1, stream);     // t1 = ((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))

        gpu::divide(b.t3, b.t1, b.ssim_map, stream); // ssim_map = t3./t1;

        stream.waitForCompletion();

        Scalar s = gpu::sum(b.ssim_map, b.buf);
        mssim.val[i] = s.val[0] / (b.ssim_map.rows * b.ssim_map.cols);
    }
    return mssim;
}
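For completeness, the FORMULA section that stays commented out inside getMSSIM_GPU could be filled in along the same lines. The helper below is only a sketch of that rewrite (the function name is mine, and it assumes the same headers plus the Scalar overloads of gpu::add / gpu::multiply as in the earlier snippet, so double-check against your OpenCV build). The two subtractions still missing in that function (sigma2_2 and sigma12) can be handled with gpu::subtract, exactly like the sigma1_2 line already is.

// Sketch: the commented-out FORMULA block of getMSSIM_GPU rewritten with
// gpu:: function calls. All inputs are the single-channel CV_32F maps that
// are already computed inside the per-channel loop of getMSSIM_GPU.
static void computeSSIMMap(const gpu::GpuMat& mu1_2,    const gpu::GpuMat& mu2_2,
                           const gpu::GpuMat& mu1_mu2,
                           const gpu::GpuMat& sigma1_2, const gpu::GpuMat& sigma2_2,
                           const gpu::GpuMat& sigma12,
                           float C1, float C2, gpu::GpuMat& ssim_map)
{
    gpu::GpuMat t1, t2, t3;

    gpu::multiply(mu1_mu2, Scalar::all(2), t1);
    gpu::add(t1, Scalar::all(C1), t1);            // t1 = 2*mu1_mu2 + C1
    gpu::multiply(sigma12, Scalar::all(2), t2);
    gpu::add(t2, Scalar::all(C2), t2);            // t2 = 2*sigma12 + C2
    gpu::multiply(t1, t2, t3);                    // t3 = (2*mu1_mu2 + C1).*(2*sigma12 + C2)

    gpu::add(mu1_2, mu2_2, t1);
    gpu::add(t1, Scalar::all(C1), t1);            // t1 = mu1_2 + mu2_2 + C1
    gpu::add(sigma1_2, sigma2_2, t2);
    gpu::add(t2, Scalar::all(C2), t2);            // t2 = sigma1_2 + sigma2_2 + C2
    gpu::multiply(t1, t2, t1);                    // t1 = (mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2)

    gpu::divide(t3, t1, ssim_map);                // ssim_map = t3 ./ t1
}

Inside the per-channel loop, the existing gpu::divide line would then be replaced by a call such as computeSSIMMap(mu1_2, mu2_2, mu1_mu2, sigma1_2, sigma2_2, sigma12, C1, C2, ssim_map); before the gpu::sum / averaging step.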


 

 

Result: (the console-output screenshot is in the original post linked below)

Reposted from: https://www.cnblogs.com/wuyida/p/6301427.html
