CV with OpenVINO: face recognition + expression detection + pedestrian detection + facial landmark detection
For environment installation, see the previous post (link).
The following is based on the official OpenVINO models. The steps are largely the same on Windows 10 and Ubuntu, and a demo written for one runs the same way on the other. Below I run a few demos on Windows 10 and Ubuntu; feel free to follow along. Demo results:
[Figure: demo output screenshot]
I. Preparation
1. Make OpenVINO importable from your Python environment. Open the OpenVINO install directory, e.g. C:\Intel\openvino\python\python3.6, and copy the openvino folder it contains into the site-packages directory of your system Python, e.g. C:\Python36\Lib\site-packages. A quick import check (sketched below) confirms the copy is picked up.
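One way to verify the copy is a throwaway import from the target interpreter. This check is not part of the original post, just a minimal sketch assuming the 2019-era Python API (IENetwork/IEPlugin) used later in the demo:

# check_openvino.py -- sanity check that the copied package is the one being imported
import openvino.inference_engine as ie

print("openvino.inference_engine loaded from:", ie.__file__)
print("IENetwork available:", hasattr(ie, "IENetwork"), "| IEPlugin available:", hasattr(ie, "IEPlugin"))

If the import fails, the openvino folder was not copied into the site-packages of the interpreter you are actually running.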
2. Build the samples. In C:\Intel\openvino\deployment_tools\inference_engine\samples, run:
build_samples_msvc2017.bat
After the script finishes, the generated inference_engine_samples_build_2017 directory appears under C:\Users\kang\Documents\Intel\OpenVINO. The CPU extension library can be found inside that build directory (a short loading sketch follows):
cpu_extension = "C:/Users/kang/Documents/Intel/OpenVINO/inference_engine_samples_build_2017/intel64/Release/cpu_extension.dll"
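As a quick sanity check (a sketch using the paths recorded above; adjust them to your machine), the DLL is registered with the CPU plugin in exactly the way the appendix code does later:

import os
from openvino.inference_engine import IEPlugin

plugin_dir = "C:/Intel/openvino/deployment_tools/inference_engine/bin/intel64/Release"
cpu_extension = "C:/Users/kang/Documents/Intel/OpenVINO/inference_engine_samples_build_2017/intel64/Release/cpu_extension.dll"

# The DLL only exists after build_samples_msvc2017.bat has been run.
assert os.path.isfile(cpu_extension), "build the samples first so cpu_extension.dll exists"

plugin = IEPlugin(device="CPU", plugin_dirs=plugin_dir)  # CPU inference plugin
plugin.add_cpu_extension(cpu_extension)                  # register the extra CPU layers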
3. Download the two models and record where they are placed:
face-detection-adas-0001
landmarks-regression-retail-0009
Record the .xml and .bin paths for each:
model_xml = ""
model_bin = ""
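In the IR format the .bin weights file normally sits next to the .xml with the same base name, so one way to fill in the two variables (a sketch with placeholder paths; the exact paths used in this demo appear in the appendix) is:

import os

model_xml = "C:/path/to/face-detection-adas-0001.xml"             # placeholder: path recorded after download
model_bin = os.path.splitext(model_xml)[0] + ".bin"               # weights file next to the .xml

landmark_xml = "C:/path/to/landmarks-regression-retail-0009.xml"  # placeholder path
landmark_bin = os.path.splitext(landmark_xml)[0] + ".bin"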
II. Model parameters
Face detection (face-detection-adas-0001): based on MobileNet v1.
Input: [1x3x384x672] = BCHW.
Output: [1, 1, N, 7] = [image_id, label, conf, x_min, y_min, x_max, y_max].
Landmark extraction (landmarks-regression-retail-0009): a convolutional network that regresses 5 facial points.
Input: [1x3x48x48] = BCHW.
Output: [1x10x1x1] = the coordinates of the five points (x0, y0, x1, y1 ... x4, y4).
Python API, synchronous call (a small decoding sketch follows below):
Run inference: lm_exec_net.infer(inputs={"0": face_roi})
Fetch the result: landmark_res = lm_exec_net.requests[0].outputs[lm_out_blob]
Reshape to five points: landmark_res = np.reshape(landmark_res, (5, 2))
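To make the detector's output layout concrete, here is a minimal sketch of turning the [1, 1, N, 7] tensor into pixel-space boxes; res, frame_w and frame_h are assumed to come from the surrounding demo code:

def decode_detections(res, frame_w, frame_h, conf_threshold=0.5):
    # res has shape [1, 1, N, 7]; each row is
    # [image_id, label, conf, x_min, y_min, x_max, y_max] with coordinates normalized to [0, 1]
    boxes = []
    for image_id, label, conf, x_min, y_min, x_max, y_max in res[0][0]:
        if conf > conf_threshold:
            boxes.append((int(x_min * frame_w), int(y_min * frame_h),
                          int(x_max * frame_w), int(y_max * frame_h), float(conf)))
    return boxes

The appendix code below applies the same decoding inline and then crops each box as the input to the landmark network.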
III. Appendix: full code

import sys
import time
import logging as log

import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin

model_xml = "C:/Users/kang/Downloads/open_model_zoo-2019/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml"
model_bin = "C:/Users/kang/Downloads/open_model_zoo-2019/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.bin"
plugin_dir = "C:/Intel/openvino/deployment_tools/inference_engine/bin/intel64/Release"
cpu_extension = "C:/Users/kang/Documents/Intel/OpenVINO/inference_engine_samples_build_2017/intel64/Release/cpu_extension.dll"
landmark_xml = "C:/Users/kang/Downloads/open_model_zoo-2019/model_downloader/Retail/object_attributes/landmarks_regression/0009/dldt/landmarks-regression-retail-0009.xml"
landmark_bin = "C:/Users/kang/Downloads/open_model_zoo-2019/model_downloader/Retail/object_attributes/landmarks_regression/0009/dldt/landmarks-regression-retail-0009.bin"


def face_landmark_demo():
    log.basicConfig(format="[%(levelname)s] %(message)s", level=log.INFO, stream=sys.stdout)

    # Plugin initialization for the specified device; load the extensions library if given
    log.info("Initializing plugin for {} device...".format("CPU"))
    plugin = IEPlugin(device="CPU", plugin_dirs=plugin_dir)
    plugin.add_cpu_extension(cpu_extension)

    # Colour lookup table for the five landmark points (BGR)
    lut = [(0, 0, 255), (255, 0, 0), (0, 255, 0), (0, 255, 255), (255, 0, 255)]

    # Read the IR files for both networks
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    landmark_net = IENetwork(model=landmark_xml, weights=landmark_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n{}".format(
                plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters "
                      "using -l or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"

    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    lm_input_blob = next(iter(landmark_net.inputs))
    lm_out_blob = next(iter(landmark_net.outputs))

    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)   # two requests for async mode
    lm_exec_net = plugin.load(network=landmark_net)       # landmark net is called synchronously

    # Input shapes: face detector (n, c, h, w) and landmark net (nm, cm, hm, wm)
    n, c, h, w = net.inputs[input_blob].shape
    nm, cm, hm, wm = landmark_net.inputs[lm_input_blob].shape
    del net
    del landmark_net

    cap = cv2.VideoCapture("C:/Users/kang/Downloads/material/av77002671.mp4")

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")

    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()

    print("To close the application, press 'CTRL+C' or any key with focus on the output window")
    while cap.isOpened():
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break
        initial_w = cap.get(3)   # frame width
        initial_h = cap.get(4)   # frame height

        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})

        if exec_net.requests[cur_request_id].wait(-1) == 0:
            res = exec_net.requests[cur_request_id].outputs[out_blob]   # [1, 1, N, 7]
            for obj in res[0][0]:
                if obj[2] > 0.5:   # confidence threshold
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    if xmin > 0 and ymin > 0 and (xmax < initial_w) and (ymax < initial_h):
                        # Crop the face and run the landmark network synchronously on it
                        roi = frame[ymin:ymax, xmin:xmax, :]
                        rh, rw = roi.shape[:2]
                        face_roi = cv2.resize(roi, (wm, hm))
                        face_roi = face_roi.transpose((2, 0, 1))
                        face_roi = face_roi.reshape((nm, cm, hm, wm))
                        lm_exec_net.infer(inputs={'0': face_roi})
                        landmark_res = lm_exec_net.requests[0].outputs[lm_out_blob]
                        landmark_res = np.reshape(landmark_res, (5, 2))   # five (x, y) pairs in [0, 1]
                        for m in range(len(landmark_res)):
                            x = landmark_res[m][0] * rw
                            y = landmark_res[m][1] * rh
                            cv2.circle(roi, (np.int32(x), np.int32(y)), 3, lut[m], 2, 8, 0)
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2, 8, 0)

            inf_end = time.time()
            det_time = inf_end - inf_start

            # Draw performance stats
            inf_time_message = "Inference time: {:.3f} ms, FPS: {:.3f}".format(
                det_time * 1000, 1000 / (det_time * 1000 + 1))
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode \
                else "Async mode is off. Processing request {}".format(cur_request_id)
            cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        render_start = time.time()
        cv2.imshow("face detection", frame)
        render_end = time.time()
        render_time = render_end - render_start

        if is_async_mode:
            # Swap the two requests and show the frame that was just inferred
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:   # Esc
            break

    cv2.destroyAllWindows()
    del exec_net
    del lm_exec_net
    del plugin


if __name__ == '__main__':
    sys.exit(face_landmark_demo() or 0)

1. Test environment: