轻拔琴弦|CV,人脸识别+表情检测+行人检测+人脸关键点检测!Open( 二 )


ubuntu版本:18.04.1LTS
openvino版本:2020.1.023
模型文档链接:2.下载模型
进入open_model_zoo路径
cd /home/kang/open_model_zoo/tools/downloader
在模型列表中找到要下载的模型并下载:
./downloader.py --name person-vehicle-bike-detection-crossroad-0078
记录xml文件下载路径:
/home/kang/open_model_zoo/tools/downloader/intel/person-vehicle-bike-detection-crossroad-0078/FP32/person-vehicle-bike-detection-crossroad-0078.xml

3.编译
执行下列命令
cd /opt/intel/openvino/deployment_tools/inference_engine/demos
./build_demos.sh
轻拔琴弦|CV,人脸识别+表情检测+行人检测+人脸关键点检测!Open
文章图片
进入crossroad_camera_demo路径 , 执行make
cd ~/omz_demos_build/crossroad_camera_demo
make -j4
轻拔琴弦|CV,人脸识别+表情检测+行人检测+人脸关键点检测!Open
文章图片
3.运行
cd ~/omz_demos_build/intel64/Release
./crossroad_camera_demo -m /home/kang/open_model_zoo/tools/downloader/intel/person-vehicle-bike-detection-crossroad-0078/FP32/person-vehicle-bike-detection-crossroad-0078.xml -d CPU -i /home/kang/Downloads/test_data/pedestrian.png
得到结果和图像信息 。
轻拔琴弦|CV,人脸识别+表情检测+行人检测+人脸关键点检测!Open
文章图片
同样也可以将xml进行python运行 。
效果展示
轻拔琴弦|CV,人脸识别+表情检测+行人检测+人脸关键点检测!Open
文章图片
一、准备流程:
在python环境中加载openvino:打开openvino安装目录 , 如:C:\Intel\openvino\python\python3.6
把目录下的openvino文件夹复制到
系统的python环境安装目录下 , 如:C:\Python36\Lib\site-packages

2.编译
在C:\Intel\openvino\deployment_tools\inference_engine\samples路径下执行:
build_samples_msvc2017.bat
执行完后在
C:\Users\kang\Documents\Intel\OpenVINO目录
可以看到生成的
inference_engine_samples_build_2017文件目录
在build目录中也可以找到cpu_extension:
cpu_extension="C:\Users\kang\Documents\Intel\OpenVINO\inference_engine_samples_build_2017\intel64\Release\cpu_extension.dll"
下载模型 , 记录路径face-detection-adas-0001
emotions-recognition-retail-0003
model_xml=“”model_bin=“”
二、参数介绍:
人脸检测网络基于MobileNet v1版本
·输入格式:[1x3x384x672] = BCHW
·输出格式:[1, 1, N, 7] = [image_id, label, conf, x_min, y_min, x_max, y_max]
表情识别网络
·输入格式:[1x3x64x64] = BCHW
·输出格式:[1, 5, 1, 1]
·检测五种表情('neutral', 'happy', 'sad', 'surprise', 'anger')
python版本的api介绍:同步调用 , 执行输入
landmark_res = exec_emotions_net.infer(inputs={input_blob: [face_roi]})
获取输出
landmark_res = landmark_res['prob_emotion']
landmark_res = np.reshape(landmark_res, (5))
landmark_res = labels[np.argmax(landmark_res)]
代码:
import sys
import time
import logging as log

import cv2
import numpy as np
from openvino.inference_engine import IENetwork, IEPlugin

# Directory holding the inference-engine plugin binaries, and the CPU
# extension DLL built from the OpenVINO samples (implements layers the
# stock CPU plugin lacks).
plugin_dir = "C:/Intel/openvino/deployment_tools/inference_engine/bin/intel64/Release"
cpu_extension = "C:/Users/kang/Documents/Intel/OpenVINO/inference_engine_samples_build_2017/intel64/Release/cpu_extension.dll"

# face-detection-adas-0001: SSD face detector.
# Input [1x3x384x672] BCHW; output [1, 1, N, 7] where each row is
# [image_id, label, conf, x_min, y_min, x_max, y_max] (coords normalized 0..1).
model_xml = "C:/Users/kang/Downloads/openvino_sample_show/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.xml"
model_bin = "C:/Users/kang/Downloads/openvino_sample_show/open_model_zoo/model_downloader/Transportation/object_detection/face/pruned_mobilenet_reduced_ssd_shared_weights/dldt/face-detection-adas-0001.bin"

# emotions-recognition-retail-0003: 5-way emotion classifier.
# Input [1x3x64x64] BCHW; output [1, 5, 1, 1] scores over `labels`.
emotions_xml = "C:/Users/kang/Downloads/openvino_sample_show/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.xml"
emotions_bin = "C:/Users/kang/Downloads/openvino_sample_show/open_model_zoo/model_downloader/Retail/object_attributes/emotions_recognition/0003/dldt/emotions-recognition-retail-0003.bin"

# Index-aligned with the emotion network's output channels.
labels = ['neutral', 'happy', 'sad', 'surprise', 'anger']


def face_emotions_demo():
    """Detect faces in a video and classify each detected face's emotion.

    Runs face-detection-adas-0001 asynchronously (two in-flight requests,
    ping-ponged between frames) and, for every detection with confidence
    > 0.5 that lies fully inside the frame, crops the ROI and classifies
    it synchronously with emotions-recognition-retail-0003. Boxes, labels
    and timing stats are drawn on each frame and shown in an OpenCV window
    until the video ends or Esc is pressed.
    """
    log.basicConfig(format="[%(levelname)s] %(message)s", level=log.INFO, stream=sys.stdout)

    # Plugin initialization for the CPU device + CPU extensions library.
    log.info("Initializing plugin for %s device...", "CPU")
    plugin = IEPlugin(device="CPU", plugin_dirs=plugin_dir)
    plugin.add_cpu_extension(cpu_extension)

    # Read both IRs (topology .xml + weights .bin).
    log.info("Reading IR...")
    net = IENetwork(model=model_xml, weights=model_bin)
    emotions_net = IENetwork(model=emotions_xml, weights=emotions_bin)

    # Bail out early if the detector contains layers the plugin cannot run.
    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
                      plugin.device, ', '.join(not_supported_layers))
            log.error("Please try to specify cpu extensions library path in demo's command line "
                      "parameters using -l or --cpu_extension command line argument")
            sys.exit(1)

    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    em_input_blob = next(iter(emotions_net.inputs))
    em_out_blob = next(iter(emotions_net.outputs))

    log.info("Loading IR to the plugin...")
    # The detector runs asynchronously with two requests; the emotion net
    # is only ever called synchronously, so its default single request is
    # enough. (Removed the dead local `num_requests` the original carried.)
    exec_net = plugin.load(network=net, num_requests=2)
    exec_emotions_net = plugin.load(network=emotions_net)

    # Input geometries (N, C, H, W) for both networks.
    n, c, h, w = net.inputs[input_blob].shape
    en, ec, eh, ew = emotions_net.inputs[em_input_blob].shape
    del net
    del emotions_net

    cap = cv2.VideoCapture("C:/Users/kang/Downloads/openvino_sample_show/material/face_detection_demo.mp4")
    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()

    print("To close the application, press 'CTRL+C' or any key with focus on the output window")
    while cap.isOpened():
        # In async mode we submit the *next* frame while the result for
        # the current one is still being computed.
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break
        initial_w = cap.get(3)  # CAP_PROP_FRAME_WIDTH
        initial_h = cap.get(4)  # CAP_PROP_FRAME_HEIGHT

        inf_start = time.time()
        target = next_frame if is_async_mode else frame
        request_id = next_request_id if is_async_mode else cur_request_id
        # HWC frame -> resized -> CHW -> NCHW, as the detector expects.
        in_frame = cv2.resize(target, (w, h))
        in_frame = in_frame.transpose((2, 0, 1))
        in_frame = in_frame.reshape((n, c, h, w))
        exec_net.start_async(request_id=request_id, inputs={input_blob: in_frame})

        if exec_net.requests[cur_request_id].wait(-1) == 0:
            # res: [1, 1, N, 7], each row =
            # [image_id, label, conf, x_min, y_min, x_max, y_max]
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            for obj in res[0][0]:
                if obj[2] > 0.5:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    # Skip boxes touching the border: the crop below needs
                    # a ROI fully inside the frame.
                    if xmin > 0 and ymin > 0 and (xmax < initial_w) and (ymax < initial_h):
                        roi = frame[ymin:ymax, xmin:xmax, :]
                        face_roi = cv2.resize(roi, (ew, eh))
                        face_roi = face_roi.transpose((2, 0, 1))
                        face_roi = face_roi.reshape((en, ec, eh, ew))
                        # BUG FIX: feed the emotion net through ITS OWN
                        # input blob (em_input_blob) — the original used the
                        # detector's input_blob, whose name does not exist on
                        # this network — and read its own output blob rather
                        # than the hard-coded 'prob_emotion' layer name.
                        emotions_res = exec_emotions_net.infer(inputs={em_input_blob: [face_roi]})
                        emotions_res = emotions_res[em_out_blob]
                        emotions_res = np.reshape(emotions_res, (5))
                        emotion_label = labels[np.argmax(emotions_res)]
                        cv2.putText(frame, emotion_label, (np.int32(xmin), np.int32(ymin)),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), 2)
                        # Draw the face box once (the original drew it twice).
                        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2, 8, 0)

            inf_end = time.time()
            det_time = inf_end - inf_start

            # Draw performance stats.
            inf_time_message = "Inference time: {:.3f} ms, FPS: {:.3f}".format(
                det_time * 1000, 1000 / (det_time * 1000 + 1))
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            async_mode_message = ("Async mode is on. Processing request {}".format(cur_request_id)
                                  if is_async_mode else
                                  "Async mode is off. Processing request {}".format(cur_request_id))
            cv2.putText(frame, inf_time_message, (15, 15),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)

        render_start = time.time()
        cv2.imshow("face emotions demo", frame)
        render_end = time.time()
        render_time = render_end - render_start

        # Ping-pong the two requests and promote the prefetched frame.
        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:  # Esc
            break

    cv2.destroyAllWindows()
    del exec_net
    del exec_emotions_net
    del plugin


if __name__ == '__main__':
    sys.exit(face_emotions_demo() or 0)

看不懂就对了 , 想获取更多视频教程源码私信小编01


推荐阅读