This article references the following blogs:
https://blog.csdn.net/c20081052/article/details/95082377
https://blog.csdn.net/pengpengloveqiaoqiao/article/details/89487049
Note: an infrared filter must be attached to the camera lens to filter out visible light.
Demo: most visible wavelengths are already filtered out. When a hand moves back and forth in front of the lens, the red box marks the position of the detected moving object. (On the Hikvision camera the filter does not seem to block visible light completely, but it does remove the projector or screen image, which is enough for this use case.)
1. Converting the Hikvision C++ SDK into a Python SDK (already done for this article)
For how to convert the Hikvision C++ SDK into a Python-usable SDK, see the article by blogger ciky奇: https://blog.csdn.net/c20081052/article/details/95082377
If the final step (calling it from Python) reports an error, try copying all of the related files into the Python project's directory.
Environment:
1. Windows 10, 64-bit
2. OpenCV
3. Python 3
4. Hikvision network camera 64-bit C++ SDK
https://www.hikvision.com/cn/download_61.html
Download SDK_Win64
5. Other libraries:
5.1 opencv-python
5.2 numpy
5.3 multiprocessing (part of the Python standard library; used for multi-process capture)
The Hikvision C++ SDK has already been converted into a Python SDK for this article and can be downloaded and used directly; the link is shared at the end of the article.
If importing HKIPcamera keeps failing with a "module not found" error, first make sure Python and OpenCV are installed and the environment variables are set correctly. If it still fails, the cause is usually missing VC++ runtime components. Two options:
1. Download the VC++ runtime components from the Microsoft website (this package also resolves many obscure problems in other projects).
Official link: https://visualstudio.microsoft.com/zh-hans/downloads/
2. Download the DirectX Repair tool.
Link: https://pan.baidu.com/s/1thfI2SyCpQoToBxuwFnxuA
Extraction code: 8aza
After opening the tool, click Tools -> Options -> Extension -> Start Extension.
When the extension finishes, the option to also update the C++ runtime is selected by default. Click "Detect and repair": missing components are installed automatically, and installed but outdated ones are updated. When the repair is done, try running the Python program again.
If the problem still persists, go back to https://blog.csdn.net/c20081052/article/details/95082377 and redo the steps one by one.
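A quick way to confirm that the converted module and its DLLs can actually be found is a minimal import check (a sketch; it only verifies that HKIPcamera loads, not that a camera is reachable):

import sys

try:
    import HKIPcamera  # the converted Hikvision SDK module
    print('HKIPcamera loaded from:', getattr(HKIPcamera, '__file__', '<built-in>'))
except ImportError as e:
    # Typical causes: the .pyd/.dll files are not in the project directory,
    # or the VC++ runtime components are missing.
    print('Failed to import HKIPcamera:', e)
    sys.exit(1)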
2. Calling the Hikvision Python SDK
import cv2
import numpy as np
import HKIPcamera
def get_cam_frame(ip, account, password):
    ip = str(ip)          # camera IP address; must be on the same LAN as this machine
    name = str(account)   # admin user name
    pw = str(password)    # admin password
    HKIPcamera.init(ip, name, pw)
    while True:
        raw = HKIPcamera.getframe()   # grab the current frame from the SDK
        frame = np.array(raw)         # convert it to a numpy array for OpenCV
        cv2.imshow('frame', frame)
        c = cv2.waitKey(10)
        if c == 27:                   # ESC to quit
            break
    HKIPcamera.release()

if __name__ == '__main__':
    get_cam_frame('192.168.1.64', 'admin', '123456')
3. Using multiple Hikvision cameras
Using multiple cameras requires multiprocessing; multithreading does not work.
Building on the code from the previous step, modify it as follows:
import cv2
import numpy as np
import HKIPcamera
from multiprocessing import Process, Queue
def get_cam_frame(frameName, ip, account, password):
    ip = str(ip)          # camera IP address; must be on the same LAN as this machine
    name = str(account)   # admin user name
    pw = str(password)    # admin password
    HKIPcamera.init(ip, name, pw)
    while True:
        raw = HKIPcamera.getframe()
        frame = np.array(raw)
        cv2.imshow(frameName, frame)
        c = cv2.waitKey(10)
        if c == 27:       # ESC to quit
            break
    HKIPcamera.release()

if __name__ == '__main__':
    p0 = Process(target=get_cam_frame, args=('frame0', '192.168.1.64', 'admin', '123456',))
    p0.start()
    p1 = Process(target=get_cam_frame, args=('frame1', '192.168.1.65', 'admin', '123456',))
    p1.start()
    # and so on: p2, p3 ... pn (see the sketch after this listing)
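For more than two cameras, rather than writing out p2, p3, ... by hand, the processes can be spawned from a list. A minimal sketch that replaces the __main__ block above and reuses get_cam_frame from that listing (the extra IP addresses are placeholders):

if __name__ == '__main__':
    # Placeholder IPs; replace with the addresses of your cameras.
    camera_ips = ['192.168.1.64', '192.168.1.65', '192.168.1.66']
    processes = []
    for i, cam_ip in enumerate(camera_ips):
        p = Process(target=get_cam_frame,
                    args=('frame%d' % i, cam_ip, 'admin', '123456'))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()   # wait for every capture process to exit (each one exits on ESC)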
4. Stitching multiple camera feeds into one image
Modify the code above again:
import cv2
import numpy as np
import HKIPcamera
from multiprocessing import Process, Queue
import os
import signal
def get_cam_frame(frameName, ip, account, password, n):
    ip = str(ip)          # camera IP address; must be on the same LAN as this machine
    name = str(account)   # admin user name
    pw = str(password)    # admin password
    HKIPcamera.init(ip, name, pw)
    while True:
        raw = HKIPcamera.getframe()
        frame = np.array(raw)
        # Each camera frame is 1920x1080; stitching several of them would make the
        # combined image very large, so resize each camera's frame first.
        frame = cv2.resize(frame, (768, 540), interpolation=cv2.INTER_CUBIC)
        n.put(frame)              # hand the frame to the main process via the queue
        cv2.imshow(frameName, frame)
        c = cv2.waitKey(10)
        if c == 27:               # ESC to quit
            break
    HKIPcamera.release()

if __name__ == '__main__':
    q0 = Queue(3)
    p0 = Process(target=get_cam_frame, args=('frame0', '192.168.1.64', 'admin', '123456', q0,))
    p0.start()
    q1 = Queue(3)
    p1 = Process(target=get_cam_frame, args=('frame1', '192.168.1.65', 'admin', '123456', q1,))
    p1.start()
    while True:
        if p0.is_alive() and p1.is_alive():
            frame0 = q0.get()
            frame1 = q1.get()
            # stitch horizontally
            full_frame = np.hstack([frame0, frame1])
            # stitch vertically
            # full_frame = np.vstack([frame0, frame1])
            cv2.imshow('full_frame', full_frame)
            c = cv2.waitKey(10)
            if c == 27:           # ESC: stop the capture processes and exit
                os.kill(p0.pid, signal.SIGTERM)
                os.kill(p1.pid, signal.SIGTERM)
                break
5. Applying a perspective transform to each camera
If you are not familiar with the parameters of the OpenCV-Python perspective transform functions, first read the introduction to them in this blog:
https://blog.csdn.net/wsp_1138886114/article/details/83374333
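Before the full listing, here is a minimal, self-contained sketch of the two OpenCV calls used below, cv2.getPerspectiveTransform and cv2.warpPerspective, applied to a synthetic image (the corner coordinates are made-up values):

import cv2
import numpy as np

# A blank 768x540 test image with a white rectangle drawn on it (made-up example data).
img = np.zeros((540, 768, 3), dtype=np.uint8)
cv2.rectangle(img, (200, 100), (600, 400), (255, 255, 255), -1)

# Four source corners in the order top-left, top-right, bottom-left, bottom-right,
# and the destination corners they should be mapped to (the whole output image).
src = np.float32([[200, 100], [600, 100], [200, 400], [600, 400]])
dst = np.float32([[0, 0], [768, 0], [0, 540], [768, 540]])

M = cv2.getPerspectiveTransform(src, dst)         # 3x3 perspective matrix
warped = cv2.warpPerspective(img, M, (768, 540))  # dsize is (width, height)

cv2.imshow('warped', warped)
cv2.waitKey(0)
cv2.destroyAllWindows()

The full two-camera version used in this article follows.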
import cv2
import numpy as np
import HKIPcamera
from multiprocessing import Process, Queue
import os
import signal
def get_cam_frame(frameName, ip, account, password, n):
    ip = str(ip)          # camera IP address; must be on the same LAN as this machine
    name = str(account)   # admin user name
    pw = str(password)    # admin password
    HKIPcamera.init(ip, name, pw)
    while True:
        raw = HKIPcamera.getframe()
        frame = np.array(raw)
        # Each camera frame is 1920x1080; stitching several of them would make the
        # combined image very large, so resize each camera's frame first.
        frame = cv2.resize(frame, (768, 540), interpolation=cv2.INTER_CUBIC)
        n.put(frame)
        cv2.imshow(frameName, frame)
        c = cv2.waitKey(10)
        if c == 27:       # ESC to quit
            break
    HKIPcamera.release()
# The four perspective-transform points for the first camera
frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3 = 0, 0, 0, 0, 0, 0, 0, 0
# The four perspective-transform points for the second camera
frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3 = 0, 0, 0, 0, 0, 0, 0, 0
# Keep it simple: starting with the first camera, each mouse click records one point.
# The first click sets frame0_x0, frame0_y0, the second click sets frame0_x1, frame0_y1, and so on.
# Click order per camera: top-left, top-right, bottom-left, bottom-right (matching the destination points below).
clickNum = 0

def draw_point(event, x, y, flags, param):
    global frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3
    global frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3, clickNum
    if event == cv2.EVENT_LBUTTONDOWN:
        if clickNum == 0:
            frame0_x0, frame0_y0 = x, y
        elif clickNum == 1:
            frame0_x1, frame0_y1 = x, y
        elif clickNum == 2:
            frame0_x2, frame0_y2 = x, y
        elif clickNum == 3:
            frame0_x3, frame0_y3 = x, y
        elif clickNum == 4:
            frame1_x0, frame1_y0 = x, y
        elif clickNum == 5:
            frame1_x1, frame1_y1 = x, y
        elif clickNum == 6:
            frame1_x2, frame1_y2 = x, y
        elif clickNum == 7:
            frame1_x3, frame1_y3 = x, y
        clickNum += 1
# Perspective transform
def perspective_transform():
    global frame0, frame1, clickNum
    global frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3
    global frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3
    if clickNum == 8:
        # shape is (rows, cols, channels), i.e. (height, width, channels)
        H_rows, W_cols = frame0.shape[:2]
        pts0_1 = np.float32(
            [[frame0_x0, frame0_y0], [frame0_x1, frame0_y1],
             [frame0_x2, frame0_y2], [frame0_x3, frame0_y3]])
        pts0_2 = np.float32([[0, 0], [W_cols, 0], [0, H_rows], [W_cols, H_rows]])
        M0 = cv2.getPerspectiveTransform(pts0_1, pts0_2)
        frame0 = cv2.warpPerspective(frame0, M0, (W_cols, H_rows))
        # The clicks for the second camera were made on the stitched window, so
        # subtract the width of the first camera's frame (768) from the x coordinates.
        pts1_1 = np.float32(
            [[frame1_x0 - 768, frame1_y0], [frame1_x1 - 768, frame1_y1],
             [frame1_x2 - 768, frame1_y2], [frame1_x3 - 768, frame1_y3]])
        pts1_2 = np.float32([[0, 0], [W_cols, 0], [0, H_rows], [W_cols, H_rows]])
        M1 = cv2.getPerspectiveTransform(pts1_1, pts1_2)
        frame1 = cv2.warpPerspective(frame1, M1, (W_cols, H_rows))
if __name__ == '__main__':
    q0 = Queue(3)
    p0 = Process(target=get_cam_frame, args=('frame0', '192.168.1.64', 'admin', '123456', q0,))
    p0.start()
    q1 = Queue(3)
    p1 = Process(target=get_cam_frame, args=('frame1', '192.168.1.65', 'admin', '123456', q1,))
    p1.start()
    cv2.namedWindow('full_frame')
    # mouse click callback for picking the transform points
    cv2.setMouseCallback('full_frame', draw_point)
    while True:
        if p0.is_alive() and p1.is_alive():
            frame0 = q0.get()
            frame1 = q1.get()
            # perspective transform (applied once all 8 points have been clicked)
            perspective_transform()
            # stitch after the perspective transform
            # horizontally
            full_frame = np.hstack([frame0, frame1])
            # vertically
            # full_frame = np.vstack([frame0, frame1])
            cv2.imshow('full_frame', full_frame)
            c = cv2.waitKey(10)
            if c == 27:   # ESC: stop the capture processes and exit
                os.kill(p0.pid, signal.SIGTERM)
                os.kill(p1.pid, signal.SIGTERM)
                break
6. After the perspective transforms, run moving-object detection on the stitched image (reference blog):
https://blog.csdn.net/pengpengloveqiaoqiao/article/details/89487049
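If you want to try the detection part on its own, without the Hikvision SDK, here is a minimal sketch of the same MOG2 background subtraction + contour pipeline using an ordinary webcam (cv2.VideoCapture(0) is just a stand-in frame source; the area thresholds are the same ones used below):

import cv2

cap = cv2.VideoCapture(0)                      # any frame source works here
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
fgbg = cv2.createBackgroundSubtractorMOG2()    # background subtractor

while True:
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)                               # foreground mask
    mask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # remove small noise
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if 100 < cv2.contourArea(cnt) < 40000:               # ignore very small / very large blobs
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255))
    cv2.imshow('detection', frame)
    if cv2.waitKey(10) == 27:                                # ESC to quit
        break

cap.release()
cv2.destroyAllWindows()

The full version, applied to the stitched and perspective-corrected camera image, follows.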
import cv2
import numpy as np
import HKIPcamera
from multiprocessing import Process, Queue
import os
import signal
def get_cam_frame(frameName, ip, account, password, n):
    ip = str(ip)          # camera IP address; must be on the same LAN as this machine
    name = str(account)   # admin user name
    pw = str(password)    # admin password
    HKIPcamera.init(ip, name, pw)
    while True:
        raw = HKIPcamera.getframe()
        frame = np.array(raw)
        # Each camera frame is 1920x1080; stitching several of them would make the
        # combined image very large, so resize each camera's frame first.
        frame = cv2.resize(frame, (768, 540), interpolation=cv2.INTER_CUBIC)
        n.put(frame)
        cv2.imshow(frameName, frame)
        c = cv2.waitKey(10)
        if c == 27:       # ESC to quit
            break
    HKIPcamera.release()
# The four perspective-transform points for the first camera
frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3 = 0, 0, 0, 0, 0, 0, 0, 0
# The four perspective-transform points for the second camera
frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3 = 0, 0, 0, 0, 0, 0, 0, 0
# Keep it simple: starting with the first camera, each mouse click records one point.
# The first click sets frame0_x0, frame0_y0, the second click sets frame0_x1, frame0_y1, and so on.
# Click order per camera: top-left, top-right, bottom-left, bottom-right (matching the destination points below).
clickNum = 0

def draw_point(event, x, y, flags, param):
    global frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3
    global frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3, clickNum
    if event == cv2.EVENT_LBUTTONDOWN:
        if clickNum == 0:
            frame0_x0, frame0_y0 = x, y
        elif clickNum == 1:
            frame0_x1, frame0_y1 = x, y
        elif clickNum == 2:
            frame0_x2, frame0_y2 = x, y
        elif clickNum == 3:
            frame0_x3, frame0_y3 = x, y
        elif clickNum == 4:
            frame1_x0, frame1_y0 = x, y
        elif clickNum == 5:
            frame1_x1, frame1_y1 = x, y
        elif clickNum == 6:
            frame1_x2, frame1_y2 = x, y
        elif clickNum == 7:
            frame1_x3, frame1_y3 = x, y
        clickNum += 1
# Perspective transform
def perspective_transform():
    global frame0, frame1, clickNum
    global frame0_x0, frame0_y0, frame0_x1, frame0_y1, frame0_x2, frame0_y2, frame0_x3, frame0_y3
    global frame1_x0, frame1_y0, frame1_x1, frame1_y1, frame1_x2, frame1_y2, frame1_x3, frame1_y3
    if clickNum == 8:
        # shape is (rows, cols, channels), i.e. (height, width, channels)
        H_rows, W_cols = frame0.shape[:2]
        pts0_1 = np.float32(
            [[frame0_x0, frame0_y0], [frame0_x1, frame0_y1],
             [frame0_x2, frame0_y2], [frame0_x3, frame0_y3]])
        pts0_2 = np.float32([[0, 0], [W_cols, 0], [0, H_rows], [W_cols, H_rows]])
        M0 = cv2.getPerspectiveTransform(pts0_1, pts0_2)
        frame0 = cv2.warpPerspective(frame0, M0, (W_cols, H_rows))
        # The clicks for the second camera were made on the stitched window, so
        # subtract the width of the first camera's frame (768) from the x coordinates.
        pts1_1 = np.float32(
            [[frame1_x0 - 768, frame1_y0], [frame1_x1 - 768, frame1_y1],
             [frame1_x2 - 768, frame1_y2], [frame1_x3 - 768, frame1_y3]])
        pts1_2 = np.float32([[0, 0], [W_cols, 0], [0, H_rows], [W_cols, H_rows]])
        M1 = cv2.getPerspectiveTransform(pts1_1, pts1_2)
        frame1 = cv2.warpPerspective(frame1, M1, (W_cols, H_rows))
if __name__ == '__main__':
    q0 = Queue(3)
    p0 = Process(target=get_cam_frame, args=('frame0', '192.168.1.64', 'admin', '123456', q0,))
    p0.start()
    q1 = Queue(3)
    p1 = Process(target=get_cam_frame, args=('frame1', '192.168.1.65', 'admin', '123456', q1,))
    p1.start()
    cv2.namedWindow('full_frame')
    # mouse click callback for picking the transform points
    cv2.setMouseCallback('full_frame', draw_point)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    fgbg = cv2.createBackgroundSubtractorMOG2()   # background subtractor for motion detection
    while True:
        if p0.is_alive() and p1.is_alive():
            frame0 = q0.get()
            frame1 = q1.get()
            # perspective transform (applied once all 8 points have been clicked)
            perspective_transform()
            # stitch after the perspective transform
            # horizontally
            full_frame = np.hstack([frame0, frame1])
            # vertically
            # full_frame = np.vstack([frame0, frame1])
            fgmask = fgbg.apply(full_frame)                          # foreground mask
            mask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)  # remove small noise
            contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for cnt in contours:
                if 100 < cv2.contourArea(cnt) < 40000:               # ignore very small / very large blobs
                    x, y, w, h = cv2.boundingRect(cnt)
                    cv2.rectangle(full_frame, (x, y), (x + w, y + h), (0, 0, 255))
            cv2.imshow('full_frame', full_frame)
            c = cv2.waitKey(10)
            if c == 27:   # ESC: stop the capture processes and exit
                os.kill(p0.pid, signal.SIGTERM)
                os.kill(p1.pid, signal.SIGTERM)
                break