技术专栏
使用的darknet实现区域人头统计、目标跟踪
该功能使用darknet框架训练了yolo-tiny人头模型,之后使用opencv4.5.1版本调用该yolo-tiny模型,跟踪部分使用KCF跟踪算法,整体上实现了三个功能:
1、区域内的人头统计;
2、区域内的绊线检测功能;
3、区域内目标跟踪,统计人头数量的功能。
一、网址:https://github.com/AlexeyAB/darknet
二、训练过程可参考我的另两篇博客:
Darknet下的Yolo v3_dragon_perfect的博客-CSDN博客
深度学习 客流统计 人流计数dragon_perfect的博客-CSDN博客人流统计算法
跟踪部分的代码如下:
#pragma once
#include "HeadDetect.h"
#include "opencv2/opencv.hpp"
#include "KCFLib/kcftracker.hpp"
using namespace cv;
using namespace std;
#define MAX_TRACK_COUNT 100//目标的最大个数
#define MAX_TRAJECTORY_COUNT 200//目标轨迹的最大个数
#define MAX_MISS_FRAME 10//最大丢失帧数
#define MAX_MATCH_FRAME 3//最大匹配帧数
#define MAX_TRACK_DIST 40000//最大跟踪距离
// Per-target tracking state: identity, lifecycle flags, a Kalman filter for
// centre prediction, and a bounded trajectory history of centres/boxes.
struct ZTracker
{
int nID;  // unique track id (assigned from ZHeadTrack::m_trackID)
int nMissTimes;  // consecutive frames without a matched detection
bool bMatchID;  // matched a detection in the current frame
bool bTrack;  // slot is active (a live track)
bool bStatOK;  // already counted by a statistics pass; never count twice
bool bInitDirection;  // start-side direction has been determined
int nInitDirection;  // side of the counting line at track start (+1 / -1)
cv::KalmanFilter *kf;  // owned; allocated on track creation, deleted when the track dies
cv::Point predPoint;  // Kalman-predicted centre for the next frame
cv::Point curCentre;  // last observed centre
cv::Rect curRect;  // last observed bounding box
int nTrajCount;  // number of valid entries in centres/blobs
cv::Point centres[MAX_TRAJECTORY_COUNT];  // trajectory history: centres
cv::Rect blobs[MAX_TRAJECTORY_COUNT];  // trajectory history: boxes
};
// Head tracker: associates per-frame head detections with persistent tracks
// (either Kalman nearest-centre matching via trackProcess, or KCF template
// tracking via kcfProcess) and produces line-crossing (in/out) and
// region-presence statistics.
class ZHeadTrack
{
public:
ZHeadTrack();
~ZHeadTrack();
int m_InCount;  // cumulative count of crossings in the arrow direction
int m_OutCount;  // cumulative count of crossings against the arrow direction
int m_RegionCount;  // cumulative count of distinct heads seen in the region
ZTracker *m_pTrackers;  // fixed pool of MAX_TRACK_COUNT tracker slots (owned)
KCFTracker m_pKCF[MAX_TRACK_COUNT];  // fixed pool of KCF tracker slots
// Resets counters, records the counting line and its direction arrow, and
// rasterizes the detection-region polygon into the mask m_detectRegion.
void trackInitial(cv::Point arrowStart, cv::Point arrowEnd, cv::Point lineStart, cv::Point lineEnd, vector<Point> regionPonits,int width,int height);
// Kalman-based association pass; bMask selects polygon-mask vs rect gating.
void trackProcess(HDBox_Container *pHDBox_Container, HDRect *rect,bool bMask);
// KCF-based tracking pass over the current frame image.
void kcfProcess(HDBox_Container *pHDBox_Container, HDRect *rect, cv::Mat image);
// Updates m_InCount/m_OutCount from completed line crossings.
void trackInOutStatistics();
// Publishes stable tracks into m_trackeRegion and bumps m_RegionCount.
void trackRegionStatistics();
// Returns +1/-1 for the side of the segment the point lies on, 0 when the
// point falls outside the segment's coordinate span on both axes.
int pointInLineSide(cv::Point point, cv::Point lineStart, cv::Point lineEnd);
// Intersection-over-union of two axis-aligned rectangles.
float CalculateIou(cv::Rect& det, cv::Rect& track);
public:
int m_trackID;  // monotonically increasing id generator
int m_nArrowStart;  // side of the line at the arrow's start point
int m_nArrowEnd;  // side of the line at the arrow's end point
cv::Point m_lineStart;  // counting line, first endpoint
cv::Point m_lineEnd;  // counting line, second endpoint
cv::Mat m_detectRegion;  // 8-bit single-channel mask: 255 inside the region
HDBox_Container m_boxContainer;  // detections surviving the size/ROI filter
HDTracker_Region m_trackeRegion;  // per-frame output of trackRegionStatistics
};
#include "HeadTrack.h"
// Initializes all counters and allocates the fixed-size tracker pool.
ZHeadTrack::ZHeadTrack()
{
m_trackID = 0;
m_InCount = 0;
m_OutCount = 0;
m_RegionCount = 0;
m_nArrowStart = 0;
m_nArrowEnd = 0;
m_pTrackers = new ZTracker[MAX_TRACK_COUNT];
// Bug fix: the original memset cleared only sizeof(ZTracker) bytes, i.e.
// just the FIRST element, leaving the other MAX_TRACK_COUNT-1 trackers
// with indeterminate bTrack/kf fields. Zero the whole array.
memset(m_pTrackers, 0, sizeof(ZTracker) * MAX_TRACK_COUNT);
}
// Releases the tracker pool.
// NOTE(review): cv::KalmanFilter objects still owned by active tracks are
// not freed here and leak at shutdown; freeing them safely would require
// the tracker array to be reliably zero-initialized first — confirm.
ZHeadTrack::~ZHeadTrack()
{
// Bug fix: m_pTrackers is allocated with new[], so it must be released
// with delete[] — plain delete on a new[] pointer is undefined behavior.
delete[] m_pTrackers;
}
// Resets the in/out counters, stores the counting line, records which side
// of the line the direction arrow points to, and rasterizes the detection
// region polygon into the single-channel mask m_detectRegion
// (255 inside the polygon, 0 outside).
void ZHeadTrack::trackInitial(cv::Point arrowStart, cv::Point arrowEnd, cv::Point lineStart, cv::Point lineEnd, vector<Point> regionPonits, int width, int height)
{
// Reset id generator and crossing counters.
m_trackID = 0;
m_InCount = 0;
m_OutCount = 0;
// Remember the counting line endpoints.
m_lineStart = lineStart;
m_lineEnd = lineEnd;
// Which side of the line does each end of the direction arrow sit on?
m_nArrowStart = pointInLineSide(arrowStart, lineStart, lineEnd);
m_nArrowEnd = pointInLineSide(arrowEnd, lineStart, lineEnd);
// Paint the region polygon into a 3-channel canvas, then collapse it to
// the grayscale mask used for centre-inside-region tests.
cv::Mat colorMask = cv::Mat::zeros(height, width, CV_8UC3);
vector<vector<Point>> polygons;
polygons.push_back(regionPonits);
cv::fillPoly(colorMask, polygons, cv::Scalar(255, 255, 255));
cv::cvtColor(colorMask, m_detectRegion, COLOR_BGR2GRAY);
}
// Detection-to-track association using nearest-centre matching against a
// per-track Kalman prediction. Steps:
//   1) filter out detections that are too small / outside the gate,
//   2) greedily match each active track to its closest unmatched detection,
//   3) update the Kalman state (matched or coasting), drop stale tracks,
//   4) spawn new tracks for detections that matched nothing.
// @param pHDBox_Container  raw head detections for this frame
// @param rect              rectangular ROI used when bMask is false
// @param bMask             true: gate by the polygon mask m_detectRegion;
//                          false: gate by rect
void ZHeadTrack::trackProcess(HDBox_Container *pHDBox_Container, HDRect *rect, bool bMask)
{
int i = 0, j = 0;
// Drop rects that are too small (and whose centre is outside the gate).
m_boxContainer.headCount = 0;
int width = m_detectRegion.cols;
uchar *pDetectData = m_detectRegion.data;
int step1 = m_detectRegion.step1();
//cv::imshow("m_detectRegion", m_detectRegion);
//cv::waitKey(10);
for (i = 0;i< pHDBox_Container->headCount;i++)
{
HDRect *pHDRect = &pHDBox_Container->candidates[i];
int nx = (pHDRect->right + pHDRect->left) / 2;
int ny = (pHDRect->top + pHDRect->bottom) / 2;
int rectW = pHDRect->right - pHDRect->left;
int rectH = pHDRect->bottom - pHDRect->top;
if (bMask)
{
// Keep boxes > 60x60 whose centre pixel is inside the polygon mask.
// NOTE(review): indexes the mask with `width` rather than step1() —
// correct only while row stride == width; confirm for padded Mats.
if (rectW > 60 && rectH > 60 && (*(pDetectData + ny*width + nx) == 255))
{
m_boxContainer.candidates[m_boxContainer.headCount++] = pHDBox_Container->candidates[i];
}
}
else
{
// Keep boxes > 40x40 whose centre lies strictly inside the ROI rect.
if (rectW > 40 && rectH > 40 && nx > rect->left && nx < rect->right && ny > rect->top && ny < rect->bottom)
{
m_boxContainer.candidates[m_boxContainer.headCount++] = pHDBox_Container->candidates[i];
}
}
}
// bMatch[j] marks detection j as already claimed by some track.
bool bMatch[HD_MAX_HEADS] = { false };
for (i = 0;i< m_boxContainer.headCount;i++)
{
bMatch[i] = false;
}
// Pass 1: for each active track, find the nearest unmatched detection
// within the squared-distance gate MAX_TRACK_DIST.
for (i = 0; i < MAX_TRACK_COUNT; i++)
{
ZTracker *pTracker = &m_pTrackers[i];
if (pTracker->bTrack)
{
bool bMinst = false;
int nMatchID = -1;
int maxDist = MAX_TRACK_DIST;
for (j = 0; j < m_boxContainer.headCount; j++)
{
if (!bMatch[j])
{
HDRect *pHDRect = &m_boxContainer.candidates[j];
cv::Rect curRect;
curRect.x = pHDRect->left;
curRect.y = pHDRect->top;
curRect.width = pHDRect->right - pHDRect->left;
curRect.height = pHDRect->bottom - pHDRect->top;
int nx = (pHDRect->left + pHDRect->right) / 2;
int ny = (pHDRect->top + pHDRect->bottom) / 2;
// Squared Euclidean distance from the Kalman-predicted centre.
int dist = (pTracker->predPoint.x - nx)*(pTracker->predPoint.x - nx) + (pTracker->predPoint.y - ny)*(pTracker->predPoint.y - ny);
if (dist < maxDist)
{
maxDist = dist;
pTracker->curRect = curRect;// cached for the update below
pTracker->curCentre.x = nx;
pTracker->curCentre.y = ny;
nMatchID = j;
bMinst = true;
}
}
}
// A matching blob was found for this track.
if (bMinst)
{
bMatch[nMatchID] = true;
HDRect *pHDRect = &m_boxContainer.candidates[nMatchID];
cv::Rect curRect;
curRect.x = pHDRect->left;
curRect.y = pHDRect->top;
curRect.width = pHDRect->right - pHDRect->left;
curRect.height = pHDRect->bottom - pHDRect->top;
int nx = (pHDRect->left + pHDRect->right) / 2;
int ny = (pHDRect->top + pHDRect->bottom) / 2;
pTracker->bMatchID = true;
pTracker->nMissTimes = 0;
pTracker->curCentre.x = nx;
pTracker->curCentre.y = ny;
pTracker->curRect = curRect;
// Feed the measurement to the Kalman filter, then refresh the prediction.
Mat measurement = Mat::zeros(2, 1, CV_32F);
measurement.at<float>(0) = (float)nx;
measurement.at<float>(1) = (float)ny;
pTracker->kf->correct(measurement);
Mat prediction = pTracker->kf->predict();
pTracker->predPoint = Point(prediction.at<float>(0), prediction.at<float>(1)); // predicted centre (x', y')
// Append to the trajectory only if the centre moved far enough
// (squared distance > 30) from the last stored point.
cv::Point centre = pTracker->centres[pTracker->nTrajCount - 1];
if ((centre.x - nx)*(centre.x - nx) + (centre.y - ny)*(centre.y - ny) > 30)
{
pTracker->centres[pTracker->nTrajCount].x = nx;
pTracker->centres[pTracker->nTrajCount].y = ny;
pTracker->blobs[pTracker->nTrajCount] = curRect;
pTracker->nTrajCount++;
// History full: shift left by one so the buffer always keeps the
// most recent MAX_TRAJECTORY_COUNT-1 points.
if (pTracker->nTrajCount >= MAX_TRAJECTORY_COUNT - 1)
{
pTracker->nTrajCount = MAX_TRAJECTORY_COUNT - 1;
for (int k = 1; k < pTracker->nTrajCount; k++)
{
pTracker->centres[k - 1] = pTracker->centres[k];
pTracker->blobs[k - 1] = pTracker->blobs[k];
}
}
}
}
else// no matching blob this frame
{
pTracker->nMissTimes++;
//Mat prediction = pTracker->kf->predict();
//pTracker->predPoint = Point(prediction.at<float>(0), prediction.at<float>(1)); // predicted centre (x', y')
// Coast: re-feed the last observed centre as the measurement and
// advance the prediction.
Mat measurement = Mat::zeros(2, 1, CV_32F);
measurement.at<float>(0) = (float)pTracker->curCentre.x;
measurement.at<float>(1) = (float)pTracker->curCentre.y;
pTracker->kf->correct(measurement);
Mat prediction = pTracker->kf->predict();
pTracker->predPoint = Point(prediction.at<float>(0), prediction.at<float>(1)); // predicted centre (x', y')
// Too many consecutive misses: kill the track and free its filter.
// NOTE(review): kf is not nulled after delete — the slot is fully
// re-initialized on reuse, but confirm no read happens in between.
if (pTracker->nMissTimes > MAX_MISS_FRAME)
{
pTracker->bTrack = false;
delete pTracker->kf;
}
}
}
}
// Pass 2: detections that matched no existing track spawn new tracks.
for (i = 0; i < m_boxContainer.headCount; i++)
{
HDRect *pHDRect = &m_boxContainer.candidates[i];
cv::Rect curRect;
curRect.x = pHDRect->left;
curRect.y = pHDRect->top;
curRect.width = pHDRect->right - pHDRect->left;
curRect.height = pHDRect->bottom - pHDRect->top;
int nx = (pHDRect->left + pHDRect->right) / 2;
int ny = (pHDRect->top + pHDRect->bottom) / 2;
if (!bMatch[i])
{
// Find a free tracker slot and initialize it for this detection.
for (j = 0; j < MAX_TRACK_COUNT; j++)
{
ZTracker *pTracker = &m_pTrackers[j];
if (!pTracker->bTrack)
{
pTracker->bTrack = true;
pTracker->bMatchID = true;
pTracker->bStatOK = false;
pTracker->bInitDirection = false;
pTracker->nID = ++m_trackID;
pTracker->curCentre.x = nx;
pTracker->curCentre.y = ny;
pTracker->curRect = curRect;
pTracker->nMissTimes = 0;
pTracker->predPoint.x = nx;
pTracker->predPoint.y = ny;
// Constant-velocity Kalman model: state (x, y, vx, vy),
// measurement (x, y).
pTracker->kf = new cv::KalmanFilter(4, 2, 0);
pTracker->kf->transitionMatrix = (Mat_<float>(4, 4) << 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1);// transition matrix A
cv::setIdentity(pTracker->kf->measurementMatrix); // measurement matrix H
cv::setIdentity(pTracker->kf->processNoiseCov, Scalar::all(1e-5)); // process noise covariance Q
cv::setIdentity(pTracker->kf->measurementNoiseCov, Scalar::all(1e-1));// measurement noise covariance R
cv::setIdentity(pTracker->kf->errorCovPost, Scalar::all(1)); // a-posteriori error covariance P
pTracker->kf->statePost = (Mat_<float>(4, 1) << nx, ny, 0, 0);
Mat prediction = pTracker->kf->predict();
pTracker->predPoint = Point(prediction.at<float>(0), prediction.at<float>(1)); // predicted centre (x', y')
pTracker->centres[0].x = nx;
pTracker->centres[0].y = ny;
pTracker->blobs[0] = curRect;
pTracker->nTrajCount = 1;
break;
}
}
}
}
}
// Computes the intersection-over-union of two axis-aligned rectangles.
// @param det    first rectangle (detection box)
// @param track  second rectangle (tracker box)
// @return IoU in [0, 1]; 0 when the rectangles do not overlap or when the
//         union is empty.
float ZHeadTrack::CalculateIou(cv::Rect& det, cv::Rect& track)
{
// Corners of the intersection rectangle.
auto xx1 = std::max(det.tl().x, track.tl().x);
auto yy1 = std::max(det.tl().y, track.tl().y);
auto xx2 = std::min(det.br().x, track.br().x);
auto yy2 = std::min(det.br().y, track.br().y);
// Clamp to zero so disjoint rectangles yield an empty intersection.
auto w = std::max(0, xx2 - xx1);
auto h = std::max(0, yy2 - yy1);
// Areas of intersection and union.
float det_area = det.area();
float trk_area = track.area();
auto intersection_area = w * h;
float union_area = det_area + trk_area - intersection_area;
// Bug fix: when both rectangles are empty the original divided 0 by 0,
// producing NaN, which then failed every `fiou > dfiou` comparison
// silently. Return 0 explicitly instead.
if (union_area <= 0.0f)
{
return 0.0f;
}
auto iou = intersection_area / union_area;
return iou;
}
// KCF-based tracking pass: advance every active KCF tracker on the new
// frame, re-associate trackers with detections by IoU, and create new
// (not-yet-valid) trackers for unmatched detections.
// @param pHDBox_Container  raw head detections for this frame
// @param rect              rectangular gate; detections outside are dropped
// @param image             current frame the KCF trackers run on
void ZHeadTrack::kcfProcess(HDBox_Container *pHDBox_Container, HDRect *rect,cv::Mat image)
{
int i = 0, j = 0;
//HDBox_Container container;
// Keep only detections > 20x20 whose centre lies strictly inside the gate.
m_boxContainer.headCount = 0;
for (i = 0; i < pHDBox_Container->headCount; i++)
{
HDRect *pHDRect = &pHDBox_Container->candidates[i];
int nx = (pHDRect->right + pHDRect->left) / 2;
int ny = (pHDRect->top + pHDRect->bottom) / 2;
int rectW = pHDRect->right - pHDRect->left;
int rectH = pHDRect->bottom - pHDRect->top;
if (rectW > 20 && rectH > 20 && nx > rect->left && nx < rect->right && ny > rect->top && ny < rect->bottom)
{
m_boxContainer.candidates[m_boxContainer.headCount++] = pHDBox_Container->candidates[i];
}
}
// bMatch[j] marks detection j as already claimed by some tracker.
bool bMatch[HD_MAX_HEADS] = { false };
for (i = 0; i < m_boxContainer.headCount; i++)
{
bMatch[i] = false;
}
// Pass 1: update each active KCF tracker on the new frame and match it to
// the detection with the highest IoU above 0.4.
for (i = 0; i < MAX_TRACK_COUNT; i++)
{
KCFTracker *pKCF = &m_pKCF[i];
if (pKCF->bTrack)
{
cv::Rect tRect = pKCF->update(image);
pKCF->nTrackTimes++;
pKCF->curRect = tRect;
int nx = tRect.x + tRect.width / 2;
int ny = tRect.y + tRect.height / 2;
int dist = 10000000; // NOTE(review): assigned but never used below
float dfiou = 0.4; // minimum IoU required to accept a match
cv::Rect roiRect;
int nFlag = 0;
bool bOK = false;
// NOTE(review): nlimit (0.6 x box diagonal) is computed but never used;
// presumably a leftover distance gate — confirm before removing.
int nlimit = 0.6*std::sqrt(tRect.width*tRect.width + tRect.height*tRect.height);
for (j = 0; j < m_boxContainer.headCount; j++)
{
HDRect *pHDRect = &m_boxContainer.candidates[j];
int x0 = (pHDRect->left + pHDRect->right) / 2;
int y0 = (pHDRect->top + pHDRect->bottom) / 2;
int tempDist = std::sqrt((nx - x0)*(nx - x0) + (ny - y0)*(ny - y0));
cv::Rect trackR;
trackR.x = pHDRect->left;
trackR.y = pHDRect->top;
trackR.width = pHDRect->right - pHDRect->left;
trackR.height = pHDRect->bottom - pHDRect->top;
float fiou = CalculateIou(tRect, trackR);
// Keep the best-overlapping detection.
if (fiou > dfiou)
{
dfiou = fiou;
nFlag = j;
bOK = true;
roiRect = trackR;
}
}
if (bOK)
{
// Matched: snap the KCF template back onto the detection box.
bMatch[nFlag] = true;
pKCF->nMissFrame = 0;
pKCF->init(roiRect, image);
pKCF->curRect = roiRect;
pKCF->nMatchTimes++;
// A tracker becomes valid (and gets an ID) only if it has matched a
// detection on every frame since creation; otherwise it is discarded.
if (!pKCF->bValid)
{
if (/*pKCF->nMatchTimes > MAX_MATCH_FRAME && */pKCF->nTrackTimes == pKCF->nMatchTimes)
{
pKCF->bValid = true;
pKCF->nID = ++m_trackID;
}
else
{
pKCF->bTrack = false;
pKCF->bValid = false;
}
}
}
else
{
// No detection matched: count a miss; drop the tracker after
// MAX_MISS_FRAME consecutive misses.
pKCF->nMissFrame++;
if (pKCF->nMissFrame > MAX_MISS_FRAME)
{
pKCF->bTrack = false;
pKCF->bValid = false;
}
}
}
}
// Pass 2: unmatched detections start new trackers in free slots. They are
// created invalid (nID = -1) and must earn validity in pass 1 above.
for (j = 0; j < m_boxContainer.headCount; j++)
{
if (!bMatch[j])
{
HDRect *pHDRect = &m_boxContainer.candidates[j];
for (i = 0; i < MAX_TRACK_COUNT; i++)
{
KCFTracker *pKCF = &m_pKCF[i];
if (!pKCF->bTrack)
{
pKCF->nID = -1;// ++m_trackID;
pKCF->nMissFrame = 0;
pKCF->nTrackTimes = 0;
pKCF->nMatchTimes = 0;
pKCF->bValid = false;
cv::Rect roiRect;
roiRect.x = pHDRect->left;
roiRect.y = pHDRect->top;
roiRect.width = pHDRect->right - pHDRect->left;
roiRect.height = pHDRect->bottom - pHDRect->top;
pKCF->bTrack = true;
pKCF->init(roiRect, image);
break;
}
}
}
}
}
// Determines which side of segment [lineStart, lineEnd] a point lies on.
// @return +1 or -1 for the two sides (sign of the 2D cross product,
//         normalized by the segment's x direction so the answer does not
//         depend on endpoint order for non-vertical lines); 0 when the
//         point falls outside the segment's coordinate span on BOTH axes.
// Bug fix: the original had a bare (non-comment) Chinese text line here
// ("先保证点在线段内"), which did not compile; it is now a comment.
int ZHeadTrack::pointInLineSide(cv::Point point, cv::Point lineStart, cv::Point lineEnd)
{
int x0 = lineStart.x;
int x1 = lineEnd.x;
int y0 = lineStart.y;
int y1 = lineEnd.y;
// First require the point to lie within the segment's span on at least
// one axis (bounds are exclusive).
bool bFlagX = false;
bool bFlagY = false;
if (x0 > x1)
{
bFlagX = point.x > x1 && point.x < x0;
}
else
{
bFlagX = point.x < x1 && point.x > x0;
}
if (y0 > y1)
{
bFlagY = point.y > y1 && point.y < y0;
}
else
{
bFlagY = point.y < y1 && point.y > y0;
}
if (!(bFlagX || bFlagY))
{
return 0;
}
// 2D cross product of (lineEnd - lineStart) x (point - lineStart).
int v0 = (point.x - x0)*(y1 - y0) - (point.y - y0)*(x1 - x0);
int v1 = x1 - x0;
if (v1 == 0)
{
// Vertical segment: the cross-product sign alone decides the side.
return (v0 < 0) ? -1 : 1;
}
// Non-vertical: flip the sign when the segment runs right-to-left so the
// result is consistent with the geometric side.
return (v0 * v1 < 0) ? -1 : 1;
}
// Counts line crossings for every live track with a sufficiently long
// trajectory and updates m_InCount / m_OutCount. Each track is counted at
// most once (bStatOK latches after a confirmed crossing).
// Bug fix: the original had two bare (non-comment) Chinese text lines in
// this function, which did not compile; they are now comments.
void ZHeadTrack::trackInOutStatistics()
{
int i = 0, j = 0;
for (i = 0; i < MAX_TRACK_COUNT; i++)
{
ZTracker *pTracker = &m_pTrackers[i];
if (pTracker->bTrack && pTracker->nTrajCount > 20 && !pTracker->bStatOK)
{
// 连续确认跟踪起始点的方向 — determine the track's starting side of
// the line by majority vote over its first 10 trajectory points.
if (!pTracker->bInitDirection)
{
int count0 = 0;
for (j = 0; j < 10; j++)
{
int flag = pointInLineSide(pTracker->centres[j], m_lineStart, m_lineEnd);
count0 += flag;
}
if (count0 > 0)
{
pTracker->nInitDirection = 1;
}
else
{
pTracker->nInitDirection = -1;
}
// Bug fix: latch the direction. The original never set this flag,
// so the start side was recomputed every frame — and could change
// once the trajectory ring buffer starts shifting points out.
pTracker->bInitDirection = true;
}
// 跟踪过线需要连续确认 — a crossing is confirmed only when most of
// the last ~10 points sit on the opposite side of the line.
int count1 = 0;
for (j = pTracker->nTrajCount - 10; j < pTracker->nTrajCount - 1; j++)
{
int flag = pointInLineSide(pTracker->centres[j], m_lineStart, m_lineEnd);
if (flag != 0 && pTracker->nInitDirection != flag)
{
count1++;
}
}
if (count1 > 6)
{
// A start on the arrow-start side crossing over counts as "in".
if (pTracker->nInitDirection == m_nArrowStart)
{
m_InCount++;
}
else
{
m_OutCount++;
}
pTracker->bStatOK = true;
}
}
}
}
void ZHeadTrack::trackRegionStatistics()
{
int i = 0, j = 0;
m_trackeRegion.currentCount = 0;
for (i = 0; i < MAX_TRACK_COUNT; i++)
{
ZTracker *pTracker = &m_pTrackers[i];
if (pTracker->bTrack && pTracker->nTrajCount > 5 && !pTracker->bStatOK)
{
m_trackeRegion.rect[m_trackeRegion.currentCount].id = pTracker->nID;
m_trackeRegion.rect[m_trackeRegion.currentCount].left = pTracker->curRect.x;
m_trackeRegion.rect[m_trackeRegion.currentCount].top = pTracker->curRect.y;
m_trackeRegion.rect[m_trackeRegion.currentCount].right = pTracker->curRect.x + pTracker->curRect.width;
m_trackeRegion.rect[m_trackeRegion.currentCount].bottom = pTracker->curRect.y + pTracker->curRect.height;
m_trackeRegion.currentCount++;
m_RegionCount++;
pTracker->bStatOK = true;
}
}
}
效果展示:
跟踪的原理:
用yolo-tiny检测出来的结果,送到KCF跟踪 器中,连续匹配上两次,则确认为一个目标,之后采用tracking-by-detection的方式进行人头跟踪。
人头跟踪的demo(内含KCF跟踪代码部分):
链接:百度网盘 请输入提取码
提取码:dr2a
声明:本文内容由易百纳平台入驻作者撰写,文章观点仅代表作者本人,不代表易百纳立场。如有内容侵权或者其他问题,请联系本站进行删除。
红包
点赞
收藏
评论
打赏
- 分享
- 举报
评论
0个
手气红包
暂无数据
相关专栏
-
浏览量:3947次2019-12-23 11:03:59
-
浏览量:625次2023-07-05 10:17:52
-
浏览量:655次2023-12-21 10:38:07
-
浏览量:6597次2021-02-06 17:02:32
-
浏览量:857次2023-08-19 09:30:52
-
浏览量:1187次2023-12-18 17:48:09
-
浏览量:2060次2020-02-24 09:57:27
-
浏览量:5487次2021-08-09 16:11:19
-
浏览量:733次2023-12-11 16:56:37
-
浏览量:1636次2023-12-16 11:15:45
-
浏览量:838次2023-04-17 15:47:23
-
浏览量:1668次2019-12-24 09:55:40
-
浏览量:1531次2023-11-01 10:56:09
-
浏览量:3528次2019-07-05 11:12:50
-
浏览量:2544次2019-12-31 16:23:45
-
浏览量:2584次2020-03-25 10:49:44
-
浏览量:1117次2023-06-02 17:42:09
-
浏览量:821次2023-09-19 11:13:35
-
浏览量:904次2023-09-08 14:00:44
切换马甲
上一页
下一页
打赏用户
共 0 位
我要创作
分享技术经验,可获取创作收益
分类专栏
-
10篇
热门专栏
- 使用YOLOv8和PySimpleGUI构建目标计数GUI
- 基于Yolov5的火灾检测系统
- 海思3559 yolov5模型转wk详细笔记
- 实战 | OpenCV中用HOG+SVM实现手写数字识(附代码)
- Pytorch版本YOLOv3模型转Darknet weights模型然后转caffemodel再转wk模型在nnie上面推理
- YOLOv4 darknet模型转换成caffemodel再转om在Atlas服务器上面推理(一)
- 科普 | 深度学习训练和推理有何不同?
- ubuntu18.04配置pytorch框架并进行fcn网络并训练 —— 深度学习(一)
- 基于mmpose的动作分类
- 解CenterNet中的Heatmap热图解释
置顶时间设置
结束时间
删除原因
-
广告/SPAM
-
恶意灌水
-
违规内容
-
文不对题
-
重复发帖
打赏作者
风清扬
您的支持将鼓励我继续创作!
打赏金额:
¥1
¥5
¥10
¥50
¥100
支付方式:
微信支付
打赏成功!
感谢您的打赏,如若您也想被打赏,可前往 发表专栏 哦~
举报反馈
举报类型
- 内容涉黄/赌/毒
- 内容侵权/抄袭
- 政治相关
- 涉嫌广告
- 侮辱谩骂
- 其他
详细说明
审核成功
发布时间设置
发布时间:
请选择发布时间设置
是否关联周任务-专栏模块
审核失败
失败原因
请选择失败原因
备注
请输入备注