继续添加注释 (Continue adding comments)
@@ -2,6 +2,8 @@
 MTCNN-FaceNet-light with c++

-2019-12-06 Fixed several bugs; convolution now runs directly after the convolution initialization
 MTCNN and FaceNet implemented with OpenCV only

-2019-12-24 Added the BN layer
+2019-12-06 Fixed several bugs; convolution now runs directly after the convolution initialization
+
+2019-12-24 Added the BN layer

@@ -291,7 +291,7 @@ void facenet::Inception_resnet_A(pBox *input, pBox *output, string filepath, flo
     convolution(conv7_wb, conv7_out, conv8_out);
     addbias(conv8_out, conv7_wb->pbias);

-    mulandaddInit(input, conv8_out, output, scale);
+    mulandaddInit(input, conv8_out, output);
     mulandadd(input, conv8_out, output, scale);
     relu(output, conv8_wb->pbias);

@@ -541,7 +541,7 @@ void facenet::Inception_resnet_B(pBox *input, pBox *output, string filepath, flo
     convolution(conv5_wb, conv5_out, conv6_out);
     addbias(conv6_out, conv5_wb->pbias);

-    mulandaddInit(input, conv6_out, output, scale);
+    mulandaddInit(input, conv6_out, output);
     mulandadd(input, conv6_out, output, scale);
     relu(output, conv6_wb->pbias);

@@ -839,7 +839,7 @@ void facenet::Inception_resnet_C(pBox *input, pBox *output, string filepath, flo
     convolution(conv5_wb, conv5_out, conv6_out);
     addbias(conv6_out, conv5_wb->pbias);

-    mulandaddInit(input, conv6_out, output, scale);
+    mulandaddInit(input, conv6_out, output);
     mulandadd(input, conv6_out, output, scale);
     relu(output, conv6_wb->pbias);

@@ -962,7 +962,7 @@ void facenet::Inception_resnet_C_None(pBox *input, pBox *output, string filepath
     convolution(conv5_wb, conv5_out, conv6_out);
     addbias(conv6_out, conv5_wb->pbias);

-    mulandaddInit(input, conv6_out, output, 1);
+    mulandaddInit(input, conv6_out, output);
     mulandadd(input, conv6_out, output);

     freepBox(conv1_out);

src/network.cpp
@@ -2,8 +2,8 @@

 /**
  * Apply the bias after a convolution
- * @param pbox
- * @param pbias
+ * @param pbox feature map
+ * @param pbias bias values
  */
 void addbias(struct pBox *pbox, mydataFmt *pbias) {
     if (pbox->pdata == NULL) {

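Note: judging from the comment, addbias adds one bias value per output channel across the whole feature map. A minimal sketch under that assumption (hypothetical addbias_sketch over a raw channel-major CHW float buffer, not the repository's pBox type):

#include <cstddef>

// Hypothetical illustration: add one bias value per channel to a
// feature map stored as a channel-major (CHW) float buffer.
void addbias_sketch(float *data, const float *bias,
                    std::size_t channels, std::size_t height, std::size_t width) {
    for (std::size_t c = 0; c < channels; ++c) {
        float b = bias[c];                        // one bias per channel
        float *plane = data + c * height * width;
        for (std::size_t i = 0; i < height * width; ++i) {
            plane[i] += b;                        // shift every activation in the plane
        }
    }
}
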
@@ -29,8 +29,8 @@ void addbias(struct pBox *pbox, mydataFmt *pbias) {

 /**
  * Initialization for converting a Mat image into a pBox struct
- * @param image
- * @param pbox
+ * @param image image in Mat format
+ * @param pbox the pBox struct
  */
 void image2MatrixInit(Mat &image, struct pBox *pbox) {
     if ((image.data == NULL) || (image.type() != CV_8UC3)) {

@@ -48,9 +48,9 @@ void image2MatrixInit(Mat &image, struct pBox *pbox) {

 /**
  * Convert a Mat image into the pBox struct
- * @param image
- * @param pbox
- * @param num
+ * @param image image in Mat format
+ * @param pbox the pBox struct
+ * @param num selects mtcnn or facenet: 0 = mtcnn, non-zero = facenet, defaults to 0
  */
 void image2Matrix(const Mat &image, const struct pBox *pbox, int num) {
     if ((image.data == NULL) || (image.type() != CV_8UC3)) {

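Note: the num switch implies the two networks preprocess pixels differently. Purely as an illustration (the constants and the per-image prewhitening below are the conventions commonly used by MTCNN and FaceNet ports, assumed rather than shown in this diff; normalize_sketch is a hypothetical helper):

#include <cmath>
#include <vector>

// Illustrative only: two common input normalizations that a switch like
// image2Matrix's num parameter could select between.
void normalize_sketch(std::vector<float> &pixels, int num) {
    if (num == 0) {
        // MTCNN-style scaling: (x - 127.5) / 128
        for (float &v : pixels) v = (v - 127.5f) * 0.0078125f;
    } else {
        // FaceNet-style prewhitening: (x - mean) / stddev over the whole image
        double sum = 0.0, sq = 0.0;
        for (float v : pixels) { sum += v; sq += double(v) * v; }
        double mean = sum / pixels.size();
        double var = sq / pixels.size() - mean * mean;
        double stddev = std::sqrt(var > 0 ? var : 0) + 1e-6;   // guard against a flat image
        for (float &v : pixels) v = float((v - mean) / stddev);
    }
}
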
@@ -120,11 +120,11 @@ void MeanAndDev(const Mat &image, mydataFmt &p, mydataFmt &q) {

 /**
  * Convolution padding initialization
- * @param pbox
- * @param outpBox
- * @param pad
- * @param padw
- * @param padh
+ * @param pbox input feature map
+ * @param outpBox output feature map
+ * @param pad padding for a square kernel (-1 = irregular padding, 0 = no padding)
+ * @param padw padding width for an irregular kernel
+ * @param padh padding height for an irregular kernel
  */
 void featurePadInit(const pBox *pbox, pBox *outpBox, const int pad, const int padw, const int padh) {
     if (pad < -1) {

@@ -147,11 +147,11 @@ void featurePadInit(const pBox *pbox, pBox *outpBox, const int pad, const int pa

 /**
  * Convolution padding
- * @param pbox
- * @param outpBox
- * @param pad
- * @param padw
- * @param padh
+ * @param pbox input feature map
+ * @param outpBox output feature map
+ * @param pad padding for a square kernel (-1 = irregular padding, 0 = no padding)
+ * @param padw padding width for an irregular kernel
+ * @param padh padding height for an irregular kernel
  */
 void featurePad(const pBox *pbox, pBox *outpBox, const int pad, const int padw, const int padh) {
     mydataFmt *p = outpBox->pdata;

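Note: padding here evidently means copying each input channel into the centre of a larger, zero-filled plane before the convolution runs. A sketch under that assumption (hypothetical pad_sketch over a CHW float buffer; the irregular pad == -1 / padw / padh case is omitted):

#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical sketch: zero-pad each channel of a CHW feature map by
// `pad` pixels on every side.
std::vector<float> pad_sketch(const float *in, int channels, int height, int width, int pad) {
    const int oh = height + 2 * pad, ow = width + 2 * pad;
    std::vector<float> out(static_cast<std::size_t>(channels) * oh * ow, 0.0f);
    for (int c = 0; c < channels; ++c) {
        for (int y = 0; y < height; ++y) {
            const float *src = in + (static_cast<std::size_t>(c) * height + y) * width;
            float *dst = out.data() + (static_cast<std::size_t>(c) * oh + y + pad) * ow + pad;
            std::memcpy(dst, src, width * sizeof(float));   // copy one row into the padded plane
        }
    }
    return out;
}
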
@@ -183,9 +183,9 @@ void featurePad(const pBox *pbox, pBox *outpBox, const int pad, const int padw,

 /**
  * Convolution initialization
- * @param weight
- * @param pbox
- * @param outpBox
+ * @param weight convolution weights
+ * @param pbox input feature map
+ * @param outpBox output feature map
  */
 void convolutionInit(const Weight *weight, pBox *pbox, pBox *outpBox) {
     outpBox->channel = weight->selfChannel;

@@ -214,9 +214,9 @@ void convolutionInit(const Weight *weight, pBox *pbox, pBox *outpBox) {

 /**
  * Convolution
- * @param weight
- * @param pbox
- * @param outpBox
+ * @param weight convolution weights
+ * @param pbox input feature map
+ * @param outpBox output feature map
  */
 void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) {
     int ckh, ckw, ckd, stride, cknum, ckpad, imginputh, imginputw, imginputd, Nh, Nw;

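Note: the diff does not show how convolution is implemented internally; the local variables only hint at a direct sliding-window loop. For orientation, a naive direct convolution over CHW buffers looks roughly like the hypothetical conv_sketch below; it is not the repository's code:

#include <cstddef>
#include <vector>

// Naive direct convolution for illustration: `in` is CHW with ic channels,
// `w` holds oc*ic*kh*kw kernel weights, output is CHW with oc channels.
// No padding here; stride handled explicitly.
std::vector<float> conv_sketch(const std::vector<float> &in, int ic, int ih, int iw,
                               const std::vector<float> &w, int oc, int kh, int kw, int stride) {
    const int oh = (ih - kh) / stride + 1;
    const int ow = (iw - kw) / stride + 1;
    std::vector<float> out(static_cast<std::size_t>(oc) * oh * ow, 0.0f);
    for (int o = 0; o < oc; ++o)
        for (int y = 0; y < oh; ++y)
            for (int x = 0; x < ow; ++x) {
                float acc = 0.0f;
                for (int c = 0; c < ic; ++c)
                    for (int ky = 0; ky < kh; ++ky)
                        for (int kx = 0; kx < kw; ++kx)
                            acc += in[(c * ih + y * stride + ky) * iw + x * stride + kx]
                                 * w[((o * ic + c) * kh + ky) * kw + kx];
                out[(o * oh + y) * ow + x] = acc;   // one output activation
            }
    return out;
}
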
@@ -267,11 +267,11 @@ void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) {

 /**
  * Max pooling initialization
- * @param pbox
- * @param Matrix
- * @param kernelSize
- * @param stride
- * @param flag
+ * @param pbox input feature map
+ * @param Matrix output feature map
+ * @param kernelSize pooling kernel size
+ * @param stride stride
+ * @param flag flag
  */
 void maxPoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride, int flag) {
     if (flag == 1) {

@@ -289,10 +289,10 @@ void maxPoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride,

 /**
  * Max pooling
- * @param pbox
- * @param Matrix
- * @param kernelSize
- * @param stride
+ * @param pbox input feature map
+ * @param Matrix output feature map
+ * @param kernelSize pooling kernel size
+ * @param stride stride
  */
 void maxPooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {
     if (pbox->pdata == NULL) {

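Note: max pooling slides a kernelSize x kernelSize window across each channel with the given stride and keeps the largest value in the window. A minimal sketch (hypothetical maxpool_sketch; the floor-based output size below is a simplification of the ceil-based sizing the Init functions appear to use):

#include <algorithm>
#include <cstddef>
#include <vector>

// Illustrative max pooling over a CHW float buffer; the floor-style output
// size keeps every window fully inside the input.
std::vector<float> maxpool_sketch(const std::vector<float> &in, int channels, int h, int w,
                                  int kernelSize, int stride) {
    const int oh = (h - kernelSize) / stride + 1;
    const int ow = (w - kernelSize) / stride + 1;
    std::vector<float> out(static_cast<std::size_t>(channels) * oh * ow);
    for (int c = 0; c < channels; ++c)
        for (int y = 0; y < oh; ++y)
            for (int x = 0; x < ow; ++x) {
                float best = in[(c * h + y * stride) * w + x * stride];
                for (int ky = 0; ky < kernelSize; ++ky)
                    for (int kx = 0; kx < kernelSize; ++kx)
                        best = std::max(best, in[(c * h + y * stride + ky) * w + x * stride + kx]);
                out[(c * oh + y) * ow + x] = best;   // keep the largest activation in the window
            }
    return out;
}
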
@@ -348,10 +348,10 @@ void maxPooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {

 /**
  * Average pooling initialization
- * @param pbox
- * @param Matrix
- * @param kernelSize
- * @param stride
+ * @param pbox input feature map
+ * @param Matrix output feature map
+ * @param kernelSize pooling kernel size
+ * @param stride stride
  */
 void avePoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {
     Matrix->width = ceil((float) (pbox->width - kernelSize) / stride + 1);

@@ -364,10 +364,10 @@ void avePoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride)

 /**
  * Average pooling
- * @param pbox
- * @param Matrix
- * @param kernelSize
- * @param stride
+ * @param pbox input feature map
+ * @param Matrix output feature map
+ * @param kernelSize pooling kernel size
+ * @param stride stride
  */
 void avePooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {
     if (pbox->pdata == NULL) {

@@ -402,8 +402,8 @@ void avePooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {

 /**
  * Activation function with coefficients (PReLU), initialization
- * @param prelu
- * @param width
+ * @param prelu activation weights
+ * @param width length
  */
 void pReluInit(struct pRelu *prelu, int width) {
     prelu->width = width;

@@ -414,9 +414,9 @@ void pReluInit(struct pRelu *prelu, int width) {

 /**
  * Activation function with coefficients (PReLU)
- * @param pbox
- * @param pbias
- * @param prelu_gmma
+ * @param pbox input feature
+ * @param pbias bias
+ * @param prelu_gmma activation weights
  */
 void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma) {
     if (pbox->pdata == NULL) {

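Note: prelu apparently adds the convolution bias and then applies the PReLU nonlinearity, scaling negative values by a per-channel coefficient instead of zeroing them. A sketch under that assumption (hypothetical prelu_sketch over a CHW float buffer):

#include <cstddef>

// Hypothetical sketch of bias + PReLU on a CHW float buffer:
// y = x + b, then y stays as is when positive and is scaled by the
// channel's gamma when negative.
void prelu_sketch(float *data, const float *bias, const float *gamma,
                  std::size_t channels, std::size_t plane) {   // plane = height * width
    for (std::size_t c = 0; c < channels; ++c) {
        float *p = data + c * plane;
        for (std::size_t i = 0; i < plane; ++i) {
            float v = p[i] + bias[c];
            p[i] = v > 0.0f ? v : gamma[c] * v;
        }
    }
}
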
@@ -445,8 +445,8 @@ void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma) {

 /**
  * Activation function without coefficients (ReLU)
- * @param pbox
- * @param pbias
+ * @param pbox input feature
+ * @param pbias bias
  */
 void relu(struct pBox *pbox, mydataFmt *pbias) {
     if (pbox->pdata == NULL) {

@@ -473,8 +473,8 @@ void relu(struct pBox *pbox, mydataFmt *pbias) {

 /**
  * Fully connected layer initialization
- * @param weight
- * @param outpBox
+ * @param weight weight parameters
+ * @param outpBox output feature map
  */
 void fullconnectInit(const Weight *weight, pBox *outpBox) {
     outpBox->channel = weight->selfChannel;

@@ -487,9 +487,9 @@ void fullconnectInit(const Weight *weight, pBox *outpBox) {

 /**
  * Fully connected layer
- * @param weight
- * @param pbox
- * @param outpBox
+ * @param weight weight parameters
+ * @param pbox input feature map
+ * @param outpBox output feature map
  */
 void fullconnect(const Weight *weight, const pBox *pbox, pBox *outpBox) {
     if (pbox->pdata == NULL) {

@@ -511,11 +511,11 @@ void fullconnect(const Weight *weight, const pBox *pbox, pBox *outpBox) {

 /**
  * Multiply a 1-D vector by a 2-D matrix
- * @param matrix
- * @param v
- * @param v_w
- * @param v_h
- * @param p
+ * @param matrix input feature map
+ * @param v weights
+ * @param v_w width of the weight matrix
+ * @param v_h height of the weight matrix
+ * @param p output feature map
  */
 void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int v_w, int v_h, mydataFmt *p) {
     for (int i = 0; i < v_h; i++) {

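Note: going by the parameter comments, this is the core of the fully connected layer: a flattened input of length v_w multiplied by a v_h x v_w weight matrix to give v_h outputs. A sketch under that assumption (hypothetical vectorXmatrix_sketch; the repository's actual loop order and weight layout may differ):

// Hypothetical sketch: p[i] = sum_j v[i * v_w + j] * matrix[j],
// with `matrix` the flattened input (length v_w), `v` the row-major
// v_h x v_w weight matrix and `p` the v_h outputs.
void vectorXmatrix_sketch(const float *matrix, const float *v, int v_w, int v_h, float *p) {
    for (int i = 0; i < v_h; ++i) {
        float acc = 0.0f;
        for (int j = 0; j < v_w; ++j) {
            acc += v[i * v_w + j] * matrix[j];
        }
        p[i] = acc;   // one output neuron
    }
}
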
@@ -528,9 +528,9 @@ void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int v_w, int v_h, mydataFmt

 /**
  * Read the model file
- * @param filename
- * @param dataNumber
- * @param pTeam
+ * @param filename file path
+ * @param dataNumber array of parameter counts
+ * @param pTeam array of variables
  * @param length
  */
 void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length) {

@@ -584,17 +584,17 @@ void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length

 /**
  * Convolution and fully connected layer initialization
- * @param weight
- * @param schannel
- * @param lchannel
- * @param kersize
- * @param stride
- * @param pad
- * @param w
- * @param h
- * @param padw
- * @param padh
- * @return
+ * @param weight weights
+ * @param schannel number of convolution kernels
+ * @param lchannel number of feature maps in the previous layer
+ * @param kersize convolution kernel size
+ * @param stride convolution stride
+ * @param pad whether the convolution is padded
+ * @param w convolution kernel width
+ * @param h convolution kernel height
+ * @param padw padding width
+ * @param padh padding height
+ * @return length of the parameters
  */
 // w sc lc ks s p kw kh
 long ConvAndFcInit(struct Weight *weight, int schannel, int lchannel, int kersize,

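Note: the @return comment says the function reports the parameter length it set up. As a rough sketch of that bookkeeping only (hypothetical conv_param_count_sketch; whether biases are counted here, and exactly how kersize interacts with w and h, is not visible in the diff):

// Hypothetical parameter-count bookkeeping for a conv/fc layer:
// schannel output channels, lchannel input channels, kersize x kersize
// kernels, or w x h when a non-square kernel is described.
long conv_param_count_sketch(int schannel, int lchannel, int kersize, int w, int h) {
    long kw = (kersize > 0) ? kersize : w;
    long kh = (kersize > 0) ? kersize : h;
    return static_cast<long>(schannel) * lchannel * kw * kh;   // weights only, bias excluded
}
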
@@ -623,8 +623,10 @@ long ConvAndFcInit(struct Weight *weight, int schannel, int lchannel, int kersiz
     return byteLenght;
 }

+/**
+ * softmax
+ * @param pbox feature map
+ */
 void softmax(const struct pBox *pbox) {
     if (pbox->pdata == NULL) {
         cout << "the softmax's pdata is NULL , Please check !" << endl;

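Note: the new comment only names the operation, so for completeness: softmax exponentiates each score and renormalizes so the outputs sum to one. A numerically stable sketch over a plain float array (hypothetical softmax_sketch; the repository applies it to a pBox instead):

#include <cmath>
#include <cstddef>

// Numerically stable softmax: subtract the maximum before exponentiating
// so exp() cannot overflow, then normalize to a probability vector.
void softmax_sketch(float *x, std::size_t n) {
    if (n == 0) return;
    float m = x[0];
    for (std::size_t i = 1; i < n; ++i) m = x[i] > m ? x[i] : m;
    float sum = 0.0f;
    for (std::size_t i = 0; i < n; ++i) { x[i] = std::exp(x[i] - m); sum += x[i]; }
    for (std::size_t i = 0; i < n; ++i) x[i] /= sum;
}
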
@@ -766,6 +768,14 @@ void refineAndSquareBbox(vector<struct Bbox> &vecBbox, const int &height, const
     }
 }

+/**
+ * Residual branch merge initialization
+ * @param output output feature map
+ * @param c1 input feature map
+ * @param c2 input feature map
+ * @param c3 input feature map
+ * @param c4 input feature map
+ */
 void conv_mergeInit(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
     output->channel = 0;
     output->height = c1->height;

@@ -787,6 +797,14 @@ void conv_mergeInit(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
     memset(output->pdata, 0, output->width * output->height * output->channel * sizeof(mydataFmt));
 }

+/**
+ * Residual branch merge
+ * @param output output feature map
+ * @param c1 input feature map
+ * @param c2 input feature map
+ * @param c3 input feature map
+ * @param c4 input feature map
+ */
 void conv_merge(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
     // cout << "output->channel:" << output->channel << endl;
     if (c1 != 0) {

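Note: conv_merge takes up to four branch outputs (unused branches default to 0 in the header change further down), which in an Inception-ResNet block are normally concatenated along the channel dimension. A sketch under that assumption (hypothetical Branch / merge_sketch over raw CHW buffers, not the repository's pBox type):

#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical channel-wise concatenation of up to four CHW branches that
// share the same height and width. A null data pointer means "branch unused".
struct Branch { const float *data; int channels; };

std::vector<float> merge_sketch(const std::vector<Branch> &branches, int height, int width) {
    int total = 0;
    for (const Branch &b : branches) if (b.data) total += b.channels;
    std::vector<float> out(static_cast<std::size_t>(total) * height * width);
    float *dst = out.data();
    for (const Branch &b : branches) {
        if (!b.data) continue;
        std::size_t n = static_cast<std::size_t>(b.channels) * height * width;
        std::memcpy(dst, b.data, n * sizeof(float));   // append this branch's channels
        dst += n;
    }
    return out;
}
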
@@ -816,7 +834,16 @@ void conv_merge(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
     } else { cout << "conv_mergeInit" << endl; }
 }

-void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) {
+/**
+ * Initialization for the scaled residual addition used in the residual blocks
+ * @param inpbox input feature map
+ * @param temppbox input feature map
+ * @param outpBox output feature map
+ */
+void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox) {
     if (!((inpbox->width == temppbox->width) && (inpbox->height == temppbox->height) &&
           (inpbox->channel == temppbox->channel)))
         cout << "the mulandaddInit is failed!!" << endl;
     outpBox->channel = temppbox->channel;
     outpBox->width = temppbox->width;
     outpBox->height = temppbox->height;

@@ -825,6 +852,13 @@ void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, floa
     memset(outpBox->pdata, 0, outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt));
 }

+/**
+ * Scaled residual addition, used repeatedly in the residual network
+ * @param inpbox input feature map
+ * @param temppbox input feature map
+ * @param outpBox output feature map
+ * @param scale scale factor
+ */
 void mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) {
     mydataFmt *ip = inpbox->pdata;
     mydataFmt *tp = temppbox->pdata;

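Note: this commit also moves the scale argument off mulandaddInit and onto mulandadd (see the Inception_resnet_* call sites above and the header change at the end), so initialization only shapes the output while the add applies the branch scaling. A sketch of what the scaled add presumably computes (hypothetical mulandadd_sketch over raw float buffers; the exact formula is assumed):

#include <cstddef>

// Hypothetical element-wise residual merge: out = in + scale * branch,
// matching how Inception-ResNet blocks scale a branch before adding it
// back onto the shortcut path.
void mulandadd_sketch(const float *in, const float *branch, float *out,
                      std::size_t count, float scale) {
    for (std::size_t i = 0; i < count; ++i) {
        out[i] = in[i] + scale * branch[i];
    }
}

Call pattern after this commit, as seen in Inception_resnet_A:
    mulandaddInit(input, conv8_out, output);        // allocate and shape the output only
    mulandadd(input, conv8_out, output, scale);     // apply the scaled residual add
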
@@ -837,10 +871,10 @@ void mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float sc

 /**
  * BN (batch normalization) initialization
- * @param var
- * @param mean
- * @param beta
- * @param width
+ * @param var variance
+ * @param mean mean
+ * @param beta beta
+ * @param width number of parameters
  */
 void BatchNormInit(struct BN *var, struct BN *mean, struct BN *beta, int width) {
     var->width = width;

@@ -861,10 +895,10 @@ void BatchNormInit(struct BN *var, struct BN *mean, struct BN *beta, int width)

 /**
  * BN (batch normalization) implementation
- * @param pbox
- * @param var
- * @param mean
- * @param beta
+ * @param pbox input feature map
+ * @param var variance
+ * @param mean mean
+ * @param beta beta
  */
 void BatchNorm(struct pBox *pbox, struct BN *var, struct BN *mean, struct BN *beta) {
     if (pbox->pdata == NULL) {

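Note: at inference time batch normalization only needs the stored statistics: each channel is centred on its mean, divided by sqrt(variance + eps) and offset by beta; the BN struct here carries no gamma/scale term, so none appears below. A sketch under those assumptions (hypothetical batchnorm_sketch; the eps value is assumed, not shown in the diff):

#include <cmath>
#include <cstddef>

// Hypothetical inference-time batch norm on a CHW float buffer:
// y = (x - mean[c]) / sqrt(var[c] + eps) + beta[c], one triple per channel.
void batchnorm_sketch(float *data, const float *var, const float *mean, const float *beta,
                      std::size_t channels, std::size_t plane, float eps = 1e-3f) {
    for (std::size_t c = 0; c < channels; ++c) {
        float inv = 1.0f / std::sqrt(var[c] + eps);
        float *p = data + c * plane;
        for (std::size_t i = 0; i < plane; ++i) {
            p[i] = (p[i] - mean[c]) * inv + beta[c];
        }
    }
}
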
@@ -68,7 +68,7 @@ void conv_merge(pBox *output, pBox *c1 = 0, pBox *c2 = 0, pBox *c3 = 0, pBox *c4
 void conv_mergeInit(pBox *output, pBox *c1 = 0, pBox *c2 = 0, pBox *c3 = 0, pBox *c4 = 0);

-void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale);
+void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox);

 void mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale = 1);