整理代码结构

整理代码结构

Co-Authored-By: Chris Kong <609027949@qq.com>
This commit is contained in:
2019-12-28 17:48:50 +08:00
parent d7a51fe42e
commit 729eecac2e
8 changed files with 301 additions and 404 deletions

View File

@@ -50,7 +50,7 @@ void image2Matrix(const Mat &image, const struct pBox *pbox, int num) {
mydataFmt mymean, mystddev;
// prewhiten
if (num != 0) {
meanAndDev(image, mymean, mystddev);
MeanAndDev(image, mymean, mystddev);
cout << mymean << "----" << mystddev << endl;
size = image.cols * image.rows * image.channels();
sqr = sqrt(double(size));
@@ -78,14 +78,11 @@ void image2Matrix(const Mat &image, const struct pBox *pbox, int num) {
}
}
void meanAndDev(const Mat &image, mydataFmt &p, mydataFmt &q) {
void MeanAndDev(const Mat &image, mydataFmt &p, mydataFmt &q) {
mydataFmt meansum = 0, stdsum = 0;
for (int rowI = 0; rowI < image.rows; rowI++) {
for (int colK = 0; colK < image.cols; colK++) {
meansum += image.at<Vec3b>(rowI, colK)[0] + image.at<Vec3b>(rowI, colK)[1] + image.at<Vec3b>(rowI, colK)[2];
// cout << int(image.at<Vec3b>(rowI, colK)[0]) << endl;
// cout << int(image.at<Vec3b>(rowI, colK)[1]) << endl;
// cout << int(image.at<Vec3b>(rowI, colK)[2]) << endl;
}
}
p = meansum / (image.cols * image.rows * image.channels());
@@ -172,12 +169,6 @@ void convolutionInit(const Weight *weight, pBox *pbox, pBox *outpBox) {
}
void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) {
// if (weight->pad != 0) {
// pBox *padpbox = new pBox;
// featurePadInit(outpBox, padpbox, weight->pad, weight->padw, weight->padh);
// featurePad(outpBox, padpbox, weight->pad, weight->padw, weight->padh);
// *outpBox = *padpbox;
// }
int ckh, ckw, ckd, stride, cknum, ckpad, imginputh, imginputw, imginputd, Nh, Nw;
mydataFmt *ck, *imginput;
// float *output = outpBox->pdata;
@@ -214,15 +205,6 @@ void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) {
+ (k * stride + i1)
+ m * imginputh * imginputw]
* ck[i * ckh * ckw * ckd + m * ckh * ckw + n * ckw + i1];
// cout << "(" << imginput[(j * stride + n) * imginputw
// + (k * stride + i1)
// + m * imginputh * imginputw] << ")x("
// << ck[i * ckh * ckw * ckd + m * ckh * ckw + n * ckw + i1] << ")="
// << imginput[(j * stride + n) * imginputw
// + (k * stride + i1)
// + m * imginputh * imginputw]
// * ck[i * ckh * ckw * ckd + m * ckh * ckw + n * ckw + i1] << endl;
// cout << temp << endl;
}
}
}
@@ -231,7 +213,6 @@ void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) {
}
}
}
// cout << "output->pdata:" << (outpBox->pdata[10]) << endl;
}
void maxPoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride, int flag) {
@@ -340,6 +321,13 @@ void avePooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) {
}
}
/**
* 激活函数 有系数
* @param pbox
* @param pbias
* @param prelu_gmma
*/
void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma) {
if (pbox->pdata == NULL) {
cout << "the pRelu feature is NULL!!" << endl;
@@ -365,6 +353,11 @@ void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma) {
}
}
/**
* 激活函数 没有系数
* @param pbox
* @param pbias
*/
void relu(struct pBox *pbox, mydataFmt *pbias) {
if (pbox->pdata == NULL) {
cout << "the Relu feature is NULL!!" << endl;
@@ -411,24 +404,17 @@ void fullconnect(const Weight *weight, const pBox *pbox, pBox *outpBox) {
// row no trans A's row A'col
//cblas_sgemv(CblasRowMajor, CblasNoTrans, weight->selfChannel, weight->lastChannel, 1, weight->pdata, weight->lastChannel, pbox->pdata, 1, 0, outpBox->pdata, 1);
vectorXmatrix(pbox->pdata, weight->pdata,
pbox->width * pbox->height * pbox->channel,
weight->lastChannel, weight->selfChannel,
outpBox->pdata);
}
void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int size, int v_w, int v_h, mydataFmt *p) {
/**
 * Row-major matrix-vector product: p[row] = dot(matrix, v_row).
 *
 * @param matrix input vector, length v_w
 * @param v      weight matrix stored row-major, v_h rows of v_w elements each
 * @param v_w    row length (input vector length)
 * @param v_h    number of rows (output vector length)
 * @param p      output vector, length v_h; fully overwritten
 */
void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int v_w, int v_h, mydataFmt *p) {
    for (int row = 0; row < v_h; ++row) {
        mydataFmt *weightRow = v + row * v_w;
        p[row] = 0;
        for (int col = 0; col < v_w; ++col) {
            p[row] += matrix[col] * weightRow[col];
        }
    }
}
void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length) {
@@ -481,19 +467,15 @@ void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length
}
// w sc lc ks s p kw kh
long initConvAndFc(struct Weight *weight, int schannel, int lchannel, int kersize,
long ConvAndFcInit(struct Weight *weight, int schannel, int lchannel, int kersize,
int stride, int pad, int w, int h, int padw, int padh) {
weight->selfChannel = schannel;
weight->lastChannel = lchannel;
weight->kernelSize = kersize;
// if (kersize == 0) {
weight->h = h;
weight->w = w;
// }
// if (pad == -1) {
weight->padh = padh;
weight->padw = padw;
// }
weight->stride = stride;
weight->pad = pad;
weight->pbias = (mydataFmt *) malloc(schannel * sizeof(mydataFmt));
@@ -511,7 +493,7 @@ long initConvAndFc(struct Weight *weight, int schannel, int lchannel, int kersiz
return byteLenght;
}
void initpRelu(struct pRelu *prelu, int width) {
void pReluInit(struct pRelu *prelu, int width) {
prelu->width = width;
prelu->pdata = (mydataFmt *) malloc(width * sizeof(mydataFmt));
if (prelu->pdata == NULL)cout << "prelu apply for memory failed!!!!";
@@ -652,7 +634,76 @@ void refineAndSquareBbox(vector<struct Bbox> &vecBbox, const int &height, const
}
}
void initBN(struct BN *var, struct BN *mean, struct BN *beta, int width) {
/**
 * Allocate an output feature box sized to hold up to four feature maps
 * concatenated along the channel axis.
 *
 * Channels accumulate only down the chain c1 -> c2 -> c3 -> c4: a later box
 * is counted only when every earlier one is non-NULL (this mirrors the copy
 * logic in conv_merge). Height/width are taken from c1; all inputs are
 * assumed to share the same spatial size -- TODO confirm at call sites.
 *
 * @param output box to initialize; pdata is heap-allocated and zero-filled
 * @param c1..c4 source boxes; any may be NULL
 */
void conv_mergeInit(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
    output->channel = 0;
    output->height = 0;
    output->width = 0;
    output->pdata = NULL;
    if (c1 != 0) {
        // Fixed: original read c1->height/c1->width before the NULL check,
        // dereferencing a NULL pointer when c1 == 0.
        output->height = c1->height;
        output->width = c1->width;
        output->channel = c1->channel;
        if (c2 != 0) {
            output->channel += c2->channel;
            if (c3 != 0) {
                output->channel += c3->channel;
                if (c4 != 0) {
                    output->channel += c4->channel;
                }
            }
        }
    }
    size_t bytes = (size_t) output->width * output->height * output->channel * sizeof(mydataFmt);
    output->pdata = (mydataFmt *) malloc(bytes);
    if (output->pdata == NULL) {
        cout << "the conv_mergeInit is failed!!" << endl;
        // Fixed: original fell through and memset a NULL buffer.
        return;
    }
    memset(output->pdata, 0, bytes);
}
/**
 * Concatenate up to four feature boxes into output along the channel axis.
 *
 * Copies follow the chain c1 -> c2 -> c3 -> c4: a later box is copied only
 * when every earlier one is non-NULL (matches the sizing in conv_mergeInit).
 * output->pdata must already be allocated large enough by conv_mergeInit.
 *
 * @param output destination box (pre-allocated)
 * @param c1..c4 source boxes; any may be NULL
 */
void conv_merge(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
    if (c1 == 0) {
        // Fixed: original message named the wrong function ("conv_mergeInit").
        cout << "conv_merge: c1 is NULL" << endl;
        return;
    }
    // memcpy replaces the original element-by-element copy loops.
    long offset = c1->height * c1->width * c1->channel;
    memcpy(output->pdata, c1->pdata, offset * sizeof(mydataFmt));
    if (c2 != 0) {
        long count2 = c2->height * c2->width * c2->channel;
        memcpy(output->pdata + offset, c2->pdata, count2 * sizeof(mydataFmt));
        offset += count2;
        if (c3 != 0) {
            long count3 = c3->height * c3->width * c3->channel;
            memcpy(output->pdata + offset, c3->pdata, count3 * sizeof(mydataFmt));
            offset += count3;
            if (c4 != 0) {
                long count4 = c4->height * c4->width * c4->channel;
                memcpy(output->pdata + offset, c4->pdata, count4 * sizeof(mydataFmt));
            }
        }
    }
}
/**
 * Allocate outpBox with the same geometry as temppbox, zero-filled.
 *
 * @param inpbox   unused here; kept for signature symmetry with mulandadd
 * @param temppbox box whose width/height/channel are copied into outpBox
 * @param outpBox  box to initialize; pdata is heap-allocated and zeroed
 * @param scale    unused here; kept for signature symmetry with mulandadd
 */
void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) {
    (void) inpbox;  // intentionally unused
    (void) scale;   // intentionally unused
    outpBox->channel = temppbox->channel;
    outpBox->width = temppbox->width;
    outpBox->height = temppbox->height;
    size_t bytes = (size_t) outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt);
    outpBox->pdata = (mydataFmt *) malloc(bytes);
    if (outpBox->pdata == NULL) {
        cout << "the mulandaddInit is failed!!" << endl;
        // Fixed: original fell through and memset a NULL buffer on OOM.
        return;
    }
    memset(outpBox->pdata, 0, bytes);
}
void mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) {
mydataFmt *ip = inpbox->pdata;
mydataFmt *tp = temppbox->pdata;
mydataFmt *op = outpBox->pdata;
long dis = inpbox->width * inpbox->height * inpbox->channel;
for (long i = 0; i < dis; i++) {
op[i] = ip[i] + tp[i] * scale;
}
}
void BatchNormInit(struct BN *var, struct BN *mean, struct BN *beta, int width) {
var->width = width;
var->pdata = (mydataFmt *) malloc(width * sizeof(mydataFmt));
if (var->pdata == NULL)cout << "prelu apply for memory failed!!!!";
@@ -689,18 +740,8 @@ void BatchNorm(struct pBox *pbox, struct BN *var, struct BN *mean, struct BN *be
for (int channel = 0; channel < pbox->channel; channel++) {
temp = gamma / sqrt(((vp[channel]) + epsilon));
for (int col = 0; col < dis; col++) {
// *pp = *pp + *vp;
// cout << ((*pp) / (sqrt(*vp + bias))) << endl;
// cout << ((*pp) * (*mp) / (sqrt(*vp + bias))) << endl;
// if (*pp == 0) {
// cout << *vp << "===" << *mp << "===" << *bp << endl;
// }
*pp = temp * (*pp) + ((bp[channel]) - temp * (mp[channel]));
// cout << *pp << endl;
pp++;
}
// vp++;
// mp++;
// bp++;
}
}