From 05d26d4b0fb41ccfe0ce0b7a202b30955443e947 Mon Sep 17 00:00:00 2001 From: ChrisKong <609027949@qq.com> Date: Mon, 30 Dec 2019 10:51:48 +0800 Subject: [PATCH] create project create project --- src/facenet.cpp | 965 ++++++++++++++++++++++++++++++++++++++++++++++++ src/facenet.h | 54 +++ src/network.cpp | 467 +++++++++++++++++++++++ src/network.h | 55 +++ src/pBox.cpp | 25 ++ src/pBox.h | 68 ++++ src/pikaqiu.cpp | 54 +++ 7 files changed, 1688 insertions(+) create mode 100644 src/facenet.cpp create mode 100644 src/facenet.h create mode 100644 src/network.cpp create mode 100644 src/network.h create mode 100644 src/pBox.cpp create mode 100644 src/pBox.h create mode 100644 src/pikaqiu.cpp diff --git a/src/facenet.cpp b/src/facenet.cpp new file mode 100644 index 0000000..75ab964 --- /dev/null +++ b/src/facenet.cpp @@ -0,0 +1,965 @@ +// +// Created by ChrisKong on 2019/10/17. +// + +#include "facenet.h" + +facenet::facenet() { + +} + +facenet::~facenet() { + +} + +void facenet::printData(pBox *in) { + for (long i = 0; i < in->height * in->width * in->channel; ++i) { + printf("%f\n", in->pdata[i]); + } + cout << "printData" << endl; +} + +void facenet::run(Mat &image, mydataFmt *o, int count) { + cout << "=====This is No." + to_string(count) + " Picture=====" << endl; + pBox *output = new pBox; + pBox *input; +// prewhiten(image); + Stem(image, output); +// printData(output); +// return; + cout << "Stem Finally" << endl; + input = output; + output = new pBox; + for (int i = 0; i < 5; ++i) { +// model_128/block35_1_list.txt + string filepath = "../model_" + to_string(Num) + "/block35_" + to_string((i + 1)) + "_list.txt"; + Inception_resnet_A(input, output, filepath, 0.17); + input = output; + output = new pBox; + } + cout << "Inception_resnet_A Finally" << endl; + Reduction_A(input, output); + cout << "Reduction_A Finally" << endl; + input = output; +// freepBox(output); + output = new pBox; + for (int j = 0; j < 10; ++j) { +// model_128/block17_1_list.txt + string filepath = "../model_" + to_string(Num) + "/block17_" + to_string((j + 1)) + "_list.txt"; + Inception_resnet_B(input, output, filepath, 0.1); + input = output; + output = new pBox; + } + cout << "Inception_resnet_B Finally" << endl; + Reduction_B(input, output); + cout << "Reduciotn_B Finally" << endl; + input = output; +// freepBox(output); + output = new pBox; + for (int k = 0; k < 5; ++k) { +// model_128/block8_1_list.txt + string filepath = "../model_" + to_string(Num) + "/block8_" + to_string((k + 1)) + "_list.txt"; +// cout << filepath << endl; + Inception_resnet_C(input, output, filepath, 0.2); + input = output; +// freepBox(output); + output = new pBox; + } + cout << "Inception_resnet_C Finally" << endl; + Inception_resnet_C_None(input, output, "../model_" + to_string(Num) + "/Block8_list.txt"); + cout << "Inception_resnet_C_None Finally" << endl; + input = output; +// freepBox(output); + output = new pBox; + AveragePooling(input, output); + cout << "AveragePooling Finally" << endl; + input = output; +// freepBox(output); + output = new pBox; + Flatten(input, output); + cout << "Flatten Finally" << endl; + input = output; + output = new pBox; + fully_connect(input, output, "../model_" + to_string(Num) + "/Bottleneck_list.txt"); + cout << "Fully_Connect Finally" << endl; + mydataFmt sq = 0, sum = 0; + for (int i = 0; i < Num; ++i) { + sq = pow(output->pdata[i], 2); + sum += sq; + } + mydataFmt divisor = 0; + if (sum < 1e-10) { + divisor = sqrt(1e-10); + } else { + divisor = sqrt(sum); + } + for (int j = 0; j < Num; ++j) { + o[j] 
= output->pdata[j] / divisor; + } +// memcpy(o, output->pdata, Num * sizeof(mydataFmt)); + freepBox(output); +} + +void facenet::Stem(Mat &image, pBox *output) { + pBox *rgb = new pBox; + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + struct Weight *conv6_wb = new Weight; + + struct pBox *pooling1_out = new pBox; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + struct pRelu *prelu_gmma5 = new pRelu; + struct pRelu *prelu_gmma6 = new pRelu; + + + long conv1 = initConvAndFc(conv1_wb, 32, 3, 3, 2, 0); + initpRelu(prelu_gmma1, 32); + long conv2 = initConvAndFc(conv2_wb, 32, 32, 3, 1, 0); + initpRelu(prelu_gmma2, 32); + long conv3 = initConvAndFc(conv3_wb, 64, 32, 3, 1, 1); + initpRelu(prelu_gmma3, 64); + long conv4 = initConvAndFc(conv4_wb, 80, 64, 1, 1, 0); + initpRelu(prelu_gmma4, 80); + long conv5 = initConvAndFc(conv5_wb, 192, 80, 3, 1, 0); + initpRelu(prelu_gmma5, 192); + long conv6 = initConvAndFc(conv6_wb, 256, 192, 3, 2, 0); + initpRelu(prelu_gmma6, 256); + long dataNumber[18] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 0, 0, conv6, 0, + 0}; + + mydataFmt *pointTeam[18] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias, prelu_gmma5->pdata, \ + conv6_wb->pdata, conv6_wb->pbias, prelu_gmma6->pdata,}; + string filename = "../model_" + to_string(Num) + "/stem_list.txt"; + readData(filename, dataNumber, pointTeam); + + + +// if (firstFlag) { + image2MatrixInit(image, rgb); + image2Matrix(image, rgb, 1); + + convolutionInit(conv1_wb, rgb, conv1_out); + //conv1 149 x 149 x 32 + convolution(conv1_wb, rgb, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + convolutionInit(conv2_wb, conv1_out, conv2_out); + //conv2 147 x 147 x 32 + convolution(conv2_wb, conv1_out, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 147 x 147 x 64 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + maxPoolingInit(conv3_out, pooling1_out, 3, 2); + //maxPooling 73 x 73 x 64 + maxPooling(conv3_out, pooling1_out, 3, 2); + + convolutionInit(conv4_wb, pooling1_out, conv4_out); + //conv4 73 x 73 x 80 + convolution(conv4_wb, pooling1_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + + convolutionInit(conv5_wb, conv4_out, conv5_out); + //conv5 71 x 71 x 192 + convolution(conv5_wb, conv4_out, conv5_out); + prelu(conv5_out, conv5_wb->pbias, prelu_gmma5->pdata); + + + convolutionInit(conv6_wb, conv5_out, output); + //conv6 35 x 35 x 256 + convolution(conv6_wb, conv5_out, output); + prelu(output, conv6_wb->pbias, prelu_gmma6->pdata); +// firstFlag = false; +// } + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(pooling1_out); + + freepBox(rgb); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + 
freeWeight(conv4_wb); + freeWeight(conv5_wb); + freeWeight(conv6_wb); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); + freepRelu(prelu_gmma5); + freepRelu(prelu_gmma6); +} + +void facenet::Inception_resnet_A(pBox *input, pBox *output, string filepath, float scale) { + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + pBox *conv6_out = new pBox; + pBox *conv7_out = new pBox; + pBox *conv8_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + struct Weight *conv6_wb = new Weight; + struct Weight *conv7_wb = new Weight; + struct Weight *conv8_wb = new Weight; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + struct pRelu *prelu_gmma5 = new pRelu; + struct pRelu *prelu_gmma6 = new pRelu; + struct pRelu *prelu_gmma8 = new pRelu; + + long conv1 = initConvAndFc(conv1_wb, 32, 256, 1, 1, 0); + initpRelu(prelu_gmma1, 32); + + long conv2 = initConvAndFc(conv2_wb, 32, 256, 1, 1, 0); + initpRelu(prelu_gmma2, 32); + long conv3 = initConvAndFc(conv3_wb, 32, 32, 3, 1, 1); + initpRelu(prelu_gmma3, 32); + + long conv4 = initConvAndFc(conv4_wb, 32, 256, 1, 1, 0); + initpRelu(prelu_gmma4, 32); + long conv5 = initConvAndFc(conv5_wb, 32, 32, 3, 1, 1); + initpRelu(prelu_gmma5, 32); + long conv6 = initConvAndFc(conv6_wb, 32, 32, 3, 1, 1); + initpRelu(prelu_gmma6, 32); + + long conv7 = initConvAndFc(conv7_wb, 256, 96, 1, 1, 0); + + long conv8 = initConvAndFc(conv8_wb, 256, 0, 0, 0, 0); + initpRelu(prelu_gmma8, 256); + + long dataNumber[23] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 0, 0, conv6, 0, + 0, conv7, 256, conv8, 0, 0}; + + mydataFmt *pointTeam[23] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias, prelu_gmma5->pdata, \ + conv6_wb->pdata, conv6_wb->pbias, prelu_gmma6->pdata, \ + conv7_wb->pdata, conv7_wb->pbias, \ + conv8_wb->pdata, conv8_wb->pbias, prelu_gmma8->pdata}; + + readData(filepath, dataNumber, pointTeam); + + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 35 x 35 x 32 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, input, conv2_out); + //conv2 35 x 35 x 32 + convolution(conv2_wb, input, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 35 x 35 x 32 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, input, conv4_out); + //conv4 35 x 35 x 32 + convolution(conv4_wb, input, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + convolutionInit(conv5_wb, conv4_out, conv5_out); + //conv5 35 x 35 x 32 + convolution(conv5_wb, conv4_out, conv5_out); + prelu(conv5_out, conv5_wb->pbias, prelu_gmma5->pdata); + convolutionInit(conv6_wb, conv5_out, conv6_out); + //conv6 35 x 35 x 32 + convolution(conv6_wb, conv5_out, conv6_out); + prelu(conv6_out, conv6_wb->pbias, 
prelu_gmma6->pdata); + + conv_mergeInit(conv7_out, conv1_out, conv3_out, conv6_out); + //35 × 35 × 96 + conv_merge(conv7_out, conv1_out, conv3_out, conv6_out); + + convolutionInit(conv7_wb, conv7_out, conv8_out); + //35*35*256 + convolution(conv7_wb, conv7_out, conv8_out); + addbias(conv8_out, conv7_wb->pbias); + + mulandaddInit(input, conv8_out, output, scale); + mulandadd(input, conv8_out, output, scale); + prelu(output, conv8_wb->pbias, prelu_gmma8->pdata); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(conv6_out); + freepBox(conv7_out); + freepBox(conv8_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + freeWeight(conv5_wb); + freeWeight(conv6_wb); + freeWeight(conv7_wb); + freeWeight(conv8_wb); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); + freepRelu(prelu_gmma5); + freepRelu(prelu_gmma6); + freepRelu(prelu_gmma8); +} + +void facenet::Reduction_A(pBox *input, pBox *output) { + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + + struct pBox *pooling1_out = new pBox; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + + long conv1 = initConvAndFc(conv1_wb, 384, 256, 3, 2, 0); + initpRelu(prelu_gmma1, 384); + long conv2 = initConvAndFc(conv2_wb, 192, 256, 1, 1, 0); + initpRelu(prelu_gmma2, 192); + long conv3 = initConvAndFc(conv3_wb, 192, 192, 3, 1, 0); + initpRelu(prelu_gmma3, 192); + long conv4 = initConvAndFc(conv4_wb, 256, 192, 3, 2, 0); + initpRelu(prelu_gmma4, 256); + long dataNumber[12] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0}; + + mydataFmt *pointTeam[12] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata}; + string filename = "../model_" + to_string(Num) + "/Mixed_6a_list.txt"; + readData(filename, dataNumber, pointTeam); + + maxPoolingInit(input, pooling1_out, 3, 2); + // 17*17*256 + maxPooling(input, pooling1_out, 3, 2); + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 17 x 17 x 384 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, input, conv2_out); + //conv2 35 x 35 x 192 + convolution(conv2_wb, input, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 35 x 35 x 192 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, conv3_out, conv4_out); + //conv4 17 x 17 x 256 + convolution(conv4_wb, conv3_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + conv_mergeInit(output, pooling1_out, conv1_out, conv4_out); + //17×17×896 + conv_merge(output, pooling1_out, conv1_out, conv4_out); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + + 
freepBox(pooling1_out); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); +} + +void facenet::Inception_resnet_B(pBox *input, pBox *output, string filepath, float scale) { + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + pBox *conv6_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + struct Weight *conv6_wb = new Weight; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + struct pRelu *prelu_gmma6 = new pRelu; + + long conv1 = initConvAndFc(conv1_wb, 128, 896, 1, 1, 0); + initpRelu(prelu_gmma1, 128); + long conv2 = initConvAndFc(conv2_wb, 128, 896, 1, 1, 0); + initpRelu(prelu_gmma2, 128); + long conv3 = initConvAndFc(conv3_wb, 128, 128, 0, 1, -1, 1, 7, 0, 3);//[1,7] + initpRelu(prelu_gmma3, 128); + long conv4 = initConvAndFc(conv4_wb, 128, 128, 0, 1, -1, 7, 1, 3, 0);//[7,1] + initpRelu(prelu_gmma4, 128); + + long conv5 = initConvAndFc(conv5_wb, 896, 256, 1, 1, 0); + + long conv6 = initConvAndFc(conv6_wb, 896, 0, 0, 0, 0); + initpRelu(prelu_gmma6, 896); + + long dataNumber[17] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 896, conv6, 0, + 0}; + + mydataFmt *pointTeam[17] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias, \ + conv6_wb->pdata, conv6_wb->pbias, prelu_gmma6->pdata}; + + + readData(filepath, dataNumber, pointTeam); + + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 17*17*128 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, input, conv2_out); + //conv2 17*17*128 + convolution(conv2_wb, input, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 17*17*128 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, conv3_out, conv4_out); + //conv4 17*17*128 + convolution(conv4_wb, conv3_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + + conv_mergeInit(conv5_out, conv1_out, conv4_out); + //17*17*256 + conv_merge(conv5_out, conv1_out, conv4_out); + + convolutionInit(conv5_wb, conv5_out, conv6_out); + //conv5 17*17*896 + convolution(conv5_wb, conv5_out, conv6_out); + addbias(conv6_out, conv5_wb->pbias); + + mulandaddInit(input, conv6_out, output, scale); + mulandadd(input, conv6_out, output, scale); + prelu(output, conv6_wb->pbias, prelu_gmma6->pdata); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(conv6_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + freeWeight(conv5_wb); + freeWeight(conv6_wb); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); +// freepRelu(prelu_gmma5); + freepRelu(prelu_gmma6); +} + +void facenet::Reduction_B(pBox *input, pBox *output) { + 
pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + pBox *conv6_out = new pBox; + pBox *conv7_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + struct Weight *conv6_wb = new Weight; + struct Weight *conv7_wb = new Weight; + + struct pBox *pooling1_out = new pBox; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + struct pRelu *prelu_gmma5 = new pRelu; + struct pRelu *prelu_gmma6 = new pRelu; + struct pRelu *prelu_gmma7 = new pRelu; + + long conv1 = initConvAndFc(conv1_wb, 256, 896, 1, 1, 0); + initpRelu(prelu_gmma1, 256); + long conv2 = initConvAndFc(conv2_wb, 384, 256, 3, 2, 0); + initpRelu(prelu_gmma2, 384); + + long conv3 = initConvAndFc(conv3_wb, 256, 896, 1, 1, 0); + initpRelu(prelu_gmma3, 256); + long conv4 = initConvAndFc(conv4_wb, 256, 256, 3, 2, 0); + initpRelu(prelu_gmma4, 256); + + long conv5 = initConvAndFc(conv5_wb, 256, 896, 1, 1, 0); + initpRelu(prelu_gmma5, 256); + long conv6 = initConvAndFc(conv6_wb, 256, 256, 3, 1, 1); + initpRelu(prelu_gmma6, 256); + long conv7 = initConvAndFc(conv7_wb, 256, 256, 3, 2, 0); + initpRelu(prelu_gmma7, 256); + + long dataNumber[21] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 0, 0, conv6, + 0, 0, conv7, 0, 0}; + + mydataFmt *pointTeam[21] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias, prelu_gmma5->pdata, \ + conv6_wb->pdata, conv6_wb->pbias, prelu_gmma6->pdata, \ + conv7_wb->pdata, conv7_wb->pbias, prelu_gmma7->pdata,}; + string filename = "../model_" + to_string(Num) + "/Mixed_7a_list.txt"; + readData(filename, dataNumber, pointTeam); + + + maxPoolingInit(input, pooling1_out, 3, 2, 1); + // 8*8*896 + maxPooling(input, pooling1_out, 3, 2); + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 17 x 17 x 256 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, conv1_out, conv2_out); + //conv2 8 x 8 x 384 + convolution(conv2_wb, conv1_out, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, input, conv3_out); + //conv3 17 x 17 x 256 + convolution(conv3_wb, input, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, conv3_out, conv4_out); + //conv4 8 x 8 x 256 + convolution(conv4_wb, conv3_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + + convolutionInit(conv5_wb, input, conv5_out); + //conv5 17 x 17 x 256 + convolution(conv5_wb, input, conv5_out); + prelu(conv5_out, conv5_wb->pbias, prelu_gmma5->pdata); + + convolutionInit(conv6_wb, conv5_out, conv6_out); + //conv6 17 x 17 x 256 + convolution(conv6_wb, conv5_out, conv6_out); + prelu(conv6_out, conv6_wb->pbias, prelu_gmma6->pdata); + + convolutionInit(conv7_wb, conv6_out, conv7_out); + //conv6 8 x 8 x 256 + convolution(conv7_wb, conv6_out, conv7_out); + prelu(conv7_out, conv7_wb->pbias, prelu_gmma7->pdata); + + conv_mergeInit(output, conv2_out, conv4_out, conv7_out, pooling1_out); + 
//8*8*1792 + conv_merge(output, conv2_out, conv4_out, conv7_out, pooling1_out); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(conv6_out); + freepBox(conv7_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + freeWeight(conv5_wb); + freeWeight(conv6_wb); + freeWeight(conv7_wb); + + freepBox(pooling1_out); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); + freepRelu(prelu_gmma5); + freepRelu(prelu_gmma6); + freepRelu(prelu_gmma7); +} + +void facenet::Inception_resnet_C(pBox *input, pBox *output, string filepath, float scale) { + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + pBox *conv6_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + struct Weight *conv6_wb = new Weight; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + struct pRelu *prelu_gmma6 = new pRelu; + + + long conv1 = initConvAndFc(conv1_wb, 192, 1792, 1, 1, 0); + initpRelu(prelu_gmma1, 192); + long conv2 = initConvAndFc(conv2_wb, 192, 1792, 1, 1, 0); + initpRelu(prelu_gmma2, 192); + long conv3 = initConvAndFc(conv3_wb, 192, 192, 0, 1, -1, 1, 3, 0, 1); + initpRelu(prelu_gmma3, 192); + long conv4 = initConvAndFc(conv4_wb, 192, 192, 0, 1, -1, 3, 1, 1, 0); + initpRelu(prelu_gmma4, 192); + long conv5 = initConvAndFc(conv5_wb, 1792, 384, 1, 1, 0); + + long conv6 = initConvAndFc(conv6_wb, 1792, 0, 0, 0, 0); + initpRelu(prelu_gmma6, 1792); + + long dataNumber[17] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 1792, conv6, 0, + 0}; + + + mydataFmt *pointTeam[17] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias, \ + conv6_wb->pdata, conv6_wb->pbias, prelu_gmma6->pdata}; + +// string filename = "../model_128/Repeat_2_list.txt"; +// int length = sizeof(dataNumber) / sizeof(*dataNumber); + readData(filepath, dataNumber, pointTeam); + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 8 x 8 x 192 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, input, conv2_out); + //conv2 8 x 8 x 192 + convolution(conv2_wb, input, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 8 x 8 x 192 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, conv3_out, conv4_out); + //conv4 8 x 8 x 192 + convolution(conv4_wb, conv3_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + + conv_mergeInit(conv5_out, conv1_out, conv4_out); + // 8*8*384 + conv_merge(conv5_out, conv1_out, conv4_out); + + convolutionInit(conv5_wb, conv5_out, conv6_out); + //conv5 8 x 8 x 1792 + convolution(conv5_wb, conv5_out, conv6_out); + addbias(conv6_out, conv5_wb->pbias); + + mulandaddInit(input, conv6_out, output, scale); + 
mulandadd(input, conv6_out, output, scale); + prelu(output, conv6_wb->pbias, prelu_gmma6->pdata); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(conv6_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + freeWeight(conv5_wb); + freeWeight(conv6_wb); + + freepRelu(prelu_gmma1); + freepRelu(prelu_gmma2); + freepRelu(prelu_gmma3); + freepRelu(prelu_gmma4); +// freepRelu(prelu_gmma5); + freepRelu(prelu_gmma6); +} + +void facenet::Inception_resnet_C_None(pBox *input, pBox *output, string filepath) { + pBox *conv1_out = new pBox; + pBox *conv2_out = new pBox; + pBox *conv3_out = new pBox; + pBox *conv4_out = new pBox; + pBox *conv5_out = new pBox; + pBox *conv6_out = new pBox; + + struct Weight *conv1_wb = new Weight; + struct Weight *conv2_wb = new Weight; + struct Weight *conv3_wb = new Weight; + struct Weight *conv4_wb = new Weight; + struct Weight *conv5_wb = new Weight; + + struct pRelu *prelu_gmma1 = new pRelu; + struct pRelu *prelu_gmma2 = new pRelu; + struct pRelu *prelu_gmma3 = new pRelu; + struct pRelu *prelu_gmma4 = new pRelu; + + + long conv1 = initConvAndFc(conv1_wb, 192, 1792, 1, 1, 0); + initpRelu(prelu_gmma1, 192); + long conv2 = initConvAndFc(conv2_wb, 192, 1792, 1, 1, 0); + initpRelu(prelu_gmma2, 192); + long conv3 = initConvAndFc(conv3_wb, 192, 192, 0, 1, -1, 1, 3, 0, 1); + initpRelu(prelu_gmma3, 192); + long conv4 = initConvAndFc(conv4_wb, 192, 192, 0, 1, -1, 3, 1, 1, 0); + initpRelu(prelu_gmma4, 192); + long conv5 = initConvAndFc(conv5_wb, 1792, 384, 1, 1, 0); + + long dataNumber[14] = {conv1, 0, 0, conv2, 0, 0, conv3, 0, 0, conv4, 0, 0, conv5, 1792}; + + + mydataFmt *pointTeam[14] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata, \ + conv2_wb->pdata, conv2_wb->pbias, prelu_gmma2->pdata, \ + conv3_wb->pdata, conv3_wb->pbias, prelu_gmma3->pdata, \ + conv4_wb->pdata, conv4_wb->pbias, prelu_gmma4->pdata, \ + conv5_wb->pdata, conv5_wb->pbias}; + +// string filename = "../model_128/Repeat_2_list.txt"; +// int length = sizeof(dataNumber) / sizeof(*dataNumber); + readData(filepath, dataNumber, pointTeam); + + convolutionInit(conv1_wb, input, conv1_out); + //conv1 8 x 8 x 192 + convolution(conv1_wb, input, conv1_out); + prelu(conv1_out, conv1_wb->pbias, prelu_gmma1->pdata); + + convolutionInit(conv2_wb, input, conv2_out); + //conv2 8 x 8 x 192 + convolution(conv2_wb, input, conv2_out); + prelu(conv2_out, conv2_wb->pbias, prelu_gmma2->pdata); + + convolutionInit(conv3_wb, conv2_out, conv3_out); + //conv3 8 x 8 x 192 + convolution(conv3_wb, conv2_out, conv3_out); + prelu(conv3_out, conv3_wb->pbias, prelu_gmma3->pdata); + + convolutionInit(conv4_wb, conv3_out, conv4_out); + //conv4 8 x 8 x 192 + convolution(conv4_wb, conv3_out, conv4_out); + prelu(conv4_out, conv4_wb->pbias, prelu_gmma4->pdata); + + conv_mergeInit(conv5_out, conv1_out, conv4_out); + // 8*8*384 + conv_merge(conv5_out, conv1_out, conv4_out); + + convolutionInit(conv5_wb, conv5_out, conv6_out); + //conv5 8 x 8 x 1792 + convolution(conv5_wb, conv5_out, conv6_out); + addbias(conv6_out, conv5_wb->pbias); + + mulandaddInit(input, conv6_out, output, 1); + mulandadd(input, conv6_out, output); + + freepBox(conv1_out); + freepBox(conv2_out); + freepBox(conv3_out); + freepBox(conv4_out); + freepBox(conv5_out); + freepBox(conv6_out); + + freeWeight(conv1_wb); + freeWeight(conv2_wb); + freeWeight(conv3_wb); + freeWeight(conv4_wb); + freeWeight(conv5_wb); + + freepRelu(prelu_gmma1); + 
freepRelu(prelu_gmma2);
+    freepRelu(prelu_gmma3);
+    freepRelu(prelu_gmma4);
+}
+
+void facenet::AveragePooling(pBox *input, pBox *output) {
+//    cout << "size:" << input->height << endl;
+    avePoolingInit(input, output, input->height, 2);
+    avePooling(input, output, input->height, 2);
+}
+
+void facenet::Flatten(pBox *input, pBox *output) {
+    output->width = input->channel;
+    output->height = 1;
+    output->channel = 1;
+    output->pdata = (mydataFmt *) malloc(output->channel * output->width * output->height * sizeof(mydataFmt));
+    if (output->pdata == NULL)cout << "the Flatten is failed!!" << endl;
+    memcpy(output->pdata, input->pdata, output->channel * output->width * output->height * sizeof(mydataFmt));
+}
+
+//parameters are not yet configurable
+void facenet::fully_connect(pBox *input, pBox *output, string filepath) {
+    struct Weight *conv1_wb = new Weight;
+    struct pRelu *prelu_gmma1 = new pRelu;
+    long conv1 = initConvAndFc(conv1_wb, Num, 1792, input->height, 1, 0);
+    initpRelu(prelu_gmma1, Num);
+    long dataNumber[3] = {conv1, 0, 0};
+
+//    cout << to_string(sum) << endl;
+    mydataFmt *pointTeam[3] = {conv1_wb->pdata, conv1_wb->pbias, prelu_gmma1->pdata};
+//    string filename = "../model_128/Bottleneck_list.txt";
+//    int length = sizeof(dataNumber) / sizeof(*dataNumber);
+    readData(filepath, dataNumber, pointTeam);
+
+    fullconnectInit(conv1_wb, output);
+
+    //Bottleneck fully connected layer: 1792 -> Num
+    fullconnect(conv1_wb, input, output);
+//    prelu(output, conv1_wb->pbias, prelu_gmma1->pdata);
+
+    freeWeight(conv1_wb);
+    freepRelu(prelu_gmma1);
+}
+
+void facenet::conv_mergeInit(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) {
+    output->channel = 0;
+    if (c1 != 0) {
+        output->height = c1->height;
+        output->width = c1->width;
+        output->channel = c1->channel;
+        if (c2 != 0) {
+            output->channel += c2->channel;
+            if (c3 != 0) {
+                output->channel += c3->channel;
+                if (c4 != 0) {
+                    output->channel += c4->channel;
+                }
+            }
+        }
+    } else { cout << "conv_mergeInit: c1 is NULL!" << endl; }
+    output->pdata = (mydataFmt *) malloc(output->width * output->height * output->channel * sizeof(mydataFmt));
+    if (output->pdata == NULL)cout << "the conv_mergeInit is failed!!"
<< endl; + memset(output->pdata, 0, output->width * output->height * output->channel * sizeof(mydataFmt)); +} + +void facenet::conv_merge(pBox *output, pBox *c1, pBox *c2, pBox *c3, pBox *c4) { +// cout << "output->channel:" << output->channel << endl; + if (c1 != 0) { + long count1 = c1->height * c1->width * c1->channel; + //output->pdata = c1->pdata; + for (long i = 0; i < count1; i++) { + output->pdata[i] = c1->pdata[i]; + } + if (c2 != 0) { + long count2 = c2->height * c2->width * c2->channel; + for (long i = 0; i < count2; i++) { + output->pdata[count1 + i] = c2->pdata[i]; + } + if (c3 != 0) { + long count3 = c3->height * c3->width * c3->channel; + for (long i = 0; i < count3; i++) { + output->pdata[count1 + count2 + i] = c3->pdata[i]; + } + if (c4 != 0) { + long count4 = c4->height * c4->width * c4->channel; + for (long i = 0; i < count4; i++) { + output->pdata[count1 + count2 + count3 + i] = c4->pdata[i]; + } + } + } + } + } else { cout << "conv_mergeInit" << endl; } +// cout << "output->pdata:" << *(output->pdata) << endl; +} + +void facenet::mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) { + outpBox->channel = temppbox->channel; + outpBox->width = temppbox->width; + outpBox->height = temppbox->height; + outpBox->pdata = (mydataFmt *) malloc(outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt)); + if (outpBox->pdata == NULL)cout << "the mulandaddInit is failed!!" << endl; + memset(outpBox->pdata, 0, outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt)); +} + +void facenet::mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale) { + mydataFmt *ip = inpbox->pdata; + mydataFmt *tp = temppbox->pdata; + mydataFmt *op = outpBox->pdata; + long dis = inpbox->width * inpbox->height * inpbox->channel; + for (long i = 0; i < dis; i++) { + op[i] = ip[i] + tp[i] * scale; + } +} \ No newline at end of file diff --git a/src/facenet.h b/src/facenet.h new file mode 100644 index 0000000..1145a66 --- /dev/null +++ b/src/facenet.h @@ -0,0 +1,54 @@ +// +// Created by Lenovo on 2019/10/17. 
+// + +#ifndef MAIN_FACENET_H +#define MAIN_FACENET_H + +#include "network.h" + + +class facenet { +public: + facenet(); + + ~facenet(); + + void run(Mat &image, mydataFmt *o, int count = 1); + +private: + void Stem(Mat &image, pBox *output); + + void Inception_resnet_A(pBox *input, pBox *output, string filepath = "", float scale = 1.0); + + void Reduction_A(pBox *input, pBox *output); + + void Inception_resnet_B(pBox *input, pBox *output, string filepath = "", float scale = 1.0); + + void Reduction_B(pBox *input, pBox *output); + + void Inception_resnet_C(pBox *input, pBox *output, string filepath = "", float scale = 1.0); + + void Inception_resnet_C_None(pBox *input, pBox *output, string filepath = ""); + + void AveragePooling(pBox *input, pBox *output); + + void fully_connect(pBox *input, pBox *output, string filepath = ""); + + void conv_merge(pBox *output, pBox *c1 = 0, pBox *c2 = 0, pBox *c3 = 0, pBox *c4 = 0); + + void conv_mergeInit(pBox *output, pBox *c1 = 0, pBox *c2 = 0, pBox *c3 = 0, pBox *c4 = 0); + + void mulandaddInit(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale); + + void mulandadd(const pBox *inpbox, const pBox *temppbox, pBox *outpBox, float scale = 1); + + void Flatten(pBox *input, pBox *output); + + void printData(pBox *output); + + + +}; + +#endif //MAIN_FACENET_H diff --git a/src/network.cpp b/src/network.cpp new file mode 100644 index 0000000..5373fc7 --- /dev/null +++ b/src/network.cpp @@ -0,0 +1,467 @@ +#include "network.h" + +void addbias(struct pBox *pbox, mydataFmt *pbias) { + if (pbox->pdata == NULL) { + cout << "Relu feature is NULL!!" << endl; + return; + } + if (pbias == NULL) { + cout << "the Relu bias is NULL!!" << endl; + return; + } + mydataFmt *op = pbox->pdata; + mydataFmt *pb = pbias; + + long dis = pbox->width * pbox->height; + for (int channel = 0; channel < pbox->channel; channel++) { + for (int col = 0; col < dis; col++) { + *op = *op + *pb; + op++; + } + pb++; + } +} + +void image2MatrixInit(Mat &image, struct pBox *pbox) { + if ((image.data == NULL) || (image.type() != CV_8UC3)) { + cout << "image's type is wrong!!Please set CV_8UC3" << endl; + return; + } + pbox->channel = image.channels(); + pbox->height = image.rows; + pbox->width = image.cols; + + pbox->pdata = (mydataFmt *) malloc(pbox->channel * pbox->height * pbox->width * sizeof(mydataFmt)); + if (pbox->pdata == NULL)cout << "the image2MatrixInit failed!!" 
<< endl;
+    memset(pbox->pdata, 0, pbox->channel * pbox->height * pbox->width * sizeof(mydataFmt));
+}
+
+void image2Matrix(const Mat &image, const struct pBox *pbox, int num) {
+    if ((image.data == NULL) || (image.type() != CV_8UC3)) {
+        cout << "image's type is wrong!! Please set CV_8UC3" << endl;
+        return;
+    }
+    if (pbox->pdata == NULL) {
+        return;
+    }
+    //num != 0: facenet prewhitening, (x - mean) / max(stddev, 1/sqrt(N)), computed once per image
+    double mean = 0, stddev_adj = 1;
+    if (num != 0) {
+        double stddev, sqr;
+        int size;
+        Mat temp_m, temp_sd;
+        meanStdDev(image, temp_m, temp_sd);
+        mean = temp_m.at<double>(0, 0);
+        stddev = temp_sd.at<double>(0, 0);
+        size = image.cols * image.rows * image.channels();
+        sqr = sqrt(double(size));
+        if (stddev >= 1.0 / sqr) {
+            stddev_adj = stddev;
+        } else {
+            stddev_adj = 1.0 / sqr;
+        }
+//        cout << mean << "|" << stddev << "|" << size << "|" << stddev_adj << "|" << endl;
+    }
+    mydataFmt *p = pbox->pdata;
+    for (int rowI = 0; rowI < image.rows; rowI++) {
+        for (int colK = 0; colK < image.cols; colK++) {
+            if (num == 0) {
+                //(x - 127.5) / 128, stored as planar B, G, R
+                *p = (image.at<Vec3b>(rowI, colK)[0] - 127.5) * 0.0078125;
+                *(p + image.rows * image.cols) = (image.at<Vec3b>(rowI, colK)[1] - 127.5) * 0.0078125;
+                *(p + 2 * image.rows * image.cols) = (image.at<Vec3b>(rowI, colK)[2] - 127.5) * 0.0078125;
+            } else {
+                *p = (image.at<Vec3b>(rowI, colK)[0] - mean) / stddev_adj;
+                *(p + image.rows * image.cols) = (image.at<Vec3b>(rowI, colK)[1] - mean) / stddev_adj;
+                *(p + 2 * image.rows * image.cols) = (image.at<Vec3b>(rowI, colK)[2] - mean) / stddev_adj;
+            }
+            p++;
+        }
+    }
+}
+
+void featurePadInit(const pBox *pbox, pBox *outpBox, const int pad, const int padw, const int padh) {
+    if (pad < -1) {
+        cout << "the data needn't to pad, please check your network!" << endl;
+        return;
+    }
+    outpBox->channel = pbox->channel;
+    if (pad == -1) {
+        outpBox->height = pbox->height + 2 * padh;
+        outpBox->width = pbox->width + 2 * padw;
+    } else {
+        outpBox->height = pbox->height + 2 * pad;
+        outpBox->width = pbox->width + 2 * pad;
+    }
+    long RowByteNum = outpBox->width * sizeof(mydataFmt);
+    outpBox->pdata = (mydataFmt *) malloc(outpBox->channel * outpBox->height * RowByteNum);
+    if (outpBox->pdata == NULL)cout << "the featurePadInit is failed!!"
<< endl; + memset(outpBox->pdata, 0, outpBox->channel * outpBox->height * RowByteNum); +} + +void featurePad(const pBox *pbox, pBox *outpBox, const int pad, const int padw, const int padh) { + mydataFmt *p = outpBox->pdata; + mydataFmt *pIn = pbox->pdata; + if (pad == -1) { + for (int row = 0; row < outpBox->channel * outpBox->height; row++) { + if ((row % outpBox->height) < padh || (row % outpBox->height > (outpBox->height - padh - 1))) { + p += outpBox->width; + continue; + } + p += padw; + memcpy(p, pIn, pbox->width * sizeof(mydataFmt)); + p += pbox->width + padw; + pIn += pbox->width; + } + } else { + for (int row = 0; row < outpBox->channel * outpBox->height; row++) { + if ((row % outpBox->height) < pad || (row % outpBox->height > (outpBox->height - pad - 1))) { + p += outpBox->width; + continue; + } + p += pad; + memcpy(p, pIn, pbox->width * sizeof(mydataFmt)); + p += pbox->width + pad; + pIn += pbox->width; + } + } +} + +void convolutionInit(const Weight *weight, pBox *pbox, pBox *outpBox) { + outpBox->channel = weight->selfChannel; +// ((imginputh - ckh + 2 * ckpad) / stride) + 1; + if (weight->kernelSize == 0) { + outpBox->width = ((pbox->width - weight->w + 2 * weight->padw) / weight->stride) + 1; +// outpBox->width = (pbox->width - weight->w) / weight->stride + 1; +// outpBox->height = (pbox->height - weight->h) / weight->stride + 1; + outpBox->height = (pbox->height - weight->h + 2 * weight->padh) / weight->stride + 1; + } else { + outpBox->width = ((pbox->width - weight->kernelSize + 2 * weight->pad) / weight->stride) + 1; + outpBox->height = ((pbox->height - weight->kernelSize + 2 * weight->pad) / weight->stride) + 1; + } +// cout << outpBox->pdata << endl; + outpBox->pdata = (mydataFmt *) malloc(outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt)); +// cout << outpBox->pdata << endl; + if (outpBox->pdata == NULL)cout << "the convolutionInit is failed!!" 
<< endl; + memset(outpBox->pdata, 0, outpBox->width * outpBox->height * outpBox->channel * sizeof(mydataFmt)); + if (weight->pad != 0) { + pBox *padpbox = new pBox; + featurePadInit(pbox, padpbox, weight->pad, weight->padw, weight->padh); + featurePad(pbox, padpbox, weight->pad, weight->padw, weight->padh); + *pbox = *padpbox; + } +} + +void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox) { +// if (weight->pad != 0) { +// pBox *padpbox = new pBox; +// featurePadInit(outpBox, padpbox, weight->pad, weight->padw, weight->padh); +// featurePad(outpBox, padpbox, weight->pad, weight->padw, weight->padh); +// *outpBox = *padpbox; +// } + int ckh, ckw, ckd, stride, cknum, ckpad, imginputh, imginputw, imginputd, Nh, Nw; + mydataFmt *ck, *imginput; +// float *output = outpBox->pdata; + float temp; + ck = weight->pdata; + if (weight->kernelSize == 0) { + ckh = weight->h; + ckw = weight->w; + } else { + ckh = weight->kernelSize; + ckw = weight->kernelSize; + } + ckd = weight->lastChannel; + cknum = weight->selfChannel; + ckpad = weight->pad; + stride = weight->stride; + imginput = pbox->pdata; + imginputh = pbox->height; + imginputw = pbox->width; + imginputd = pbox->channel; + Nh = outpBox->height; + Nw = outpBox->width; +// Nh = ((imginputh - ckh + 2 * ckpad) / stride) + 1; +// Nw = ((imginputw - ckw + 2 * ckpad) / stride) + 1; + for (int i = 0; i < cknum; ++i) { + for (int j = 0; j < Nh; j++) { + for (int k = 0; k < Nw; k++) { + temp = 0; + + for (int m = 0; m < ckd; ++m) { + for (int n = 0; n < ckh; ++n) { + for (int i1 = 0; i1 < ckw; ++i1) { + temp += imginput[(j * stride + n) * imginputw + + (k * stride + i1) + + m * imginputh * imginputw] + * ck[i * ckh * ckw * ckd + m * ckh * ckw + n * ckw + i1]; + } + } + } + //按照顺序存储 + outpBox->pdata[i * outpBox->height * outpBox->width + j * outpBox->width + k] = temp; + } + } + } +// cout << "output->pdata:" << (outpBox->pdata[10]) << endl; +} + +void maxPoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride, int flag) { + if (flag == 1) { + Matrix->width = floor((float) (pbox->width - kernelSize) / stride + 1); + Matrix->height = floor((float) (pbox->height - kernelSize) / stride + 1); + } else { + Matrix->width = ceil((float) (pbox->width - kernelSize) / stride + 1); + Matrix->height = ceil((float) (pbox->height - kernelSize) / stride + 1); + } + Matrix->channel = pbox->channel; + Matrix->pdata = (mydataFmt *) malloc(Matrix->channel * Matrix->width * Matrix->height * sizeof(mydataFmt)); + if (Matrix->pdata == NULL)cout << "the maxPoolingI nit is failed!!" << endl; + memset(Matrix->pdata, 0, Matrix->channel * Matrix->width * Matrix->height * sizeof(mydataFmt)); +} + +void maxPooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) { + if (pbox->pdata == NULL) { + cout << "the feature2Matrix pbox is NULL!!" 
<< endl; + return; + } + mydataFmt *p = Matrix->pdata; + mydataFmt *pIn; + mydataFmt *ptemp; + mydataFmt maxNum = 0; + if ((pbox->width - kernelSize) % stride == 0 && (pbox->height - kernelSize) % stride == 0) { + for (int row = 0; row < Matrix->height; row++) { + for (int col = 0; col < Matrix->width; col++) { + pIn = pbox->pdata + row * stride * pbox->width + col * stride; + for (int channel = 0; channel < pbox->channel; channel++) { + ptemp = pIn + channel * pbox->height * pbox->width; + maxNum = *ptemp; + for (int kernelRow = 0; kernelRow < kernelSize; kernelRow++) { + for (int i = 0; i < kernelSize; i++) { + if (maxNum < *(ptemp + i + kernelRow * pbox->width)) + maxNum = *(ptemp + i + kernelRow * pbox->width); + } + } + *(p + channel * Matrix->height * Matrix->width) = maxNum; + } + p++; + } + } + } else { + int diffh = 0, diffw = 0; + for (int channel = 0; channel < pbox->channel; channel++) { + pIn = pbox->pdata + channel * pbox->height * pbox->width; + for (int row = 0; row < Matrix->height; row++) { + for (int col = 0; col < Matrix->width; col++) { + ptemp = pIn + row * stride * pbox->width + col * stride; + maxNum = *ptemp; + diffh = row * stride - pbox->height + 1; + diffw = col * stride - pbox->width + 1; + for (int kernelRow = 0; kernelRow < kernelSize; kernelRow++) { + if ((kernelRow + diffh) > 0)break; + for (int i = 0; i < kernelSize; i++) { + if ((i + diffw) > 0)break; + if (maxNum < *(ptemp + i + kernelRow * pbox->width)) + maxNum = *(ptemp + i + kernelRow * pbox->width); + } + } + *p++ = maxNum; + } + } + } + } +} + +void avePoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) { + Matrix->width = ceil((float) (pbox->width - kernelSize) / stride + 1); + Matrix->height = ceil((float) (pbox->height - kernelSize) / stride + 1); + Matrix->channel = pbox->channel; + Matrix->pdata = (mydataFmt *) malloc(Matrix->channel * Matrix->width * Matrix->height * sizeof(mydataFmt)); + if (Matrix->pdata == NULL)cout << "the maxPoolingInit is failed!!" << endl; + memset(Matrix->pdata, 0, Matrix->channel * Matrix->width * Matrix->height * sizeof(mydataFmt)); +} + +void avePooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride) { + if (pbox->pdata == NULL) { + cout << "the feature2Matrix pbox is NULL!!" << endl; + return; + } + mydataFmt *p = Matrix->pdata; + mydataFmt *pIn; + mydataFmt *ptemp; + mydataFmt sumNum = 0; + if ((pbox->width - kernelSize) % stride == 0 && (pbox->height - kernelSize) % stride == 0) { + for (int row = 0; row < Matrix->height; row++) { + for (int col = 0; col < Matrix->width; col++) { + pIn = pbox->pdata + row * stride * pbox->width + col * stride; + + for (int channel = 0; channel < pbox->channel; channel++) { + + ptemp = pIn + channel * pbox->height * pbox->width; + sumNum = 0; + for (int kernelRow = 0; kernelRow < kernelSize; kernelRow++) { + for (int i = 0; i < kernelSize; i++) { + sumNum += *(ptemp + i + kernelRow * pbox->width); + } + } + *(p + channel * Matrix->height * Matrix->width) = sumNum / (kernelSize * kernelSize); + } + p++; + } + } + } +} + + +void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma) { + if (pbox->pdata == NULL) { + cout << "the Relu feature is NULL!!" << endl; + return; + } + if (pbias == NULL) { + cout << "the Relu bias is NULL!!" 
<< endl; + return; + } + mydataFmt *op = pbox->pdata; + mydataFmt *pb = pbias; + mydataFmt *pg = prelu_gmma; + + long dis = pbox->width * pbox->height; + for (int channel = 0; channel < pbox->channel; channel++) { + for (int col = 0; col < dis; col++) { + *op = *op + *pb; + *op = (*op > 0) ? (*op) : ((*op) * (*pg)); + op++; + } + pb++; + pg++; + } +} + +void fullconnectInit(const Weight *weight, pBox *outpBox) { + outpBox->channel = weight->selfChannel; + outpBox->width = 1; + outpBox->height = 1; + outpBox->pdata = (mydataFmt *) malloc(weight->selfChannel * sizeof(mydataFmt)); + if (outpBox->pdata == NULL)cout << "the fullconnectInit is failed!!" << endl; + memset(outpBox->pdata, 0, weight->selfChannel * sizeof(mydataFmt)); +} + +void fullconnect(const Weight *weight, const pBox *pbox, pBox *outpBox) { + if (pbox->pdata == NULL) { + cout << "the fc feature is NULL!!" << endl; + return; + } + if (weight->pdata == NULL) { + cout << "the fc weight is NULL!!" << endl; + return; + } + memset(outpBox->pdata, 0, weight->selfChannel * sizeof(mydataFmt)); + //Y←αAX + βY β must be 0(zero) + // row no trans A's row A'col + //cblas_sgemv(CblasRowMajor, CblasNoTrans, weight->selfChannel, weight->lastChannel, 1, weight->pdata, weight->lastChannel, pbox->pdata, 1, 0, outpBox->pdata, 1); + vectorXmatrix(pbox->pdata, weight->pdata, + pbox->width * pbox->height * pbox->channel, + weight->lastChannel, weight->selfChannel, + outpBox->pdata); +} + +void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int size, int v_w, int v_h, mydataFmt *p) { + for (int i = 0; i < v_h; i++) { + p[i] = 0; + for (int j = 0; j < v_w; j++) { + p[i] += matrix[j] * v[i * v_w + j]; +// cout << p[i] << endl; + } +// cout << p[i] << endl; +// p[i] = -0.0735729; +// cout << "...." << endl; +// break; + } +// cout << "...." 
<< endl;
+}
+
+void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length) {
+    //each line of a *_list.txt file holds one value wrapped in brackets, e.g. "[0.012345]"
+    ifstream in(filename.data());
+    string line;
+    if (in) {
+        int i = 0;
+        int count = 0;
+        int pos = 0;
+        while (getline(in, line)) {
+            try {
+                if (i < dataNumber[count]) {
+                    line.erase(0, 1);
+                    pos = line.find(']');
+                    line.erase(pos, 1);
+                    pos = line.find('\r');
+                    if (pos != -1) {
+                        line.erase(pos, 1);
+                    }
+                    *(pTeam[count])++ = atof(line.data());
+                } else {
+                    count++;
+                    if ((length != 0) && (count == length))
+                        break;
+                    dataNumber[count] += dataNumber[count - 1];
+                    line.erase(0, 1);
+                    pos = line.find(']');
+                    line.erase(pos, 1);
+                    pos = line.find('\r');
+                    if (pos != -1) {
+                        line.erase(pos, 1);
+                    }
+                    *(pTeam[count])++ = atof(line.data());
+                }
+                i++;
+            }
+            catch (exception &e) {
+                cout << " error " << i << endl;
+                return;
+            }
+        }
+    } else {
+        cout << "no such file " << filename << endl;
+    }
+}
+
+//weight, selfChannel, lastChannel, kernelSize, stride, pad, w, h, padw, padh
+long initConvAndFc(struct Weight *weight, int schannel, int lchannel, int kersize,
+                   int stride, int pad, int w, int h, int padw, int padh) {
+    weight->selfChannel = schannel;
+    weight->lastChannel = lchannel;
+    weight->kernelSize = kersize;
+//    if (kersize == 0) {
+    weight->h = h;
+    weight->w = w;
+//    }
+//    if (pad == -1) {
+    weight->padh = padh;
+    weight->padw = padw;
+//    }
+    weight->stride = stride;
+    weight->pad = pad;
+    weight->pbias = (mydataFmt *) malloc(schannel * sizeof(mydataFmt));
+    if (weight->pbias == NULL)cout << "Memory request not successful!!!";
+    memset(weight->pbias, 0, schannel * sizeof(mydataFmt));
+    long byteLenght;
+    if (kersize == 0) {
+        byteLenght = weight->selfChannel * weight->lastChannel * weight->h * weight->w;
+    } else {
+        byteLenght = weight->selfChannel * weight->lastChannel * weight->kernelSize * weight->kernelSize;
+    }
+    weight->pdata = (mydataFmt *) malloc(byteLenght * sizeof(mydataFmt));
+    if (weight->pdata == NULL)cout << "Memory request not successful!!!";
+    memset(weight->pdata, 0, byteLenght * sizeof(mydataFmt));
+    return byteLenght;
+}
+
+void initpRelu(struct pRelu *prelu, int width) {
+    prelu->width = width;
+    prelu->pdata = (mydataFmt *) malloc(width * sizeof(mydataFmt));
+    if (prelu->pdata == NULL)cout << "prelu apply for memory failed!!!!";
+    memset(prelu->pdata, 0, width * sizeof(mydataFmt));
+}
\ No newline at end of file
diff --git a/src/network.h b/src/network.h
new file mode 100644
index 0000000..543fb32
--- /dev/null
+++ b/src/network.h
@@ -0,0 +1,55 @@
+#ifndef NETWORK_H
+#define NETWORK_H
+
+#include "opencv2/imgproc/imgproc.hpp"
+#include "opencv2/highgui/highgui.hpp"
+#include <iostream>
+#include <fstream>
+#include <string>
+#include <cstring>
+#include <cstdlib>
+#include <cstdio>
+#include <cmath>
+#include <exception>
+#include "pBox.h"
+//#include <cblas.h>
+
+using namespace cv;
+
+void addbias(struct pBox *pbox, mydataFmt *pbias);
+
+void image2Matrix(const Mat &image, const struct pBox *pbox, int num = 0);
+
+void maxPooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride);
+
+void avePooling(const pBox *pbox, pBox *Matrix, int kernelSize, int stride);
+
+void featurePad(const pBox *pbox, pBox *outpBox, const int pad, const int padw = 0, const int padh = 0);
+
+void prelu(struct pBox *pbox, mydataFmt *pbias, mydataFmt *prelu_gmma);
+
+void fullconnect(const Weight *weight, const pBox *pbox, pBox *outpBox);
+
+void readData(string filename, long dataNumber[], mydataFmt *pTeam[], int length = 0);
+
+long initConvAndFc(struct Weight *weight, int schannel, int lchannel, int kersize, int stride, int pad,
+                   int w = 0, int h = 0, int padw = 0, int padh = 0);
+
+void initpRelu(struct pRelu *prelu, int width);
+
+void image2MatrixInit(Mat &image, struct pBox *pbox);
+
+void featurePadInit(const pBox *pbox, pBox *outpBox, const int pad, const int padw = 0, const int padh = 0);
+
+void maxPoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride, int flag = 0);
+
+void avePoolingInit(const pBox *pbox, pBox *Matrix, int kernelSize, int stride);
+
+void convolutionInit(const Weight *weight, pBox *pbox, pBox *outpBox);
+
+void fullconnectInit(const Weight *weight, pBox *outpBox);
+
+void vectorXmatrix(mydataFmt *matrix, mydataFmt *v, int size, int v_w, int v_h, mydataFmt *p);
+
+void convolution(const Weight *weight, const pBox *pbox, pBox *outpBox);
+
+#endif
\ No newline at end of file
diff --git a/src/pBox.cpp b/src/pBox.cpp
new file mode 100644
index 0000000..32e821a
--- /dev/null
+++ b/src/pBox.cpp
@@ -0,0 +1,25 @@
+#include"pBox.h"
+
+void freepBox(struct pBox *pbox) {
+    if (pbox->pdata == NULL)cout << "pbox is NULL!" << endl;
+    else
+        free(pbox->pdata);
+    pbox->pdata = NULL;
+    delete pbox;
+}
+
+void freepRelu(struct pRelu *prelu) {
+    if (prelu->pdata == NULL)cout << "prelu is NULL!" << endl;
+    else
+        free(prelu->pdata);
+    prelu->pdata = NULL;
+    delete prelu;
+}
+
+void freeWeight(struct Weight *weight) {
+    if (weight->pdata == NULL)cout << "weight is NULL!" << endl;
+    else
+        free(weight->pdata);
+    weight->pdata = NULL;
+    delete weight;
+}
\ No newline at end of file
diff --git a/src/pBox.h b/src/pBox.h
new file mode 100644
index 0000000..41ea211
--- /dev/null
+++ b/src/pBox.h
@@ -0,0 +1,68 @@
+#ifndef PBOX_H
+#define PBOX_H
+
+#include "opencv2/core/core.hpp"
+#include <iostream>
+#include <vector>
+#include <cstdlib>
+
+using namespace std;
+//#define mydataFmt double
+#define Num 128
+typedef double mydataFmt;
+
+
+struct pBox : public cv::String {
+    mydataFmt *pdata;
+    int width;
+    int height;
+    int channel;
+};
+
+
+struct pRelu {
+    mydataFmt *pdata;
+    int width;
+};
+
+
+struct Weight {
+    mydataFmt *pdata;
+    mydataFmt *pbias;
+    int lastChannel;
+    int selfChannel;
+    int kernelSize;
+    int stride;
+    int pad;
+    int w;
+    int h;
+    int padw;
+    int padh;
+};
+
+class pBox1 {
+public:
+    vector<vector<vector<mydataFmt>>> pdata;
+};
+
+class pRelu1 {
+public:
+    vector<mydataFmt> pdata;
+};
+
+class Weight1 {
+public:
+    vector<vector<vector<vector<mydataFmt>>>> pdata;
+    vector<mydataFmt> pbias;
+    int stride;
+    int padw;
+    int padh;
+};
+
+void freepBox(struct pBox *pbox);
+
+void freeWeight(struct Weight *weight);
+
+void freepRelu(struct pRelu *prelu);
+
+#endif
\ No newline at end of file
diff --git a/src/pikaqiu.cpp b/src/pikaqiu.cpp
new file mode 100644
index 0000000..bd75578
--- /dev/null
+++ b/src/pikaqiu.cpp
@@ -0,0 +1,54 @@
+#include "network.h"
+#include "facenet.h"
+#include <time.h>
+
+int main() {
+    int b = 0;
+    if (b == 0) {
+        Mat image = imread("../1.jpg");
+//        Mat image = imread("../2.png");
+        Mat Image;
+        resize(image, Image, Size(299, 299), 0, 0, cv::INTER_LINEAR);
+        facenet ggg;
+        mydataFmt *o = new mydataFmt[Num];
+        ggg.run(Image, o, 0);
+//        imshow("result", Image);
+        imwrite("../result.jpg", Image);
+
+        for (int i = 0; i < Num; ++i) {
+            cout << o[i] << endl;
+        }
+
+        delete[] o;
+        waitKey(0);
+        image.release();
+    } else {
+        Mat image;
+        VideoCapture cap(0);
+        if (!cap.isOpened())
+            cout << "fail to open!"
<< endl;
+        cap >> image;
+        if (!image.data) {
+            cout << "failed to read a frame from the camera" << endl;
+            return -1;
+        }
+
+        clock_t start;
+        int stop = 1200;
+        facenet ggg;
+        mydataFmt *o = new mydataFmt[Num];
+        //while (stop--) {
+        while (true) {
+            start = clock();
+            cap >> image;
+            resize(image, image, Size(299, 299), 0, 0, cv::INTER_LINEAR);
+            ggg.run(image, o, 0);
+            imshow("result", image);
+            if (waitKey(1) >= 0) break;
+            start = clock() - start;
+            cout << "time is " << (double) start / CLOCKS_PER_SEC * 1000 << "ms" << endl;
+        }
+        delete[] o;
+        waitKey(0);
+        image.release();
+    }
+    return 0;
+}
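
The two numerical transforms that bracket the network are easy to check in isolation: image2Matrix() with num != 0 prewhitens the input as (x - mean) / max(stddev, 1/sqrt(N)), and the tail of facenet::run() L2-normalises the Bottleneck output with the same 1e-10 floor. A minimal standalone sketch of both, using plain std::vector in place of pBox (the helper names are illustrative, not part of the project):

#include <cmath>
#include <cstdio>
#include <vector>

// Mirrors image2Matrix(num != 0): subtract the per-image mean, divide by the
// standard deviation clamped from below at 1/sqrt(N).
void prewhiten(std::vector<double> &x) {
    const double n = static_cast<double>(x.size());
    double mean = 0.0;
    for (double v : x) mean += v;
    mean /= n;
    double var = 0.0;
    for (double v : x) var += (v - mean) * (v - mean);
    double stddev = std::sqrt(var / n);
    double stddev_adj = (stddev >= 1.0 / std::sqrt(n)) ? stddev : 1.0 / std::sqrt(n);
    for (double &v : x) v = (v - mean) / stddev_adj;
}

// Mirrors the tail of facenet::run(): e -> e / sqrt(max(sum(e^2), eps)).
void l2_normalize(std::vector<double> &e, double eps = 1e-10) {
    double sum = 0.0;
    for (double v : e) sum += v * v;
    double divisor = std::sqrt(sum < eps ? eps : sum);
    for (double &v : e) v /= divisor;
}

int main() {
    std::vector<double> img = {0.0, 255.0, 127.5, 64.0};  // toy "pixels"
    prewhiten(img);                                        // zero mean, clamped std
    std::vector<double> emb = {3.0, 4.0};                  // toy "embedding"
    l2_normalize(emb);                                     // -> {0.6, 0.8}
    std::printf("%.3f %.3f\n", emb[0], emb[1]);
    return 0;
}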
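
convolutionInit() sizes its output as (in - kernel + 2*pad) / stride + 1 with integer division; plugging in the Stem layer parameters reproduces the spatial sizes annotated in the comments (299 -> 149 -> 147 -> 147 -> 73 -> 73 -> 71 -> 35). A small sketch of that arithmetic (the helper below is illustrative):

#include <cstdio>

// Output spatial size used by convolutionInit: (in - k + 2*pad) / stride + 1.
int convOut(int in, int k, int stride, int pad) {
    return (in - k + 2 * pad) / stride + 1;
}

int main() {
    int s = 299;
    s = convOut(s, 3, 2, 0);  // conv1: 149
    s = convOut(s, 3, 1, 0);  // conv2: 147
    s = convOut(s, 3, 1, 1);  // conv3: 147
    s = convOut(s, 3, 2, 0);  // maxpool 3x3/2 (maxPoolingInit uses the ceil variant; same result here)
    s = convOut(s, 1, 1, 0);  // conv4: 73
    s = convOut(s, 3, 1, 0);  // conv5: 71
    s = convOut(s, 3, 2, 0);  // conv6: 35
    std::printf("final Stem size: %d x %d\n", s, s);  // 35 x 35
    return 0;
}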
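
readData() expects each *_list.txt line to hold one value wrapped in square brackets: it strips the leading '[', the ']' and any trailing '\r' before calling atof. A minimal reader for that layout, kept separate from the per-layer chunking that readData() performs through dataNumber[] (the helper name is a stand-in, the example path comes from facenet::Stem):

#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <string>
#include <vector>

// Reads a weight list file with one bracketed value per line, e.g. "[0.012345]".
std::vector<double> readListFile(const std::string &path) {
    std::vector<double> values;
    std::ifstream in(path);
    std::string line;
    while (std::getline(in, line)) {
        std::string::size_type pos = line.find('[');
        if (pos != std::string::npos) line.erase(0, pos + 1);  // drop everything up to '['
        pos = line.find(']');
        if (pos != std::string::npos) line.erase(pos);          // drop ']' and anything after it
        if (!line.empty()) values.push_back(std::atof(line.c_str()));
    }
    return values;
}

int main() {
    std::vector<double> w = readListFile("../model_128/stem_list.txt");
    std::printf("read %zu values\n", w.size());
    return 0;
}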
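
prelu() in network.cpp folds two steps together: it adds the per-channel bias and then applies PReLU with a per-channel slope (the gamma arrays loaded alongside each conv layer). Restated for a single value (illustrative helper):

#include <cstdio>

// PReLU as applied in prelu(): bias add, then y = x if x > 0 else gamma_c * x,
// with one gamma per output channel.
double preluOne(double x, double bias, double gamma) {
    x += bias;
    return x > 0 ? x : gamma * x;
}

int main() {
    std::printf("%.2f\n", preluOne(-1.5, 0.5, 0.25));  // (-1.0) * 0.25 = -0.25
    return 0;
}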
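
facenet::run() chains the blocks with `input = output; output = new pBox;` while the freepBox() calls on the consumed inputs stay commented out, so every stage's input feature map is leaked. A self-contained sketch of the same chaining pattern with the input released once it has been used; Box, makeBox, freeBox and dummyBlock are stand-ins for pBox, its allocation, freepBox and an Inception-ResNet block, not the project's own functions:

#include <cstdlib>
#include <cstring>

struct Box { double *pdata; long count; };

Box *makeBox(long count) {
    Box *b = new Box;
    b->count = count;
    b->pdata = (double *) std::calloc(count, sizeof(double));
    return b;
}

void freeBox(Box *b) { std::free(b->pdata); delete b; }

// Stand-in for one residual block: writes its result into out.
void dummyBlock(const Box *in, Box *out) {
    std::memcpy(out->pdata, in->pdata, in->count * sizeof(double));
}

int main() {
    Box *input = makeBox(35 * 35 * 256);      // e.g. the Stem output shape
    for (int i = 0; i < 5; ++i) {
        Box *output = makeBox(input->count);  // residual blocks keep the shape
        dummyBlock(input, output);
        freeBox(input);                       // release the consumed input before re-pointing
        input = output;
    }
    freeBox(input);
    return 0;
}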
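
The usual consumer of the Num-dimensional embedding that run() writes into `o` is a distance test between two faces: with L2-normalised embeddings, a small squared Euclidean distance indicates the same identity. A sketch of that comparison; the 1.1 threshold is illustrative only and not taken from this code:

#include <cmath>
#include <cstdio>

const int kNum = 128;  // matches #define Num 128 in pBox.h

double squaredDistance(const double *a, const double *b, int n) {
    double d = 0.0;
    for (int i = 0; i < n; ++i) {
        double diff = a[i] - b[i];
        d += diff * diff;
    }
    return d;
}

int main() {
    double e1[kNum] = {0};  // stand-ins for two embeddings returned through run()'s output buffer
    double e2[kNum] = {0};
    e1[0] = 1.0;
    e2[0] = 1.0;
    double d = squaredDistance(e1, e2, kNum);
    std::printf("distance = %.4f -> %s\n", d, d < 1.1 ? "same person" : "different person");
    return 0;
}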