I am getting the following error and I have no idea how to fix it. Could you please help me fix it? The full code is below.
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "math.h"
// NOTE(review): the four #include lines below lost their header names when the
// code was pasted (everything inside <...> was stripped by HTML escaping).
// Restored to the standard headers this file visibly uses (cout, vector,
// ifstream, string) — confirm against the original source.
#include <iostream>
#include <vector>
#include <fstream>
#include <string>
using namespace cv;
using namespace std;
// Gradient Checking
#define G_CHECKING 0
// Conv2 parameter
#define CONV_FULL 0
#define CONV_SAME 1
#define CONV_VALID 2
// Pooling methods.
// BUG FIX: the pasted text defined POOL_MAX twice (0 and then 2) and gave
// POOL_MEAN and POOL_STOCHASTIC the same value (1), so the pooling-method
// dispatch in Pooling()/UnPooling() could never distinguish them.  Each
// method now has a distinct value.
#define POOL_MAX 0
#define POOL_MEAN 1
#define POOL_STOCHASTIC 2
// Shorthand for double-element access: M.ATD(i, j) == M.at<double>(i, j).
// The <double> template argument was stripped from the pasted text.
#define ATD at<double>
// Python-style spelling for "else if" chains used throughout this file.
#define elif else if
// Network hyper-parameters.  The trailing comments show the values used by
// the code this was adapted from; the active values are scaled down.
int NumHiddenNeurons = 50; //200
int NumHiddenLayers = 2;
int nclasses = 4; //10
int KernelSize = 3; //13
int KernelAmount = 8;
int PoolingDim = 2; //4
// Number of mini-batches; set in main() from the sample count.
int batch;
// Selected pooling method (see POOL_* macros).  Name kept as-is ("Methed")
// because it is referenced elsewhere in the file.
int Pooling_Methed = POOL_STOCHASTIC;
// One convolution kernel together with its gradient accumulators.
struct ConvKernel{
    Mat W;        // kernel weights (KernelSize x KernelSize, doubles)
    double b;     // scalar bias added after convolution
    Mat Wgrad;    // accumulated gradient of W
    double bgrad; // accumulated gradient of b
};
typedef ConvKernel ConvK;
// One convolution layer: a bank of kernels applied to the same input.
// BUG FIX: the pasted text read "vector layer;" — the <ConvK> template
// argument was stripped (as with every other <...> in the paste), which is
// the compile error reported.
typedef struct ConvLayer{
    vector<ConvK> layer;  // the kernel bank
    int kernelAmount;     // number of kernels actually in use
}Cvl;
// One fully-connected hidden layer: weights, biases, and their gradients.
struct Network{
    Mat W;     // weight matrix (hiddensize x inputsize)
    Mat b;     // bias column vector (hiddensize x 1)
    Mat Wgrad; // gradient of W
    Mat bgrad; // gradient of b
};
typedef Network Ntw;
// Softmax-regression output layer.  (Tag spelling "Regession" kept as-is —
// renaming it would break any code referring to the tag.)
struct SoftmaxRegession{
    Mat Weight;  // class weights (nclasses x nfeatures)
    Mat Wgrad;   // gradient of Weight
    Mat b;       // bias column vector (nclasses x 1)
    Mat bgrad;   // gradient of b
    double cost; // last computed cost value
};
typedef SoftmaxRegession SMR;
Mat
concatenateMat(vector>&vec){
int subFeatures = vec[0][0].rows * vec[0][0].cols;
int height = vec[0].size() * subFeatures;
int width = vec.size();
Mat res = Mat::zeros(height, width, CV_64FC1);
for(int i=0; i&vec){
int height = vec[0].rows;
int width = vec[0].cols;
Mat res = Mat::zeros(height * width, vec.size(), CV_64FC1);
for(int i=0; i>&vec, int vsize){
int sqDim = M.rows / vsize;
int Dim = sqrt ((double) sqDim);
for(int i=0; i oneColumn;
for(int j=0; j> 8) & 255;
ch3 = (i >> 16) & 255;
ch4 = (i >> 24) & 255;
return((int) ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4;
}
void
read_Mnist(string filename, vector&vec){
ifstream file(filename, ios::binary);
if (file.is_open()){
int magic_number = 0;
int number_of_images = 0;
int n_rows = 0;
int n_cols = 0;
file.read((char*) &magic_number, sizeof(magic_number));
magic_number = ReverseInt(magic_number);
file.read((char*) &number_of_images,sizeof(number_of_images));
number_of_images = ReverseInt(number_of_images);
file.read((char*) &n_rows, sizeof(n_rows));
n_rows = ReverseInt(n_rows);
file.read((char*) &n_cols, sizeof(n_cols));
n_cols = ReverseInt(n_cols);
for(int i = 0; i < number_of_images; ++i){
Mat tpmat = Mat::zeros(n_rows, n_cols, CV_8UC1);
for(int r = 0; r < n_rows; ++r){
for(int c = 0; c < n_cols; ++c){
unsigned char temp = 0;
file.read((char*) &temp, sizeof(temp));
tpmat.at(r, c) = (int) temp;
}
}
vec.push_back(tpmat);
}
}
}
// Read an MNIST label file (big-endian IDX format): after the header, one
// unsigned byte per label, written into row 0 of the pre-allocated 1 x N
// matrix `mat` as doubles.  Does nothing if the file cannot be opened.
void
read_Mnist_Label(string filename, Mat &mat)
{
    ifstream file(filename, ios::binary);
    if (!file.is_open()) return;  // silently skip unreadable files, as before
    int magic_number = 0;
    int number_of_images = 0;
    int n_rows = 0;
    int n_cols = 0;
    // Header fields are big-endian; ReverseInt fixes the byte order.
    file.read((char*) &magic_number, sizeof(magic_number));
    magic_number = ReverseInt(magic_number);
    file.read((char*) &number_of_images, sizeof(number_of_images));
    number_of_images = ReverseInt(number_of_images);
    for(int i = 0; i < number_of_images; ++i){
        unsigned char label = 0;
        file.read((char*) &label, sizeof(label));
        mat.ATD(0, i) = (double)label;
    }
}
// Element-wise logistic function: 1 / (1 + exp(-M)).
Mat
sigmoid(Mat &M){
    Mat expNeg;
    exp(-M, expNeg);
    Mat denom = expNeg + 1.0;
    return 1.0 / denom;
}
// Derivative of the logistic sigmoid expressed in terms of its output `a`:
// sigmoid'(x) = a .* (1 - a), computed element-wise.
Mat
dsigmoid(Mat &a){
    Mat oneMinusA = 1.0 - a;
    return oneMinusA.mul(a);
}
// NOTE(review): this span was mangled in the paste — the loop headers lost
// everything after their '<', and the fused line below mixes what looks like
// two different functions: `Mat res(M)` belongs to a ReLU forward pass, while
// the surviving test `> 0.0 ... = 1.0` looks like the body of its derivative
// (dReLU).  Restore both functions from the original source.
// Also NOTE(review): `Mat res(M);` is a shallow header copy that shares the
// pixel buffer with M, so the writes below would mutate M as well — presumably
// a clone() was intended; verify against the original.
Mat
ReLU(Mat& M){
Mat res(M);
for(int i=0; i 0.0) res.ATD(i, j) = 1.0;
}
}
return res;
}
// Mimic rot90() in Matlab/GNU Octave: rotate M counter-clockwise by k
// quarter-turns.  k == 0 returns the input header unchanged (no copy).
Mat
rot90(Mat &M, int k){
    if(k == 0) return M;
    Mat res;
    if(k == 1){
        // One quarter-turn: transpose, then flip about the horizontal axis.
        flip(M.t(), res, 0);
    }else{
        // Peel off one turn recursively, then apply a single quarter-turn.
        Mat prev = rot90(M, k - 1);
        flip(prev.t(), res, 0);
    }
    return res;
}
// A Matlab/Octave style 2-d convolution function.
// from http://blog.timmlinder.com/2011/07/opencv-equivalent-to-matlabs-conv2-function/
// convtype selects the output size: CONV_FULL pads the input so the result is
// (img + kernel - 1) sized, CONV_SAME keeps the input size, CONV_VALID crops
// to the region where the kernel fits entirely inside the image.
Mat
conv2(Mat &img, Mat &kernel, int convtype) {
Mat dest;
Mat source = img;
if(CONV_FULL == convtype) {
source = Mat();
int additionalRows = kernel.rows-1, additionalCols = kernel.cols-1;
// Zero-pad by (kernel-1) total rows/cols, split around the image.
copyMakeBorder(img, source, (additionalRows+1)/2, additionalRows/2, (additionalCols+1)/2, additionalCols/2, BORDER_CONSTANT, Scalar(0));
}
// Anchor at the kernel's flipped center so filter2D's correlation plus the
// flip below implements true convolution.
Point anchor(kernel.cols - kernel.cols/2 - 1, kernel.rows - kernel.rows/2 - 1);
int borderMode = BORDER_CONSTANT;
// filter2D computes correlation; flipping the kernel in both axes ("fkernal",
// sic) turns it into convolution.
Mat fkernal;
flip(kernel, fkernal, -1);
filter2D(source, dest, img.depth(), fkernal, anchor, 0, borderMode);
if(CONV_VALID == convtype) {
// Crop the border where the kernel overhung the image.
dest = dest.colRange((kernel.cols-1)/2, dest.cols - kernel.cols/2)
.rowRange((kernel.rows-1)/2, dest.rows - kernel.rows/2);
}
return dest;
}
// get KroneckerProduct
// for upsample
// see function kron() in Matlab/Octave
// NOTE(review): this span was mangled in the paste — kron()'s loop body is
// missing and the fused line below contains the remains of a second function
// (a minimum-difference location search over M, returning a Point `res`,
// apparently used by stochastic pooling).  Restore from the original source.
Mat
kron(Mat &a, Mat &b){
Mat res = Mat::zeros(a.rows * b.rows, a.cols * b.cols, CV_64FC1);
for(int i=0; i= M.ATD(i, j) && (val - M.ATD(i, j) < minDiff)){
minDiff = val - M.ATD(i, j);
res.x = j;
res.y = i;
}
}
}
return res;
}
// NOTE(review): this span was mangled in the paste — the '<' stripping fused
// three definitions together: Pooling() (its pooling loop is missing from the
// fused line below), UnPooling() (its signature is embedded mid-line, and its
// POOL_MAX/POOL_STOCHASTIC branch is truncated), and the tail of
// weightRandomInit(ConvK&, int) from line "convk.W = ..." down.  Restore all
// three from the original source.
Mat
Pooling(Mat &M, int pVert, int pHori, int poolingMethod, vector&locat, bool isTest){
// Crop M so its dimensions are exact multiples of the pooling window.
int remX = M.cols % pHori;
int remY = M.rows % pVert;
Mat newM;
if(remX == 0 && remY == 0) M.copyTo(newM);
else{
Rect roi = Rect(remX, remY, M.cols - remX, M.rows - remY);
M(roi).copyTo(newM);
}
Mat res = Mat::zeros(newM.rows / pVert, newM.cols / pHori, CV_64FC1);
for(int i=0; i&locat){
Mat res;
if(POOL_MEAN == poolingMethod){
// Mean unpooling: spread each value uniformly over the window.
Mat one = Mat::ones(pVert, pHori, CV_64FC1);
res = kron(M, one) / (pVert * pHori);
}elif(POOL_MAX == poolingMethod || POOL_STOCHASTIC == poolingMethod){
// Max/stochastic unpooling: place each value at its recorded location.
res = Mat::zeros(M.rows * pVert, M.cols * pHori, CV_64FC1);
for(int i=0; i(i);
for(int j=0; j();
}
}
convk.W = convk.W * (2 * epsilon) - epsilon;
convk.b = 0;
convk.Wgrad = Mat::zeros(width, width, CV_64FC1);
convk.bgrad = 0;
}
// Initialize a fully-connected layer: W uniform in [-epsilon, epsilon]
// (the usual sqrt(6/(fan_in+fan_out+1)) scale), zero biases and gradients.
// NOTE(review): the paste stripped the loop bounds and the <double> template
// arguments; the loop below is reconstructed from the surviving fragments
// ("pData = ntw.W.ptr" / "= randu" were visible) — verify against the source.
void
weightRandomInit(Ntw &ntw, int inputsize, int hiddensize, int nsamples){
    double epsilon = sqrt((double)6) / sqrt((double)(hiddensize + inputsize + 1));
    double *pData;
    ntw.W = Mat::ones(hiddensize, inputsize, CV_64FC1);
    for(int i=0; i<hiddensize; i++){
        pData = ntw.W.ptr<double>(i);
        for(int j=0; j<inputsize; j++){
            pData[j] = randu<double>();  // uniform in [0, 1)
        }
    }
    // Map [0, 1) to [-epsilon, epsilon).
    ntw.W = ntw.W * (2 * epsilon) - epsilon;
    ntw.b = Mat::zeros(hiddensize, 1, CV_64FC1);
    ntw.Wgrad = Mat::zeros(hiddensize, inputsize, CV_64FC1);
    ntw.bgrad = Mat::zeros(hiddensize, 1, CV_64FC1);
}
// Initialize the softmax layer: Weight uniform in [-0.01, 0.01], zero bias,
// cost, and gradients.
// NOTE(review): the paste stripped the loop bounds and <double> template
// arguments; the loop below is reconstructed from the surviving fragments
// ("pData = smr.Weight.ptr" / "= randu" were visible) — verify against the
// original source.
void
weightRandomInit(SMR &smr, int nclasses, int nfeatures){
    double epsilon = 0.01;
    smr.Weight = Mat::ones(nclasses, nfeatures, CV_64FC1);
    double *pData;
    for(int i = 0; i<smr.Weight.rows; i++){
        pData = smr.Weight.ptr<double>(i);
        for(int j=0; j<smr.Weight.cols; j++){
            pData[j] = randu<double>();  // uniform in [0, 1)
        }
    }
    // Map [0, 1) to [-epsilon, epsilon).
    smr.Weight = smr.Weight * (2 * epsilon) - epsilon;
    smr.b = Mat::zeros(nclasses, 1, CV_64FC1);
    smr.cost = 0.0;
    smr.Wgrad = Mat::zeros(nclasses, nfeatures, CV_64FC1);
    smr.bgrad = Mat::zeros(nclasses, 1, CV_64FC1);
}
// NOTE(review): from here on the paste is heavily corrupted — every '<...>'
// was stripped, which deleted loop bounds, vector element types, and whole
// statements, fusing ConvNetInitPrarms() and getNetworkCost() together (see
// the line below containing both "for(int j=0; j" and the getNetworkCost
// signature).  The surviving lines are kept byte-for-byte with orientation
// comments only; restore the missing text from the original source.
void
ConvNetInitPrarms(Cvl &cvl, vector&HiddenLayers, SMR &smr, int imgDim, int nsamples){
// Init Conv layers
// NOTE(review): the rest of ConvNetInitPrarms (kernel/hidden-layer/softmax
// init calls) is missing; the fused line below also carries the signature of
// getNetworkCost(vector<Mat>&x, Mat&y, Cvl&, vector<Ntw>&, SMR&, double).
for(int j=0; j&x, Mat &y, Cvl &cvl, vector&hLayers, SMR &smr, double lambda){
int nsamples = x.size();
// Conv & Pooling
vector> Conv1st;
vector> Pool1st;
vector>> PoolLoc;
// Forward pass, conv + pooling per sample and kernel (loop headers truncated).
for(int k=0; k tpConv1st;
vector tpPool1st;
vector> PLperSample;
for(int i=0; i PLperKernel;
Mat temp = rot90(cvl.layer[i].W, 2);
Mat tmpconv = conv2(x[k], temp, CONV_VALID);
tmpconv += cvl.layer[i].b;
//tmpconv = sigmoid(tmpconv);
tmpconv = ReLU(tmpconv);
tpConv1st.push_back(tmpconv);
tmpconv = Pooling(tmpconv, PoolingDim, PoolingDim, Pooling_Methed, PLperKernel, false);
PLperSample.push_back(PLperKernel);
tpPool1st.push_back(tmpconv);
}
PoolLoc.push_back(PLperSample);
Conv1st.push_back(tpConv1st);
Pool1st.push_back(tpPool1st);
}
Mat convolvedX = concatenateMat(Pool1st);
// full connected layers
vector acti;
acti.push_back(convolvedX);
for(int i=1; i<=NumHiddenLayers; i++){
Mat tmpacti = hLayers[i - 1].W * acti[i - 1] + repeat(hLayers[i - 1].b, 1, convolvedX.cols);
acti.push_back(sigmoid(tmpacti));
}
// Softmax forward pass with the max-subtraction trick for numerical stability.
Mat M = smr.Weight * acti[acti.size() - 1] + repeat(smr.b, 1, nsamples);
Mat tmp;
reduce(M, tmp, 0, CV_REDUCE_MAX);
M -= repeat(tmp, M.rows, 1);
Mat p;
exp(M, p);
reduce(p, tmp, 0, CV_REDUCE_SUM);
divide(p, repeat(tmp, p.rows, 1), p);
// softmax regression
Mat groundTruth = Mat::zeros(nclasses, nsamples, CV_64FC1);
// NOTE(review): the one-hot fill loop and the cost/Wgrad computation are
// missing from the fused line below; it jumps straight into the backprop
// delta declarations.
for(int i=0; i delta(acti.size());
delta[delta.size() -1] = -smr.Weight.t() * (groundTruth - p);
delta[delta.size() -1] = delta[delta.size() -1].mul(dsigmoid(acti[acti.size() - 1]));
for(int i = delta.size() - 2; i >= 0; i--){
delta[i] = hLayers[i].W.t() * delta[i + 1];
if(i > 0) delta[i] = delta[i].mul(dsigmoid(acti[i]));
}
// Hidden-layer gradients, averaged over the batch.
for(int i=NumHiddenLayers - 1; i >=0; i--){
hLayers[i].Wgrad = delta[i + 1] * acti[i].t();
hLayers[i].Wgrad /= nsamples;
reduce(delta[i + 1], tmp, 1, CV_REDUCE_SUM);
hLayers[i].bgrad = tmp / nsamples;
}
//bp - Conv layer
Mat one = Mat::ones(PoolingDim, PoolingDim, CV_64FC1);
vector> Delta;
vector> convDelta;
unconcatenateMat(delta[0], Delta, cvl.kernelAmount);
// NOTE(review): the per-kernel unpooling / kernel-gradient loop is truncated
// at the fused line below.
for(int k=0; k tmp;
// NOTE(review): this final region is also heavily corrupted by the stripped
// '<...>' text: gradientChecking(), trainNetwork(), resultProdict(),
// readData(), and main() are fused together, each signature embedded mid-line
// and each body partially missing.  Lines are kept byte-for-byte with
// orientation comments only; restore from the original source.
for(int i=0; i&hLayers, SMR &smr, vector&x, Mat &y, double lambda){
//Gradient Checking (remember to disable this part after you're sure the
//cost function and dJ function are correct)
getNetworkCost(x, y, cvl, hLayers, smr, lambda);
Mat grad(cvl.layer[0].Wgrad);
// NOTE(review): the numerical-gradient comparison loop is missing; the fused
// line below also carries trainNetwork's signature.
cout<<"test network !!!!"<&x, Mat &y, Cvl &cvl, vector&HiddenLayers, SMR &smr, double lambda, int MaxIter, double lrate){
if (G_CHECKING){
gradientChecking(cvl, HiddenLayers, smr, x, y, lambda);
}else{
int converge = 0;
double lastcost = 0.0;
//double lrate = getLearningRate(x);
// NOTE(review): the mini-batch gradient-descent loop is missing; the fused
// line below also carries resultProdict's signature.
cout<<"Network Learning, trained learning rate: "< batchX;
for(int i=0; i&x, Cvl &cvl, vector&hLayers, SMR &smr, double lambda){
int nsamples = x.size();
vector> Conv1st;
vector> Pool1st;
vector PLperKernel;
// Prediction forward pass (conv + pool per sample; loop headers truncated).
for(int k=0; k tpConv1st;
vector tpPool1st;
for(int i=0; i acti;
acti.push_back(convolvedX);
for(int i=1; i<=NumHiddenLayers; i++){
Mat tmpacti = hLayers[i - 1].W * acti[i - 1] + repeat(hLayers[i - 1].b, 1, convolvedX.cols);
acti.push_back(sigmoid(tmpacti));
}
// Softmax scores with the max-subtraction stability trick, then argmax.
Mat M = smr.Weight * acti[acti.size() - 1] + repeat(smr.b, 1, nsamples);
Mat tmp;
reduce(M, tmp, 0, CV_REDUCE_MAX);
M -= repeat(tmp, M.rows, 1);
Mat p;
exp(M, p);
reduce(p, tmp, 0, CV_REDUCE_SUM);
divide(p, repeat(tmp, p.rows, 1), p);
log(p, tmp);
Mat result = Mat::ones(1, tmp.cols, CV_64FC1);
for(int i=0; i maxele){
maxele = tmp.ATD(j, i);
which = j;
}
}
result.ATD(0, i) = which;
}
// deconstruct
// NOTE(review): the fused line below carries readData()'s and main()'s text;
// readData's body and main's setup (paths, imgDim, nsamples, cvl) are missing.
for(int i=0; i trainX;
vector testX;
Mat trainY, testY;
printf(" S1 ");
readData(trainX, trainY,imagePath, lablePath, 100);
readData(testX, testY, imagePath, lablePath, 100);
printf(" S2 ");
cout<<"Read trainX successfully, including "< HiddenLayers;
SMR smr;
printf(" S5 ");
ConvNetInitPrarms(cvl, HiddenLayers, smr, imgDim, nsamples);
printf(" S6 ");
// Train network using Back Propogation
batch = nsamples / 100;
Mat tpX = concatenateMat(trainX);
double lrate = getLearningRate(tpX);
cout<<"lrate = "<
↧