Hi, I am attempting to train an SVM on data acquired from 4 video feeds. However, the code crashes on the line SVM.train(). I have tried everything and have no idea why it is doing this. The exact error I get is:
Exception thrown at 0x55148C52 (opencv_ml2411.dll) in Final Design.exe: 0xC0000005: Access violation writing location 0xC7F891AB.
If I use only the first 2 videos, it works fine.
The code is shown below (it is not yet optimised, so it's quite long — sorry):
#include <iostream>
#include <vector>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ml/ml.hpp>
using namespace cv;
using namespace std;
//sensitivity values
const static int SENSITIVITY = 55;
const static int BLUR_SIZE = 25;
void FindConnectedComponents(const Mat &ThresholdImage2, vector>&blobs);
int main() {
//Images to compare
Mat background, frame;
//Grayscale images
Mat background_gray, frame_gray;
//Difference Image
Mat DifferenceImage;
//Threshold Image
Mat ThresholdImage, ThresholdImage2;
//Blur Image
Mat BlurImage;
//vector of vector points for connected comp.
vector >blobs;
//Initialise Sample Number
int sample_num = 0;
//video capture object
VideoCapture capture;
//Moments
Moments mom;
//Hu moments
double Hu[7];
//open static background
background = imread("background.jpg");
//convert background to grayscale
cvtColor(background, background_gray, COLOR_BGR2GRAY);
//Declare Training Data for SVM
double trainingData[100][7]; //[samples][features]
//Number of Hand Waving Samples
while (sample_num < 25) {
//Check for last frame of video
if (capture.get(CV_CAP_PROP_POS_FRAMES) < capture.get(CV_CAP_PROP_FRAME_COUNT)) {
//read current frame
capture.read(frame);
//convert frame to grayscale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
//perform frame differencing
absdiff(background_gray, frame_gray, DifferenceImage);
//obtain threshold image
threshold(DifferenceImage, ThresholdImage, SENSITIVITY, 255, THRESH_BINARY);
//blur image
blur(ThresholdImage, BlurImage, Size(BLUR_SIZE, BLUR_SIZE));
//threshold again
threshold(BlurImage, ThresholdImage2, SENSITIVITY, 255, THRESH_BINARY);
//Find Connected Components
FindConnectedComponents(ThresholdImage2, blobs);
//Find Moments of the blob
mom = moments(blobs[0], true);
//Find Humoments of the blob
HuMoments(mom, Hu);
//Add Humoments to trainingData
for (int i = 0; i < 7; i++) {
trainingData[sample_num][i] = Hu[i];
}
//Next Sample
sample_num++;
//cout << sample_num << endl;
//show results
imshow("Capture", frame);
imshow("Threshold Image", ThresholdImage2);
cvWaitKey(50);
}
else {
//release the capture before re-opening and looping again.
capture.release();
//Open Hand Waving Video
capture.open("handwaving.mp4");
if (!capture.isOpened()) {
cout << "ERROR ACQUIRING VIDEO FEED #1\n";
getchar();
return -1;
}
}
}
capture.release();
//Number of Figting Samples
while (sample_num < 50) {
//Check for last frame of video
if (capture.get(CV_CAP_PROP_POS_FRAMES) < capture.get(CV_CAP_PROP_FRAME_COUNT)) {
//read current frame
capture.read(frame);
//convert frame to grayscale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
//perform frame differencing
absdiff(background_gray, frame_gray, DifferenceImage);
//obtain threshold image
threshold(DifferenceImage, ThresholdImage, SENSITIVITY, 255, THRESH_BINARY);
//blur image
blur(ThresholdImage, BlurImage, Size(BLUR_SIZE, BLUR_SIZE));
//threshold again
threshold(BlurImage, ThresholdImage2, SENSITIVITY, 255, THRESH_BINARY);
//Find Connected Components
FindConnectedComponents(ThresholdImage2, blobs);
//Find Moments of the blob
mom = moments(blobs[0], true);
//Find Humoments of the blob
HuMoments(mom, Hu);
//Add Humoments to trainingData
for (int i = 0; i < 7; i++) {
trainingData[sample_num][i] = Hu[i];
}
//Next Sample
sample_num++;
//cout << sample_num << endl;
//show results
imshow("Capture", frame);
imshow("Threshold Image", ThresholdImage2);
cvWaitKey(50);
}
else {
//release the capture before re-opening and looping again.
capture.release();
//Open Fighting Video
capture.open("fight.mp4");
if (!capture.isOpened()) {
cout << "ERROR ACQUIRING VIDEO FEED #1\n";
getchar();
return -1;
}
}
}
capture.release();
//Number of Crouching Samples
while (sample_num < 75) {
//Check for last frame of video
if (capture.get(CV_CAP_PROP_POS_FRAMES) < capture.get(CV_CAP_PROP_FRAME_COUNT)) {
//read current frame
capture.read(frame);
//convert frame to grayscale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
//perform frame differencing
absdiff(background_gray, frame_gray, DifferenceImage);
//obtain threshold image
threshold(DifferenceImage, ThresholdImage, SENSITIVITY, 255, THRESH_BINARY);
//blur image
blur(ThresholdImage, BlurImage, Size(BLUR_SIZE, BLUR_SIZE));
//threshold again
threshold(BlurImage, ThresholdImage2, SENSITIVITY, 255, THRESH_BINARY);
//Find Connected Components
FindConnectedComponents(ThresholdImage2, blobs);
//Find Moments of the blob
Moments mom = moments(blobs[0], true);
//Find Humoments of the blob
double Hu[7];
HuMoments(mom, Hu);
//Add Humoments to trainingData
for (int i = 0; i < 7; i++) {
trainingData[sample_num][i] = Hu[i];
}
//Next Sample
sample_num++;
//cout << sample_num << endl;
//show results
imshow("Capture", frame);
imshow("Threshold Image", ThresholdImage2);
cvWaitKey(50);
}
else {
//release the capture before re-opening and looping again.
capture.release();
//Open Crouching Video
capture.open("crouch.mp4");
if (!capture.isOpened()) {
cout << "ERROR ACQUIRING VIDEO FEED #1\n";
getchar();
return -1;
}
}
}
capture.release();
//Number of Walking Samples
while (sample_num < 100) {
//Check for last frame of video
if (capture.get(CV_CAP_PROP_POS_FRAMES) < capture.get(CV_CAP_PROP_FRAME_COUNT)) {
//read current frame
capture.read(frame);
//convert frame to grayscale
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
//perform frame differencing
absdiff(background_gray, frame_gray, DifferenceImage);
//obtain threshold image
threshold(DifferenceImage, ThresholdImage, SENSITIVITY, 255, THRESH_BINARY);
//blur image
blur(ThresholdImage, BlurImage, Size(BLUR_SIZE, BLUR_SIZE));
//threshold again
threshold(BlurImage, ThresholdImage2, SENSITIVITY, 255, THRESH_BINARY);
//Find Connected Components
FindConnectedComponents(ThresholdImage2, blobs);
//Find Moments of the blob
Moments mom = moments(blobs[0], true);
//Find Humoments of the blob
double Hu[7];
HuMoments(mom, Hu);
//Add Humoments to trainingData
for (int i = 0; i < 7; i++) {
trainingData[sample_num][i] = Hu[i];
}
//Next Sample
sample_num++;
//cout << sample_num << endl;
//show results
imshow("Capture", frame);
imshow("Threshold Image", ThresholdImage2);
cvWaitKey(50);
}
else {
//release the capture before re-opening and looping again.
capture.release();
//Open Walking Video
capture.open("walk.mp4");
if (!capture.isOpened()) {
cout << "ERROR ACQUIRING VIDEO FEED #1\n";
getchar();
return -1;
}
}
}
capture.release();
//Declare Total Number os labels (1 for each sample)
int labels[100][1];
//Set label for Hand Waving Samples
for (int i = 0; i < 25; i++) {
labels[i][1] = 1;
}
//Set label for Fighting Samples
for (int i = 25; i < 50; i++) {
labels[i][1] = 2;
}
//Set label for Crouching Samples
for (int i = 50; i < 75; i++) {
labels[i][1] = 3;
}
//Set label for Walking Samples
for (int i = 75; i < 100; i++) {
labels[i][1] = 4;
}
//Convert label array to Matrix
Mat matlabels(100, 1, CV_32SC1, labels);
//Convert trainingData Array to Matrix
Mat mattrainingData(100, 7, CV_32FC1, trainingData);
//Setup training parameters
CvSVMParams params;
params.svm_type = CvSVM::C_SVC;
params.kernel_type = CvSVM::LINEAR;
params.term_crit = cvTermCriteria(CV_TERMCRIT_ITER, 100, 1e-6);
//Train
CvSVM SVM;
SVM.train(mattrainingData, matlabels, Mat(), Mat(), params);
//Mat matinput(1, 7, CV_32FC1, testData);
//float ans = SVM.predict(matinput);
return 0;
}
void FindConnectedComponents(const Mat &ThresholdImage2, vector>&blobs) {
//initialize vector
blobs.clear();
blobs.resize(NULL);
// Fill the label_image with the blobs
// 0 - background
// 255 - unlabelled foreground
// 2+ - labelled foreground
//array of laebls
Mat label_image;
//32 bit signed integers
ThresholdImage2.convertTo(label_image, CV_32SC1);
//start of labelling
int label_count = 2;
//Find foreground pixel location(x,y)
for (int y = 0; y < label_image.rows; y++) {
int *row = (int*)label_image.ptr(y);
for (int x = 0; x < label_image.cols; x++) {
if (row[x] != 255) {
continue;
}
Rect rect;
//Use Floodfill to find all neighbouring pixels that are similar
floodFill(label_image, Point(x, y), label_count, &rect, 0, 0, 4);
vector blob;
//Search pixels for those with the same label
for (int i = rect.y; i < (rect.y + rect.height); i++) {
int *row2 = (int*)label_image.ptr(i);
for (int j = rect.x; j < (rect.x + rect.width); j++) {
if (row2[j] != label_count) {
continue;
}
//Set all these points with same label as one blob
blob.push_back(Point2i(j, i));
}
}
//Add blob with label to vector of blobs
blobs.push_back(blob);
//next label
label_count++;
}
}
}
↧