The DetectAndRecognizeFaces function detects faces and passes each detected face image to the Recognise method, which returns the name of the recognized face based on its label.
The LoadTrainingSet function fetches the training data from a SQL database, and the LoadTrainedData method trains the EigenFaceRecognizer.
The problem is that the Predict function never returns -1 for unknown faces; it always returns a match, even when the detected face is not present in the database.
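For reference, here is a minimal sketch of how the unknown-face threshold is normally wired up (this assumes the Emgu CV 3.x EigenFaceRecognizer(int components, double threshold) constructor; the component count and threshold below are illustrative placeholders, and faceImages, faceLabels and testFace stand in for the training images, their integer labels and a probe image). With the default threshold of double.MaxValue, Predict always returns the nearest match and never -1:
// Sketch only: the rejection threshold is supplied at construction time, not at Predict() time.
var recognizer = new Emgu.CV.Face.EigenFaceRecognizer(80, 3500); // illustrative component count and distance threshold
recognizer.Train(faceImages, faceLabels);
FaceRecognizer.PredictionResult pr = recognizer.Predict(testFace);
if (pr.Label == -1)
    Console.WriteLine("Unknown face");
else
    Console.WriteLine("Match: " + pr.Label + ", distance: " + pr.Distance);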
The Code:
private void DetectAndRecognizeFaces()
{
Image<Gray, byte> grayframe = ImageFrame.Convert<Gray, byte>();
//Assign user-defined Values to parameter variables:
minNeighbors = int.Parse(comboBoxMinNeigh.Text); // the 3rd parameter
windowsSize = int.Parse(textBoxWinSize.Text); // the 5th parameter
scaleIncreaseRate = Double.Parse(comboBoxScIncRte.Text); //the 2nd parameter
//detect faces in the gray-scale image; DetectMultiScale returns an array of rectangles (Rectangle[])
var faces = haar.DetectMultiScale(grayframe, scaleIncreaseRate, minNeighbors, Size.Empty); //the actual face detection happens here
MessageBox.Show("Total Faces Detected: " + faces.Length.ToString());
Bitmap BmpInput = grayframe.ToBitmap();
Bitmap ExtractedFace; //empty
Graphics grp;
//MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
faceRecognizer.Load(recognizeFilePath);
foreach (var face in faces)
{
t = t + 1;
result = ImageFrame.Copy(face).Convert<Gray, byte>().Resize(100, 100, Inter.Cubic);
//set the size of the empty box(ExtractedFace) which will later contain the detected face
ExtractedFace = new Bitmap(face.Width, face.Height);
//assign the empty box to graphics for painting
grp = Graphics.FromImage(ExtractedFace);
//graphics fills the empty box with exact pixels of the face to be extracted from input image
grp.DrawImage(BmpInput, 0, 0, face, GraphicsUnit.Pixel);
string name = Recognise(result);
if (name == "Unknown")
{
ImageFrame.Draw(face, new Bgr(Color.Red), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 0, 255), 1, LineType.EightConnected, bottomLeftOrigin);
}
else
{
ImageFrame.Draw(face, new Bgr(Color.Green), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 255, 0), 1, LineType.EightConnected, bottomLeftOrigin);
}
CamImageBox.Image = ImageFrame;
}
}
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
if (_IsTrained)
{
faceRecognizer.Load(recognizeFilePath);
FaceRecognizer.PredictionResult ER = faceRecognizer.Predict(Input_image);
if (ER.Label == -1)
{
Eigen_Label = "Unknown";
Eigen_Distance = 0;
return Eigen_Label;
}
else
{
Eigen_Label = Names_List[ER.Label];
Eigen_Distance = (float)ER.Distance;
if (Eigen_Thresh > -1) Eigen_threshold = Eigen_Thresh;
//Only use the post threshold rule if we are using an Eigen Recognizer
//since Fisher and LBHP threshold set during the constructor will work correctly
switch (Recognizer_Type)
{
case ("EMGU.CV.EigenFaceRecognizer"):
if (Eigen_Distance > Eigen_threshold) return Eigen_Label;
else return "Unknown";
case ("EMGU.CV.LBPHFaceRecognizer"):
case ("EMGU.CV.FisherFaceRecognizer"):
default:
return Eigen_Label; //the threshold set in training controls unknowns
}
}
}
else return "";
}
private void LoadTrainingSet()
{
Bitmap bmpImage;
for (int i = 0; i < totalRows; i++)
{
byte[] fetchedBytes = (byte[])dataTable.Rows[i]["FaceImage"];
MemoryStream stream = new MemoryStream(fetchedBytes);
//stream.Write(fetchedBytes, 0, fetchedBytes.Length);
bmpImage = new Bitmap(stream);
trainingImages.Add(new Emgu.CV.Image<Gray, Byte>(bmpImage).Resize(100, 100, Inter.Cubic));
//string faceName = (string)dataTable.Rows[i]["FaceName"];
int faceName = (int)dataTable.Rows[i]["FaceID"];
NameLabels.Add(faceName);
NameLable = (string)dataTable.Rows[i]["FaceName"];
Names_List.Add(NameLable);
//ContTrain = NameLabels[i];
}
LoadTrainedData();
}
public void LoadTrainedData()
{
if (trainingImages.ToArray().Length != 0)
{
var faceImages = new Image<Gray, byte>[trainingImages.Count()];
var facesIDs = new int[NameLabels.Count()];
//var facesNames = new string[Names_List.Count()];
//int[] faceLabels = new int[NameLabels.Count()];
//MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
for (int i = 0; i < trainingImages.ToArray().Length; i++)
{
faceImages[i] = trainingImages[i];
facesIDs[i] = NameLabels[i];
}
try
{
faceRecognizer.Train(faceImages, facesIDs);
faceRecognizer.Save(recognizeFilePath);
_IsTrained = true;
}
catch (Exception error)
{
MessageBox.Show(error.ToString());
}
}
}
Related
I want to do image stitching with OpenCvSharp, but something goes wrong: I do not get the right output of the two images stitched together.
I was following the tutorial from the Python link and converting it into C# code.
Here are two example photos.
Where the images should be stitched I get the wrong result: the output looks the same as image 2.
I guess something goes wrong in this line, or somewhere after it:
Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
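A quick sanity check that might help before the warp (a sketch on my part, assuming OpenCvSharp's Point2f overload of Cv2.PerspectiveTransform): project the corners of trainImg through H and see whether they land roughly inside the query image. If they do not, H (or the query/train order of the matched points) is probably inverted.
// Sanity-check sketch: where do the train image corners end up under H?
Point2f[] corners =
{
    new Point2f(0, 0),
    new Point2f(trainImg.Width, 0),
    new Point2f(trainImg.Width, trainImg.Height),
    new Point2f(0, trainImg.Height)
};
Point2f[] projected = Cv2.PerspectiveTransform(corners, H);
foreach (Point2f p in projected)
    Console.WriteLine(p); // expect coordinates that overlap the query image area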
Here is the full code.
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
bool debugImages = true;
string locationFolder = "";
OpenFileDialog dlg = new OpenFileDialog();
dlg.CheckFileExists = true;
dlg.Multiselect = true;
if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
{
locationFolder = Path.GetDirectoryName(dlg.FileNames[0]) + "\\output\\";
List<Mat> imagesMat = new List<Mat>();
for (var i = 0; i < dlg.FileNames.Length; i++)
{
using (Bitmap fromFile = new Bitmap(dlg.FileNames[i]))
{
Mat source = BitmapConverter.ToMat(fromFile);
imagesMat.Add(source);
}
}
if (imagesMat.Count != 2)
throw new Exception("Select only 2 images!!!");
int imageCounter = 0;
Mat trainImg = imagesMat[0];
Mat queryImg = imagesMat[1];
Mat trainImg_gray = new Mat();
Mat queryImg_gray = new Mat();
Cv2.CvtColor(trainImg, trainImg_gray, ColorConversionCodes.BGRA2GRAY);
Cv2.CvtColor(queryImg, queryImg_gray, ColorConversionCodes.BGRA2GRAY);
// detecting keypoints
// FastFeatureDetector, StarDetector, SIFT, SURF, ORB, BRISK, MSER, GFTTDetector, DenseFeatureDetector, SimpleBlobDetector
string method = "SURF";
string feature_matching = "bf"; //bf, knn
var descriptor = SURF.Create(500, 4, 2, true);
Mat descriptors1 = new Mat();
Mat descriptors2 = new Mat();
KeyPoint[] kpsA;
KeyPoint[] kpsB;
descriptor.DetectAndCompute(trainImg_gray, null, out kpsA, descriptors1);
descriptor.DetectAndCompute(queryImg_gray, null, out kpsB, descriptors2);
// Match descriptor vectors
//var flannMatcher = new FlannBasedMatcher();
DMatch[] matches;
if (feature_matching == "bf")
matches = matchKeyPointsBF(descriptors1, descriptors2, method);
else
matches = matchKeyPointsKNN(descriptors1, descriptors2, 0.75, method);
var bfView = new Mat();
Cv2.DrawMatches(trainImg, kpsA, queryImg, kpsB, matches, bfView, null, flags: DrawMatchesFlags.NotDrawSinglePoints);
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(bfView))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //1
}
Mat H = getHomography(kpsA, kpsB, descriptors1, descriptors2, matches, 4);
if (H == null)
throw new Exception("No Homography!!!");
//for (var i = 0; i < H.Cols; i++)
//{
// for (var j = 0; j < H.Rows; j++)
// Console.Write(H.At<float>(i, j) + " ");
// Console.WriteLine("");
//}
double width = trainImg.Size().Width + queryImg.Size().Width;
double height = trainImg.Size().Height + queryImg.Size().Height;
Mat result = new Mat();
Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //1
}
result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(queryImg.Size().Width, queryImg.Size().Height))] = queryImg;
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //2
}
//# transform the panorama image to grayscale and threshold it
Mat gray = result.Clone();
Cv2.CvtColor(result, gray, ColorConversionCodes.BGR2GRAY);
Mat thresh = new Mat();
double thresh2 = Cv2.Threshold(gray, thresh, 0, 255, ThresholdTypes.Binary);
//# Finds contours from the binary image
OpenCvSharp.Point[][] cnts;
HierarchyIndex[] hierarchy;
Cv2.FindContours(thresh, out cnts, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
OpenCvSharp.Point[] cnts2 = new OpenCvSharp.Point[cnts[0].Length];
for (var k = 0; k < cnts[0].Length; k++)
cnts2[k] = cnts[0][k];
//InputArray ptsA = InputArray.Create(cnts2);
//var c = Cv2.ContourArea(ptsA, true);
OpenCvSharp.Rect xywh = Cv2.BoundingRect(cnts2);
result = result[new Rect(new OpenCvSharp.Point(xywh.X, xywh.Y), new OpenCvSharp.Size(xywh.Width, xywh.Height))];
//result = result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(256, 256))];
Bitmap endResultBitmap = BitmapConverter.ToBitmap(result);
endResultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //4
Environment.Exit(-1);
}
}
private BFMatcher createMatcher(string method, bool crossCheck)
{
//"Create and return a Matcher Object"
if (method == "SURF" || method == "SIFT")
return new BFMatcher(NormTypes.L2, crossCheck);
else //if (method == "ORB" || method == "BRISK")
return new BFMatcher(NormTypes.Hamming, crossCheck);
}
private DMatch[] matchKeyPointsBF(Mat featuresA, Mat featuresB, string method)
{
BFMatcher bf = createMatcher(method, crossCheck: true);
// # Match descriptors.
DMatch[] bfMatches = bf.Match(featuresA, featuresB);
//# Sort the features in order of distance.
//# The points with small distance (more similarity) are ordered first in the vector
DMatch[] rawMatches = bfMatches.OrderBy(a => a.Distance).ToArray();
if (rawMatches.Length > 100)
Array.Resize(ref rawMatches, 100);
return rawMatches;
}
private DMatch[] matchKeyPointsKNN(Mat featuresA, Mat featuresB, double ratio, string method)
{
BFMatcher bf = createMatcher(method, crossCheck: false);
// # compute the raw matches and initialize the list of actual matches
DMatch[][] rawMatches = bf.KnnMatch(featuresA, featuresB, 2);
List<DMatch> rawMatches2 = new List<DMatch>();
//# loop over the raw matches
DMatch prevmatchN = rawMatches[0][0];
rawMatches2.Add(prevmatchN);
for (int m = 0; m < rawMatches.Length; m++)
{
for (int n = 0; n < rawMatches[m].Length; n++)
{
//# ensure the distance is within a certain ratio of each
//# other (i.e. Lowe's ratio test)
DMatch matchN = rawMatches[m][n];
if (n == 0)
prevmatchN = matchN;
if (prevmatchN.Distance < matchN.Distance * (ratio))
rawMatches2.Add(matchN);
if (rawMatches2.Count >= 100)
break;
}
}
return rawMatches2.ToArray();
}
private Mat getHomography(KeyPoint[] kpsA, KeyPoint[] kpsB, Mat featuresA, Mat featuresB, DMatch[] matches, int reprojThresh)
{
//# convert the keypoints to numpy arrays
Point2f[] PtA = new Point2f[matches.Length];
Point2f[] PtB = new Point2f[matches.Length];
for (int i = 0; i < matches.Length; i++)
{
KeyPoint kpsAI = kpsA[matches[i].QueryIdx];
KeyPoint kpsBI = kpsB[matches[i].TrainIdx];
PtA[i] = new Point2f(kpsAI.Pt.X, kpsAI.Pt.Y);
PtB[i] = new Point2f(kpsBI.Pt.X, kpsBI.Pt.Y);
}
InputArray ptsA = InputArray.Create(PtA);
InputArray ptsB = InputArray.Create(PtB);
if (matches.Length > 4)
{
//You get the homography matrix using FindHomography with RANSAC:
Mat H = Cv2.FindHomography(ptsA, ptsB, HomographyMethods.Ransac, reprojThresh);
//and then to get any point on the target picture from the original picture:
//Mat targetPoint = new Mat();
//Cv2.PerspectiveTransform(ptsA, targetPoint, H);
return H;
}
else
return null;
}
}
How can I calculate the actual speed of all moving objects in a video, using the Lucas-Kanade algorithm to compute the optical flow?
I need to do this on this video. The camera is fixed in one place (Fig. 1).
I find the key points and track them using the Lucas-Kanade algorithm (Fig. 2).
How can I use this algorithm to estimate the actual speed of each car?
Thank you for your answers!
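For what it's worth, the speed of a single tracked point is just the displacement returned by CalcOpticalFlowPyrLK scaled by the frame rate and by a pixels-to-metres factor; that scale has to come from camera calibration or a known reference distance in the scene, which the video alone does not give me. A rough sketch, reusing prevPts, nextPts and i from the tracking loop in my code below (fps and metresPerPixel are assumed values):
// Sketch: convert the LK displacement of one tracked point into an approximate speed.
double fps = 25.0;            // assumed frame rate of the video
double metresPerPixel = 0.05; // assumed ground-plane scale, must come from calibration
double dx = nextPts[i].X - prevPts[i].X;
double dy = nextPts[i].Y - prevPts[i].Y;
double pixelsPerFrame = Math.Sqrt(dx * dx + dy * dy);
double speedMs = pixelsPerFrame * fps * metresPerPixel; // metres per second
double speedKmh = speedMs * 3.6;                        // kilometres per hour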
My code:
public class OpticalFlowLK : BaseFilter
{
private Mat prevFrame;
private Mat nextFrame;
private bool prevFrameEmpty = true;
private GFTTDetector gFTTDetector;
private Stopwatch sWatch;
private double time = 0.04;
public OpticalFlowLK()
{
TAG = "[Optical Flow Lucas Kanade]";
gFTTDetector = new GFTTDetector(500);
sWatch = new Stopwatch();
}
protected override Mat ProcessFrame(ref Mat frame)
{
Mat rez = new Mat();
frame.CopyTo(rez);
nextFrame = new Mat();
Mat gray = new Mat();
var tmpImg = gray.ToImage<Gray, Byte>();
CvInvoke.CvtColor(frame, nextFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
if (!prevFrameEmpty)
{
VectorOfKeyPoint prevFeatures = new VectorOfKeyPoint(gFTTDetector.Detect(prevFrame));
//Features2DToolbox.DrawKeypoints(rez, prevFeatures, rez, new Bgr(0, 0, 255));
PointF[] prevPts = new PointF[prevFeatures.Size];
for (int i = 0; i < prevFeatures.Size; i++)
{
prevPts[i] = prevFeatures[i].Point;
}
PointF[] nextPts;
byte[] status;
float[] errors;
sWatch.Start();
CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, prevPts, new Size(20, 20), 1, new MCvTermCriteria(20, 0.03), out nextPts, out status, out errors);
sWatch.Stop();
sWatch.Reset();
prevFrame = nextFrame.Clone();
for (int i = 0; i < status.Length; i++)
{
Point prevPt = new Point((int)prevPts[i].X, (int)prevPts[i].Y);
Point nextPt = new Point((int)nextPts[i].X,(int)nextPts[i].Y);
double lenght = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
if (lenght > 3)
{
CvInvoke.Circle(rez, nextPt, 1, new MCvScalar(0, 255, 0), 2);
}
}
sWatch.Stop();
prevFrameEmpty = false;
}
else if (prevFrameEmpty)
{
prevFrame = nextFrame.Clone();
prevFrameEmpty = false;
}
return rez;
}
protected override bool InitFilter(ref Mat frame)
{
throw new NotImplementedException();
}
}
I am using the code below to capture the face: http://www.codeproject.com/Articles/239849/Multiple-face-detection-and-recognition-in-real. It works as I expected, but I want to increase the height and width of the detected area.
Below is the sample code:
public partial class FrmPrincipal : Form
{
//Declararation of all variables, vectors and haarcascades
Image<Bgr, Byte> currentFrame;
Capture grabber;
HaarCascade face;
HaarCascade eye;
MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
Image<Gray, byte> result, TrainedFace = null;
Image<Gray, byte> gray = null;
List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
List<string> labels= new List<string>();
List<string> NamePersons = new List<string>();
int ContTrain, NumLabels, t;
string name, names = null;
public FrmPrincipal()
{
InitializeComponent();
//Load haarcascades for face detection
face = new HaarCascade("haarcascade_frontalface_default.xml");
//eye = new HaarCascade("haarcascade_eye.xml");
try
{
//Load previously trained faces and labels for each image
string Labelsinfo = File.ReadAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt");
string[] Labels = Labelsinfo.Split('%');
NumLabels = Convert.ToInt16(Labels[0]);
ContTrain = NumLabels;
string LoadFaces;
for (int tf = 1; tf < NumLabels+1; tf++)
{
LoadFaces = "face" + tf + ".bmp";
trainingImages.Add(new Image<Gray, byte>(Application.StartupPath + "/TrainedFaces/" + LoadFaces));
labels.Add(Labels[tf]);
}
}
catch(Exception e)
{
//MessageBox.Show(e.ToString());
MessageBox.Show("Nothing in binary database, please add at least a face (simply train the prototype with the Add Face button).", "Trained faces load", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
}
}
private void button1_Click(object sender, EventArgs e)
{
//Initialize the capture device
grabber = new Capture();
grabber.QueryFrame();
//Initialize the FrameGrabber event
Application.Idle += new EventHandler(FrameGrabber);
button1.Enabled = false;
}
private void button2_Click(object sender, System.EventArgs e)
{
try
{
//Trained face counter
ContTrain = ContTrain + 1;
//Get a gray frame from capture device
gray = grabber.QueryGrayFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
//Face Detector
MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
face,
1.2,
10,
Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(20, 20));
//Action for each element detected
foreach (MCvAvgComp f in facesDetected[0])
{
TrainedFace = currentFrame.Copy(f.rect).Convert<Gray, byte>();
break;
}
//Resize the detected face so it is the same size as the test images (cubic interpolation)
TrainedFace = result.Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
trainingImages.Add(TrainedFace);
labels.Add(textBox1.Text);
//Show face added in gray scale
imageBox1.Image = TrainedFace;
//Write the number of trained faces to a text file for later loading
File.WriteAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", trainingImages.ToArray().Length.ToString() + "%");
//Write the labels of the trained faces to the same text file for later loading
for (int i = 1; i < trainingImages.ToArray().Length + 1; i++)
{
trainingImages.ToArray()[i - 1].Save(Application.StartupPath + "/TrainedFaces/face" + i + ".bmp");
File.AppendAllText(Application.StartupPath + "/TrainedFaces/TrainedLabels.txt", labels.ToArray()[i - 1] + "%");
}
MessageBox.Show(textBox1.Text + "´s face detected and added :)", "Training OK", MessageBoxButtons.OK, MessageBoxIcon.Information);
}
catch
{
MessageBox.Show("Enable the face detection first", "Training Fail", MessageBoxButtons.OK, MessageBoxIcon.Exclamation);
}
}
void FrameGrabber(object sender, EventArgs e)
{
label3.Text = "0";
//label4.Text = "";
NamePersons.Add("");
//Get the current frame from the capture device
currentFrame = grabber.QueryFrame().Resize(320, 240, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
//Convert it to Grayscale
gray = currentFrame.Convert<Gray, Byte>();
//Face Detector
MCvAvgComp[][] facesDetected = gray.DetectHaarCascade(
face,
1.4,
10,
Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(20, 20));
//Action for each element detected
foreach (MCvAvgComp f in facesDetected[0])
{
t = t + 1;
result = currentFrame.Copy(f.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
//draw a rectangle around the detected face in red
currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
if (trainingImages.ToArray().Length != 0)
{
//TermCriteria for face recognition with numbers of trained images like maxIteration
MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
//Eigen face recognizer
EigenObjectRecognizer recognizer = new EigenObjectRecognizer(
trainingImages.ToArray(),
labels.ToArray(),
3000,
ref termCrit);
name = recognizer.Recognize(result);
//Draw the label for each face detected and recognized
currentFrame.Draw(name, ref font, new Point(f.rect.X - 2, f.rect.Y - 2), new Bgr(Color.LightGreen));
}
NamePersons[t-1] = name;
NamePersons.Add("");
//Set the number of faces detected on the scene
label3.Text = facesDetected[0].Length.ToString();
/*
//Set the region of interest on the faces
gray.ROI = f.rect;
MCvAvgComp[][] eyesDetected = gray.DetectHaarCascade(
eye,
1.1,
10,
Emgu.CV.CvEnum.HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
new Size(20, 20));
gray.ROI = Rectangle.Empty;
foreach (MCvAvgComp ey in eyesDetected[0])
{
Rectangle eyeRect = ey.rect;
eyeRect.Offset(f.rect.X, f.rect.Y);
currentFrame.Draw(eyeRect, new Bgr(Color.Blue), 2);
}
*/
}
t = 0;
//Names concatenation of persons recognized
for (int nnn = 0; nnn < facesDetected[0].Length; nnn++)
{
names = names + NamePersons[nnn] + ", ";
}
//Show the faces processed and recognized
imageBoxFrameGrabber.Image = currentFrame;
label4.Text = names;
names = "";
//Clear the list(vector) of names
NamePersons.Clear();
}
private void button3_Click(object sender, EventArgs e)
{
Process.Start("Donate.html");
}
}
Please suggest the best solution. If not, what is another way to do it?
Thanks in advance!
This is rather trivial, but you need to understand the code.
After you get the detection result in the variable f, you should change this line
currentFrame.Draw(f.rect, new Bgr(Color.Red), 2);
to draw the new rectangle relative to the position of the detected one. Note that the detection result itself doesn't change; you only change what you show the user.
So it should look something like this:
Rectangle newFaceRect = f.rect;
newFaceRect.X = (int)(f.rect.X - f.rect.Width / 4);
newFaceRect.Y = (int)(f.rect.Y - f.rect.Height / 4);
newFaceRect.Width = (int)(f.rect.Width * 1.5);
newFaceRect.Height = (int)(f.rect.Height * 1.5);
currentFrame.Draw(newFaceRect, new Bgr(Color.Black), 2);
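One extra note from me, not part of the original answer: if the enlarged rectangle is also used to Copy the face region rather than only to draw it, clamp it to the frame bounds first, otherwise it can fall outside the 320x240 image near the edges:
//My addition: keep the enlarged rectangle inside the frame before using it with Copy()
newFaceRect.Intersect(new Rectangle(0, 0, currentFrame.Width, currentFrame.Height));
currentFrame.Draw(newFaceRect, new Bgr(Color.Black), 2);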
I get the message above when I try to run my algorithm on more than 26 frames.
My program is in C#; from my experience with OpenCV in Java (Android) I know that I need to release the matrices. Do I need to do that here as well?
This is the part with the problem:
private void toolStripButton2_Click(object sender, EventArgs e)
{
for (int i = 0; i < FRAME_SIZE; i++)
{
ColorImage = _Capture.QueryFrame();
if (ColorImage != null)
{
GrayImage = ColorImage.Convert<Gray, float>();
toolStripProgressBar1.Value = i;
if (i == 0)
{
Max_G2 = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
mainimage = new Image<Bgr, byte>(GrayImage.Width, GrayImage.Height);
}
FindMax(GrayImage.Copy());
}
else
{
toolStripStatusLabel1.Text = "The video is too short!";
break;
}
}
}
private void FindMax(Image<Gray, float> CurrGray)
{
Image<Gray, float> TempImg = new Image<Gray, float>(CurrGray.Width, CurrGray.Height);
Matrix<float> kernel1 = new Matrix<float>(new float[1, 2] { { -1, 1 } });
Matrix<float> kernel2 = new Matrix<float>(new float[2, 1] { { -1 }, { 1 } });
Point anchor = new Point(0, 0);
CvInvoke.cvFilter2D(CurrGray, TempImg, kernel1, anchor);
CvInvoke.cvFilter2D(CurrGray, CurrGray, kernel2, anchor);
TempImg = TempImg.Pow(2);
CurrGray = CurrGray.Pow(2);
CurrGray = CurrGray.Add(TempImg);
CurrGray = CurrGray.Pow(0.5);
Max_G2._Max(CurrGray);
}
One more thing: I already tried to dispose of all the matrices and images, but it doesn't work for me.
What am I missing here?
Thanks!
EDIT 1: (Code with dispose)
private void toolStripButton2_Click(object sender, EventArgs e)
{
for (int i = 0; i < FRAME_SIZE; i++)
{
ColorImage = _Capture.QueryFrame();
if (ColorImage != null)
{
GrayImage = ColorImage.Convert<Gray, float>();
toolStripProgressBar1.Value = i;
if (i == 0)
{
Max_G2 = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
TempImg = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
mainimage = new Image<Bgr, byte>(GrayImage.Width, GrayImage.Height);
}
FindMax(GrayImage);
ColorImage.Dispose();
ColorImage = null;
GrayImage.Dispose();
GrayImage = null;
Thread.Sleep(100);
}
else
{
toolStripStatusLabel1.Text = "The video is too short!";
break;
}
}
}
private void FindMax(Image<Gray, float> CurrGray)
{
Matrix<float> kernel1 = new Matrix<float>(new float[1, 2] { { -1, 1 } });
Matrix<float> kernel2 = new Matrix<float>(new float[2, 1] { { -1 }, { 1 } });
Point anchor = new Point(0, 0);
CvInvoke.cvFilter2D(CurrGray, TempImg, kernel1, anchor);
CvInvoke.cvFilter2D(CurrGray, CurrGray, kernel2, anchor);
TempImg.Pow(2);
CurrGray.Pow(2);
CurrGray.Add(TempImg);
CurrGray.Pow(0.5);
Max_G2._Max(CurrGray);
CurrGray.Dispose();
CurrGray = null;
}
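One thing I would double-check, as an assumption on my side about how Emgu's Image<,> arithmetic works: Pow and Add return new images instead of modifying the input in place, so TempImg.Pow(2) and CurrGray.Add(TempImg) in the edited FindMax both allocate images whose results are discarded and never disposed. Here is a sketch of FindMax with every intermediate wrapped in using (Max_G2 is the same field as above):
private void FindMax(Image<Gray, float> currGray)
{
    Matrix<float> kernel1 = new Matrix<float>(new float[1, 2] { { -1, 1 } });
    Matrix<float> kernel2 = new Matrix<float>(new float[2, 1] { { -1 }, { 1 } });
    Point anchor = new Point(0, 0);
    using (Image<Gray, float> tempImg = new Image<Gray, float>(currGray.Width, currGray.Height))
    {
        CvInvoke.cvFilter2D(currGray, tempImg, kernel1, anchor);
        CvInvoke.cvFilter2D(currGray, currGray, kernel2, anchor);
        //Pow and Add allocate new images, so each intermediate is disposed as soon as it is used
        using (Image<Gray, float> gx2 = tempImg.Pow(2))
        using (Image<Gray, float> gy2 = currGray.Pow(2))
        using (Image<Gray, float> sum = gx2.Add(gy2))
        using (Image<Gray, float> magnitude = sum.Pow(0.5))
        {
            Max_G2._Max(magnitude);
        }
    }
    kernel1.Dispose();
    kernel2.Dispose();
}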
There is a background image, and text is written onto that image for each of 184 rows of data.
So 184 image files are expected to be generated, each with the same background image but different text. (The code is shown below.)
The problem is that the first row's text is written for all 184 different rows of data. I think I have to reset something in the loop, but what is it?
for (int i = 0; i < dt.Rows.Count; i++) {
date = Convert.ToDateTime(dt.Rows[i]["PAYMENT_DATE"]);
branchCode = Convert.ToInt32(dt.Rows[i]["BRANCH_CODE"]);
refNum = Convert.ToInt32(dt.Rows[i]["REF_NUM"]);
accountNumber = Convert.ToInt32(dt.Rows[i]["ACCOUNT_NUMBER"]);
email = dt.Rows[i]["EMAIL"].ToString();
tableCode = dt.Rows[i]["CUSTOMER_TABLE_CODE"].ToString();
TranLogKey logKey = new TranLogKey(date, branchCode, refNum);
TranLogEntry entry = Log.SelectLogEntry(logKey, false);
if (Intertech.Core.Framework.Context.CurrentContext.LogEntry == null)
Intertech.Core.Framework.Context.CurrentContext.LogEntry = entry;
try {
receiptText = TransactionManager.GenerateReceipt(true, logKey, null, null, accountNumber, false);
}
catch (Exception exp) {
continue;
}
if (receiptText != null) {
if (receiptText.IndexOf("SURETTİR\r\n") != -1)
receiptText = receiptText.Substring(receiptText.IndexOf("SURETTİR\r\n") + 10).Trim();
if (receiptText.IndexOf("İşlemi Yapan") != -1)
receiptText = receiptText.Substring(0, receiptText.IndexOf("İşlemi Yapan"));
if (receiptText.IndexOf("MÜŞTERİ İMZASI") != -1)
receiptText = receiptText.Substring(0, receiptText.IndexOf("MÜŞTERİ İMZASI"));
Bitmap bmp = (Bitmap)Bitmap.FromFile(imageDir);
Graphics g = Graphics.FromImage(bmp);
SizeF size;
Font font = new Font("Courier New", 8, FontStyle.Regular);
byte ALPHA = 200;
size = g.MeasureString(receiptText, font);
Bitmap waterbmp = new Bitmap((int)size.Width, (int)size.Height);
Graphics waterg = Graphics.FromImage(waterbmp);
waterg.DrawString(receiptText, font, new SolidBrush(System.Drawing.Color.Black), 2, 2);
DrawWatermark(ref waterbmp, ref bmp, LeftIndex, TopIndex, ALPHA);
bmp.Save(ms, System.Drawing.Imaging.ImageFormat.Jpeg);
try {
GoServices.Core.SendMailOutside("The Portugal Life", "info#portugal.tr.friendship.pt",
"blgnklc#skype-account.com", " e-Wish " + email, "", new string[] { "dekont.jpg" }, new object[] { ms.ToArray() });
LogNotificationState("K", refNum, accountNumber, 2, true, null, tableCode);
}
catch (Exception ex) {
LogNotificationState("K", 0, -1, 2, false, ex, tableCode);
}
}
}
private static void DrawWatermark(ref Bitmap watermark_bm, ref Bitmap result_bm, int x, int y, byte ALPHA) {
System.Drawing.Color clr;
int py, px;
for (py = 0; py <= watermark_bm.Height - 1; py++) {
for (px = 0; px <= watermark_bm.Width - 1; px++) {
clr = watermark_bm.GetPixel(px, py);
if (clr.A != 0 || clr.R != 0 || clr.G != 0 || clr.B != 0)
watermark_bm.SetPixel(px, py, System.Drawing.Color.FromArgb(ALPHA, clr.R, clr.G, clr.B));
}
}
Graphics gr = Graphics.FromImage(result_bm);
gr.DrawImage(watermark_bm, x, y);
}
I would look at how you are creating the bitmap.
Bitmap bmp = (Bitmap)Bitmap.FromFile(imageDir);
Does this line need to be there every time, i.e. is imageDir the same all the time?
Do you dispose of the generated bitmap after creating it?
What happens in this function:
DrawWatermark(ref waterbmp, ref bmp, LeftIndex, TopIndex, ALPHA);
You seem to save the bmp afterwards but never dispose of it.
I have seen some strange behaviour from GDI+, so I would start here.
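Following up on that with a sketch (the MemoryStream handling is an assumption on my part, since ms is not declared inside the loop shown in the question): creating a fresh stream per row and disposing the bitmaps on every iteration ensures ToArray() holds only the current JPEG instead of always starting with the first one.
//Sketch: per-row cleanup inside the loop, using the same fields and helpers as in the question
Bitmap bmp = (Bitmap)Bitmap.FromFile(imageDir);
Bitmap waterbmp = null;
try
{
    SizeF size;
    using (Graphics g = Graphics.FromImage(bmp))
        size = g.MeasureString(receiptText, font);
    waterbmp = new Bitmap((int)size.Width, (int)size.Height);
    using (Graphics waterg = Graphics.FromImage(waterbmp))
        waterg.DrawString(receiptText, font, Brushes.Black, 2, 2);
    DrawWatermark(ref waterbmp, ref bmp, LeftIndex, TopIndex, ALPHA);
    using (MemoryStream ms = new MemoryStream()) //a fresh stream per row
    {
        bmp.Save(ms, System.Drawing.Imaging.ImageFormat.Jpeg);
        //... SendMailOutside(..., ms.ToArray()) as before ...
    }
}
finally
{
    if (waterbmp != null) waterbmp.Dispose();
    bmp.Dispose();
}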