Currently I'm trying to write a small programm detecting faces.
I want to cut the grayframe for detection into pieces of the size of the rectangles of the detected faces.
This is my method for the operation:
public List<PreviewImage> GetDetectedSnippets(Capture capture, ProcessType processType)
{
var mat = capture?.QueryFrame();
var imageList = new List<PreviewImage>();
if (mat == null)
return imageList;
var imageframe = mat.ToImage<Bgr, byte>();
var grayframe = imageframe.Convert<Gray, byte>();
Rectangle[] faces = null;
try
{
switch (processType)
{
case ProcessType.Front:
{
faces = _cascadeFrontDefault.DetectMultiScale(grayframe, 1.25, 10, Size.Empty);
}
break;
case ProcessType.Profile:
{
faces = _cascadeProfileFace.DetectMultiScale(grayframe, 1.25, 10, Size.Empty);
}
break;
default:
{
return imageList;
}
}
}
catch (Exception ex)
{
Debug.WriteLine("Could not process snapshot: " + ex);
return imageList;
}
foreach (var face in faces)
{
var detectedImage = imageframe.Clone();
detectedImage.Draw(face, new Bgr(Color.BlueViolet), 4);
var detectedGrayframe = grayframe.GrabCut(face, 1); // This isn't working. Here should the grayframe be cutted into a smaller piece.
imageList.Add(new PreviewImage(detectedImage, detectedGrayframe));
}
return imageList;
}
And this is the previewImage class:
public class PreviewImage
{
public Image<Bgr, byte> Original { get; }
public Image<Gray, byte> Grayframe { get; }
public PreviewImage(Image<Bgr, byte> original, Image<Gray, byte> grayframe)
{
Original = original;
Grayframe = grayframe;
}
}
How can I cut the grayframe into a piece with the size of the given rectangle?
This will do the work:
grayframe.ROI = face;
var detectedGrayframe = grayframe.Copy();
grayframe.ROI = Rectange.Empty;
Related
I am creating an Attendance System using 4 cameras for facial recognition. I am using Emgu CV 3.0 in C#. Now, in my attendance form, which consist of 4 imagebox, the application suddenly stops and it goes back to the main form and shows an error to the button which reference the attendance form. The error was:
Attempted to read or write protected memory. This is often an indication that other memory is corrupt.
Here is the code where the error occured:
private void btn_attendance_Click(object sender, EventArgs e)
{
attendance attendance = new attendance();
attendance.ShowDialog();
}
Here is the code for the Attendance form without the recognition part:
public partial class attendance : Form
{
private Capture cam1, cam2, cam3, cam4;
private CascadeClassifier _cascadeClassifier;
private RecognizerEngine _recognizerEngine;
private String _trainerDataPath = "\\traineddata_v2";
private readonly String dbpath = "Server=localhost;Database=faculty_attendance_system;Uid=root;Pwd=root;";
MySqlConnection conn;
public attendance()
{
InitializeComponent();
conn = new MySqlConnection("Server=localhost;Database=faculty_attendance_system;Uid=root;Pwd=root;");
}
private void btn_home_Click(object sender, EventArgs e)
{
this.Close();
}
private void attendance_Load(object sender, EventArgs e)
{
time_now.Start();
lbl_date.Text = DateTime.Now.ToString("");
_recognizerEngine = new RecognizerEngine(dbpath, _trainerDataPath);
_cascadeClassifier = new CascadeClassifier(Application.StartupPath + "/haarcascade_frontalface_default.xml");
cam1 = new Capture(0);
cam2 = new Capture(1);
cam3 = new Capture(3);
cam4 = new Capture(4);
Application.Idle += new EventHandler(ProcessFrame);
}
private void ProcessFrame(Object sender, EventArgs args)
{
Image<Bgr, byte> nextFrame_cam1 = cam1.QueryFrame().ToImage<Bgr, Byte>();
Image<Bgr, byte> nextFrame_cam2 = cam2.QueryFrame().ToImage<Bgr, Byte>();
Image<Bgr, byte> nextFrame_cam3 = cam3.QueryFrame().ToImage<Bgr, Byte>();
Image<Bgr, byte> nextFrame_cam4 = cam4.QueryFrame().ToImage<Bgr, Byte>();
using (nextFrame_cam1)
{
if (nextFrame_cam1 != null)
{
Image<Gray, byte> grayframe = nextFrame_cam1.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam1.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam1.Bitmap));
}
imageBox1.Image = nextFrame_cam1;
}
}
using (nextFrame_cam2)
{
if (nextFrame_cam2!= null)
{
Image<Gray, byte> grayframe = nextFrame_cam2.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam2.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam2.Bitmap));
}
imageBox2.Image = nextFrame_cam2;
}
}
using (nextFrame_cam3)
{
if (nextFrame_cam3!= null)
{
Image<Gray, byte> grayframe = nextFrame_cam3.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam3.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam3.Bitmap));
}
imageBox3.Image = nextFrame_cam3;
}
}
using (nextFrame_cam4)
{
if (nextFrame_cam4!= null)
{
Image<Gray, byte> grayframe = nextFrame_cam4.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam4.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam4.Bitmap));
}
imageBox4.Image = nextFrame_cam4;
}
}
}
}
Plz read this post to know what is memory leakage.
http://www.dotnetfunda.com/articles/show/625/best-practices-no-5-detecting-net-application-memory-leaks
Your error indicate that you are creating many instances of a class or any recursive call of function.
Use Using() to create object of Emgu so that as soon as your code terminate the managed or unmanaged memory will be disposed.
public partial class attendance : Form
{
private Capture cam1, cam2, cam3, cam4;
private CascadeClassifier _cascadeClassifier;
private RecognizerEngine _recognizerEngine;
private String _trainerDataPath = "\\traineddata_v2";
private readonly String dbpath = "Server=localhost;Database=faculty_attendance_system;Uid=root;Pwd=root;";
MySqlConnection conn;
public attendance()
{
InitializeComponent();
conn = new MySqlConnection("Server=localhost;Database=faculty_attendance_system;Uid=root;Pwd=root;");
}
private void btn_home_Click(object sender, EventArgs e)
{
this.Close();
}
private void attendance_Load(object sender, EventArgs e)
{
time_now.Start();
lbl_date.Text = DateTime.Now.ToString("");
_recognizerEngine = new RecognizerEngine(dbpath, _trainerDataPath);
_cascadeClassifier = new CascadeClassifier(Application.StartupPath + "/haarcascade_frontalface_default.xml");
cam1 = new Capture(0);
cam2 = new Capture(1);
cam3 = new Capture(3);
cam4 = new Capture(4);
Application.Idle += new EventHandler(ProcessFrame);
}
private void ProcessFrame(Object sender, EventArgs args)
{
using (Image<Bgr, byte> nextFrame_cam1 = cam1.QueryFrame().ToImage<Bgr, Byte>())
{
if (nextFrame_cam1 != null)
{
Image<Gray, byte> grayframe = nextFrame_cam1.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam1.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam1.Bitmap));
}
imageBox1.Image = nextFrame_cam1;
}
}
using (Image<Bgr, byte> nextFrame_cam2 = cam2.QueryFrame().ToImage<Bgr, Byte>())
{
if (nextFrame_cam2 != null)
{
Image<Gray, byte> grayframe = nextFrame_cam2.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam2.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam2.Bitmap));
}
imageBox2.Image = nextFrame_cam2;
}
}
using (Image<Bgr, byte> nextFrame_cam3 = cam3.QueryFrame().ToImage<Bgr, Byte>())
{
if (nextFrame_cam3 != null)
{
Image<Gray, byte> grayframe = nextFrame_cam3.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam3.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam3.Bitmap));
}
imageBox3.Image = nextFrame_cam3;
}
}
using (Image<Bgr, byte> nextFrame_cam4 = cam4.QueryFrame().ToImage<Bgr, Byte>())
{
if (nextFrame_cam4 != null)
{
Image<Gray, byte> grayframe = nextFrame_cam4.Convert<Gray, byte>();
var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.5, 10, Size.Empty, Size.Empty);
foreach (var face in faces)
{
nextFrame_cam4.Draw(face, new Bgr(Color.Green), 3);
var predictedUserId = _recognizerEngine.RecognizeUser(new Image<Gray, byte>(nextFrame_cam4.Bitmap));
}
imageBox4.Image = nextFrame_cam4;
}
}
}
}
Plz fowllow this document for standard way to work with EMGU.CV for face recongnization.
http://www.emgu.com/wiki/index.php/Face_detection
The function DetectAndRecognizeFaces is to detect a face and pass the detected image to the Recognize method which returns a name of recognized face based on the label.
LoadTrainingSet function is to fetch data from SQL database and LoadTrainingData method is to train the eigenfacerecognizer.
The problem is that the predict function never returns -1 for Unknown facees and always returns match if the detected face is not present in the database.
The Code:
private void DetectAndRecognizeFaces()
{
Image<Gray, byte> grayframe = ImageFrame.Convert<Gray, byte>();
//Assign user-defined Values to parameter variables:
minNeighbors = int.Parse(comboBoxMinNeigh.Text); // the 3rd parameter
windowsSize = int.Parse(textBoxWinSize.Text); // the 5th parameter
scaleIncreaseRate = Double.Parse(comboBoxScIncRte.Text); //the 2nd parameter
//detect faces from the gray-scale image and store into an array of type 'var',i.e 'MCvAvgComp[]'
var faces = haar.DetectMultiScale(grayframe, scaleIncreaseRate, minNeighbors, Size.Empty); //the actual face detection happens here
MessageBox.Show("Total Faces Detected: " + faces.Length.ToString());
Bitmap BmpInput = grayframe.ToBitmap();
Bitmap ExtractedFace; //empty
Graphics grp;
//MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
faceRecognizer.Load(recognizeFilePath);
foreach (var face in faces)
{
t = t + 1;
result = ImageFrame.Copy(face).Convert<Gray, byte>().Resize(100, 100, Inter.Cubic);
//set the size of the empty box(ExtractedFace) which will later contain the detected face
ExtractedFace = new Bitmap(face.Width, face.Height);
//assign the empty box to graphics for painting
grp = Graphics.FromImage(ExtractedFace);
//graphics fills the empty box with exact pixels of the face to be extracted from input image
grp.DrawImage(BmpInput, 0, 0, face, GraphicsUnit.Pixel);
string name = Recognise(result);
if (name == "Unknown")
{
ImageFrame.Draw(face, new Bgr(Color.Red), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 0, 255), 1, LineType.EightConnected, bottomLeftOrigin);
}
else
{
ImageFrame.Draw(face, new Bgr(Color.Green), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 255, 0), 1, LineType.EightConnected, bottomLeftOrigin);
}
CamImageBox.Image = ImageFrame;
}
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
if (_IsTrained)
{
faceRecognizer.Load(recognizeFilePath);
FaceRecognizer.PredictionResult ER = faceRecognizer.Predict(Input_image);
if (ER.Label == -1)
{
Eigen_Label = "Unknown";
Eigen_Distance = 0;
return Eigen_Label;
}
else
{
Eigen_Label = Names_List[ER.Label];
Eigen_Distance = (float)ER.Distance;
if (Eigen_Thresh > -1) Eigen_threshold = Eigen_Thresh;
//Only use the post threshold rule if we are using an Eigen Recognizer
//since Fisher and LBHP threshold set during the constructor will work correctly
switch (Recognizer_Type)
{
case ("EMGU.CV.EigenFaceRecognizer"):
if (Eigen_Distance > Eigen_threshold) return Eigen_Label;
else return "Unknown";
case ("EMGU.CV.LBPHFaceRecognizer"):
case ("EMGU.CV.FisherFaceRecognizer"):
default:
return Eigen_Label; //the threshold set in training controls unknowns
}
}
}
else return "";
}
private void LoadTrainingSet()
{
Bitmap bmpImage;
for (int i = 0; i < totalRows; i++)
{
byte[] fetchedBytes = (byte[])dataTable.Rows[i]["FaceImage"];
MemoryStream stream = new MemoryStream(fetchedBytes);
//stream.Write(fetchedBytes, 0, fetchedBytes.Length);
bmpImage = new Bitmap(stream);
trainingImages.Add(new Emgu.CV.Image<Gray, Byte>(bmpImage).Resize(100, 100, Inter.Cubic));
//string faceName = (string)dataTable.Rows[i]["FaceName"];
int faceName = (int)dataTable.Rows[i]["FaceID"];
NameLabels.Add(faceName);
NameLable = (string)dataTable.Rows[i]["FaceName"];
Names_List.Add(NameLable);
//ContTrain = NameLabels[i];
}
LoadTrainedData();
}
public void LoadTrainedData()
{
if (trainingImages.ToArray().Length != 0)
{
var faceImages = new Image<Gray, byte>[trainingImages.Count()];
var facesIDs = new int[NameLabels.Count()];
//var facesNames = new string[Names_List.Count()];
//int[] faceLabels = new int[NameLabels.Count()];
//MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
for (int i = 0; i < trainingImages.ToArray().Length; i++)
{
faceImages[i] = trainingImages[i];
facesIDs[i] = NameLabels[i];
}
try
{
faceRecognizer.Train(faceImages, facesIDs);
faceRecognizer.Save(recognizeFilePath);
_IsTrained = true;
}
catch (Exception error)
{
MessageBox.Show(error.ToString());
}
}
}
I've got the message above, while I'm trying to run my algorithm on more than 26 frames.
My program is in C#, with my experience with OpenCV in java(Android) I know that I need to release the matrix, do I need to do it here also?
this is the part with the problem:
private void toolStripButton2_Click(object sender, EventArgs e)
{
for (int i = 0; i < FRAME_SIZE; i++)
{
ColorImage = _Capture.QueryFrame();
if (ColorImage != null)
{
GrayImage = ColorImage.Convert<Gray, float>();
toolStripProgressBar1.Value = i;
if (i == 0)
{
Max_G2 = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
mainimage = new Image<Bgr, byte>(GrayImage.Width, GrayImage.Height);
}
FindMax(GrayImage.Copy());
}
else
{
toolStripStatusLabel1.Text = "The video is too short!";
break;
}
}
}
private void FindMax(Image<Gray, float> CurrGray)
{
Image<Gray, float> TempImg = new Image<Gray, float>(CurrGray.Width, CurrGray.Height);
Matrix<float> kernel1 = new Matrix<float>(new float[1, 2] { { -1, 1 } });
Matrix<float> kernel2 = new Matrix<float>(new float[2, 1] { { -1 }, { 1 } });
Point anchor = new Point(0, 0);
CvInvoke.cvFilter2D(CurrGray, TempImg, kernel1, anchor);
CvInvoke.cvFilter2D(CurrGray, CurrGray, kernel2, anchor);
TempImg = TempImg.Pow(2);
CurrGray = CurrGray.Pow(2);
CurrGray = CurrGray.Add(TempImg);
CurrGray = CurrGray.Pow(0.5);
Max_G2._Max(CurrGray);
}
1 more thing, I already tried to dispose all the matrix and images, but it doesn't work for me.
What do I miss here?
Thanks!
EDIT 1: ( Code with dispose)
private void toolStripButton2_Click(object sender, EventArgs e)
{
for (int i = 0; i < FRAME_SIZE; i++)
{
ColorImage = _Capture.QueryFrame();
if (ColorImage != null)
{
GrayImage = ColorImage.Convert<Gray, float>();
toolStripProgressBar1.Value = i;
if (i == 0)
{
Max_G2 = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
TempImg = new Image<Gray, float>(GrayImage.Width, GrayImage.Height);
mainimage = new Image<Bgr, byte>(GrayImage.Width, GrayImage.Height);
}
FindMax(GrayImage);
ColorImage.Dispose();
ColorImage = null;
GrayImage.Dispose();
GrayImage = null;
Thread.Sleep(100);
}
else
{
toolStripStatusLabel1.Text = "The video is too short!";
break;
}
}
}
private void FindMax(Image<Gray, float> CurrGray)
{
Matrix<float> kernel1 = new Matrix<float>(new float[1, 2] { { -1, 1 } });
Matrix<float> kernel2 = new Matrix<float>(new float[2, 1] { { -1 }, { 1 } });
Point anchor = new Point(0, 0);
CvInvoke.cvFilter2D(CurrGray, TempImg, kernel1, anchor);
CvInvoke.cvFilter2D(CurrGray, CurrGray, kernel2, anchor);
TempImg.Pow(2);
CurrGray.Pow(2);
CurrGray.Add(TempImg);
CurrGray.Pow(0.5);
Max_G2._Max(CurrGray);
CurrGray.Dispose();
CurrGray = null;
}
HI Im using the code for face detection. but not im going to continue with face recognition. But im get stack here where, how for the next step. However, im using the emgu version 2.2
if (faces.Length > 0)
{
foreach (var face in faces)
{
ImageFrame.Draw(face.rect, new Bgr(Color.Green), 2);
//Extract face
ExtractedFace = new Bitmap(face.rect.Width, face.rect.Height);
FaceConvas = Graphics.FromImage(ExtractedFace);
FaceConvas.DrawImage(GrayBmpInput, 0, 0, face.rect, GraphicsUnit.Pixel);
ExtcFacesArr[faceNo] = ExtractedFace;
faceNo++;
}
faceNo = 0;
picExtcFaces.Image = ExtcFacesArr[faceNo];
CamImageBox.Image = ImageFrame;
}
}
Where should i continue with the face recognition and do have any good reference online in C# code?
You code is almost correct, but i think you do not have idea what to do next.I am doing face recognition in one of my app for showing a mask on face.I am doing like this.
Image mask = Image.FromFile("mask.png");
public Bitmap getFacedBitmap(Bitmap bbb)
{
using (Image<Bgr, byte> nextFrame = new Image<Bgr, byte>(bbb))
{
if (nextFrame != null)
{
// there's only one channel (greyscale), hence the zero index
//var faces = nextFrame.DetectHaarCascade(haar)[0];
Image<Gray, byte> grayframe = nextFrame.Convert<Gray, byte>();
//Image<Gray, Byte> gray = nextFrame.Convert<Gray, Byte>();
var faces = grayframe.DetectHaarCascade(haar, 1.3, 2, HAAR_DETECTION_TYPE.SCALE_IMAGE, new Size(nextFrame.Width / 8, nextFrame.Height / 8))[0];
if (faces.Length > 0)
{
foreach (var face in faces)
{
//ImageFrame.Draw(face.rect, new Bgr(Color.Green), 2);
//
using(Graphics g = Graphics.FromImage(bbb))
{
g.DrawImage(mask,face.rect);
g.Save()
}
}
}
}
}
retun bbb;
}
I have the following, but I can't figure out how to find ALL the matches in a source image.
static void Main()
{
using (var template = Cv.LoadImage(#"images\logo.png", LoadMode.GrayScale))
using (var source = Cv.LoadImage(#"images\manyLogos.png", LoadMode.GrayScale))
using (var sourceColour = Cv.LoadImage(#"images\manyLogos.png", LoadMode.Color))
{
var width = source.Width - template.Width + 1;
var height = source.Height - template.Height + 1;
using (var result = Cv.CreateImage(Cv.Size(width, height), BitDepth.F32, 1))
{
Cv.MatchTemplate(source, template, result, MatchTemplateMethod.SqDiff);
var THRESHOLD = 0.08D;
double minVal, maxVal;
CvPoint minLoc, maxLoc;
Cv.MinMaxLoc(result, out minVal, out maxVal, out minLoc, out maxLoc);
var outlineColor = (minVal > THRESHOLD) ? CvColor.Green : CvColor.Red;
Cv.Rectangle(sourceColour, Cv.Point(minLoc.X, minLoc.Y), Cv.Point(minLoc.X + template.Width, minLoc.Y + template.Height), outlineColor, 1, 0, 0);
}
using (var window = new CvWindow("Test"))
{
while (CvWindow.WaitKey(10) < 0)
{
window.Image = sourceColour;
}
}
}
}
I can outline the best match, just not all the matches. I need to get all the matches somehow.
Using matchTemplate method, your output image will give you pixels values which represents how well your template is matched at this specific location. In you case, the lower the value, the best the match, since you used MatchTemplateMethod.SqDiff.
You problem is that when you use the minMaxLoc function, you get what you asks for, which is the best match in this case, the min).
All matches are the pixels whose value are under the threshold that you set up.
Since I'm not used to csharp, here is how it would go in C++, you can do the translation:
// after your call to MatchTemplate
float threshold = 0.08;
cv::Mat thresholdedImage;
cv::threshold(result, thresholdedImage, threshold, 255, CV_THRESH_BINARY);
// the above will set pixels to 0 in thresholdedImage if their value in result is lower than the threshold, to 255 if it is larger.
// in C++ it could also be written cv::Mat thresholdedImage = result < threshold;
// Now loop over pixels of thresholdedImage, and draw your matches
for (int r = 0; r < thresholdedImage.rows; ++r) {
for (int c = 0; c < thresholdedImage.cols; ++c) {
if (!thresholdedImage.at<unsigned char>(r, c)) // = thresholdedImage(r,c) == 0
cv::circle(sourceColor, cv::Point(c, r), template.cols/2, CV_RGB(0,255,0), 1);
}
}
Translating from C++ and using OpenCvSharp wrapper, the above code, replacing minMaxLoc lines, worked for me:
double threshold=0.9
var thresholdImage=Cv.CreateImage(newImageSize, BitDepth.F32,1);
Cv.Threshold(result, thresholdImage, threshold, 255, ThresholdType.Binary);
for (int r = 0; r < thresholdImage.GetSize().Height; r++)
{
for (int c = 0; c < thresholdImage.GetSize().Width; c++)
{
if (thresholdImage.GetRow(r)[c].Val0 > 0)
{
Cv.Rectangle(soruceColour, Cv.Point(c, r), Cv.Point(c + template.Width, r + template.Height), CvColor.Red, 1, 0, 0);
}
}
}
here is the solution using Min_Max and Match_Template methods. hope it will help.
public void multipleTemplateMatch(string SourceImages, string tempImage)
{
Image<Bgr, byte> image_source = new Image<Bgr, byte>(SourceImages);
Image<Bgr, byte> image_partial1 = new Image<Bgr, byte>(tempImage);
double threshold = 0.9;
ImageFinder imageFinder = new ImageFinder(image_source, image_partial1, threshold);
imageFinder.FindThenShow();
}
and here is the class which will help.
class ImageFinder
{
private List<Rectangle> rectangles;
public Image<Bgr, byte> BaseImage { get; set; }
public Image<Bgr, byte> SubImage { get; set; }
public Image<Bgr, byte> ResultImage { get; set; }
public double Threashold { get; set; }
public List<Rectangle> Rectangles
{
get { return rectangles; }
}
public ImageFinder(Image<Bgr, byte> baseImage, Image<Bgr, byte> subImage, double threashold)
{
rectangles = new List<Rectangle>();
BaseImage = baseImage;
SubImage = subImage;
Threashold = threashold;
}
public void FindThenShow()
{
FindImage();
DrawRectanglesOnImage();
ShowImage();
}
public void DrawRectanglesOnImage()
{
ResultImage = BaseImage.Copy();
foreach (var rectangle in this.rectangles)
{
ResultImage.Draw(rectangle, new Bgr(Color.Blue), 1);
}
}
public void FindImage()
{
rectangles = new List<Rectangle>();
using (Image<Bgr, byte> imgSrc = BaseImage.Copy())
{
while (true)
{
using (Image<Gray, float> result = imgSrc.MatchTemplate(SubImage, TemplateMatchingType.CcoeffNormed))
{
double[] minValues, maxValues;
Point[] minLocations, maxLocations;
result.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);
if (maxValues[0] > Threashold)
{
Rectangle match = new Rectangle(maxLocations[0], SubImage.Size);
imgSrc.Draw(match, new Bgr(Color.Blue), -1);
rectangles.Add(match);
}
else
{
break;
}
}
}
}
}
public void ShowImage()
{
Random rNo = new Random();
string outFilename = "matched Templates" + rNo.Next();
CvInvoke.Imshow(outFilename, ResultImage);
}
}
if you find this helpful please vote is as useful.
thanks