c# emgu display largest contour

I am trying to obtain the largest bounding rectangle in a Canny image. I think the detection works, but I don't know how to visualize the rectangle. Ultimately I want the top-left and bottom-right coordinates of the rectangle, but I assume that once I can get the image back into my method I can also read those values from it. I am new to this, so I am learning by doing, reading and asking. If I just need to look into some theory, please point me to it; I am willing to learn.
When I run this code my console output is:
Emgu.CV.Util.VectorOfPoint
The code is this:
private void CannyFrame(object sender, EventArgs e)
{
    if (_capture != null && _capture.Ptr != IntPtr.Zero)
    {
        _capture.Retrieve(imgOriginal, 0);
        CvInvoke.CvtColor(imgOriginal, imgHSV, ColorConversion.Bgr2Hsv); // Convert the captured frame from BGR to HSV
        CvInvoke.InRange(imgHSV, new ScalarArray(new MCvScalar(iLowH, iLowS, iLowV)), new ScalarArray(new MCvScalar(iHighH, iHighS, iHighV)), imgThres);
        CvInvoke.Canny(imgThres, imgCanny, 100, 200, 3);
        Form1.FindLargestContour(imgCanny, imgContour);
        pictureBox3.Image = imgContour.Bitmap;
    }
}
public static VectorOfPoint FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
{
    int largest_contour_index = 0;
    double largest_area = 0;
    VectorOfPoint largestContour;
    using (Mat hierachy = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(imgCanny, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
        for (int i = 0; i < contours.Size; i++)
        {
            MCvScalar color = new MCvScalar(0, 0, 255);
            double a = CvInvoke.ContourArea(contours[i], false); // Find the area of the contour
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = i; // Store the index of the largest contour
            }
            CvInvoke.DrawContours(imgContour, contours, largest_contour_index, new MCvScalar(255, 0, 0));
        }
        CvInvoke.DrawContours(imgContour, contours, largest_contour_index, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierachy);
        largestContour = new VectorOfPoint(contours[largest_contour_index].ToArray());
    }
    Console.WriteLine(largestContour);
    return largestContour;
}

I have worked around my problem by changing
public static VectorOfPoint FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
into
private static void FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
and deleting
return largestContour;
inside the method. Not sure whether that is the way to go, but it got me a little further with my code.
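For what it's worth, a minimal sketch of how the bounding rectangle (and its top-left / bottom-right corners) could be read off the contour returned by the original VectorOfPoint version of FindLargestContour above. It only reuses the Emgu CV calls already present in the post; the helper name and the green color are my own choices:

// Hypothetical helper (not from the original post): draws the axis-aligned
// bounding rectangle of the largest contour and returns it, so the caller can
// read box.Location (top-left) and box.Right / box.Bottom (bottom-right).
private static Rectangle DrawLargestBoundingBox(IInputOutputArray imgCanny, IInputOutputArray imgContour)
{
    VectorOfPoint largestContour = FindLargestContour(imgCanny, imgContour);

    // Smallest upright rectangle that contains every point of the contour.
    Rectangle box = CvInvoke.BoundingRectangle(largestContour);

    // Draw it on the contour image so it shows up in pictureBox3.
    CvInvoke.Rectangle(imgContour, box, new MCvScalar(0, 255, 0), 2);

    Console.WriteLine("Top-left: " + box.Location + ", bottom-right: (" + box.Right + ", " + box.Bottom + ")");
    return box;
}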

Related

How to sort contours in EMGUCV/C# from left to right?

How do we sort contours in EMGUCV 4.5 from left to right? There is no sorting function defined in the library.
Has anyone tried sorting contours in the new version of EMGUCV?
I have extracted the bounding rectangles and sorted them using LINQ. See below:
static void Sort(Mat img, VectorOfVectorOfPoint contours)
{
    List<Rectangle> rects = new List<Rectangle>();
    for (int i = 0; i < contours.Size; i++)
    {
        var rect = CvInvoke.BoundingRectangle(contours[i]);
        rects.Add(rect);
        CvInvoke.Rectangle(img, rect, new MCvScalar(0, 0, 0), 3);
        CvInvoke.Imshow("img", img);
        CvInvoke.WaitKey();
    }
    var rectList = rects.Distinct().OrderBy(r => r.Left).ThenBy(r => r.Top).ToList();
}
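The snippet above only sorts the rectangles; if the contours themselves are needed in left-to-right order, a small hedged variation is to pair each contour with its bounding rectangle and sort the pairs (plain LINQ, same Emgu CV calls as above):

// Sketch: return contours ordered left-to-right, then top-to-bottom,
// by sorting (contour, boundingRect) pairs on the rectangle.
static List<VectorOfPoint> SortContoursLeftToRight(VectorOfVectorOfPoint contours)
{
    var pairs = new List<Tuple<VectorOfPoint, Rectangle>>();
    for (int i = 0; i < contours.Size; i++)
    {
        VectorOfPoint contour = contours[i];
        pairs.Add(Tuple.Create(contour, CvInvoke.BoundingRectangle(contour)));
    }
    return pairs.OrderBy(p => p.Item2.Left)
                .ThenBy(p => p.Item2.Top)
                .Select(p => p.Item1)
                .ToList();
}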

OpenCV FindContours does not find the correct rectangle

Using CvInvoke.Canny and CvInvoke.FindContours I'm trying to find the rectangle containing the item name (Schematic: Maple Desk). This rectangle is shown in the image below:
I'm able to find a lot of rectangles but I'm not able to get this one. Tried a lot of different thresholds for Canny but to no effect. The following image shows all rectangles I currently get:
Any ideas how to tackle this? Do I need to use other thresholds or another approach? I already experimented with grayscale and blurring, but that didn't give a better result. I added my current source below, and the original image I'm using is this:
public Mat ProcessImage(Mat img)
{
    UMat filter = new UMat();
    UMat cannyEdges = new UMat();
    Mat rectangleImage = new Mat(img.Size, DepthType.Cv8U, 3);

    //Convert the image to grayscale and filter out the noise
    //CvInvoke.CvtColor(img, filter, ColorConversion.Bgr2Gray);
    //Remove noise
    //CvInvoke.GaussianBlur(filter, filter, new System.Drawing.Size(3, 3), 1);

    // Canny and edge detection
    double cannyThreshold = 1.0; //180.0
    double cannyThresholdLinking = 1.0; //120.0
    //CvInvoke.Canny(filter, cannyEdges, cannyThreshold, cannyThresholdLinking);
    CvInvoke.Canny(img, cannyEdges, cannyThreshold, cannyThresholdLinking);

    // Find rectangles
    List<RotatedRect> rectangleList = new List<RotatedRect>();
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        int count = contours.Size;
        for (int i = 0; i < count; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                // Only consider contours with area greater than 250
                if (CvInvoke.ContourArea(approxContour, false) > 250)
                {
                    // The contour has 4 vertices.
                    if (approxContour.Size == 4)
                    {
                        // Determine if all the angles in the contour are within [80, 100] degrees
                        bool isRectangle = true;
                        System.Drawing.Point[] pts = approxContour.ToArray();
                        LineSegment2D[] edges = Emgu.CV.PointCollection.PolyLine(pts, true);
                        for (int j = 0; j < edges.Length; j++)
                        {
                            double angle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                            if (angle < 80 || angle > 100)
                            {
                                isRectangle = false;
                                break;
                            }
                        }
                        if (isRectangle) rectangleList.Add(CvInvoke.MinAreaRect(approxContour));
                    }
                }
            }
        }
    }

    // Draw rectangles
    foreach (RotatedRect rectangle in rectangleList)
    {
        CvInvoke.Polylines(rectangleImage, Array.ConvertAll(rectangle.GetVertices(), System.Drawing.Point.Round), true,
            new Bgr(Color.DarkOrange).MCvScalar, 2);
    }

    //Drawing a light gray frame around the image
    CvInvoke.Rectangle(rectangleImage,
        new Rectangle(System.Drawing.Point.Empty,
            new System.Drawing.Size(rectangleImage.Width - 1, rectangleImage.Height - 1)),
        new MCvScalar(120, 120, 120));

    //Draw the labels
    CvInvoke.PutText(rectangleImage, "Rectangles", new System.Drawing.Point(20, 20),
        FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));

    Mat result = new Mat();
    CvInvoke.VConcat(new Mat[] { img, rectangleImage }, result);
    return result;
}
Edit 1:
After some more fine-tuning with the following thresholds for Canny:
cannyThreshold = 100
cannyThresholdLinking = 400
and using a minimum size of 10000 for all contour areas, I can get the following result:
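For completeness, a minimal sketch of the lines that change inside the ProcessImage method above; the numbers are the ones quoted in this edit, everything else is assumed to stay as posted:

// Canny thresholds from Edit 1 (the method above used 1.0 for both):
double cannyThreshold = 100.0;
double cannyThresholdLinking = 400.0;
CvInvoke.Canny(img, cannyEdges, cannyThreshold, cannyThresholdLinking);

// ...and the minimum contour area raised from 250 to 10000:
if (CvInvoke.ContourArea(approxContour, false) > 10000)
{
    // same 4-vertex and angle checks as in the original method
}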
Edit 2:
For those interested: I solved it using the current detection, no changes were needed. I used the two detected rectangles in the screenshot above to work out the location of the missing rectangle containing the item name.
The result can be found here: https://github.com/josdemmers/NewWorldCompanion

How to overlay two PictureBoxes in C# so that the PictureBox in the background can be seen

Good day,
I don't know if my title is correct; sorry for my bad English.
How can I overlay two PictureBoxes in C# in order to achieve the image below, and change the opacity of the upper PictureBox at runtime?
What I need to achieve is something like this: I have two images and I need to overlay them.
First image: (image not shown)
The second image has the text "Another Text on image", and the location of that text is lower than the text location in the first image.
(I can't upload more than two images because I don't have 10 reputation yet.)
I need to do it like in the image below, but using two PictureBoxes, with the ability to change the opacity so that the second PictureBox below the first one can be seen.
The output of the two images: (image not shown)
I created the output image using Java. I know that I can run the jar file from C#, but the user needs to change the opacity at runtime, so how can I do this?
This is the Java code I used:
BufferedImage biInner = ImageIO.read(inner);
BufferedImage biOutter = ImageIO.read(outter);
System.out.println(biInner);
System.out.println(biOutter);
Graphics2D g = biOutter.createGraphics();
g.setComposite(AlphaComposite.getInstance(AlphaComposite.SRC_OVER, 0.5f));
int x = (biOutter.getWidth() - biInner.getWidth()) / 2;
int y = (biOutter.getHeight() - biInner.getHeight()) / 2;
System.out.println(x + "x" + y);
g.drawImage(biInner, x, y, null);
g.dispose();
ImageIO.write(biOutter, "PNG", new File(output));
I hope my question is understandable. Thank you.
Here you go, just a sample, but blueBox is transparent (0.5):
public sealed partial class Form1 : Form
{
    private readonly Bitmap m_BlueBox;
    private readonly Bitmap m_YellowBox;

    public Form1()
    {
        InitializeComponent();
        DoubleBuffered = true;
        m_YellowBox = CreateBox(Color.Yellow);
        m_BlueBox = CreateBox(Color.Blue);
        m_BlueBox = ChangeOpacity(m_BlueBox, 0.5f);
    }

    public static Bitmap ChangeOpacity(Image img, float opacityvalue)
    {
        var bmp = new Bitmap(img.Width, img.Height);
        using (var graphics = Graphics.FromImage(bmp))
        {
            var colormatrix = new ColorMatrix();
            colormatrix.Matrix33 = opacityvalue;
            var imgAttribute = new ImageAttributes();
            imgAttribute.SetColorMatrix(colormatrix, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
            graphics.DrawImage(img, new Rectangle(0, 0, bmp.Width, bmp.Height), 0, 0, img.Width, img.Height,
                GraphicsUnit.Pixel, imgAttribute);
        }
        return bmp;
    }

    private static Bitmap CreateBox(Color color)
    {
        var bmp = new Bitmap(200, 200);
        for (var x = 0; x < bmp.Width; x++)
        {
            for (var y = 0; y < bmp.Height; y++)
            {
                bmp.SetPixel(x, y, color);
            }
        }
        return bmp;
    }

    private void Form1_Paint(object sender, PaintEventArgs e)
    {
        e.Graphics.DrawImage(m_YellowBox, new Point(10, 10));
        e.Graphics.DrawImage(m_BlueBox, new Point(70, 70));
    }
}
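One detail the sample leaves implicit: Form1_Paint only runs if it is subscribed to the form's Paint event (the designer normally wires this up). If you hook it up by hand, a minimal sketch of the constructor would be:

public Form1()
{
    InitializeComponent();
    DoubleBuffered = true;
    m_YellowBox = CreateBox(Color.Yellow);
    m_BlueBox = ChangeOpacity(CreateBox(Color.Blue), 0.5f);

    // Without this subscription the overlay is never drawn.
    Paint += Form1_Paint;
}

To change the opacity at runtime, as the question asks, you would call ChangeOpacity again with the new value (which means m_BlueBox can no longer be readonly) and then call Invalidate() to force a repaint.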

Crop image out of two points (C#)

Update:
Updated code added.
However, for some reason it still doesn't work properly. If the coordinates are Bitmap coordinates, what could be the reason? The first code sample I put here doesn't work properly and the second gives me an OutOfMemoryException.
I've run into a problem trying to crop an image between two points. In my project I have a PictureBox (named AP), and the general idea is that the user clicks on two points and the program crops the image between these two corners. I've tried two methods, one with Bitmap.Crop and the other with Graphics.DrawImage, but both seemed to fail for the same reason and didn't really work (they cropped a much smaller portion of the image).
Code:
private void AP_Click(object sender, EventArgs e)
{
    // Setting the corners
    else if (mark_shape == 0)
    {
        var mouseEventArgs = e as MouseEventArgs;
        if (picture_corners_set == 0)
        {
            northEast = AP.PointToClient(new Point(mouseEventArgs.X, mouseEventArgs.Y));
            picture_corners_set = 1;
        }
        else if (picture_corners_set == 1)
        {
            southWest = AP.PointToClient(new Point(mouseEventArgs.X, mouseEventArgs.Y));
            Rectangle imageRectangle = new Rectangle(southWest.X, northEast.Y, (northEast.X - southWest.X), (southWest.Y - northEast.Y));
            var bmp = new Bitmap(imageRectangle.Width, imageRectangle.Height);
            using (var gr = Graphics.FromImage(bmp))
            {
                gr.DrawImage(AP.Image, 0, 0, imageRectangle, GraphicsUnit.Pixel);
            }
            AP.Image = bmp;
            enableAllButtons();
        }
    }
}
Since your cropped Bitmap image size is the same as the width/height of the user selection, I'm guessing you want that cropped image to be in the top/left of the new Bitmap and fill it. As it is, you're telling the DrawImage() method to draw that portion of the Bitmap in the same location, albeit in a smaller size Bitmap.
The correct way to do this is to draw the source rectangle image at (0, 0):
private Point pt1, pt2;

private void AP_Click(object sender, EventArgs e)
{
    // ... obviously other code here ...
    else if (mark_shape == 0) // Setting the corners
    {
        Point pt = AP.PointToClient(Cursor.Position);
        if (picture_corners_set == 0)
        {
            pt1 = new Point(pt.X, pt.Y);
            picture_corners_set = 1;
        }
        else if (picture_corners_set == 1)
        {
            pt2 = new Point(pt.X, pt.Y);
            picture_corners_set = 0;
            Rectangle imageRectangle = new Rectangle(new Point(Math.Min(pt1.X, pt2.X), Math.Min(pt1.Y, pt2.Y)), new Size(Math.Abs(pt2.X - pt1.X) + 1, Math.Abs(pt2.Y - pt1.Y) + 1));
            var bmp = new Bitmap(imageRectangle.Width, imageRectangle.Height);
            using (var gr = Graphics.FromImage(bmp))
            {
                gr.DrawImage(AP.Image, 0, 0, imageRectangle, GraphicsUnit.Pixel);
            }
            AP.Image = bmp;
            enableAllButtons();
        }
    }
}
There are several other overloads you could use to do this, but the one above makes it pretty clear that imageRectangle is being drawn at (0, 0).
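As a side note, a minimal alternative sketch that crops in a single call with Bitmap.Clone; it assumes AP.Image is (or can be cast to) a Bitmap, and it clamps the rectangle to the image bounds, since Clone throws an OutOfMemoryException when the rectangle falls outside the bitmap:

// Alternative crop using Bitmap.Clone instead of Graphics.DrawImage.
Bitmap source = (Bitmap)AP.Image;
// Clamp the selection to the bitmap, otherwise Clone throws OutOfMemoryException.
Rectangle crop = Rectangle.Intersect(imageRectangle, new Rectangle(Point.Empty, source.Size));
AP.Image = source.Clone(crop, source.PixelFormat);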

How can I find a template image in another image? Kinect and AForge preferred

I copied the AForge sample from here:
http://www.aforgenet.com/framework/features/template_matching.html
and hoped it would work with two Bitmaps as sources, as in the following code:
Bitmap findTemplate(Bitmap sourceImage, Bitmap template)
{
    // create template matching algorithm's instance
    // (set similarity threshold to x.y%, 1.0f = 100%)
    ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching(0.4f);
    // find all matchings with specified above similarity
    TemplateMatch[] matchings = tm.ProcessImage(sourceImage, template); // <-- "Unsupported pixel format of the source or template image." as error message
    // highlight found matchings
    BitmapData data = sourceImage.LockBits(
        new Rectangle(0, 0, sourceImage.Width, sourceImage.Height),
        ImageLockMode.ReadWrite, sourceImage.PixelFormat);
    foreach (TemplateMatch m in matchings)
    {
        AForge.Imaging.Drawing.Rectangle(data, m.Rectangle, System.Drawing.Color.White);
        // do something else with matching
    }
    sourceImage.UnlockBits(data);
    return sourceImage;
}
But the call to tm.ProcessImage(...) throws the error mentioned above.
The template is generated this way:
Bitmap templatebitmap = (Bitmap)AForge.Imaging.Image.FromFile("template.jpg");
The source is grabbed from the Kinect camera, where the PlanarImage is converted to a Bitmap (method copied from somewhere, but it has been working up to now):
Bitmap PImageToBitmap(PlanarImage PImage)
{
    Bitmap bmap = new Bitmap(
        PImage.Width,
        PImage.Height,
        System.Drawing.Imaging.PixelFormat.Format32bppRgb);
    BitmapData bmapdata = bmap.LockBits(
        new Rectangle(0, 0, PImage.Width, PImage.Height),
        ImageLockMode.WriteOnly,
        bmap.PixelFormat);
    IntPtr ptr = bmapdata.Scan0;
    Marshal.Copy(PImage.Bits,
        0,
        ptr,
        PImage.Width * PImage.BytesPerPixel * PImage.Height);
    bmap.UnlockBits(bmapdata);
    return bmap;
}
So, is anybody able to tell me where my mistake might be?
Or does anyone know a better way to match a template with the Kinect?
The overall job is to detect a known object with the Kinect, in my case a rubber duck.
Thank you in advance.
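For what it's worth, a likely cause of that error: AForge's ExhaustiveTemplateMatching only accepts 8 bpp grayscale or 24 bpp color bitmaps, while PImageToBitmap above creates a Format32bppRgb image. A minimal sketch of converting both inputs before matching, using AForge.Imaging.Image.Clone (the wrapper name is mine):

// Hypothetical wrapper: convert both bitmaps to 24 bpp RGB before matching,
// since ExhaustiveTemplateMatching rejects 32 bpp input.
Bitmap FindTemplate24bpp(Bitmap sourceImage, Bitmap template)
{
    Bitmap source24 = AForge.Imaging.Image.Clone(sourceImage, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
    Bitmap template24 = AForge.Imaging.Image.Clone(template, System.Drawing.Imaging.PixelFormat.Format24bppRgb);

    return findTemplate(source24, template24); // the method from the question
}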
Here's a solution using AForge.
It is slow (it takes around 5 seconds), but it works.
As usual, you need to bring in the AForge framework: download and install it,
add the AForge using directives,
and copy-paste the following to make it work:
System.Drawing.Bitmap sourceImage = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\1.jpg");
System.Drawing.Bitmap template = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\2.jpg");
// create template matching algorithm's instance
// (set similarity threshold to 92.1%)
ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching(0.921f);
// find all matchings with specified above similarity
TemplateMatch[] matchings = tm.ProcessImage(sourceImage, template);
// highlight found matchings
BitmapData data = sourceImage.LockBits(
    new Rectangle(0, 0, sourceImage.Width, sourceImage.Height),
    ImageLockMode.ReadWrite, sourceImage.PixelFormat);
foreach (TemplateMatch m in matchings)
{
    Drawing.Rectangle(data, m.Rectangle, Color.White);
    MessageBox.Show(m.Rectangle.Location.ToString());
    // do something else with matching
}
sourceImage.UnlockBits(data);
So, I just implemented it myself. But it is very slow, so if anyone has an idea for improving it, feel free to criticize my code:
public class Position
{
    public int bestRow { get; set; }
    public int bestCol { get; set; }
    public double bestSAD { get; set; }

    public Position(int row, int col, double sad)
    {
        bestRow = row;
        bestCol = col;
        bestSAD = sad;
    }
}

Position element_position = new Position(0, 0, double.PositiveInfinity);

Position ownSearch(Bitmap search, Bitmap template)
{
    Position position = new Position(0, 0, double.PositiveInfinity);
    double minSAD = double.PositiveInfinity;
    // loop through the search image
    for (int x = 0; x <= search.PhysicalDimension.Width - template.PhysicalDimension.Width; x++)
    {
        for (int y = 0; y <= search.PhysicalDimension.Height - template.PhysicalDimension.Height; y++)
        {
            position_label2.Content = "Running: X=" + x + " Y=" + y;
            double SAD = 0.0;
            // loop through the template image
            for (int i = 0; i < template.PhysicalDimension.Width; i++)
            {
                for (int j = 0; j < template.PhysicalDimension.Height; j++)
                {
                    int r = Math.Abs(search.GetPixel(x + i, y + j).R - template.GetPixel(i, j).R);
                    int g = Math.Abs(search.GetPixel(x + i, y + j).G - template.GetPixel(i, j).G);
                    int b = Math.Abs(search.GetPixel(x + i, y + j).B - template.GetPixel(i, j).B);
                    int a = template.GetPixel(i, j).A;
                    SAD = SAD + ((r + g + b) * a / 255);
                }
            }
            // save the best found position
            if (minSAD > SAD)
            {
                minSAD = SAD;
                // give me VALUE_MAX
                position.bestRow = x;
                position.bestCol = y;
                position.bestSAD = SAD;
            }
        }
    }
    return position;
}
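Regarding the speed: the dominant cost in the loop above is Bitmap.GetPixel, which is very slow per call. A common way to speed this up is to copy both bitmaps into byte arrays once with LockBits and index those instead. A minimal sketch under the assumption that both bitmaps can be read as 24 bpp RGB (it reuses the Position class above and ignores the alpha weighting of the original):

// Requires using System.Drawing, System.Drawing.Imaging and System.Runtime.InteropServices.
// Copy a bitmap's pixel data into a managed byte array (BGR order, 3 bytes per pixel).
static byte[] ToByteArray(Bitmap bmp, out int stride)
{
    var rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
    BitmapData data = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
    stride = data.Stride;
    byte[] bytes = new byte[stride * bmp.Height];
    Marshal.Copy(data.Scan0, bytes, 0, bytes.Length);
    bmp.UnlockBits(data);
    return bytes;
}

// Same SAD search as ownSearch, but over raw byte arrays instead of GetPixel calls.
static Position FastSearch(Bitmap search, Bitmap template)
{
    int sStride, tStride;
    byte[] s = ToByteArray(search, out sStride);
    byte[] t = ToByteArray(template, out tStride);

    var best = new Position(0, 0, double.PositiveInfinity);
    for (int y = 0; y <= search.Height - template.Height; y++)
    {
        for (int x = 0; x <= search.Width - template.Width; x++)
        {
            double sad = 0;
            for (int j = 0; j < template.Height; j++)
            {
                int sRow = (y + j) * sStride + x * 3;
                int tRow = j * tStride;
                for (int i = 0; i < template.Width; i++)
                {
                    sad += Math.Abs(s[sRow + i * 3]     - t[tRow + i * 3]);     // B
                    sad += Math.Abs(s[sRow + i * 3 + 1] - t[tRow + i * 3 + 1]); // G
                    sad += Math.Abs(s[sRow + i * 3 + 2] - t[tRow + i * 3 + 2]); // R
                }
            }
            if (sad < best.bestSAD)
            {
                best.bestRow = x; // same row/col naming convention as ownSearch
                best.bestCol = y;
                best.bestSAD = sad;
            }
        }
    }
    return best;
}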
