I am trying to detect rectangles in EmguCV in C#. I was playing around with the following code that I got off the internet. I am new to this, so I hope someone can help me out:
public class ShapeDetection
{
public Image<Bgr, Byte> img;
public PictureBox Picture;
public PictureBox Result;
public double dCannyThres;
private UMat uimage;
private UMat cannyEdges;
private List<Triangle2DF> triangleList;
private List<RotatedRect> boxList;
private Image<Bgr, Byte> triangleRectImage;
public ShapeDetection(PictureBox pic, string filepath, PictureBox results)
{
Picture = pic;
Result = results;
img = new Image<Bgr, Byte>(filepath);
triangleList = new List<Triangle2DF>();
boxList = new List<RotatedRect>();
uimage = new UMat();
cannyEdges = new UMat();
dCannyThres = 180.0;
fnFindTriangleRect();
MessageBox.Show("done");
}
private void fnFindTriangleRect()
{
CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);
UMat pyrDown = new UMat();
CvInvoke.PyrDown(uimage, pyrDown);
CvInvoke.PyrUp(pyrDown, uimage);
triangleRectImage = img.CopyBlank();
using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
int count = contours.Size;
MessageBox.Show("count = " + count);
for (int i = 0; i < count; i++)
{
using (VectorOfPoint contour = contours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contour with area > 250
{
MessageBox.Show("approxContour.Size = " + approxContour.Size);
if (approxContour.Size == 3) //The contour has 3 vertices, is a triangle
{
Point[] pts = approxContour.ToArray();
triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
}
else if (approxContour.Size == 4) // The contour has 4 vertices
{
#region Determine if all the angles in the contours are within [80,100] degree
bool isRectangle = true;
Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double dAngle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
MessageBox.Show("" + dAngle);
if (dAngle < 80 || dAngle > 100)
{
isRectangle = false;
break;
}
}
#endregion
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
foreach (Triangle2DF triangle in triangleList)
{
triangleRectImage.Draw(triangle, new Bgr(Color.DarkBlue), 2);
}
foreach (RotatedRect box in boxList)
triangleRectImage.Draw(box, new Bgr(Color.Red), 2);
Result.Image = triangleRectImage.ToBitmap();
}
}
I am trying to detect the shapes in the following picture
However, this is the result:
As you can see, no shape was detected by the script, as the number of contours was zero. How can I modify the script so that I can detect these shapes?
I can't seem to find the Canny edge detection step
UMat cannyEdges = new UMat();
CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);
in your code. Here is the link to the EmguCV wiki page: http://www.emgu.com/wiki/index.php/Shape_(Triangle,_Rectangle,_Circle,_Line)_Detection_in_CSharp
Check there for the missing parts.
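As a sketch of the fix, using the field names from the question (uimage, cannyEdges, dCannyThres) and a linking-threshold value borrowed from the wiki sample:

// Hypothetical placement: right after the PyrUp call in fnFindTriangleRect,
// so that FindContours receives real edges instead of an empty UMat.
double dCannyThresLinking = 120.0; // assumed value, as in the wiki sample
CvInvoke.Canny(uimage, cannyEdges, dCannyThres, dCannyThresLinking);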
I started an OCR project a few weeks ago. I am stuck with an image skew problem; I have tried many different methods and nothing seems to work, so please help. :)
The skewed image is given below.
I want a final image such as the one given below.
I have already tried to deskew the image, but I was unable to get the final image.
public Image<Gray, byte> ImageDeskewOuter(Image<Gray, byte> img)
{
img = img.Resize(img.Height, img.Width, Inter.Linear);
Image<Gray, byte> tmp = new Image<Gray, byte>(img.Bitmap);
tmp = tmp.ThresholdToZero(new Gray(180));
int nZero = tmp.CountNonzero()[0] == 0 ? 1 : tmp.CountNonzero()[0];
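// Heuristic: if bright pixels (above 180) make up more than ~10% of the image,
// invert the thresholded copy; otherwise isolate the darker text range instead.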
if (tmp.Bytes.Length / nZero < 10)
img = tmp.Not();
else
img = img.ThresholdToZero(new Gray(80)).InRange(new Gray(0), new Gray(60)).Not();
tmp = new Image<Gray, byte>(img.Bitmap).Canny(50, 150);
List<Rectangle> rlist = new List<Rectangle>();
Rectangle min = new Rectangle();
Rectangle max = new Rectangle();
VectorOfVectorOfPoint contour = new VectorOfVectorOfPoint();
Mat hier = new Mat();
CvInvoke.FindContours(tmp, contour, hier, RetrType.External, ChainApproxMethod.ChainApproxSimple);
if (contour.Size > 0)
{
for (int i = 0; i < contour.Size; i++)
{
Rectangle rec = CvInvoke.BoundingRectangle(contour[i]);
if (rec.Width > 30 && rec.Width < 120 && rec.Height > 50 && rec.Height < 120)
{
rlist.Add(rec);
}
}
min = rlist.OrderBy(x => x.X).FirstOrDefault();
max = rlist.OrderByDescending(x => x.X).FirstOrDefault();
Rectangle roi = Rectangle.Union(min, max);
img.ROI = roi;
}
if (rlist.Count > 0)
{
double angle = LineAngle(min.X, min.Bottom, max.X, max.Bottom, min.X, min.Bottom, max.X, min.Bottom) + 3;
img = img.Rotate(angle, new Gray(255), false);
}
return img;
}
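For completeness: LineAngle is not shown in the question. A plausible sketch, assuming it returns the angle in degrees between the segments (x1,y1)-(x2,y2) and (x3,y3)-(x4,y4) computed with Math.Atan2:

private double LineAngle(int x1, int y1, int x2, int y2, int x3, int y3, int x4, int y4)
{
    // Angle of each segment relative to the x-axis, in radians
    double theta1 = Math.Atan2(y2 - y1, x2 - x1);
    double theta2 = Math.Atan2(y4 - y3, x4 - x3);
    // Signed difference between the two, converted to degrees
    return (theta1 - theta2) * 180.0 / Math.PI;
}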
Final image produced by ImageDeskewOuter:
Step 1> Select the desired contour, apply CvInvoke.MinAreaRect(), and copy the image area into a new image.
Step 2> Create a bitmap whose width is the sum of all the cropped image widths (if you want to put the images side by side).
Step 3> Draw each image onto the bitmap using Graphics.
public Image<Gray, byte> ImageDeskew(Image<Gray, byte> img)
{
img = img.Resize(img.Height, img.Width, Inter.Linear);
Image<Gray, byte> tmp = new Image<Gray, byte>(img.Bitmap);
tmp = new Image<Gray, byte>(AdjustContrast(tmp.Bitmap, 70));
img = tmp.Not().ThresholdAdaptive(new Gray(255), AdaptiveThresholdType.MeanC, ThresholdType.Binary, 45, new Gray(10));
tmp = new Image<Gray, byte>(img.Bitmap).Canny(50, 150);
List<Image<Gray, byte>> imglist = new List<Image<Gray, byte>>();
Rectangle min = new Rectangle();
Rectangle max = new Rectangle();
VectorOfVectorOfPoint contour = new VectorOfVectorOfPoint();
Mat hier = new Mat();
CvInvoke.FindContours(tmp, contour, hier, RetrType.External, ChainApproxMethod.ChainApproxSimple);
if (contour.Size > 0)
{
for (int i = 0; i < contour.Size; i++)
{
RotatedRect rRect = CvInvoke.MinAreaRect(contour[i]);
float area = rRect.Size.Width * rRect.Size.Height;
if (area > (img.Bytes.Length / 10))
{
if (rRect.Angle > -45) imglist.Add(img.Copy(rRect));
else imglist.Add(img.Copy(rRect).Rotate(-90, new Gray(255), false));
}
}
}
if (imglist.Count > 0)
{
int xPx = imglist.Sum(x => x.Width);
Bitmap bitmap = new Bitmap(xPx, img.Height);
using (Graphics g = Graphics.FromImage(bitmap))
{
xPx = 0;
imglist.Reverse();
foreach (Image<Gray, byte> i in imglist)
{
g.DrawImage(i.Not().Bitmap, xPx, 0);
xPx += i.Width;
}
}
img = new Image<Gray, byte>(bitmap).Not();
}
return img;
}
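A minimal usage sketch (the host class name and file paths are placeholders, and the AdjustContrast helper referenced above is assumed to exist):

DeskewHelper helper = new DeskewHelper(); // hypothetical class holding ImageDeskew
Image<Gray, byte> input = new Image<Gray, byte>(@"skewed.png"); // placeholder path
Image<Gray, byte> output = helper.ImageDeskew(input); // crops each word and re-composes them side by side
output.Save(@"deskewed.png");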
I want to do image stitching with OpenCvSharp, but something goes wrong and I do not get the right output of the two images stitched together.
I was following the tutorial in Python (link) and converting it into C# code.
Here are two example photos.
Where the images should be stitched, the output gives the wrong result; it looks the same as image 2.
I guess something goes wrong in, or somewhere after, the line
Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
Here is the full code.
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
bool debugImages = true;
string locationFolder = "";
OpenFileDialog dlg = new OpenFileDialog();
dlg.CheckFileExists = true;
dlg.Multiselect = true;
if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
{
locationFolder = Path.GetDirectoryName(dlg.FileNames[0]) + "\\output\\";
List<Mat> imagesMat = new List<Mat>();
for (var i = 0; i < dlg.FileNames.Length; i++)
{
using (Bitmap fromFile = new Bitmap(dlg.FileNames[i]))
{
Mat source = BitmapConverter.ToMat(fromFile);
imagesMat.Add(source);
}
}
if (imagesMat.Count != 2)
throw new Exception("Select only 2 images!!!");
int imageCounter = 0;
Mat trainImg = imagesMat[0];
Mat queryImg = imagesMat[1];
Mat trainImg_gray = new Mat();
Mat queryImg_gray = new Mat();
Cv2.CvtColor(trainImg, trainImg_gray, ColorConversionCodes.BGRA2GRAY);
Cv2.CvtColor(queryImg, queryImg_gray, ColorConversionCodes.BGRA2GRAY);
// detecting keypoints
// FastFeatureDetector, StarDetector, SIFT, SURF, ORB, BRISK, MSER, GFTTDetector, DenseFeatureDetector, SimpleBlobDetector
string method = "SURF";
string feature_matching = "bf"; //bf, knn
var descriptor = SURF.Create(500, 4, 2, true);
Mat descriptors1 = new Mat();
Mat descriptors2 = new Mat();
KeyPoint[] kpsA;
KeyPoint[] kpsB;
descriptor.DetectAndCompute(trainImg_gray, null, out kpsA, descriptors1);
descriptor.DetectAndCompute(queryImg_gray, null, out kpsB, descriptors2);
// Match descriptor vectors
//var flannMatcher = new FlannBasedMatcher();
DMatch[] matches;
if (feature_matching == "bf")
matches = matchKeyPointsBF(descriptors1, descriptors2, method);
else
matches = matchKeyPointsKNN(descriptors1, descriptors2, 0.75, method);
var bfView = new Mat();
Cv2.DrawMatches(trainImg, kpsA, queryImg, kpsB, matches, bfView, null, flags: DrawMatchesFlags.NotDrawSinglePoints);
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(bfView))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //1
}
Mat H = getHomography(kpsA, kpsB, descriptors1, descriptors2, matches, 4);
if (H == null)
throw new Exception("No Homography!!!");
//for (var i = 0; i < H.Cols; i++)
//{
// for (var j = 0; j < H.Rows; j++)
// Console.Write(H.At<float>(i, j) + " ");
// Console.WriteLine("");
//}
double width = trainImg.Size().Width + queryImg.Size().Width;
double height = trainImg.Size().Height + queryImg.Size().Height;
Mat result = new Mat();
Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //1
}
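// Overlay the query image onto the top-left corner of the warped train image;
// if H is wrong, the warp above contributes nothing visible and the saved
// result looks like image 2 alone.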
result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(queryImg.Size().Width, queryImg.Size().Height))] = queryImg;
if (debugImages)
{
using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //2
}
//# transform the panorama image to grayscale and threshold it
Mat gray = result.Clone();
Cv2.CvtColor(result, gray, ColorConversionCodes.BGR2GRAY);
Mat thresh = new Mat();
double thresh2 = Cv2.Threshold(gray, thresh, 0, 255, ThresholdTypes.Binary);
//# Finds contours from the binary image
OpenCvSharp.Point[][] cnts;
HierarchyIndex[] hierarchy;
Cv2.FindContours(thresh, out cnts, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
OpenCvSharp.Point[] cnts2 = new OpenCvSharp.Point[cnts[0].Length];
for (var k = 0; k < cnts[0].Length; k++)
cnts2[k] = cnts[0][k];
//InputArray ptsA = InputArray.Create(cnts2);
//var c = Cv2.ContourArea(ptsA, true);
OpenCvSharp.Rect xywh = Cv2.BoundingRect(cnts2);
result = result[new Rect(new OpenCvSharp.Point(xywh.X, xywh.Y), new OpenCvSharp.Size(xywh.Width, xywh.Height))];
//result = result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(256, 256))];
Bitmap endResultBitmap = BitmapConverter.ToBitmap(result);
endResultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //4
Environment.Exit(-1);
}
}
private BFMatcher createMatcher(string method, bool crossCheck)
{
//"Create and return a Matcher Object"
if (method == "SURF" || method == "SIFT")
return new BFMatcher(NormTypes.L2, crossCheck);
else //if (method == "ORB" || method == "BRISK")
return new BFMatcher(NormTypes.Hamming, crossCheck);
}
private DMatch[] matchKeyPointsBF(Mat featuresA, Mat featuresB, string method)
{
BFMatcher bf = createMatcher(method, crossCheck: true);
// # Match descriptors.
DMatch[] bfMatches = bf.Match(featuresA, featuresB);
//# Sort the features in order of distance.
//# The points with small distance (more similarity) are ordered first in the vector
DMatch[] rawMatches = bfMatches.OrderBy(a => a.Distance).ToArray();
if (rawMatches.Length > 100)
Array.Resize(ref rawMatches, 100);
return rawMatches;
}
private DMatch[] matchKeyPointsKNN(Mat featuresA, Mat featuresB, double ratio, string method)
{
BFMatcher bf = createMatcher(method, crossCheck: false);
// # compute the raw matches and initialize the list of actual matches
DMatch[][] rawMatches = bf.KnnMatch(featuresA, featuresB, 2);
List<DMatch> rawMatches2 = new List<DMatch>();
//# loop over the raw matches
DMatch prevmatchN = rawMatches[0][0];
rawMatches2.Add(prevmatchN);
for (int m = 0; m < rawMatches.Length; m++)
{
for (int n = 0; n < rawMatches[m].Length; n++)
{
//# ensure the distance is within a certain ratio of each
//# other (i.e. Lowe's ratio test)
DMatch matchN = rawMatches[m][n];
if (n == 0)
prevmatchN = matchN;
if (prevmatchN.Distance < matchN.Distance * (ratio))
rawMatches2.Add(matchN);
if (rawMatches2.Count >= 100)
break;
}
}
return rawMatches2.ToArray();
}
private Mat getHomography(KeyPoint[] kpsA, KeyPoint[] kpsB, Mat featuresA, Mat featuresB, DMatch[] matches, int reprojThresh)
{
//# convert the keypoints to numpy arrays
Point2f[] PtA = new Point2f[matches.Length];
Point2f[] PtB = new Point2f[matches.Length];
for (int i = 0; i < matches.Length; i++)
{
KeyPoint kpsAI = kpsA[matches[i].QueryIdx];
KeyPoint kpsBI = kpsB[matches[i].TrainIdx];
PtA[i] = new Point2f(kpsAI.Pt.X, kpsAI.Pt.Y);
PtB[i] = new Point2f(kpsBI.Pt.X, kpsBI.Pt.Y);
}
InputArray ptsA = InputArray.Create(PtA);
InputArray ptsB = InputArray.Create(PtB);
if (matches.Length > 4)
{
//You get the homography matrix using RANSAC
Mat H = Cv2.FindHomography(ptsA, ptsB, HomographyMethods.Ransac, reprojThresh);
//and then to get any point on the target picture from the original picture:
//Mat targetPoint = new Mat();
//Cv2.PerspectiveTransform(ptsA, targetPoint, H);
return H;
}
else
return null;
}
}
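One hedged diagnostic, not a confirmed fix: Cv2.FindHomography returns a CV_64F matrix, so the commented-out printing loop above reads it with the wrong type (At<float>). Reading it as double shows whether RANSAC actually found a sensible transform; if H is near the identity or wildly scaled, WarpPerspective will produce an output that looks unchanged or empty, which matches the symptom described:

for (int i = 0; i < H.Rows; i++)
{
    for (int j = 0; j < H.Cols; j++)
        Console.Write(H.At<double>(i, j) + " "); // CV_64F: read as double, not float
    Console.WriteLine();
}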
How can I calculate the actual speed of all moving objects in a video, using the Lucas-Kanade algorithm to compute the optical flow?
I need to do this on this video. The camera is fixed in one place (Fig. 1).
I find the key points and track them using the Lucas-Kanade algorithm (Fig. 2).
How can I use this algorithm to compute the actual speed of each car?
Thank you for your answers!
My code:
public class OpticalFlowLK : BaseFilter
{
private Mat prevFrame;
private Mat nextFrame;
private bool prevFrameEmpty = true;
private GFTTDetector gFTTDetector;
private Stopwatch sWatch;
private double time = 0.04;
public OpticalFlowLK()
{
TAG = "[Optical Flow Lucas Kanade]";
gFTTDetector = new GFTTDetector(500);
sWatch = new Stopwatch();
}
protected override Mat ProcessFrame(ref Mat frame)
{
Mat rez = new Mat();
frame.CopyTo(rez);
nextFrame = new Mat();
CvInvoke.CvtColor(frame, nextFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
if (!prevFrameEmpty)
{
VectorOfKeyPoint prevFeatures = new VectorOfKeyPoint(gFTTDetector.Detect(prevFrame));
//Features2DToolbox.DrawKeypoints(rez, prevFeatures, rez, new Bgr(0, 0, 255));
PointF[] prevPts = new PointF[prevFeatures.Size];
for (int i = 0; i < prevFeatures.Size; i++)
{
prevPts[i] = prevFeatures[i].Point;
}
PointF[] nextPts;
byte[] status;
float[] errors;
sWatch.Start();
CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, prevPts, new Size(20, 20), 1, new MCvTermCriteria(20, 0.03), out nextPts, out status, out errors);
sWatch.Stop();
sWatch.Reset();
prevFrame = nextFrame.Clone();
for (int i = 0; i < status.Length; i++)
{
Point prevPt = new Point((int)prevPts[i].X, (int)prevPts[i].Y);
Point nextPt = new Point((int)nextPts[i].X,(int)nextPts[i].Y);
double length = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
if (length > 3)
{
CvInvoke.Circle(rez, nextPt, 1, new MCvScalar(0, 255, 0), 2);
}
}
sWatch.Stop();
prevFrameEmpty = false;
}
else if (prevFrameEmpty)
{
prevFrame = nextFrame.Clone();
prevFrameEmpty = false;
}
return rez;
}
protected override bool InitFilter(ref Mat frame)
{
throw new NotImplementedException();
}
}
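Converting the tracked pixel displacement into a real speed needs two scene-specific constants that the snippet lacks; a minimal sketch under loud assumptions (metersPerPixel calibrated from a known length on the road plane, dt taken from the frame rate, matching the time = 0.04 field above):

// Hypothetical conversion; both constants must be calibrated for the actual scene.
const double metersPerPixel = 0.05; // assumed ground-plane scale
const double dt = 0.04;             // seconds per frame at an assumed 25 fps
double displacementPx = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
double speedKmh = displacementPx * metersPerPixel / dt * 3.6; // m/s -> km/h

Note that a single fixed scale only holds if the camera looks straight down at the road; with an angled view like Fig. 1, each point should first be mapped to ground-plane coordinates (e.g. via a homography) before measuring the displacement.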
I have the following, but I can't figure out how to find ALL the matches in a source image.
static void Main()
{
using (var template = Cv.LoadImage(@"images\logo.png", LoadMode.GrayScale))
using (var source = Cv.LoadImage(@"images\manyLogos.png", LoadMode.GrayScale))
using (var sourceColour = Cv.LoadImage(@"images\manyLogos.png", LoadMode.Color))
{
var width = source.Width - template.Width + 1;
var height = source.Height - template.Height + 1;
using (var result = Cv.CreateImage(Cv.Size(width, height), BitDepth.F32, 1))
{
Cv.MatchTemplate(source, template, result, MatchTemplateMethod.SqDiff);
var THRESHOLD = 0.08D;
double minVal, maxVal;
CvPoint minLoc, maxLoc;
Cv.MinMaxLoc(result, out minVal, out maxVal, out minLoc, out maxLoc);
var outlineColor = (minVal > THRESHOLD) ? CvColor.Green : CvColor.Red;
Cv.Rectangle(sourceColour, Cv.Point(minLoc.X, minLoc.Y), Cv.Point(minLoc.X + template.Width, minLoc.Y + template.Height), outlineColor, 1, 0, 0);
}
using (var window = new CvWindow("Test"))
{
while (CvWindow.WaitKey(10) < 0)
{
window.Image = sourceColour;
}
}
}
}
I can outline the best match, just not all the matches. I need to get all the matches somehow.
Using the matchTemplate method, your output image gives you pixel values that represent how well your template matches at each location. In your case, the lower the value, the better the match, since you used MatchTemplateMethod.SqDiff.
Your problem is that when you use the minMaxLoc function, you get exactly what you ask for: the single best match (in this case, the min).
All matches are the pixels whose value is under the threshold that you set up.
Since I'm not used to C#, here is how it would go in C++; you can do the translation:
// after your call to MatchTemplate
float threshold = 0.08;
cv::Mat thresholdedImage;
cv::threshold(result, thresholdedImage, threshold, 255, CV_THRESH_BINARY);
// the above will set pixels to 0 in thresholdedImage if their value in result is lower than the threshold, to 255 if it is larger.
// in C++ it could also be written cv::Mat thresholdedImage = result < threshold;
// Now loop over pixels of thresholdedImage, and draw your matches
for (int r = 0; r < thresholdedImage.rows; ++r) {
for (int c = 0; c < thresholdedImage.cols; ++c) {
if (!thresholdedImage.at<float>(r, c)) // thresholdedImage(r,c) == 0 means a match; result is CV_32F, so access as float
cv::circle(sourceColor, cv::Point(c, r), template.cols/2, CV_RGB(0,255,0), 1);
}
}
Translating from C++ and using the OpenCvSharp wrapper, the above code, with the minMaxLoc lines replaced, worked for me:
double threshold = 0.9;
var thresholdImage = Cv.CreateImage(newImageSize, BitDepth.F32, 1);
Cv.Threshold(result, thresholdImage, threshold, 255, ThresholdType.Binary);
for (int r = 0; r < thresholdImage.GetSize().Height; r++)
{
for (int c = 0; c < thresholdImage.GetSize().Width; c++)
{
if (thresholdImage.GetRow(r)[c].Val0 > 0)
{
Cv.Rectangle(sourceColour, Cv.Point(c, r), Cv.Point(c + template.Width, r + template.Height), CvColor.Red, 1, 0, 0);
}
}
}
Here is a solution using the MinMax and MatchTemplate methods. I hope it helps.
public void multipleTemplateMatch(string SourceImages, string tempImage)
{
Image<Bgr, byte> image_source = new Image<Bgr, byte>(SourceImages);
Image<Bgr, byte> image_partial1 = new Image<Bgr, byte>(tempImage);
double threshold = 0.9;
ImageFinder imageFinder = new ImageFinder(image_source, image_partial1, threshold);
imageFinder.FindThenShow();
}
and here is the class which will help.
class ImageFinder
{
private List<Rectangle> rectangles;
public Image<Bgr, byte> BaseImage { get; set; }
public Image<Bgr, byte> SubImage { get; set; }
public Image<Bgr, byte> ResultImage { get; set; }
public double Threshold { get; set; }
public List<Rectangle> Rectangles
{
get { return rectangles; }
}
public ImageFinder(Image<Bgr, byte> baseImage, Image<Bgr, byte> subImage, double threshold)
{
rectangles = new List<Rectangle>();
BaseImage = baseImage;
SubImage = subImage;
Threshold = threshold;
}
public void FindThenShow()
{
FindImage();
DrawRectanglesOnImage();
ShowImage();
}
public void DrawRectanglesOnImage()
{
ResultImage = BaseImage.Copy();
foreach (var rectangle in this.rectangles)
{
ResultImage.Draw(rectangle, new Bgr(Color.Blue), 1);
}
}
public void FindImage()
{
rectangles = new List<Rectangle>();
using (Image<Bgr, byte> imgSrc = BaseImage.Copy())
{
while (true)
{
using (Image<Gray, float> result = imgSrc.MatchTemplate(SubImage, TemplateMatchingType.CcoeffNormed))
{
double[] minValues, maxValues;
Point[] minLocations, maxLocations;
result.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);
if (maxValues[0] > Threshold)
{
Rectangle match = new Rectangle(maxLocations[0], SubImage.Size);
imgSrc.Draw(match, new Bgr(Color.Blue), -1);
rectangles.Add(match);
}
else
{
break;
}
}
}
}
}
public void ShowImage()
{
Random rNo = new Random();
string outFilename = "matched Templates" + rNo.Next();
CvInvoke.Imshow(outFilename, ResultImage);
}
}
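A minimal usage sketch (the file paths are placeholders; CvInvoke.WaitKey is needed so the Imshow window in ShowImage actually renders):

multipleTemplateMatch(@"images\manyLogos.png", @"images\logo.png");
CvInvoke.WaitKey(0); // keep the result window open until a key is pressed

Note the design choice in FindImage: each found match is painted over with a filled rectangle before the next MatchTemplate pass, so the same spot cannot be detected twice; the loop ends once no remaining score clears the threshold.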
If you find this helpful, please vote it as useful.
Thanks.
For a couple of days now I've tried to figure out why my nine-slice code does not work as expected. As far as I can see, there seems to be an issue with the Graphics.DrawImage method, which handles my nine-slice images incorrectly. So my problem is how to compensate for the incorrect scaling that is performed when running my code on the Compact Framework. I might add that this code works perfectly when running in the full framework environment. The problem only occurs when scaling the image to a larger image, not the other way around. Here is the snippet:
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
/// <summary>
/// Initializes a new instance of the NineSliceBitmapSnippet class.
/// </summary>
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image to small for sliceing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
So, my question is: does anybody know how I could compensate for the incorrect scaling?
/Dan
After some more trial and error I've finally found a solution to my problem. The scaling problems have always been in the top-center, right-center, bottom-center and left-center slices, since they're always stretched in only one direction according to the logic of nine-slice scaling. If I apply a temporary square stretch to those slices before applying the correct stretch, the final bitmap will be correct. Once again, the problem is only visible in the .NET Compact Framework on a Windows CE device (Smart Device). Here's a snippet with code adjusting for the bug in CF. My only concern now is that the slices that get square-stretched will take much more memory due to the correction code. On the other hand, this step only lasts a short period of time, so I might get away with it. ;)
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap Scale(Size size)
{
if (m_OriginalBitmap != null)
{
return ScaleSingleBitmap(size);
}
return null;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
bool isWidthAffectedByVerticalStretch = (y == 1 && (x == 0 || x == 2) && destination.Height > source.Height);
bool isHeightAffectedByHorizontalStretch = (x == 1 && (y == 0 || y == 2) && destination.Width > source.Width);
if (isHeightAffectedByHorizontalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Horizontal);
}
else if (isWidthAffectedByVerticalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Vertical);
}
else
{
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
}
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
private void BypassDrawImageError(Graphics graphics, Rectangle destination, Rectangle source, Orientation orientationAdjustment)
{
Size adjustedSize = Size.Empty;
switch (orientationAdjustment)
{
case Orientation.Horizontal:
adjustedSize = new Size(destination.Width, destination.Width);
break;
case Orientation.Vertical:
adjustedSize = new Size(destination.Height, destination.Height);
break;
default:
break;
}
using (Bitmap quadScaledBitmap = new Bitmap(adjustedSize.Width, adjustedSize.Height))
{
using (Graphics tempGraphics = Graphics.FromImage(quadScaledBitmap))
{
tempGraphics.Clear(Color.Fuchsia);
tempGraphics.DrawImage(m_OriginalBitmap, new Rectangle(0, 0, adjustedSize.Width, adjustedSize.Height), source, GraphicsUnit.Pixel);
}
graphics.DrawImage(quadScaledBitmap, destination, new Rectangle(0, 0, quadScaledBitmap.Width, quadScaledBitmap.Height), GraphicsUnit.Pixel);
}
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image to small for sliceing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
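A minimal usage sketch (the bitmap path and target size are placeholders):

Bitmap original = new Bitmap(@"button.png");
NineSliceBitmapSnippet snippet = new NineSliceBitmapSnippet(original);
// Corners stay 5x5 pixels (the CornerLength default); edges and center stretch to fill.
Bitmap scaled = snippet.Scale(new Size(200, 64));
scaled.Save(@"button_scaled.png", ImageFormat.Png);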