Panoramic image stitching using EmguCV - c#

I am doing a project on panoramic stitching of images using Emgu CV (OpenCV for C#). So far I have code that stitches images, but the output looks distorted. This is what I am getting:
My panorama:
This is what the Emgu CV Stitcher.Stitch method gives:
Stitched by the built-in stitcher
Clearly I am missing something. Moreover, if I add more images, the output gets even more stretched, like this one:
I am not able to figure out what I am missing. Here is my code so far:
http://pastebin.com/Ke2Zz4m9
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Features2D;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using Emgu.CV.Util;
using Emgu.CV.GPU;
namespace Project
{
    public partial class Form1 : Form
    {
        public Form1()
        {
            InitializeComponent();
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            Image<Bgr, float> one = new Image<Bgr, float>("D:\\Venice_panorama_part_01.jpg");
            Image<Bgr, float> two = new Image<Bgr, float>("D:\\Venice_panorama_part_02.jpg");
            Image<Bgr, float> third = new Image<Bgr, float>("D:\\Venice_panorama_part_03.jpg");
            Image<Bgr, float> fourth = new Image<Bgr, float>("D:\\Venice_panorama_part_04.jpg");
            Image<Bgr, float> fifth = new Image<Bgr, float>("D:\\Venice_panorama_part_05.jpg");
            Image<Bgr, float> sixth = new Image<Bgr, float>("D:\\Venice_panorama_part_06.jpg");
            Image<Bgr, float> seventh = new Image<Bgr, float>("D:\\Venice_panorama_part_07.jpg");
            Image<Bgr, float> eighth = new Image<Bgr, float>("D:\\Venice_panorama_part_08.jpg");

            Image<Bgr, Byte> result = FindMatch(two, third);
            result = convert(result);
            Image<Bgr, float> twoPlusThree = result.Convert<Bgr, float>();

            Image<Bgr, Byte> result2 = FindMatch(fourth, fifth);
            result2 = convert(result2);
            Image<Bgr, float> fourPlusFive = result2.Convert<Bgr, float>();

            Image<Bgr, Byte> result3 = FindMatch(sixth, seventh);
            result3 = convert(result3);
            Image<Bgr, float> sixPlusSeven = result3.Convert<Bgr, float>();

            Image<Bgr, Byte> result4 = FindMatch(one, twoPlusThree);
            result4 = convert(result4);
            Image<Bgr, float> oneTwoThree = result4.Convert<Bgr, float>();

            Image<Bgr, Byte> result5 = FindMatch(oneTwoThree, fourPlusFive);
            result5 = convert(result5);
            Image<Bgr, float> oneTwoThreeFourFive = result5.Convert<Bgr, float>();

            Image<Bgr, Byte> result6 = FindMatch(sixPlusSeven, eighth);
            result6 = convert(result6);
            Image<Bgr, float> sixSevenEighth = result6.Convert<Bgr, float>();

            Image<Bgr, Byte> result7 = FindMatch(oneTwoThreeFourFive, sixSevenEighth);
            result7 = convert(result7);

            result.Save("D:\\result1.jpg");
            result2.Save("D:\\result2.jpg");
            result3.Save("D:\\result3.jpg");
            result4.Save("D:\\result4.jpg");
            result5.Save("D:\\result5.jpg");
            result6.Save("D:\\result6.jpg");
            result7.Save("D:\\result7.jpg");
            this.Close();
        }
        public static Image<Bgr, Byte> FindMatch(Image<Bgr, float> fImage, Image<Bgr, float> lImage)
        {
            HomographyMatrix homography = null;
            SURFDetector surfCPU = new SURFDetector(500, false);
            int k = 2;
            double uniquenessThreshold = 0.8;
            Matrix<int> indices;
            Matrix<byte> mask;
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Image<Gray, Byte> fImageG = fImage.Convert<Gray, Byte>();
            Image<Gray, Byte> lImageG = lImage.Convert<Gray, Byte>();

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                //extract features from the object image
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(fImageG))
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float>(DistanceType.L2))
                {
                    modelKeyPoints = new VectorOfKeyPoint();
                    surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                    //extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(lImageG))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte>(gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream())
                    {
                        matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int>(gpuMatchIndices.Size);
                        mask = new Matrix<byte>(gpuMask.Size);

                        //gpu implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col(0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col(1))
                        {
                            GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks;
                        //we could do other CPU-intensive work here while waiting for the stream to complete
                        stream.WaitForCompletion();
                        gpuMask.Download(mask);
                        gpuMatchIndices.Download(indices);

                        if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                        {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }
                    }
                }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(fImageG, null, modelKeyPoints);

                //extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(lImageG, null, observedKeyPoints);

                BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
                matcher.Add(modelDescriptors);
                indices = new Matrix<int>(observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }

            Image<Bgr, Byte> mImage = fImage.Convert<Bgr, Byte>();
            Image<Bgr, Byte> oImage = lImage.Convert<Bgr, Byte>();
            Image<Bgr, Byte> result = new Image<Bgr, byte>(mImage.Width + oImage.Width, mImage.Height);

            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = fImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                //copy the left image using an identity (no-shift) transform on the origin
                HomographyMatrix origin = new HomographyMatrix();
                origin.SetIdentity();
                origin.Data[0, 2] = 0;
                origin.Data[1, 2] = 0;

                Image<Bgr, Byte> mosaic = new Image<Bgr, byte>(mImage.Width + oImage.Width + 2000, mImage.Height * 2);
                Image<Bgr, byte> warp_image = mosaic.Clone();
                mosaic = mImage.WarpPerspective(origin, mosaic.Width, mosaic.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_DEFAULT, new Bgr(0, 0, 0));
                warp_image = oImage.WarpPerspective(homography, warp_image.Width, warp_image.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Bgr(200, 0, 0));

                Image<Gray, byte> warp_image_mask = oImage.Convert<Gray, byte>();
                warp_image_mask.SetValue(new Gray(255));
                Image<Gray, byte> warp_mosaic_mask = mosaic.Convert<Gray, byte>();
                warp_mosaic_mask.SetZero();
                warp_mosaic_mask = warp_image_mask.WarpPerspective(homography, warp_mosaic_mask.Width, warp_mosaic_mask.Height, Emgu.CV.CvEnum.INTER.CV_INTER_LINEAR, Emgu.CV.CvEnum.WARP.CV_WARP_INVERSE_MAP, new Gray(0));
                warp_image.Copy(mosaic, warp_mosaic_mask);
                return mosaic;
            }
            return null;
        }
        private Image<Bgr, Byte> convert(Image<Bgr, Byte> img)
        {
            Image<Gray, byte> imgGray = img.Convert<Gray, byte>();
            Image<Gray, byte> mask = imgGray.CopyBlank();
            Contour<Point> largestContour = null;
            double largestarea = 0;
            for (var contours = imgGray.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                 RETR_TYPE.CV_RETR_EXTERNAL); contours != null; contours = contours.HNext)
            {
                if (contours.Area > largestarea)
                {
                    largestarea = contours.Area;
                    largestContour = contours;
                }
            }
            //crop to the bounding box of the largest contour (guard against images with no contours)
            if (largestContour != null)
                img.ROI = largestContour.BoundingRectangle;
            return img;
        }
    }
}

Actually there is nothing wrong with your code, and this image is entirely expected. Notice that when you stitch all the images together, you take the first (left) image as the reference plane and set it as the front direction, so all the subsequent images, which were originally oriented toward the right, get projected onto a plane facing front. Imagine you are sitting inside a room: the wall in front of you appears rectangular, while the one on your right side may look trapezoidal. This is the so-called "perspective distortion" of a homography, and the wider the horizontal angle of view, the more noticeable the phenomenon.
So if you intend to stitch a series of images that covers a wide angle of view, you should typically project onto a cylindrical or spherical surface instead of a planar one. You can find this option by searching the reference manual.
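For illustration, here is a minimal sketch of such a cylindrical warp. The helper name CylindricalWarp and the focal length f (in pixels) are assumptions, not part of the original code; f would normally be estimated from calibration or from the recovered homographies. Warping every input with the same f before matching makes consecutive frames related by nearly pure horizontal translation, so the mosaic stops stretching at the ends:
public static Image<Bgr, byte> CylindricalWarp(Image<Bgr, byte> src, double f)
{
    Image<Bgr, byte> dst = src.CopyBlank();
    double xc = src.Width / 2.0, yc = src.Height / 2.0;
    for (int y = 0; y < dst.Height; y++)
    {
        for (int x = 0; x < dst.Width; x++)
        {
            //unproject the destination pixel from the cylinder surface...
            double theta = (x - xc) / f;
            double h = (y - yc) / f;
            //...and reproject that 3D ray onto the original (planar) image
            double X = Math.Sin(theta), Y = h, Z = Math.Cos(theta);
            int srcX = (int)Math.Round(f * X / Z + xc);
            int srcY = (int)Math.Round(f * Y / Z + yc);
            if (srcX >= 0 && srcX < src.Width && srcY >= 0 && srcY < src.Height)
                dst[y, x] = src[srcY, srcX];
        }
    }
    return dst;
}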

Related

Why can I not see canny edge-detected contours in real-time?

I am trying to find and display contours after Canny edge detection.
My code runs, but I only see a black screen instead of the contours.
What is wrong?
If I remove the contour-finding part, I successfully see the Canny-detected edges.
private void Device_NewFrame(object sender, NewFrameEventArgs eventArgs)
{
    video = (Bitmap)eventArgs.Frame.Clone();
    Bitmap video2 = (Bitmap)eventArgs.Frame.Clone();
    if (mode == 1)
    {
        Grayscale gray = new Grayscale(0.2125, 0.7154, 0.0721);
        Bitmap video3 = gray.Apply(video2);
        CannyEdgeDetector canny = new CannyEdgeDetector(0, 10);
        canny.ApplyInPlace(video3);
        //pic.Image = video3;
        //Image<Gray, byte> imgOutput = video3.Convert<Gray, byte>().ThresholdBinary(new Gray(100), new Gray(255));
        Mat hier = new Mat();
        Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
        Image<Gray, byte> imgout = new Image<Gray, byte>(video.Width, video.Height, new Gray(0));
        CvInvoke.FindContours(imgout, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
        CvInvoke.DrawContours(imgout, contours, -1, new MCvScalar(255, 0, 0));
        pic.Image = imgout.Bitmap;
    }
    else
        pic.Image = video;
}
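A likely cause, judging from the snippet alone: FindContours is run on imgout, which was created just above as an all-black image, so there are no edges in it to trace; the Canny result video3 is never searched. A minimal corrected sketch of that middle section, assuming the Bitmap constructor of Image<Gray, byte> used elsewhere on this page:
//search the Canny output for contours, not the freshly created blank canvas
Image<Gray, byte> edges = new Image<Gray, byte>(video3);
Mat hier = new Mat();
var contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
CvInvoke.FindContours(edges, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
//draw the recovered contours onto the black output canvas and display it
Image<Gray, byte> imgout = new Image<Gray, byte>(video.Width, video.Height, new Gray(0));
CvInvoke.DrawContours(imgout, contours, -1, new MCvScalar(255));
pic.Image = imgout.Bitmap;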

How to fix skewed image for OCR?

I started an OCR project a few weeks ago and I am stuck on an image-skew problem. I have tried many different methods and nothing seems to work, so please help :)
The skewed image is given below.
I want a final image such as the one given below.
I have already tried to deskew the image, but I was unable to get the final image.
public Image<Gray, byte> ImageDeskewOuter(Image<Gray, byte> img)
{
    img = img.Resize(img.Height, img.Width, Inter.Linear);
    Image<Gray, byte> tmp = new Image<Gray, byte>(img.Bitmap);
    tmp = tmp.ThresholdToZero(new Gray(180));
    int nZero = tmp.CountNonzero()[0] == 0 ? 1 : tmp.CountNonzero()[0];
    if (tmp.Bytes.Length / nZero < 10)
        img = tmp.Not();
    else
        img = img.ThresholdToZero(new Gray(80)).InRange(new Gray(0), new Gray(60)).Not();
    tmp = new Image<Gray, byte>(img.Bitmap).Canny(50, 150);
    List<Rectangle> rlist = new List<Rectangle>();
    Rectangle min = new Rectangle();
    Rectangle max = new Rectangle();
    VectorOfVectorOfPoint contour = new VectorOfVectorOfPoint();
    Mat hier = new Mat();
    CvInvoke.FindContours(tmp, contour, hier, RetrType.External, ChainApproxMethod.ChainApproxSimple);
    if (contour.Size > 0)
    {
        for (int i = 0; i < contour.Size; i++)
        {
            Rectangle rec = CvInvoke.BoundingRectangle(contour[i]);
            if (rec.Width > 30 && rec.Width < 120 && rec.Height > 50 && rec.Height < 120)
            {
                rlist.Add(rec);
            }
        }
        min = rlist.OrderBy(x => x.X).FirstOrDefault();
        max = rlist.OrderByDescending(x => x.X).FirstOrDefault();
        Rectangle roi = Rectangle.Union(min, max);
        img.ROI = roi;
    }
    if (rlist.Count > 0)
    {
        double angle = LineAngle(min.X, min.Bottom, max.X, max.Bottom, min.X, min.Bottom, max.X, min.Bottom) + 3;
        img = img.Rotate(angle, new Gray(255), false);
    }
    return img;
}
Final image using the above function:
Step 1 > Select the desired contour, apply CvInvoke.MinAreaRect(), and copy the image area into a new image.
Step 2 > Create a bitmap whose width is the sum of all the cropped image widths (if you want to put the images side by side).
Step 3 > Draw each image onto the bitmap using Graphics, as in the function below.
public Image<Gray, byte> ImageDeskew(Image<Gray, byte> img)
{
    img = img.Resize(img.Height, img.Width, Inter.Linear);
    Image<Gray, byte> tmp = new Image<Gray, byte>(img.Bitmap);
    tmp = new Image<Gray, byte>(AdjustContrast(tmp.Bitmap, 70));
    img = tmp.Not().ThresholdAdaptive(new Gray(255), AdaptiveThresholdType.MeanC, ThresholdType.Binary, 45, new Gray(10));
    tmp = new Image<Gray, byte>(img.Bitmap).Canny(50, 150);
    List<Image<Gray, byte>> imglist = new List<Image<Gray, byte>>();
    Rectangle min = new Rectangle();
    Rectangle max = new Rectangle();
    VectorOfVectorOfPoint contour = new VectorOfVectorOfPoint();
    Mat hier = new Mat();
    CvInvoke.FindContours(tmp, contour, hier, RetrType.External, ChainApproxMethod.ChainApproxSimple);
    if (contour.Size > 0)
    {
        for (int i = 0; i < contour.Size; i++)
        {
            RotatedRect rRect = CvInvoke.MinAreaRect(contour[i]);
            float area = rRect.Size.Width * rRect.Size.Height;
            if (area > (img.Bytes.Length / 10))
            {
                if (rRect.Angle > -45) imglist.Add(img.Copy(rRect));
                else imglist.Add(img.Copy(rRect).Rotate(-90, new Gray(255), false));
            }
        }
    }
    if (imglist.Count > 0)
    {
        int xPx = imglist.Sum(x => x.Width);
        Bitmap bitmap = new Bitmap(xPx, img.Height);
        using (Graphics g = Graphics.FromImage(bitmap))
        {
            xPx = 0;
            imglist.Reverse();
            foreach (Image<Gray, byte> i in imglist)
            {
                g.DrawImage(i.Not().Bitmap, xPx, 0);
                xPx += i.Width;
            }
        }
        img = new Image<Gray, byte>(bitmap).Not();
    }
    return img;
}

EmguCV Blob Counter

INPUT IMAGE
Hi, I am trying to learn EmguCV 3.3 and I have a question about blob counting. As you can see in the INPUT IMAGE, I have black uneven blobs.
I am trying to do something like this:
OUTPUT IMAGE
I need to draw rectangles around the blobs and count them.
I tried some approaches, but none of them worked.
I need Help();
You can use FindContours() or SimpleBlobDetector to achieve that. Here is an example that uses the first one:
Image<Gray, Byte> grayImage = new Image<Gray, Byte>("mRGrc.jpg");
Image<Gray, Byte> canny = new Image<Gray, byte>(grayImage.Size);
int counter = 0;
using (MemStorage storage = new MemStorage())
    for (Contour<Point> contours = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
    {
        contours.ApproxPoly(contours.Perimeter * 0.05, storage);
        CvInvoke.cvDrawContours(canny, contours, new MCvScalar(255), new MCvScalar(255), -1, 1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
        counter++;
    }
using (MemStorage store = new MemStorage())
    for (Contour<Point> contours1 = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, store); contours1 != null; contours1 = contours1.HNext)
    {
        Rectangle r = CvInvoke.cvBoundingRect(contours1, 1);
        canny.Draw(r, new Gray(255), 1);
    }
Console.WriteLine("Number of blobs: " + counter);
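One caveat, stated here as an assumption about the intent: CV_RETR_TREE retrieves nested (inner) contours as well, so counter can come out higher than the number of visible blobs. If only the outermost blobs should be counted, CV_RETR_EXTERNAL is the usual choice; a minimal variant of the counting loop, reusing the MemStorage pattern from above:
//count only outermost blobs, ignoring any holes nested inside them
using (MemStorage storage = new MemStorage())
    for (Contour<Point> c = grayImage.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL, storage); c != null; c = c.HNext)
        counter++;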

How to extract identified inner and outer contours using EmguCV?

I'm doing a project to process brain injuries using image processing. In order to improve its accuracy, I need to extract only the brain matter from the skull.
Using EmguCV I was able to identify the inner and outer contours (blue and dark blue). Is there any way to extract these identified contours into another image?
Image<Gray, byte> grayImage = new Image<Gray, byte>(bitmap);
Image<Bgr, byte> color = new Image<Bgr, byte>(bitmap);
grayImage = grayImage.ThresholdBinary(new Gray(220), new Gray(255));
using (MemStorage storage = new MemStorage())
{
    for (Contour<Point> contours = grayImage.FindContours(
         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_TREE, storage); contours != null; contours = contours.HNext)
    {
        Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.015, storage);
        if (currentContour.BoundingRectangle.Width > 20)
        {
            CvInvoke.cvDrawContours(color, contours, new MCvScalar(100), new MCvScalar(255), -1, 2, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0));
        }
    }
}
EmguCV 2.2.2
Expected output:
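One common approach, sketched here as an assumption on top of the Emgu 2.2.2 code above: draw the chosen contour filled into a single-channel mask, then copy the source through that mask into a fresh image, so everything outside the contour stays black. Which contour to pick (the currentContour inside the loop above) depends on your own selection criterion:
//build a filled mask from the identified contour and lift the region out
Image<Gray, byte> mask = grayImage.CopyBlank();
CvInvoke.cvDrawContours(mask, currentContour, new MCvScalar(255), new MCvScalar(255), 0, -1, Emgu.CV.CvEnum.LINE_TYPE.EIGHT_CONNECTED, new Point(0, 0)); //thickness -1 fills the interior
Image<Bgr, byte> extracted = color.Copy(mask); //pixels outside the mask remain black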

How to find the max occurred color in the picture using EMGU CV in C#?

I have an image of a Windows control, let's say a text box, and I want to get the background color of the text written within the text box by finding the most frequently occurring color in the picture via pixel comparison.
I searched on Google and found that everyone talks about histograms, and some code is given for computing the histogram of an image, but nobody describes the procedure after the histogram has been computed.
The code I found on some sites is like this:
// Create a grayscale image
Image<Gray, Byte> img = new Image<Gray, byte>(bmp);
// Fill image with random values
img.SetRandUniform(new MCvScalar(), new MCvScalar(255));
// Create and initialize histogram
DenseHistogram hist = new DenseHistogram(256, new RangeF(0.0f, 255.0f));
// Histogram Computing
hist.Calculate<Byte>(new Image<Gray, byte>[] { img }, true, null);
Currently I am using code that takes one line segment from the image and finds the most common color on it, but that is not the right way to do it.
The code currently in use is as follows:
Image<Bgr, byte> image = new Image<Bgr, byte>(temp);
int height = temp.Height / 2;
Dictionary<Bgr, int> colors = new Dictionary<Bgr, int>();
for (int i = 0; i < (image.Width); i++)
{
    Bgr pixel = new Bgr();
    pixel = image[height, i];
    if (colors.ContainsKey(pixel))
        colors[pixel] += 1;
    else
        colors.Add(pixel, 1);
}
Bgr result = colors.FirstOrDefault(x => x.Value == colors.Values.Max()).Key;
Please help me if anyone knows how to do it. Take this image as input ==>
Emgu.CV's DenseHistogram exposes the method MinMax(), which finds the maximum and minimum bins of the histogram.
So after computing your histogram as in your first code snippet:
// Create a grayscale image
Image<Gray, Byte> img = new Image<Gray, byte>(bmp);
// Fill image with random values
img.SetRandUniform(new MCvScalar(), new MCvScalar(255));
// Create and initialize histogram
DenseHistogram hist = new DenseHistogram(256, new RangeF(0.0f, 255.0f));
// Histogram Computing
hist.Calculate<Byte>(new Image<Gray, byte>[] { img }, true, null);
...find the peak of the histogram with this method:
float minValue, maxValue;
Point[] minLocation;
Point[] maxLocation;
hist.MinMax(out minValue, out maxValue, out minLocation, out maxLocation);
// This is the value you are looking for: the bin with the highest peak in the
// histogram is also the dominant gray level of your image.
var mainColor = maxLocation[0].Y;
I found a code snippet on Stack Overflow which does the job.
The code goes like this:
int BlueHist;
int GreenHist;
int RedHist;
Image<Bgr, Byte> img = new Image<Bgr, byte>(bmp);
DenseHistogram Histo = new DenseHistogram(256, new RangeF(0.0f, 255.0f));
Image<Gray, Byte> img2Blue = img[0];
Image<Gray, Byte> img2Green = img[1];
Image<Gray, Byte> img2Red = img[2];
Histo.Calculate(new Image<Gray, Byte>[] { img2Blue }, true, null);
double[] minV, maxV;
Point[] minL, maxL;
Histo.MinMax(out minV, out maxV, out minL, out maxL);
BlueHist = maxL[0].Y;
Histo.Clear();
Histo.Calculate(new Image<Gray, Byte>[] { img2Green }, true, null);
Histo.MinMax(out minV, out maxV, out minL, out maxL);
GreenHist = maxL[0].Y;
Histo.Clear();
Histo.Calculate(new Image<Gray, Byte>[] { img2Red }, true, null);
Histo.MinMax(out minV, out maxV, out minL, out maxL);
RedHist = maxL[0].Y;
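A caveat on this approach: the three per-channel peaks are marginal statistics, so the triple (BlueHist, GreenHist, RedHist) is not guaranteed to be the single most frequent BGR color in the image. If the exact dominant color is needed, the dictionary idea from the question can simply be extended from one row to every pixel. A minimal sketch, assuming image is the Image<Bgr, byte> from the question:
//tally every pixel; Data is indexed [row, column, channel] in B,G,R order
var counts = new Dictionary<int, int>();
byte[,,] data = image.Data;
for (int y = 0; y < image.Rows; y++)
    for (int x = 0; x < image.Cols; x++)
    {
        //pack B, G, R into one int so the dictionary key is a cheap value type
        int key = (data[y, x, 0] << 16) | (data[y, x, 1] << 8) | data[y, x, 2];
        counts[key] = counts.TryGetValue(key, out int c) ? c + 1 : 1;
    }
int best = counts.OrderByDescending(kv => kv.Value).First().Key;
Bgr mainColor = new Bgr(best >> 16, (best >> 8) & 0xFF, best & 0xFF);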
