I'm trying to set up a basic image template matching solution that leverages SIFT.
In my example below, it works without issue:
Source:
Template:
Code:
public static async Task Main(string[] args)
{
    // Load the source and template images
    Mat source = CvInvoke.Imread(@"C:\Users\Downloads\why.png", Emgu.CV.CvEnum.ImreadModes.Unchanged);
    Mat template = CvInvoke.Imread(@"C:\Users\Downloads\why_template.png", Emgu.CV.CvEnum.ImreadModes.Unchanged);

    // Create the SIFT detector
    SIFT detector = new SIFT();

    // Detect keypoints and compute descriptors in the source and template images
    VectorOfKeyPoint sourceKeyPoints = new VectorOfKeyPoint();
    Matrix<float> sourceDescriptors = new Matrix<float>(source.Rows, 128);
    detector.DetectAndCompute(source, null, sourceKeyPoints, sourceDescriptors, false);

    VectorOfKeyPoint templateKeyPoints = new VectorOfKeyPoint();
    Matrix<float> templateDescriptors = new Matrix<float>(template.Rows, 128);
    detector.DetectAndCompute(template, null, templateKeyPoints, templateDescriptors, false);

    // Use a brute-force matcher to find matches between the source and template keypoints
    BFMatcher matcher = new BFMatcher(DistanceType.L2);
    VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
    matcher.KnnMatch(sourceDescriptors, templateDescriptors, matches, 2, null);

    // Filter matches using Lowe's ratio test
    VectorOfDMatch goodMatches = new VectorOfDMatch();
    for (int i = 0; i < matches.Size; i++)
    {
        try
        {
            if (matches[i][0].Distance < 0.2 * matches[i][1].Distance)
            {
                goodMatches.Push(new MDMatch[] { matches[i][0] });
            }
        }
        catch (Exception e)
        {
            Console.WriteLine(e);
            continue;
        }
    }

    Mat result = new Mat();
    Features2DToolbox.DrawMatches(template, templateKeyPoints, source, sourceKeyPoints, goodMatches, result, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), null, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
    CvInvoke.Imshow("Result", result);
    CvInvoke.WaitKey();
}
Result:
When I change my source image and template to the following:
Source:
Template:
The following error is thrown:
System.Runtime.InteropServices.SEHException (0x80004005): External component has thrown an exception.
at Emgu.CV.Util.VectorOfDMatch.VectorOfDMatchGetItem(IntPtr vec, Int32 index, MDMatch& element)
at Emgu.CV.Util.VectorOfDMatch.get_Item(Int32 index)
matches.Size has a value of 318, but the individual elements appear to have no value.
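One likely explanation is that KnnMatch can return inner vectors holding fewer than two entries, in which case even indexing matches[i][0] throws from native code. A defensive version of the ratio-test loop (a minimal sketch against the same Emgu.CV.Util vector API) would check the inner size before indexing:

VectorOfDMatch goodMatches = new VectorOfDMatch();
for (int i = 0; i < matches.Size; i++)
{
    // Guard against inner vectors that hold fewer than two neighbours;
    // indexing such a vector throws the SEHException seen above.
    if (matches[i].Size < 2)
        continue;

    if (matches[i][0].Distance < 0.2 * matches[i][1].Distance)
    {
        goodMatches.Push(new MDMatch[] { matches[i][0] });
    }
}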
Here is the code I use to save an image to a .tiff file:
var encoder = new TiffBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create((myImage.Source as DrawingImage).ToBitmapSource()));
using FileStream stream = new FileStream(filePath, FileMode.Create);
encoder.Save(stream);
Here is an extension method for DrawingImage, which converts a DrawingImage to a BitmapSource:
public static class DrawingImageExtension
{
    public static BitmapSource ToBitmapSource(this DrawingImage source)
    {
        var drawingVisual = new DrawingVisual();
        DrawingContext drawingContext = drawingVisual.RenderOpen();
        drawingContext.DrawImage(source, new Rect(new Point(0, 0), new Size(source.Width, source.Height)));
        drawingContext.Close();
        var bmp = new RenderTargetBitmap((int)source.Width, (int)source.Height, 96, 96, PixelFormats.Pbgra32);
        bmp.Render(drawingVisual);
        return bmp;
    }
}
It works, but when I try to save a large image, part of the text stops being displayed. I add the text to the picture as follows:
private GlyphRunDrawing GetGlyphVertex(string str)
{
    var emSize = 10;
    var glyphIndices = new ushort[str.Length];
    var advanceWidths = new double[str.Length];
    double strWidth = 0, strHeight = 0;
    for (int i = 0; i < str.Length; i++)
    {
        var glyphIndex = font.CharacterToGlyphMap[str[i]];
        glyphIndices[i] = glyphIndex;
        advanceWidths[i] = font.AdvanceWidths[glyphIndex] * emSize;
        strWidth += advanceWidths[i];
        if (i == 0) strHeight = font.AdvanceHeights[glyphIndex] * emSize;
    }
    var baselineOrigin = new Point(...); // It doesn't matter
    var glyphRun = new GlyphRun(
        font, 0, false, emSize,
        glyphIndices, baselineOrigin, advanceWidths,
        null, null, null, null, null, null);
    return new GlyphRunDrawing(textColor, glyphRun);
}
Then I just add this GlyphRunDrawing to a common DrawingGroup.
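In outline, that assembly step looks roughly like this (a sketch; drawingGroup and myImage stand in for the actual fields):

var drawingGroup = new DrawingGroup();
drawingGroup.Children.Add(GetGlyphVertex("some label"));
myImage.Source = new DrawingImage(drawingGroup);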
Here is the part of the large image where the problem arises:
From some point until the end of the image, the text stops being displayed. With smaller pictures, everything works correctly. What could be the problem?
I am trying to obtain the largest bounding rectangle from a Canny edge image. I think the detection works, but I don't know how to visualize the rectangle. In the end I want the top-left and bottom-right coordinates of the rectangle, though I think that once I can pass the image into my method, I can also extract those values from it. I am new to this, so I am learning by doing, reading, and asking. If I just need to look into some theory, then please enlighten me! Willing to learn.
When I run this code my console output is:
Emgu.CV.Util.VectorOfPoint
The code is this:
private void CannyFrame(object sender, EventArgs e)
{
    if (_capture != null && _capture.Ptr != IntPtr.Zero)
    {
        _capture.Retrieve(imgOriginal, 0);
        CvInvoke.CvtColor(imgOriginal, imgHSV, ColorConversion.Bgr2Hsv); // Convert the captured frame from BGR to HSV
        CvInvoke.InRange(imgHSV, new ScalarArray(new MCvScalar(iLowH, iLowS, iLowV)), new ScalarArray(new MCvScalar(iHighH, iHighS, iHighV)), imgThres);
        CvInvoke.Canny(imgThres, imgCanny, 100, 200, 3);
        Form1.FindLargestContour(imgCanny, imgContour);
        pictureBox3.Image = imgContour.Bitmap;
    }
}
public static VectorOfPoint FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
{
    int largest_contour_index = 0;
    double largest_area = 0;
    VectorOfPoint largestContour;
    using (Mat hierachy = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(imgCanny, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
        for (int i = 0; i < contours.Size; i++)
        {
            MCvScalar color = new MCvScalar(0, 0, 255);
            double a = CvInvoke.ContourArea(contours[i], false); // Find the area of the contour
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = i; // Store the index of the largest contour
            }
            CvInvoke.DrawContours(imgContour, contours, largest_contour_index, new MCvScalar(255, 0, 0));
        }
        CvInvoke.DrawContours(imgContour, contours, largest_contour_index, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierachy);
        largestContour = new VectorOfPoint(contours[largest_contour_index].ToArray());
    }
    Console.WriteLine(largestContour);
    return largestContour;
}
I have worked around my problem by changing
public static VectorOfPoint FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
into
private static void FindLargestContour(IInputOutputArray imgCanny, IInputOutputArray imgContour)
In the void method I deleted
return largestContour;
I am not sure this is the way to go, but I got a little further with my code this way.
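For the original goal of visualizing the rectangle and reading off its corners, a minimal sketch built on the returned contour (my suggestion, using CvInvoke.BoundingRectangle; not part of the original code) could be:

// Upright bounding rectangle of the largest contour
Rectangle rect = CvInvoke.BoundingRectangle(largestContour);

// Draw it on the contour image, green, 2 px thick
CvInvoke.Rectangle(imgContour, rect, new MCvScalar(0, 255, 0), 2);

// Top-left and bottom-right corner coordinates
Point topLeft = new Point(rect.X, rect.Y);
Point bottomRight = new Point(rect.Right, rect.Bottom);
Console.WriteLine("Top-left: {0}, Bottom-right: {1}", topLeft, bottomRight);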
I am trying to capture a desktop screenshot using SharpDX. My application is able to capture the screenshot, but the labels in Windows Explorer are missing from it.
I tried two solutions, but nothing changed. I also looked for any relevant information in the documentation, again without luck.
Here is my code:
public void SCR()
{
    uint numAdapter = 0; // # of graphics card adapter
    uint numOutput = 0;  // # of output device (i.e. monitor)

    // create device and factory
    var device = new SharpDX.Direct3D11.Device(SharpDX.Direct3D.DriverType.Hardware);
    var factory = new Factory1();

    // creating CPU-accessible texture resource
    var texdes = new SharpDX.Direct3D11.Texture2DDescription
    {
        CpuAccessFlags = SharpDX.Direct3D11.CpuAccessFlags.Read,
        BindFlags = SharpDX.Direct3D11.BindFlags.None,
        Format = Format.B8G8R8A8_UNorm,
        Height = factory.Adapters1[numAdapter].Outputs[numOutput].Description.DesktopBounds.Height,
        Width = factory.Adapters1[numAdapter].Outputs[numOutput].Description.DesktopBounds.Width,
        OptionFlags = SharpDX.Direct3D11.ResourceOptionFlags.None,
        MipLevels = 1,
        ArraySize = 1
    };
    texdes.SampleDescription.Count = 1;
    texdes.SampleDescription.Quality = 0;
    texdes.Usage = SharpDX.Direct3D11.ResourceUsage.Staging;
    var screenTexture = new SharpDX.Direct3D11.Texture2D(device, texdes);

    // duplicate output stuff
    var output = new Output1(factory.Adapters1[numAdapter].Outputs[numOutput].NativePointer);
    var duplicatedOutput = output.DuplicateOutput(device);
    SharpDX.DXGI.Resource screenResource = null;
    SharpDX.DataStream dataStream;
    Surface screenSurface;

    var i = 0;
    var miliseconds = 2500000;
    while (true)
    {
        i++;

        // try to get duplicated frame within given time
        try
        {
            SharpDX.DXGI.OutputDuplicateFrameInformation duplicateFrameInformation;
            duplicatedOutput.AcquireNextFrame(miliseconds, out duplicateFrameInformation, out screenResource);
        }
        catch (SharpDX.SharpDXException e)
        {
            if (e.ResultCode.Code == SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code)
            {
                // this has not been a successful capture
                // thanks @Randy
                // keep retrying
                continue;
            }
            else
            {
                throw;
            }
        }

        device.ImmediateContext.CopyResource(screenResource.QueryInterface<SharpDX.Direct3D11.Resource>(), screenTexture);
        screenSurface = screenTexture.QueryInterface<Surface>();
        // screenSurface.Map(SharpDX.DXGI.MapFlags.Read, out dataStream);
        int width = output.Description.DesktopBounds.Width;
        int height = output.Description.DesktopBounds.Height;
        var boundsRect = new System.Drawing.Rectangle(0, 0, width, height);
        var mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, MapMode.Read, SharpDX.Direct3D11.MapFlags.None);

        using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppArgb))
        {
            // Copy pixels from screen capture Texture to GDI bitmap
            var bitmapData = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
            var sourcePtr = mapSource.DataPointer;
            var destinationPtr = bitmapData.Scan0;
            for (int y = 0; y < height; y++)
            {
                // Copy a single line
                Utilities.CopyMemory(destinationPtr, sourcePtr, width * 4);

                // Advance pointers
                sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
                destinationPtr = IntPtr.Add(destinationPtr, bitmapData.Stride);
            }

            // Release source and dest locks
            bitmap.UnlockBits(bitmapData);
            device.ImmediateContext.UnmapSubresource(screenTexture, 0);
            bitmap.Save(string.Format(@"d:\scr\{0}.png", i));
        }

        // var image = FromByte(ToByte(dataStream));
        // var image = getImageFromDXStream(1920, 1200, dataStream);
        // image.Save(string.Format(@"d:\scr\{0}.png", i));
        // dataStream.Close();
        // screenSurface.Unmap();
        screenSurface.Dispose();
        screenResource.Dispose();
        duplicatedOutput.ReleaseFrame();
    }
}
After a few hours of research and googling, I found a working solution. Change:
PixelFormat.Format32bppArgb
to:
PixelFormat.Format32bppRgb
This presumably works because the alpha channel of the duplicated desktop surface is not meaningful for display content: saved as 32bppArgb, low-alpha pixels such as the label text become transparent in the PNG, whereas 32bppRgb simply ignores the alpha byte.
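Applied to the capture loop above, the only change is the pixel format passed to the Bitmap constructor:

using (var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppRgb))
{
    // ... copy rows and save exactly as before ...
}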
I'm trying to create a SharpDX.Direct3D11.Texture2D from in-memory data but always get a SharpDXException (HRESULT: 0x80070057, "The parameter is incorrect."). I have used a Texture1D for this purpose before, which can be created without a problem.
I have reduced the code to this sample, which still produces the exception:
using (var device = new Device(DriverType.Hardware, DeviceCreationFlags.Debug)) {
    // empty stream sufficient for example
    var stream = new DataStream(16 * 4, true, true);

    var description1D = new Texture1DDescription() {
        Width = 16,
        ArraySize = 1,
        Format = Format.R8G8B8A8_UNorm,
        MipLevels = 1,
    };
    using (var texture1D = new Texture1D(device, description1D, new[] { new DataBox(stream.DataPointer) })) {
        // no exception on Texture1D
    }

    var description2D = new Texture2DDescription() {
        Width = 8,
        Height = 2,
        ArraySize = 1,
        MipLevels = 1,
        Format = Format.R8G8B8A8_UNorm,
        SampleDescription = new SampleDescription(1, 0),
    };
    using (var texture2D = new Texture2D(device, description2D, new[] { new DataBox(stream.DataPointer) })) {
        // HRESULT: [0x80070057], Module: [Unknown], ApiCode: [Unknown/Unknown], Message: The parameter is incorrect.
    }
}
Creating the texture without passing the data works fine. Can someone tell me how to fix the Texture2D initialization?
You need to pass the row pitch (stride) of a 2D texture into the DataBox; unlike a 1D texture, Direct3D cannot infer where each row of the initial data begins. Something like:
new DataBox(stream.DataPointer, 8 * 4)
Or, in a more generic manner:
new DataBox(stream.DataPointer, description2D.Width * (int)FormatHelper.SizeOfInBytes(description2D.Format))
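Plugged back into the sample, the corrected Texture2D creation would be:

using (var texture2D = new Texture2D(device, description2D,
    new[] { new DataBox(stream.DataPointer, 8 * 4) })) {
    // no exception: the DataBox now carries the row pitch
    // (Width * bytes per pixel = 8 * 4 = 32 bytes per row)
}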
I am new to Emgu CV. I would like to use SURF to detect more than one pattern through a webcam, like in this video. For now, as a starting point, I am trying to get it working with just one pattern.
I examined Emgu CV's SURF example. When I try to merge that code into the camera capture example, an error occurs at run time. I searched further but did not find any code example.
So, can you suggest a code snippet or a well-explained tutorial?
Thank you very much in advance.
The code I am working on is below:
...........................................
FrameRaw = capture.QueryFrame();
CamImageBox.Image = FrameRaw;
Run(FrameRaw);
...........................................
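For context, a sketch of how this fragment is usually wired up in the old Emgu capture examples (assuming the Emgu 2.x Capture class; names as in the snippet above):

Capture capture = new Capture(); // default webcam
Application.Idle += delegate
{
    Image<Bgr, byte> FrameRaw = capture.QueryFrame();
    CamImageBox.Image = FrameRaw;
    Run(FrameRaw);
};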
private void Run(Image<Bgr, byte> TempImage)
{
    Image<Gray, Byte> modelImage = new Image<Gray, byte>("sample.jpg");
    Image<Gray, Byte> observedImage = TempImage.Convert<Gray, Byte>();
    // Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

    Stopwatch watch;
    HomographyMatrix homography = null;

    SURFDetector surfCPU = new SURFDetector(500, false);

    VectorOfKeyPoint modelKeyPoints;
    VectorOfKeyPoint observedKeyPoints;
    Matrix<int> indices;
    Matrix<float> dist;
    Matrix<byte> mask;

    if (GpuInvoke.HasCuda)
    {
        GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
        using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
        // extract features from the object image
        using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
        using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
        using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
        {
            modelKeyPoints = new VectorOfKeyPoint();
            surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
            watch = Stopwatch.StartNew();

            // extract features from the observed image
            using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
            using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
            using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
            using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
            using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
            {
                observedKeyPoints = new VectorOfKeyPoint();
                surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                indices = new Matrix<int>(gpuMatchIndices.Size);
                dist = new Matrix<float>(indices.Size);
                gpuMatchIndices.Download(indices);
                gpuMatchDist.Download(dist);

                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                }

                watch.Stop();
            }
        }
    }
    else
    {
        // extract features from the object image
        modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
        //MKeyPoint[] kpts = modelKeyPoints.ToArray();
        Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

        watch = Stopwatch.StartNew();

        // extract features from the observed image
        observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
        Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

        BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
        matcher.Add(modelDescriptors);

        int k = 2;
        indices = new Matrix<int>(observedDescriptors.Rows, k);
        dist = new Matrix<float>(observedDescriptors.Rows, k);
        matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

        mask = new Matrix<byte>(dist.Rows, 1);
        mask.SetValue(255);
        Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

        int nonZeroCount = CvInvoke.cvCountNonZero(mask);
        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
            if (nonZeroCount >= 4)
                homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
        }

        watch.Stop();
    }

    // Draw the matched keypoints
    Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
        indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

    #region draw the projected region on the image
    if (homography != null)
    {
        // draw a rectangle along the projected model
        Rectangle rect = modelImage.ROI;
        PointF[] pts = new PointF[] {
            new PointF(rect.Left, rect.Bottom),
            new PointF(rect.Right, rect.Bottom),
            new PointF(rect.Right, rect.Top),
            new PointF(rect.Left, rect.Top)};
        homography.ProjectPoints(pts);
        result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
    }
    #endregion

    // ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
}
I found the SURF tutorial you used, but I don't see why it should cause an error. Have you been able to execute the tutorial code by itself, without the GPU acceleration complication?
Moreover, what error occurred?