Memory Leak Issue. Eye-tracking in Unity with OpenCVSharp - c#

I've been working on this project for a few months now, where I'm trying to integrate eye-tracking into Unity using OpenCVSharp. I've managed to get everything working, including the actual tracking of the pupil etc., however I've got a memory leak. Basically, after 20-30 seconds of the program running it freezes, and the console reports an error saying "Unable to allocate (insert number here) bits". Watching the memory usage while the program runs, you can see it climb steadily until it maxes out and the program crashes.
Now I've spent quite a while trying to fix the issue, and read a lot of help posts about releasing images/storage etc. correctly. Despite doing this, it doesn't appear to be releasing them correctly. I tried forcing the garbage collector to reclaim the memory, but that didn't seem to work either. Am I just doing something fundamentally wrong with the images and how I reclaim them? Or is creating new images each frame (even though I'm releasing them) causing the problem?
Any help would be greatly appreciated. Here's the code below; you can ignore a lot of the stuff within the Update function, as it's to do with the actual tracking section and calibration. I realise the code is pretty messy, sorry about that! The main section to worry about is EyeDetection().
using UnityEngine;
using System.Collections;
using System;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.Blob;
//using System.Xml;
//using System.Threading;
//using AForge;
//using OpenCvSharp.Extensions;
//using System.Windows.Media;
//using System.Windows.Media.Imaging;
public class CaptureScript2 : MonoBehaviour
{
//public GameObject planeObj;
public WebCamTexture webcamTexture; //Texture retrieved from the webcam
//public Texture2D texImage; //Texture to apply to plane
public string deviceName;
private int devId = 1;
private int imWidth = 800; //camera width
private int imHeight = 600; //camera height
private string errorMsg = "No errors found!";
private static IplImage camImage; //Ipl image of the converted webcam texture
//private static IplImage yuv;
//private static IplImage dst;
private CvCapture cap; //Current camera capture
//private IplImage eyeLeft;
//private IplImage eyeRight;
//private IplImage eyeLeftFinal;
//private IplImage eyeRightFinal;
private double leftEyeX;
private double leftEyeY;
private double rightEyeX;
private double rightEyeY;
private int calibState;
private double LTRCPx;
private double LTLCPx;
private double LBLCPy;
private double LTLCPy;
private double RTRCPx;
private double RTLCPx;
private double RBLCPy;
private double RTLCPy;
private double gazeWidth;
private double gazeHeight;
private double gazeScaleX;
private double gazeScaleY;
public static CvMemStorage storageFace;
public static CvMemStorage storage;
public static double gazePosX;
public static double gazePosY;
private bool printed = true;
//private CvRect r;
//private IplImage smallImg;
CvColor[] colors = new CvColor[]
{
new CvColor(0,0,255),
new CvColor(0,128,255),
new CvColor(0,255,255),
new CvColor(0,255,0),
new CvColor(255,128,0),
new CvColor(255,255,0),
new CvColor(255,0,0),
new CvColor(255,0,255),
};
//scale for small image
const double Scale = 1.25;
const double scaleEye = 10.0;
const double ScaleFactor = 2.5;
//must show 2 eyes on the screen
const int MinNeighbors = 2;
const int MinNeighborsFace = 1;
// Use this for initialization
void Start ()
{
//Webcam initialisation
WebCamDevice[] devices = WebCamTexture.devices;
Debug.Log ("num:" + devices.Length);
for (int i=0; i<devices.Length; i++)
{
print (devices [i].name);
if (devices [i].name.CompareTo (deviceName) == 1)
{
devId = i;
}
}
if (devId >= 0)
{
//mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
}
//create capture from current device
cap = Cv.CreateCameraCapture(devId);
//set properties of the capture
Cv.SetCaptureProperty(cap, CaptureProperty.FrameWidth, imWidth);
Cv.SetCaptureProperty(cap, CaptureProperty.FrameHeight, imHeight);
//create window to display capture
//Cv.NamedWindow("Eye tracking", WindowMode.AutoSize);
Cv.NamedWindow ("EyeLeft", WindowMode.AutoSize);
Cv.NamedWindow ("EyeRight", WindowMode.AutoSize);
Cv.NamedWindow ("Face", WindowMode.AutoSize);
calibState = 1;
}
void Update ()
{
if(Input.GetKeyDown(KeyCode.Space) && calibState < 3)
{
calibState++;
}
if(Input.GetMouseButtonDown(0) && calibState < 4)
{
printed = false;
calibState++;
Cv.DestroyAllWindows();
Cv.ReleaseCapture(cap);
cap = Cv.CreateCameraCapture(devId);
}
//if device is connected
if (devId >= 0)
{
//cap = Cv.CreateCameraCapture(devId);
//Cv.Release
//retrieve the current frame from camera
camImage = Cv.QueryFrame(cap);
//detect eyes and apply circles
//
EyeDetection();
Cv.ReleaseImage(camImage);
//PupilTracking();
switch(calibState)
{
case 1:
LTRCPx = leftEyeX;
RTRCPx = rightEyeX;
break;
case 2:
LTLCPx = leftEyeX;
LTLCPy = leftEyeY;
RTLCPx = rightEyeX;
RTLCPy = rightEyeY;
break;
case 3:
LBLCPy = leftEyeY;// + rightEyeY) /2 ;
RBLCPy = rightEyeY;
break;
case 4:
//gazeWidth = (((LTRCPx - LTLCPx) + (RTRCPx - RTLCPx)) / 2) * -1;
//gazeHeight = ((LBLCPy - LTLCPy) + (RBLCPy - RTLCPy)) /2;
gazeWidth = LTLCPx -LTRCPx;
gazeHeight = LBLCPy - LTLCPy;
gazeScaleX = (Screen.width/gazeWidth);
gazeScaleY = Screen.height/gazeHeight;
gazePosX = gazeScaleX *(leftEyeX - LTRCPx);
gazePosY = gazeScaleY *(leftEyeY - LTLCPy);
break;
}
//Cv.ReleaseCapture(cap);
}
else
{
Debug.Log ("Can't find camera!");
}
//print (calibState);
if(printed == false)
{
print ("Gaze pos x = " + gazePosX);
print ("Gaze pos Y = " + gazePosY);
print ("Scale x = " + gazeScaleX);
print ("Scale y = " + gazeScaleY);
print ("Gaze width = " + gazeWidth);
print ("Gaze Height = " + gazeHeight);
print ("left eye x = " + leftEyeX);
print ("left eye Y = " + leftEyeY);
print ("calib state = " + calibState);
printed = true;
}
//Cv.ShowImage("Eye tracking", mainImage);
//Cv.ShowImage ("EyeLeft", grayEyeLeft);
//Cv.ShowImage ("EyeRight", grayEyeRight);
}
void EyeDetection()
{
IplImage mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
IplImage smallImg = new IplImage(mainImage.Width, mainImage.Height ,BitDepth.U8, 1);
Cv.Resize (camImage, mainImage, Interpolation.Linear);
IplImage gray = new IplImage(mainImage.Size, BitDepth.U8, 1);
Cv.CvtColor (mainImage, gray, ColorConversion.BgrToGray);
Cv.Resize(gray, smallImg, Interpolation.Linear);
Cv.EqualizeHist(smallImg, smallImg);
Cv.ReleaseImage (gray);
//IplImage hack = Cv.LoadImage("\\Users\\User\\Desktop\\Honours Projects\\Project10\\Project\\Assets\\bug.jpeg");
//Cv.Erode (hack, hack);
//Cv.ReleaseImage (hack);
//uint sizeStore = 2877212;
CvHaarClassifierCascade cascadeFace = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");
CvMemStorage storageFace = new CvMemStorage();
storageFace.Clear ();
CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, storageFace, ScaleFactor, MinNeighborsFace, 0, new CvSize(30,30));
for(int j = 0; j < faces.Total; j++)
{
CvRect face = faces[j].Value.Rect;
CvHaarClassifierCascade cascadeEye = CvHaarClassifierCascade.FromFile ("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
IplImage faceImg = new IplImage(face.Width, face.Height, BitDepth.U8, 1);
IplImage faceImgColour = new IplImage(face.Width, face.Height, BitDepth.U8, 3);
CvMemStorage storage = new CvMemStorage();
storage.Clear ();
Cv.SetImageROI(smallImg, face);
Cv.Copy (smallImg, faceImg);
Cv.ResetImageROI(smallImg);
Cv.SetImageROI(mainImage, face);
Cv.Copy (mainImage, faceImgColour);
Cv.ResetImageROI(mainImage);
Cv.ShowImage ("Face", faceImgColour);
CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(faceImg, cascadeEye, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
for(int i = 0; i < eyes.Total; i++)
{
CvRect r = eyes[i].Value.Rect;
Cv.SetImageROI(faceImgColour, r);
if(i == 1)
{
IplImage eyeLeft = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
Cv.Copy(faceImgColour, eyeLeft);
IplImage yuv = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
IplImage dst = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
IplImage grayEyeLeft = new IplImage(eyeLeft.Size, BitDepth.U8, 1);
IplImage eyeLeftFinal = new IplImage(Cv.Round(grayEyeLeft.Width * scaleEye), Cv.Round(grayEyeLeft.Height * scaleEye), BitDepth.U8, 1);
Cv.CvtColor(eyeLeft, yuv, ColorConversion.BgrToCrCb);
Cv.Not(yuv, dst);
Cv.CvtColor(dst,eyeLeft,ColorConversion.CrCbToBgr);
Cv.CvtColor(eyeLeft, grayEyeLeft, ColorConversion.BgrToGray);
Cv.Resize (grayEyeLeft, eyeLeftFinal, Interpolation.Linear);
Cv.Threshold(eyeLeftFinal, eyeLeftFinal, 230, 230, ThresholdType.Binary);
CvBlobs b1 = new CvBlobs(eyeLeftFinal);
if(b1.Count > 0)
{
leftEyeX = b1.LargestBlob().Centroid.X;
leftEyeY = b1.LargestBlob().Centroid.Y;
}
Cv.ShowImage ("EyeLeft", eyeLeftFinal);
Cv.ReleaseImage (yuv);
Cv.ReleaseImage (dst);
Cv.ReleaseImage (grayEyeLeft);
Cv.ReleaseImage (eyeLeftFinal);
b1.Clear();
Cv.ReleaseImage (eyeLeft);
}
if(i == 0)
{
IplImage eyeRight = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
Cv.Copy(faceImgColour, eyeRight);
IplImage yuv2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
IplImage dst2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
IplImage grayEyeRight = new IplImage(eyeRight.Size, BitDepth.U8, 1);
IplImage eyeRightFinal = new IplImage(Cv.Round(grayEyeRight.Width * scaleEye), Cv.Round(grayEyeRight.Height * scaleEye), BitDepth.U8, 1);
Cv.CvtColor(eyeRight, yuv2, ColorConversion.BgrToCrCb);
Cv.Not(yuv2, dst2);
Cv.CvtColor(dst2,eyeRight,ColorConversion.CrCbToBgr);
Cv.CvtColor(eyeRight, grayEyeRight, ColorConversion.BgrToGray);
Cv.Resize (grayEyeRight, eyeRightFinal, Interpolation.Linear);
Cv.Threshold(eyeRightFinal, eyeRightFinal, 230, 230, ThresholdType.Binary);
CvBlobs b2 = new CvBlobs(eyeRightFinal);
if(b2.Count > 0)
{
rightEyeX = b2.LargestBlob().Centroid.X;
rightEyeY = b2.LargestBlob().Centroid.Y;
}
Cv.ShowImage ("EyeRight", eyeRightFinal);
Cv.ReleaseImage (yuv2);
Cv.ReleaseImage (dst2);
Cv.ReleaseImage (grayEyeRight);
Cv.ReleaseImage (eyeRightFinal);
b2.Clear ();
Cv.ReleaseImage (eyeRight);
}
Cv.ResetImageROI(faceImgColour);
}
//Cv.ShowImage("Eye tracking", mainImage);
Cv.ReleaseImage (faceImg);
Cv.ReleaseImage (faceImgColour);
Cv.ReleaseMemStorage(storage);
Cv.ReleaseHaarClassifierCascade(cascadeEye);
}
Cv.ReleaseMemStorage(storageFace);
Cv.ReleaseHaarClassifierCascade(cascadeFace);
//PupilTracking ();
Cv.ReleaseImage(smallImg);
Cv.ReleaseImage (mainImage);
GC.Collect();
}
void OnGUI ()
{
GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
}
void OnDestroy()
{
Cv.DestroyAllWindows();
Cv.ReleaseCapture(cap);
}
}

I am not familiar with OpenCV, but as a general rule:
I would limit instantiation in the Update loop, e.g. new CvMemStorage().
Don't load data in the Update loop either: CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml") should be loaded once in Start() and assigned to a class variable.
Allocate on start and release only when needed.
I find that in most situations there's plenty of RAM to go around, so I allocate in Start() whatever is going to be used over and over, especially 60 times per second in the Update() loop.
But loading XML data and allocating and releasing variables like storage or cascadeEye is bound to create issues when the app is trying to do so 60 times a second.
Creating and destroying objects is very, very expensive, so do it wisely and sparingly, especially when dealing with complex data structures like OpenCV objects, bitmaps or loaders.
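To make that concrete, here is a rough sketch of how the posted script could be rearranged (paths and field names are placeholders, and I haven't run this against OpenCvSharp myself, so treat it as the shape of the change rather than drop-in code):

// Long-lived native objects: created once in Start(), reused every frame.
private CvHaarClassifierCascade cascadeFace;
private CvHaarClassifierCascade cascadeEye;
private CvMemStorage detectStorage;

void Start()
{
    // ... existing webcam/capture/window setup ...
    cascadeFace = CvHaarClassifierCascade.FromFile("path\\to\\haarcascade_frontalface_alt2.xml");
    cascadeEye = CvHaarClassifierCascade.FromFile("path\\to\\haarcascade_eye.xml");
    detectStorage = new CvMemStorage();
}

void EyeDetection()
{
    // Reuse the same native storage every frame instead of allocating a new one.
    detectStorage.Clear();
    // ... build smallImg from the current frame as in the original EyeDetection() ...
    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, detectStorage,
        ScaleFactor, MinNeighborsFace, 0, new CvSize(30, 30));
    // ... per-face / per-eye work, reusing cascadeEye and detectStorage ...
}

void OnDestroy()
{
    // Release the long-lived native objects exactly once, on shutdown.
    if (detectStorage != null) Cv.ReleaseMemStorage(detectStorage);
    if (cascadeEye != null) Cv.ReleaseHaarClassifierCascade(cascadeEye);
    if (cascadeFace != null) Cv.ReleaseHaarClassifierCascade(cascadeFace);
    Cv.DestroyAllWindows();
    Cv.ReleaseCapture(cap);
}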
hth.

Related

How to fix Emgu VideoCapture / ImageViewer memory leak?

I'm using Emgu CV to detect an object using a HAAR cascade, then I am sending the bounding box of the HAAR cascade to a CSRT motion tracker. Then I compute the centroid of the CSRT motion tracker and have a pan/tilt telescope mount that will move the camera until the centroid of the tracker and image are the same. In the code below I am using an .avi video file but I will eventually be using this with a live video camera.
I am using ImageViewer to display both the HAAR cascade and CSRT motion tracker at the same time. The problem is the CSRT motion tracker viewer is using all my RAM. If I comment out the viewer.ShowDialog(); line then there is no memory leak, but I also can't see the tracker.
This is on Windows 7 by the way, running Visual Studio 2017, .NET 4.7.3, Emgu 3.4.3.3016.
The HAAR cascade function was also causing a memory leak, but I was able to fix it by calling .Dispose() on the Mat at the end of the function. It didn't help with the CSRT motion tracker function.
public void Tracker()
{
if (!this.detectedBBox.Width.Equals(0))
{
Emgu.CV.UI.ImageViewer viewer = new Emgu.CV.UI.ImageViewer();
Emgu.CV.Tracking.TrackerCSRT myTracker = new Emgu.CV.Tracking.TrackerCSRT();
using (Emgu.CV.VideoCapture capture1 = new Emgu.CV.VideoCapture("c:\\Users\\Windows7\\33a.avi"))
using (Emgu.CV.VideoStab.CaptureFrameSource frameSource = new Emgu.CV.VideoStab.CaptureFrameSource(capture1))
{
Rectangle myRectangle = this.detectedBBox;
Emgu.CV.Mat myFrame = frameSource.NextFrame().Clone();
myTracker.Init(myFrame, myRectangle);
Application.Idle += delegate (object c, EventArgs f)
{
myFrame = frameSource.NextFrame().Clone();
myTracker.Update(myFrame, out myRectangle);
if (myFrame != null)
{
int fXcenter = myFrame.Width / 2;
int fYcenter = myFrame.Height / 2;
int dx;
int dy;
int swidth = myRectangle.Width;
int sheight = myRectangle.Height;
int shalfwidth = swidth / 2;
int shalfheight = sheight / 2;
int sXcentroid = myRectangle.X + shalfwidth;
int sYcentroid = myRectangle.Y + shalfheight;
if (sXcentroid >= fXcenter) { dx = sXcentroid - fXcenter; } else { dx = fXcenter - sXcentroid; }
if (sYcentroid >= fYcenter) { dy = sYcentroid - fYcenter; } else { dy = fXcenter - sYcentroid; }
string caption = "Center point: (" + sXcentroid + "," + sYcentroid + ")";
string caption2 = "Dist from center: (" + dx + "," + dy + ")";
Emgu.CV.CvInvoke.Rectangle(myFrame, myRectangle, new Emgu.CV.Structure.Bgr(Color.Red).MCvScalar, 2);
Emgu.CV.CvInvoke.PutText(myFrame, caption, new System.Drawing.Point(10, 20), Emgu.CV.CvEnum.FontFace.HersheyComplex, .5, new Emgu.CV.Structure.Bgr(0, 255, 0).MCvScalar);
Emgu.CV.CvInvoke.PutText(myFrame, caption2, new System.Drawing.Point(10, 35), Emgu.CV.CvEnum.FontFace.HersheyComplex, .5, new Emgu.CV.Structure.Bgr(0, 255, 0).MCvScalar);
Point start = new Point(fXcenter, fYcenter);
Point end = new Point(sXcentroid, sYcentroid);
Emgu.CV.Structure.LineSegment2D line = new Emgu.CV.Structure.LineSegment2D(start, end);
Emgu.CV.CvInvoke.Line(myFrame, start, end, new Emgu.CV.Structure.Bgr(0, 255, 0).MCvScalar, 2, new Emgu.CV.CvEnum.LineType(), 0);
string caption3 = "Line length: " + line.Length.ToString();
Emgu.CV.CvInvoke.PutText(myFrame, caption3, new System.Drawing.Point(10, 50), Emgu.CV.CvEnum.FontFace.HersheyComplex, .5, new Emgu.CV.Structure.Bgr(0, 255, 0).MCvScalar);
}
viewer.Image = myFrame;
};
viewer.Text = "Tracker";
viewer.ShowDialog();
}
}
}
Everything in the code works except for the memory leak.
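Since the leak seems tied to the frames handed to the viewer, one thing worth trying (a sketch only, not verified against Emgu 3.4.3.3016) is applying the same Dispose() fix that worked for the HAAR function to the per-frame clones inside the Idle handler, so each old Mat is released once the viewer has been pointed at the new one:

Application.Idle += delegate (object c, EventArgs f)
{
    Emgu.CV.Mat previousFrame = myFrame;       // the clone currently shown by the viewer
    myFrame = frameSource.NextFrame().Clone();
    myTracker.Update(myFrame, out myRectangle);

    // ... existing centroid/drawing code on myFrame, unchanged ...

    viewer.Image = myFrame;                    // viewer now references the new frame
    if (previousFrame != null)
    {
        previousFrame.Dispose();               // release the old clone so it doesn't accumulate
    }
};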

Calculate actual velocity using optical flow Lucas-Kanade and EmguCV

How do I calculate the actual speed of all moving objects in a video, using the Lucas-Kanade algorithm to calculate the optical flow?
I need to do this on this video. The camera is fixed in one place (Fig. 1).
I find the key points and track them using the Lucas-Kanade algorithm (Fig. 2).
How can I use this algorithm to compute the actual speed of each car?
Thank you for your answers!
My code:
public class OpticalFlowLK : BaseFilter
{
private Mat prevFrame;
private Mat nextFrame;
private bool prevFrameEmpty = true;
private GFTTDetector gFTTDetector;
private Stopwatch sWatch;
private double time = 0.04;
public OpticalFlowLK()
{
TAG = "[Optical Flow Lucas Kanade]";
gFTTDetector = new GFTTDetector(500);
sWatch = new Stopwatch();
}
protected override Mat ProcessFrame(ref Mat frame)
{
Mat rez = new Mat();
frame.CopyTo(rez);
nextFrame = new Mat();
Mat gray = new Mat();
var tmpImg = gray.ToImage<Gray, Byte>();
CvInvoke.CvtColor(frame, nextFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
if (!prevFrameEmpty)
{
VectorOfKeyPoint prevFeatures = new VectorOfKeyPoint(gFTTDetector.Detect(prevFrame));
//Features2DToolbox.DrawKeypoints(rez, prevFeatures, rez, new Bgr(0, 0, 255));
PointF[] prevPts = new PointF[prevFeatures.Size];
for (int i = 0; i < prevFeatures.Size; i++)
{
prevPts[i] = prevFeatures[i].Point;
}
PointF[] nextPts;
byte[] status;
float[] errors;
sWatch.Start();
CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, prevPts, new Size(20, 20), 1, new MCvTermCriteria(20, 0.03), out nextPts, out status, out errors);
sWatch.Stop();
sWatch.Reset();
prevFrame = nextFrame.Clone();
for (int i = 0; i < status.Length; i++)
{
Point prevPt = new Point((int)prevPts[i].X,(int)nextPts[i].Y);
Point nextPt = new Point((int)nextPts[i].X,(int)nextPts[i].Y);
double lenght = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
if (lenght > 3)
{
CvInvoke.Circle(rez, nextPt, 1, new MCvScalar(0, 255, 0), 2);
}
}
sWatch.Stop();
prevFrameEmpty = false;
}
else if (prevFrameEmpty)
{
prevFrame = nextFrame.Clone();
prevFrameEmpty = false;
}
return rez;
}
protected override bool InitFilter(ref Mat frame)
{
throw new NotImplementedException();
}
}
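The posted ProcessFrame already computes each feature's displacement in pixels; turning that into a real speed also needs the time between frames and a pixels-to-metres scale, which the question doesn't provide. A rough sketch of the conversion, where MetresPerPixel is a hypothetical calibration value for the road plane (not something taken from the code above):

using System;
using System.Drawing;

static class SpeedEstimator
{
    // Hypothetical calibration factor: metres covered by one pixel on the road
    // plane. It depends on camera height/angle and must be measured per scene.
    const double MetresPerPixel = 0.05;

    // Converts a feature's displacement between two consecutive frames into an
    // approximate ground speed, given the time between those frames in seconds.
    public static double SpeedKmPerHour(PointF prev, PointF next, double frameTimeSeconds)
    {
        double dx = next.X - prev.X;
        double dy = next.Y - prev.Y;
        double displacementPx = Math.Sqrt(dx * dx + dy * dy);
        double metresPerSecond = (displacementPx / frameTimeSeconds) * MetresPerPixel;
        return metresPerSecond * 3.6;   // m/s -> km/h
    }
}

// Usage inside the tracking loop, e.g. with the 0.04 s frame time from the class:
// double speed = SpeedEstimator.SpeedKmPerHour(prevPts[i], nextPts[i], 0.04);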

How to detect rectangles in image with emgu cv?

I am trying to detect rectangles with Emgu CV in C#. I was playing around with the following code that I got off the internet. I am new to this, so I hope someone can help me out:
public class ShapeDectection
{
public Image<Bgr, Byte> img;
public PictureBox Picture;
public PictureBox Result;
public double dCannyThres;
private UMat uimage;
private UMat cannyEdges;
private List<Triangle2DF> triangleList;
private List<RotatedRect> boxList;
private Image<Bgr, Byte> triangleRectImage;
public ShapeDectection(PictureBox pic, string filepath, PictureBox results)
{
Picture = pic;
Result = results;
img = new Image<Bgr, Byte>(filepath);
triangleList = new List<Triangle2DF>();
boxList = new List<RotatedRect>();
uimage = new UMat();
cannyEdges = new UMat();
dCannyThres = 180.0;
fnFindTriangleRect();
MessageBox.Show("done");
}
private void fnFindTriangleRect()
{
CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);
UMat pyrDown = new UMat();
CvInvoke.PyrDown(uimage, pyrDown);
CvInvoke.PyrUp(pyrDown, uimage);
triangleRectImage = img.CopyBlank();
using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
int count = contours.Size;
MessageBox.Show("count = " + count);
for (int i = 0; i < count; i++)
{
using (VectorOfPoint contour = contours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contour with area > 250
{
MessageBox.Show("approxContour.Size = " + approxContour.Size);
if (approxContour.Size == 3) //The contour has 3 vertices, is a triangle
{
Point[] pts = approxContour.ToArray();
triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
}
else if (approxContour.Size == 4) // The contour has 4 vertices
{
#region Determine if all the angles in the contours are within [80,100] degree
bool isRectangle = true;
Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double dAngle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
MessageBox.Show("" + dAngle);
if (dAngle < 80 || dAngle > 100)
{
isRectangle = false;
break;
}
}
#endregion
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
foreach (Triangle2DF triangle in triangleList)
{
triangleRectImage.Draw(triangle, new Bgr(Color.DarkBlue), 2);
}
foreach (RotatedRect box in boxList)
triangleRectImage.Draw(box, new Bgr(Color.Red), 2);
Result.Image = triangleRectImage.ToBitmap();
}
}
I am trying to detect the shapes in the following picture, however this is the result: as you can see, no shape was detected by the script, as the number of contours was zero. How can I modify the script so that I can detect these shapes?
I can't seem to find "Canny edge detection"
UMat cannyEdges = new UMat();
CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);
in your code. Here is the link to the EmguCV wiki page: http://www.emgu.com/wiki/index.php/Shape_(Triangle,_Rectangle,_Circle,_Line)_Detection_in_CSharp
Check there for missing parts.
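In terms of the posted fnFindTriangleRect, the missing step would go between the PyrDown/PyrUp smoothing and the FindContours call, roughly like this (the first threshold reuses dCannyThres from the class; the linking threshold here is just a placeholder):

CvInvoke.PyrDown(uimage, pyrDown);
CvInvoke.PyrUp(pyrDown, uimage);

// Actually run Canny so that cannyEdges contains edges before FindContours uses it.
double cannyThresholdLinking = 120.0;   // placeholder linking threshold
CvInvoke.Canny(uimage, cannyEdges, dCannyThres, cannyThresholdLinking);

triangleRectImage = img.CopyBlank();
using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
    CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
    // ... rest of the posted loop unchanged ...
}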

Why am I getting a System.NullReferenceException error in my code? [duplicate]

This question already has answers here:
What is a NullReferenceException, and how do I fix it?
(27 answers)
Closed 7 years ago.
I'm currently working on an exercise for my C# class. I am having some trouble with one particular part and would really appreciate some help.
I am working on an exercise in which we are given an incomplete project file.
The project has multiple classes; this class is for controlling the squares that are placed on the board.
namespace HareAndTortoise {
public partial class SquareControl : PictureBox {
public const int SQUARE_SIZE = 100;
private Square square; // A reference to the corresponding square object
private BindingList<Player> players; // References the players in the overall game.
private bool[] containsPlayers = new bool[6];//HareAndTortoiseGame.MAX_PLAYERS];
public bool[] ContainsPlayers {
get {
return containsPlayers;
}
set {
containsPlayers = value;
}
}
// Font and brush for displaying text inside the square.
private Font textFont = new Font("Microsoft Sans Serif", 8);
private Brush textBrush = Brushes.White;
public SquareControl(Square square, BindingList<Player> players) {
this.square = square;
this.players = players;
// Set GUI properties of the whole square.
Size = new Size(SQUARE_SIZE, SQUARE_SIZE);
Margin = new Padding(0); // No spacing around the cell. (Default is 3 pixels.)
Dock = DockStyle.Fill;
BorderStyle = BorderStyle.FixedSingle;
BackColor = Color.CornflowerBlue;
SetImageWhenNeeded();
}
private void SetImageWhenNeeded()
{
if (square is Square.Win_Square)
{
LoadImageFromFile("Win.png");
textBrush = Brushes.Black;
}
else if (square is Square.Lose_Square)
{
LoadImageFromFile("Lose.png");
textBrush = Brushes.Red;
}
else if (square is Square.Chance_Square)
{
LoadImageFromFile("monster-green.png");
}
else if (square.Name == "Finish")
{
LoadImageFromFile("checkered-flag.png");
}
else
{
// No image needed.
}
}
private void LoadImageFromFile(string fileName) {
Image image = Image.FromFile(@"Images\" + fileName);
Image = image;
SizeMode = PictureBoxSizeMode.StretchImage; // Zoom is also ok.
}
protected override void OnPaint(PaintEventArgs e) {
// Due to a limitation in WinForms, don't use base.OnPaint(e) here.
if (Image != null)
e.Graphics.DrawImage(Image, e.ClipRectangle);
string name = square.Name;
// Create rectangle for drawing.
float textWidth = textFont.Size * name.Length;
float textHeight = textFont.Height;
float textX = e.ClipRectangle.Right - textWidth;
float textY = e.ClipRectangle.Bottom - textHeight;
RectangleF drawRect = new RectangleF(textX, textY, textWidth, textHeight);
// When debugging this method, show the drawing-rectangle on the screen.
//Pen blackPen = new Pen(Color.Black);
//e.Graphics.DrawRectangle(blackPen, textX, textY, textWidth, textHeight);
// Set format of string.
StringFormat drawFormat = new StringFormat();
drawFormat.Alignment = StringAlignment.Far; // Right-aligned.
// Draw string to screen.
e.Graphics.DrawString(name, textFont, textBrush, drawRect, drawFormat);
// Draw player tokens (when any players are on this square).
const int PLAYER_TOKENS_PER_ROW = 3;
const int PLAYER_TOKEN_SIZE = 30; // pixels.
const int PLAYER_TOKEN_SPACING = (SQUARE_SIZE - (PLAYER_TOKEN_SIZE * PLAYER_TOKENS_PER_ROW)) / (PLAYER_TOKENS_PER_ROW - 1);
for (int i = 0; i < containsPlayers.Length; i++) {
if (containsPlayers[i]) {
int xPosition = i % PLAYER_TOKENS_PER_ROW;
int yPosition = i / PLAYER_TOKENS_PER_ROW;
int xPixels = xPosition * (PLAYER_TOKEN_SIZE + PLAYER_TOKEN_SPACING);
int yPixels = yPosition * (PLAYER_TOKEN_SIZE + PLAYER_TOKEN_SPACING);
Brush playerTokenColour = players[i].PlayerTokenColour;
e.Graphics.FillEllipse(playerTokenColour, xPixels, yPixels, PLAYER_TOKEN_SIZE, PLAYER_TOKEN_SIZE);
}
}//endfor
}
}
}
The program trips up at:
else if (square.Name == "Finish")
{
LoadImageFromFile("checkered-flag.png");
}
I know it is because of square.Name, but from going through the code, I can't see why square.Name is not recognised.
Square is passed from another class using this method
private void SetUpGuiGameBoard()
{
for (int i = 0; i <= 55; i++)
{
Square q = Board.GetGameBoardSquare(i);
SquareControl sq = new SquareControl(q, null);
int coloumn;
int row;
if (i == 0)
{
BackColor = Color.BurlyWood;
}
if (i == 55)
{
BackColor = Color.BurlyWood;
}
MapSquareNumToTablePanel(i, out coloumn, out row);
tableLayoutPanel1.Controls.Add(sq, coloumn, row);
}
}
and Squares are created in this class
private static Square[] gameBoard = new Square[56];
static public void SetUpBoard()
{
for (int i = 1; i == 55; i++)
{
gameBoard[i] = new Square("Ordinary Square", i);
}
gameBoard[0] = new Square("Start", 0);
gameBoard[4] = new Square.Lose_Square("Lose Square", 4);
gameBoard[5] = new Square.Chance_Square("Chance Square", 5);
gameBoard[9] = new Square.Win_Square("Win Square", 9);
gameBoard[11] = new Square.Chance_Square("Chance Square", 11);
gameBoard[14] = new Square.Lose_Square("Lose Square", 14);
gameBoard[17] = new Square.Chance_Square("Chance Square", 17);
gameBoard[19] = new Square.Win_Square("Win Square", 19);
gameBoard[24] = new Square.Lose_Square("Lose Square", 24);
gameBoard[29] = new Square.Win_Square("Win Square", 29);
gameBoard[34] = new Square.Lose_Square("Lose Square", 34);
gameBoard[35] = new Square.Chance_Square("Chance Square", 35);
gameBoard[39] = new Square.Win_Square("Win Square", 39);
gameBoard[44] = new Square.Lose_Square("Lose Square", 44);
gameBoard[47] = new Square.Chance_Square("Chance Square", 47);
gameBoard[49] = new Square.Win_Square("Win Square", 49);
gameBoard[53] = new Square.Chance_Square("Chance Square", 53);
gameBoard[55] = new Square("Finish", 56);
}
public static Square GetGameBoardSquare(int n)
{
return gameBoard[n];
}
public static Square StartSquare()
{
return gameBoard[0];
}
public static Square NextSquare(int n)
{
return gameBoard[(n+1)];
}
}
The answer already provided is the best way to prevent any null reference exception. For more clarification, I suggest you check the call stack at the point the debugger reaches the SquareControl constructor. At that point you should check why the Square object being passed in is null. That will lead you to the root cause of the problem. Hope this helps.
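One concrete way to do that is to fail fast in the SquareControl constructor, so the debugger stops at the call site that passes in the null Square rather than later in SetImageWhenNeeded (a sketch of the idea, not a fix for why the board entry is null in the first place):

public SquareControl(Square square, BindingList<Player> players) {
    if (square == null) {
        // Break here: the caller (e.g. SetUpGuiGameBoard) handed in a null Square,
        // which later causes the NullReferenceException at square.Name.
        throw new ArgumentNullException("square");
    }
    this.square = square;
    this.players = players;
    // ... rest of the existing constructor ...
}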

Kinect 1.8 colorframe and depthframe not coordinated

My program has a problem with poor coordination between the depth and color images.
The player mask is not in the same place as the person (see the picture below).
void _AllFreamReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame _colorFrame = e.OpenColorImageFrame())
{
if (_colorFrame == null) // if the frame is empty, do nothing
{
return;
}
byte[] _pixels = new byte[_colorFrame.PixelDataLength]; // create a pixel array for one image frame, sized to the frame captured from the stream
_colorFrame.CopyPixelDataTo(_pixels); // copy the pixels into the array
int _stride = _colorFrame.Width * 4; // each pixel can have 4 values: Red, Green, Blue or empty
image1.Source =
BitmapSource.Create(_colorFrame.Width, _colorFrame.Height,
96, 96, PixelFormats.Bgr32, null, _pixels, _stride);
if (_closing)
{
return;
}
using (DepthImageFrame _depthFrame = e.OpenDepthImageFrame())
{
if (_depthFrame == null)
{
return;
}
byte[] _pixelsdepth = _GenerateColoredBytes(_depthFrame,_pixels);
int _dstride = _depthFrame.Width * 4;
image3.Source =
BitmapSource.Create(_depthFrame.Width, _depthFrame.Height,
96, 96, PixelFormats.Bgr32, null, _pixelsdepth, _dstride);
}
}
}
private byte[] _GenerateColoredBytes(DepthImageFrame _depthFrame, byte[] _pixels)
{
short[] _rawDepthData = new short[_depthFrame.PixelDataLength];
_depthFrame.CopyPixelDataTo(_rawDepthData);
Byte[] _dpixels = new byte[_depthFrame.Height * _depthFrame.Width * 4];
const int _blueindex = 0;
const int _greenindex = 1;
const int _redindex = 2;
for (int _depthindex = 0, _colorindex = 0;
_depthindex < _rawDepthData.Length && _colorindex < _dpixels.Length;
_depthindex++, _colorindex += 4)
{
int _player = _rawDepthData[_depthindex] & DepthImageFrame.PlayerIndexBitmaskWidth;
if (_player > 0)
{
_dpixels[_colorindex + _redindex] = _pixels[_colorindex + _redindex];
_dpixels[_colorindex + _greenindex] = _pixels[_colorindex + _greenindex];
_dpixels[_colorindex + _blueindex] = _pixels[_colorindex + _blueindex];
};
}
return _dpixels;
}
RGB and depth data are not aligned. This is due to the positions of the depth sensor and the RGB camera in the Kinect case: they are different, so you cannot expect aligned images from different points of view.
However, your problem is quite common, and was solved by KinectSensor.MapDepthFrameToColorFrame, which was deprecated after SDK 1.6. Now, what you need is the CoordinateMapper.MapDepthFrameToColorFrame method.
The Coordinate Mapping Basics-WPF C# Sample shows how to use this method. You can find some significant parts of the code in the following:
// Intermediate storage for the depth data received from the sensor
private DepthImagePixel[] depthPixels;
// Intermediate storage for the color data received from the camera
private byte[] colorPixels;
// Intermediate storage for the depth to color mapping
private ColorImagePoint[] colorCoordinates;
// Inverse scaling factor between color and depth
private int colorToDepthDivisor;
// Format we will use for the depth stream
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30;
// Format we will use for the color stream
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
//...
// Initialization
this.colorCoordinates = new ColorImagePoint[this.sensor.DepthStream.FramePixelDataLength];
this.depthWidth = this.sensor.DepthStream.FrameWidth;
this.depthHeight = this.sensor.DepthStream.FrameHeight;
int colorWidth = this.sensor.ColorStream.FrameWidth;
int colorHeight = this.sensor.ColorStream.FrameHeight;
this.colorToDepthDivisor = colorWidth / this.depthWidth;
this.sensor.AllFramesReady += this.SensorAllFramesReady;
//...
private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
// in the middle of shutting down, so nothing to do
if (null == this.sensor)
{
return;
}
bool depthReceived = false;
bool colorReceived = false;
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (null != depthFrame)
{
// Copy the pixel data from the image to a temporary array
depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
depthReceived = true;
}
}
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (null != colorFrame)
{
// Copy the pixel data from the image to a temporary array
colorFrame.CopyPixelDataTo(this.colorPixels);
colorReceived = true;
}
}
if (true == depthReceived)
{
this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
DepthFormat,
this.depthPixels,
ColorFormat,
this.colorCoordinates);
// ...
int depthIndex = x + (y * this.depthWidth);
DepthImagePixel depthPixel = this.depthPixels[depthIndex];
// the ColorImagePoint this depth pixel was mapped to by MapDepthFrameToColorFrame
ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];
// scale color coordinates to depth resolution
int X = colorImagePoint.X / this.colorToDepthDivisor;
int Y = colorImagePoint.Y / this.colorToDepthDivisor;
// depthPixel is the depth for the (X,Y) pixel in the color frame
}
}
I am working on this problem myself. I agree with VitoShadow that one solution is in the coordinate mapping, but there is a section not posted that handles the ratio between the mismatched depth and color resolutions (this.colorToDepthDivisor = colorWidth / this.depthWidth;). This is used together with a shift of the data (this.playerPixelData[playerPixelIndex - 1] = opaquePixelValue;) to account for the mismatch.
Unfortunately, this can create a border around the masked image where the depth frame isn't stretched to the edge of the color frame. I am trying not to use skeleton mapping, and I am optimizing my code by tracking depth data with Emgu CV to pass a point as the center of the ROI of the color frame. I am still working on it.
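Pieced together from the sample referred to above, the mask-building step looks roughly like the following (field names follow this answer's playerPixelData/opaquePixelValue; the exact bounds check and indexing in the real Coordinate Mapping Basics sample may differ slightly):

// For each depth pixel that belongs to a player, map it into color space,
// scale the mapped point back down by colorToDepthDivisor, and mark that
// mask pixel (plus its left neighbour - the "shift" mentioned above) as opaque.
for (int depthIndex = 0; depthIndex < this.depthPixels.Length; depthIndex++)
{
    DepthImagePixel depthPixel = this.depthPixels[depthIndex];
    if (depthPixel.PlayerIndex > 0)
    {
        ColorImagePoint colorImagePoint = this.colorCoordinates[depthIndex];
        int maskX = colorImagePoint.X / this.colorToDepthDivisor;
        int maskY = colorImagePoint.Y / this.colorToDepthDivisor;
        if (maskX > 0 && maskX < this.depthWidth && maskY >= 0 && maskY < this.depthHeight)
        {
            int playerPixelIndex = maskX + (maskY * this.depthWidth);
            this.playerPixelData[playerPixelIndex] = opaquePixelValue;
            this.playerPixelData[playerPixelIndex - 1] = opaquePixelValue; // covers mapping gaps
        }
    }
}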
