I have images that can be as large as 20000x20000 pixels in width and height (but not more than 50 MB). Loading such an image into a picture box in C# is very slow, and slicing it into 200x200-pixel images is also slow (a few images per second). My current application works, but it is very slow, and I am looking for much faster approaches if available.
This is how I currently load the image into the picture box (which is slow too: a few seconds to load):
resizedImage = new System.Drawing.Bitmap(OriginalImage, new System.Drawing.Size((int)(OriginalImage.Width * zoomFactor), (int)(OriginalImage.Height * zoomFactor)));
imgBox.Image = resizedImage;
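As an aside, the Bitmap(Image, Size) constructor rescales the entire image through GDI+'s default interpolation, which is a large part of the load time at 20000x20000. A minimal sketch of a cheaper preview rescale, assuming OriginalImage and zoomFactor are the same as above (this trades quality for speed):
var preview = new Bitmap((int)(OriginalImage.Width * zoomFactor), (int)(OriginalImage.Height * zoomFactor));
using (var g = Graphics.FromImage(preview))
{
    // Nearest-neighbor sampling is far cheaper than the default filtering.
    g.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.NearestNeighbor;
    g.DrawImage(OriginalImage, new Rectangle(0, 0, preview.Width, preview.Height));
}
imgBox.Image = preview;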
What is the best way to slice this image faster?
This is my current slicing routine:
private void CropSolderJoints()
{
    //CordinatesToCrop CordsToCrop = createCordinatesToCrop();
    try
    {
        PB.Maximum = Adjustedcordinates.Pins.Count;
        double OriginX = Convert.ToDouble(txtOriginX.Text);
        double OriginY = Convert.ToDouble(txtOriginY.Text);
        double Scale = Convert.ToDouble(txtScale.Text);
        double BumpSize = Convert.ToDouble(txtBumpSize.Text);
        Stopwatch watch = new Stopwatch();
        watch.Start();
        // Ratio of the picture box to the full-size image.
        double zoomFactor = Math.Max(imgBox.Width, imgBox.Height) / (double)Math.Max(OriginalImage.Width, OriginalImage.Height);
        int jointSide = (int)Math.Round(BumpSize / zoomFactor);
        System.Drawing.Size lensPixelSize = new System.Drawing.Size(jointSide, jointSide);
        Rectangle CropJointRectangle = new Rectangle(0, 0, jointSide, jointSide);
        int pinCount = 0;
        // These do not change per pin, so compute them once outside the loop.
        string folderName = new DirectoryInfo(imageFolder).Name;
        string filename = dgvImages.SelectedCells[1].Value.ToString();
        foreach (PinCordinates p in Adjustedcordinates.Pins)
        {
            string Imagefilename = String.Format("{0}_{1}_{2}_{3}", folderName, Path.GetFileNameWithoutExtension(filename), p.defect.ToString(), p.PinLabel);
            string saveFilePath = String.Format("{0}\\{1}.{2}", imageFolder, Imagefilename, "png");
            float TopLeftX = (float)(OriginX + p.Cordinates.X * Scale);
            float TopLeftY = (float)(OriginY + p.Cordinates.Y * Scale);
            System.Drawing.PointF pos = new System.Drawing.PointF(TopLeftX, TopLeftY);
            imageLens.Location = pkg.GetLensPosition(pos, imageLens);
            imageLens.Size = lensUseRelativeSize
                ? pkg.GetScaledLensSize(imgBox.ClientRectangle, SourceImage.Size, lensPixelSize)
                : lensPixelSize;
            RectangleF section = pkg.CanvasToImageRect(imgBox.ClientRectangle, SourceImage.Size, imageLens);
            Bitmap imgJoint = new Bitmap(jointSide, jointSide);
            // Dispose the Graphics as soon as the crop has been drawn.
            using (Graphics g = Graphics.FromImage(imgJoint))
            {
                pkg.DrawImageSelection(g, CropJointRectangle, section, SourceImage);
            }
            picZoom.Image = imgJoint;
            imgJoint.Save(saveFilePath);
            PB.Value = pinCount;
            pinCount++;
            picZoom.Refresh();
        }
        PB.Value = 0;
        watch.Stop();
        label1.Text = watch.Elapsed.TotalSeconds.ToString();
        MessageBox.Show("Slicing completed successfully. " + pinCount.ToString() + " bumps sliced.");
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
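On the slicing itself: each tile can be cropped straight from the full-size bitmap with a 1:1 pixel copy, with no lens math and no per-tile rescaling, which is typically much faster than going through the zoomed view. A minimal sketch, assuming source is the already-loaded Bitmap and srcRect is the 200x200 region to cut (Bitmap.Clone(srcRect, source.PixelFormat) is another common option):
static Bitmap CropTile(Bitmap source, Rectangle srcRect)
{
    Bitmap tile = new Bitmap(srcRect.Width, srcRect.Height);
    using (Graphics g = Graphics.FromImage(tile))
    {
        // Copy the region pixel-for-pixel; no interpolation work is needed.
        g.DrawImage(source, new Rectangle(0, 0, srcRect.Width, srcRect.Height),
            srcRect, GraphicsUnit.Pixel);
    }
    return tile;
}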
When the form loads, I add UserControls in two columns and multiple rows to show recently added data. The number of rows depends on the height of the screen, and the number of columns depends on the width of the screen:
if ((int)count_columns == 2)
{
    for (int sp = 0; sp <= (int)count_rows; sp++)
    {
        // Add the first column's UserControl.
        latest latest1 = new latest();
        latest1.Name = String.Concat("latest1_", sp.ToString());
        latest1.Left = leftPanel.Width + (int)space_between_columns;
        latest1.Top = topPanel.Height + (32 * sp) + (latest1.Height * sp) + 32;
        this.Controls.Add(latest1);
        // Add the second column's UserControl.
        latest latest2 = new latest();
        latest2.Name = String.Concat("latest2_", sp.ToString());
        latest2.Left = 2 * (int)space_between_columns + latest2.Width + leftPanel.Width;
        latest2.Top = topPanel.Height + (32 * sp) + (latest1.Height * sp) + 32;
        this.Controls.Add(latest2);
    }
}
When I tried to draw a gradient on the UserControl:
private void latest1_Paint(object sender, PaintEventArgs e)
{
    var rec = new Rectangle(0, 0, 500, 145);
    // Fade from opaque to transparent dark blue, top to bottom.
    using (var brush = new System.Drawing.Drawing2D.LinearGradientBrush(
        rec,
        Color.FromArgb(255, Color.DarkBlue),
        Color.FromArgb(0, Color.DarkBlue),
        90f))
    {
        e.Graphics.PixelOffsetMode = System.Drawing.Drawing2D.PixelOffsetMode.Half;
        e.Graphics.FillRectangle(brush, rec);
    }
}
Nothing happened.
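One likely cause, offered as a guess: the latest controls are created at runtime, so nothing ever subscribes them to latest1_Paint, and the handler is never invoked. A minimal sketch of the missing wiring, added where each control is created in the loop above:
latest latest1 = new latest();
latest1.Paint += latest1_Paint; // without this, the Paint handler never runs
// ...and likewise for latest2...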
Background: I am currently busy showing the position of a vehicle on a ZoomableCanvas, based on its Position (X, Y) and Orientation (for rotation). I use a Rectangle to visualize the vehicle. Everything works well, but I got a bit greedy and now want to replace the Rectangle with a top-view picture of the vehicle, so it looks as if the vehicle itself is moving instead of a Rectangle.
Code Below:
private void PaintLocationVehicle(VehicleClass vc)
{
    // Remove the previous rectangle(s) drawn for this vehicle.
    IEnumerable<Rectangle> collection = vc.ZoomableCanvas.Children.OfType<Rectangle>().Where(x => x.Name == _vehicleobjectname);
    List<Rectangle> listE = collection.ToList<Rectangle>();
    for (int e = 0; e < listE.Count; e++)
        vc.ZoomableCanvas.Children.Remove(listE[e]);
    // Assign X and Y position from the vehicle.
    double drawingX = vc.gCurrentX * GlobalVar.DrawingQ;
    double drawingY = vc.gCurrentY * GlobalVar.DrawingQ;
    // Scale length and width of the vehicle.
    double tractorWidthScaled = vc.tractorWidth * GlobalVar.DrawingQ;
    double tractorLengthScaled = vc.tractorLength * GlobalVar.DrawingQ;
    // Get the drawing location (centered on the position).
    double _locationX = drawingX - (tractorLengthScaled / 2);
    double _locationY = drawingY - (tractorWidthScaled / 2);
    RotateTransform rotation = new RotateTransform();
    // Angle arrives in tenths of a degree.
    rotation.Angle = vc.gCurrentTheeta / 10;
    // Paint the node.
    Rectangle _rectangle = new Rectangle();
    _rectangle.Stroke = new SolidColorBrush((Color)ColorConverter.ConvertFromString(vc.VehicleColor == "" ? "Black" : vc.VehicleColor));
    _rectangle.Fill = new SolidColorBrush((Color)ColorConverter.ConvertFromString(vc.VehicleColor == "" ? "Black" : vc.VehicleColor));
    _rectangle.Width = tractorLengthScaled;
    _rectangle.Height = tractorWidthScaled;
    rotation.CenterX = _rectangle.Width / 2;
    rotation.CenterY = _rectangle.Height / 2;
    _rectangle.RenderTransform = rotation;
    Canvas.SetTop(_rectangle, _locationY);
    Canvas.SetLeft(_rectangle, _locationX);
    _rectangle.SetValue(ZoomableCanvas.ZIndexProperty, 2);
    // Assign tooltip values for the user.
    string _tooltipmsg = "Canvas: " + vc.ZoomableCanvas.Name;
    _tooltipmsg += "\nX: " + vc.gCurrentX;
    _tooltipmsg += "\nY: " + vc.gCurrentY;
    _rectangle.ToolTip = _tooltipmsg;
    _rectangle.Name = _vehicleobjectname;
    // Add to the canvas.
    vc.ZoomableCanvas.Children.Add(_rectangle);
}
Note: VehicleClass holds all the values for a certain vehicle. DrawingQ holds the transformation scale from reality to the ZoomableCanvas.
So the issues I foresee:
1. How do I make a JPEG image take on the same size as the Rectangle?
2. What kind of Shape object should I use? Please suggest.
If I understand you correctly, you want to show an image of the vehicle inside the rectangle. To do that, you can use an ImageBrush and assign it to the Rectangle's Fill property, something like this:
Rectangle rect = new Rectangle();
rect.Width = 100;
rect.Height = 100;
ImageBrush img = new ImageBrush();
BitmapImage bmp = new BitmapImage();
bmp.BeginInit();
bmp.UriSource = new Uri("vehicle image path");
bmp.EndInit();
img.ImageSource = bmp;
rect.Fill = img;
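On the sizing question: ImageBrush is a TileBrush whose Stretch property defaults to Fill, so the picture is scaled to whatever size the Rectangle already has, and the existing RotateTransform rotates the fill together with the shape. A minimal sketch against PaintLocationVehicle above (vehicleImagePath is a hypothetical placeholder):
// Hypothetical path; point this at the top-view picture of the vehicle.
BitmapImage bmp = new BitmapImage(new Uri(vehicleImagePath, UriKind.RelativeOrAbsolute));
_rectangle.Fill = new ImageBrush(bmp) { Stretch = Stretch.Fill }; // Fill is the default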
I hope that helps.
I've been working on this project for a few months now, trying to integrate eye tracking into Unity using OpenCvSharp. I've managed to get everything working, including the actual tracking of the pupil, but I have a memory leak. After 20-30 seconds of the program running, it freezes and the console errors with "Unable to allocate (insert number here) bits". Watching the memory usage while the program runs, you can see it climb steadily until it maxes out, and then the program crashes.
I've spent quite a while trying to fix the issue and have read a lot of help posts about releasing images/storage correctly. Despite doing this, it doesn't appear to be releasing them properly. I tried forcing the garbage collector to reclaim the memory, but that didn't seem to work either. Am I doing something fundamentally wrong with the images and how I reclaim them? Or is creating new images each frame (even though I'm releasing them) causing the problem?
Any help would be greatly appreciated. Here's the code below; you can ignore a lot of the stuff within the update function, as it's to do with the actual tracking section and calibration. I realise the code is pretty messy, sorry about that! The main section to worry about is EyeDetection().
using UnityEngine;
using System.Collections;
using System;
using System.IO;
using OpenCvSharp;
using OpenCvSharp.Blob;
//using System.Xml;
//using System.Threading;
//using AForge;
//using OpenCvSharp.Extensions;
//using System.Windows.Media;
//using System.Windows.Media.Imaging;
public class CaptureScript2 : MonoBehaviour
{
    //public GameObject planeObj;
    public WebCamTexture webcamTexture;   //Texture retrieved from the webcam
    //public Texture2D texImage;          //Texture to apply to plane
    public string deviceName;
    private int devId = 1;
    private int imWidth = 800;            //camera width
    private int imHeight = 600;           //camera height
    private string errorMsg = "No errors found!";
    private static IplImage camImage;     //Ipl image of the converted webcam texture
    //private static IplImage yuv;
    //private static IplImage dst;
    private CvCapture cap;                //Current camera capture
    //private IplImage eyeLeft;
    //private IplImage eyeRight;
    //private IplImage eyeLeftFinal;
    //private IplImage eyeRightFinal;
    private double leftEyeX;
    private double leftEyeY;
    private double rightEyeX;
    private double rightEyeY;
    private int calibState;
    private double LTRCPx;
    private double LTLCPx;
    private double LBLCPy;
    private double LTLCPy;
    private double RTRCPx;
    private double RTLCPx;
    private double RBLCPy;
    private double RTLCPy;
    private double gazeWidth;
    private double gazeHeight;
    private double gazeScaleX;
    private double gazeScaleY;
    public static CvMemStorage storageFace;
    public static CvMemStorage storage;
    public static double gazePosX;
    public static double gazePosY;
    private bool printed = true;
    //private CvRect r;
    //private IplImage smallImg;
    CvColor[] colors = new CvColor[]
    {
        new CvColor(0, 0, 255),
        new CvColor(0, 128, 255),
        new CvColor(0, 255, 255),
        new CvColor(0, 255, 0),
        new CvColor(255, 128, 0),
        new CvColor(255, 255, 0),
        new CvColor(255, 0, 0),
        new CvColor(255, 0, 255),
    };
    //scale for small image
    const double Scale = 1.25;
    const double scaleEye = 10.0;
    const double ScaleFactor = 2.5;
    //must show 2 eyes on the screen
    const int MinNeighbors = 2;
    const int MinNeighborsFace = 1;
    // Use this for initialization
    void Start ()
    {
        //Webcam initialisation
        WebCamDevice[] devices = WebCamTexture.devices;
        Debug.Log ("num:" + devices.Length);
        for (int i = 0; i < devices.Length; i++)
        {
            print (devices [i].name);
            // CompareTo returns 0 on a match; the original "== 1" only tested sort order.
            if (devices [i].name.CompareTo (deviceName) == 0)
            {
                devId = i;
            }
        }
        if (devId >= 0)
        {
            //mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
        }
        //create capture from current device
        cap = Cv.CreateCameraCapture(devId);
        //set properties of the capture
        Cv.SetCaptureProperty(cap, CaptureProperty.FrameWidth, imWidth);
        Cv.SetCaptureProperty(cap, CaptureProperty.FrameHeight, imHeight);
        //create windows to display the captures
        //Cv.NamedWindow("Eye tracking", WindowMode.AutoSize);
        Cv.NamedWindow ("EyeLeft", WindowMode.AutoSize);
        Cv.NamedWindow ("EyeRight", WindowMode.AutoSize);
        Cv.NamedWindow ("Face", WindowMode.AutoSize);
        calibState = 1;
    }
    void Update ()
    {
        if (Input.GetKeyDown(KeyCode.Space) && calibState < 3)
        {
            calibState++;
        }
        if (Input.GetMouseButtonDown(0) && calibState < 4)
        {
            printed = false;
            calibState++;
            Cv.DestroyAllWindows();
            Cv.ReleaseCapture(cap);
            cap = Cv.CreateCameraCapture(devId);
        }
        //if device is connected
        if (devId >= 0)
        {
            //retrieve the current frame from camera
            camImage = Cv.QueryFrame(cap);
            //detect eyes and apply circles
            EyeDetection();
            Cv.ReleaseImage(camImage);
            //PupilTracking();
            switch (calibState)
            {
                case 1:
                    LTRCPx = leftEyeX;
                    RTRCPx = rightEyeX;
                    break;
                case 2:
                    LTLCPx = leftEyeX;
                    LTLCPy = leftEyeY;
                    RTLCPx = rightEyeX;
                    RTLCPy = rightEyeY;
                    break;
                case 3:
                    LBLCPy = leftEyeY;
                    RBLCPy = rightEyeY;
                    break;
                case 4:
                    //gazeWidth = (((LTRCPx - LTLCPx) + (RTRCPx - RTLCPx)) / 2) * -1;
                    //gazeHeight = ((LBLCPy - LTLCPy) + (RBLCPy - RTLCPy)) / 2;
                    gazeWidth = LTLCPx - LTRCPx;
                    gazeHeight = LBLCPy - LTLCPy;
                    gazeScaleX = Screen.width / gazeWidth;
                    gazeScaleY = Screen.height / gazeHeight;
                    gazePosX = gazeScaleX * (leftEyeX - LTRCPx);
                    gazePosY = gazeScaleY * (leftEyeY - LTLCPy);
                    break;
            }
        }
        else
        {
            Debug.Log ("Can't find camera!");
        }
        if (printed == false)
        {
            print ("Gaze pos x = " + gazePosX);
            print ("Gaze pos Y = " + gazePosY);
            print ("Scale x = " + gazeScaleX);
            print ("Scale y = " + gazeScaleY);
            print ("Gaze width = " + gazeWidth);
            print ("Gaze Height = " + gazeHeight);
            print ("left eye x = " + leftEyeX);
            print ("left eye Y = " + leftEyeY);
            print ("calib state = " + calibState);
            printed = true;
        }
    }
    void EyeDetection()
    {
        // All of these buffers are allocated fresh on every frame.
        IplImage mainImage = new IplImage (imWidth, imHeight, BitDepth.U8, 3);
        IplImage smallImg = new IplImage(mainImage.Width, mainImage.Height, BitDepth.U8, 1);
        Cv.Resize (camImage, mainImage, Interpolation.Linear);
        IplImage gray = new IplImage(mainImage.Size, BitDepth.U8, 1);
        Cv.CvtColor (mainImage, gray, ColorConversion.BgrToGray);
        Cv.Resize(gray, smallImg, Interpolation.Linear);
        Cv.EqualizeHist(smallImg, smallImg);
        Cv.ReleaseImage (gray);
        // The cascade is re-read from disk and the storage re-allocated on every frame
        // (these locals also shadow the class-level fields of the same names).
        CvHaarClassifierCascade cascadeFace = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");
        CvMemStorage storageFace = new CvMemStorage();
        storageFace.Clear ();
        CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, storageFace, ScaleFactor, MinNeighborsFace, 0, new CvSize(30, 30));
        for (int j = 0; j < faces.Total; j++)
        {
            CvRect face = faces[j].Value.Rect;
            CvHaarClassifierCascade cascadeEye = CvHaarClassifierCascade.FromFile ("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
            IplImage faceImg = new IplImage(face.Width, face.Height, BitDepth.U8, 1);
            IplImage faceImgColour = new IplImage(face.Width, face.Height, BitDepth.U8, 3);
            CvMemStorage storage = new CvMemStorage();
            storage.Clear ();
            Cv.SetImageROI(smallImg, face);
            Cv.Copy (smallImg, faceImg);
            Cv.ResetImageROI(smallImg);
            Cv.SetImageROI(mainImage, face);
            Cv.Copy (mainImage, faceImgColour);
            Cv.ResetImageROI(mainImage);
            Cv.ShowImage ("Face", faceImgColour);
            CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(faceImg, cascadeEye, storage, ScaleFactor, MinNeighbors, 0, new CvSize(30, 30));
            for (int i = 0; i < eyes.Total; i++)
            {
                CvRect r = eyes[i].Value.Rect;
                Cv.SetImageROI(faceImgColour, r);
                if (i == 1)
                {
                    IplImage eyeLeft = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
                    Cv.Copy(faceImgColour, eyeLeft);
                    IplImage yuv = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
                    IplImage dst = new IplImage(eyeLeft.Size, BitDepth.U8, 3);
                    IplImage grayEyeLeft = new IplImage(eyeLeft.Size, BitDepth.U8, 1);
                    IplImage eyeLeftFinal = new IplImage(Cv.Round(grayEyeLeft.Width * scaleEye), Cv.Round(grayEyeLeft.Height * scaleEye), BitDepth.U8, 1);
                    Cv.CvtColor(eyeLeft, yuv, ColorConversion.BgrToCrCb);
                    Cv.Not(yuv, dst);
                    Cv.CvtColor(dst, eyeLeft, ColorConversion.CrCbToBgr);
                    Cv.CvtColor(eyeLeft, grayEyeLeft, ColorConversion.BgrToGray);
                    Cv.Resize (grayEyeLeft, eyeLeftFinal, Interpolation.Linear);
                    Cv.Threshold(eyeLeftFinal, eyeLeftFinal, 230, 230, ThresholdType.Binary);
                    CvBlobs b1 = new CvBlobs(eyeLeftFinal);
                    if (b1.Count > 0)
                    {
                        leftEyeX = b1.LargestBlob().Centroid.X;
                        leftEyeY = b1.LargestBlob().Centroid.Y;
                    }
                    Cv.ShowImage ("EyeLeft", eyeLeftFinal);
                    Cv.ReleaseImage (yuv);
                    Cv.ReleaseImage (dst);
                    Cv.ReleaseImage (grayEyeLeft);
                    Cv.ReleaseImage (eyeLeftFinal);
                    b1.Clear();
                    Cv.ReleaseImage (eyeLeft);
                }
                if (i == 0)
                {
                    IplImage eyeRight = new IplImage(new CvSize(r.Width, r.Height), BitDepth.U8, 3);
                    Cv.Copy(faceImgColour, eyeRight);
                    IplImage yuv2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
                    IplImage dst2 = new IplImage(eyeRight.Size, BitDepth.U8, 3);
                    IplImage grayEyeRight = new IplImage(eyeRight.Size, BitDepth.U8, 1);
                    IplImage eyeRightFinal = new IplImage(Cv.Round(grayEyeRight.Width * scaleEye), Cv.Round(grayEyeRight.Height * scaleEye), BitDepth.U8, 1);
                    Cv.CvtColor(eyeRight, yuv2, ColorConversion.BgrToCrCb);
                    Cv.Not(yuv2, dst2);
                    Cv.CvtColor(dst2, eyeRight, ColorConversion.CrCbToBgr);
                    Cv.CvtColor(eyeRight, grayEyeRight, ColorConversion.BgrToGray);
                    Cv.Resize (grayEyeRight, eyeRightFinal, Interpolation.Linear);
                    Cv.Threshold(eyeRightFinal, eyeRightFinal, 230, 230, ThresholdType.Binary);
                    CvBlobs b2 = new CvBlobs(eyeRightFinal);
                    if (b2.Count > 0)
                    {
                        rightEyeX = b2.LargestBlob().Centroid.X;
                        rightEyeY = b2.LargestBlob().Centroid.Y;
                    }
                    Cv.ShowImage ("EyeRight", eyeRightFinal);
                    Cv.ReleaseImage (yuv2);
                    Cv.ReleaseImage (dst2);
                    Cv.ReleaseImage (grayEyeRight);
                    Cv.ReleaseImage (eyeRightFinal);
                    b2.Clear ();
                    Cv.ReleaseImage (eyeRight);
                }
                Cv.ResetImageROI(faceImgColour);
            }
            //Cv.ShowImage("Eye tracking", mainImage);
            Cv.ReleaseImage (faceImg);
            Cv.ReleaseImage (faceImgColour);
            Cv.ReleaseMemStorage(storage);
            Cv.ReleaseHaarClassifierCascade(cascadeEye);
        }
        Cv.ReleaseMemStorage(storageFace);
        Cv.ReleaseHaarClassifierCascade(cascadeFace);
        //PupilTracking ();
        Cv.ReleaseImage(smallImg);
        Cv.ReleaseImage (mainImage);
        GC.Collect();
    }
    void OnGUI ()
    {
        GUI.Label (new Rect (200, 200, 100, 90), errorMsg);
    }

    void OnDestroy()
    {
        Cv.DestroyAllWindows();
        Cv.ReleaseCapture(cap);
    }
}
I am not familiar with OpenCV, but as a general rule:
I would limit instantiation in the Update loop, like new CvMemStorage().
Don't load data in the Update loop: CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml") should be loaded once in Start and assigned to a class variable.
Allocate on start, and release only if needed.
I find that in most situations there's plenty of RAM to go around. I allocate in Start() what is going to be used over and over, especially 60 times per second in the Update() loop. But loading XML data and allocating and releasing variables like storage or cascadeEye is bound to create issues when the app tries to do so 60 times a second.
Creating and destroying objects is very, very expensive, so do it wisely and sparingly, especially when dealing with complex data structures like OpenCV objects, bitmaps, or loaders.
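A minimal sketch of that reorganization, reusing the Cv calls from the question (only the moved pieces shown; everything else stays as it was):
// Allocated once in Start() and reused by every EyeDetection() call.
private CvHaarClassifierCascade cascadeFace;
private CvHaarClassifierCascade cascadeEye;
private CvMemStorage detectStorage;

void Start()
{
    cascadeFace = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml");
    cascadeEye = CvHaarClassifierCascade.FromFile("\\Users\\User\\Documents\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml");
    detectStorage = new CvMemStorage();
}

void EyeDetection()
{
    // Recycle the storage instead of constructing a new CvMemStorage per frame.
    detectStorage.Clear();
    CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(smallImg, cascadeFace, detectStorage,
        ScaleFactor, MinNeighborsFace, 0, new CvSize(30, 30));
    // ...per-face and per-eye work as before, without FromFile or new CvMemStorage here...
}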
hth.
So what I'm trying to do is take the Kinect Skeletal Sample and save X photos, but only when a human goes by. I have gotten it to work, except that once it detects a human it just records X photos, even after the person has left the Kinect's field of view. Does anyone know how to make it start recording once a person enters, and stop once they leave?
Variables
Runtime nui;
int totalFrames = 0;
int totalFrames2 = 0;
int lastFrames = 0;
int lastFrameWithMotion = 0;
int stopFrameNumber = 100;
DateTime lastTime = DateTime.MaxValue;
Entering/Exiting the Frame
void nui_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    SkeletonFrame skeletonFrame = e.SkeletonFrame;
    int iSkeleton = 0;
    ++totalFrames;
    string bb1 = Convert.ToString(totalFrames);
    //Uri uri1 = new Uri("C:\\Research\\Kinect\\Proposal_Skeleton\\Skeleton_Img" + bb1 + ".png");
    Uri uri1 = new Uri("C:\\temp\\Skeleton_Img" + bb1 + ".png");
    Brush[] brushes = new Brush[6];
    brushes[0] = new SolidColorBrush(Color.FromRgb(255, 0, 0));
    brushes[1] = new SolidColorBrush(Color.FromRgb(0, 255, 0));
    brushes[2] = new SolidColorBrush(Color.FromRgb(64, 255, 255));
    brushes[3] = new SolidColorBrush(Color.FromRgb(255, 255, 64));
    brushes[4] = new SolidColorBrush(Color.FromRgb(255, 64, 255));
    brushes[5] = new SolidColorBrush(Color.FromRgb(128, 128, 255));
    skeleton.Children.Clear();
    foreach (SkeletonData data in skeletonFrame.Skeletons)
    {
        if (SkeletonTrackingState.Tracked == data.TrackingState)
        {
            // Draw bones
            Brush brush = brushes[iSkeleton % brushes.Length];
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.Spine, JointID.ShoulderCenter, JointID.Head));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.ShoulderCenter, JointID.ShoulderLeft, JointID.ElbowLeft, JointID.WristLeft, JointID.HandLeft));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.ShoulderCenter, JointID.ShoulderRight, JointID.ElbowRight, JointID.WristRight, JointID.HandRight));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.HipLeft, JointID.KneeLeft, JointID.AnkleLeft, JointID.FootLeft));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.HipRight, JointID.KneeRight, JointID.AnkleRight, JointID.FootRight));
            // Draw joints
            foreach (Joint joint in data.Joints)
            {
                Point jointPos = getDisplayPosition(joint);
                Line jointLine = new Line();
                jointLine.X1 = jointPos.X - 3;
                jointLine.X2 = jointLine.X1 + 6;
                jointLine.Y1 = jointLine.Y2 = jointPos.Y;
                jointLine.Stroke = jointColors[joint.ID];
                jointLine.StrokeThickness = 6;
                skeleton.Children.Add(jointLine);
            }
            // ExportToPng(uri1, skeleton);
            // NB: this subscribes another copy of the handler on every tracked frame.
            nui.VideoFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_ColorFrameReady2);
        }
        iSkeleton++;
    } // for each skeleton
}
Actual Code
void nui_ColorFrameReady2(object sender, ImageFrameReadyEventArgs e)
{
    // 32 bits per pixel, RGBA image
    PlanarImage Image = e.ImageFrame.Image;
    int deltaFrames = totalFrames - lastFrameWithMotion;
    if (totalFrames2 <= stopFrameNumber && deltaFrames > 300)
    {
        ++totalFrames2;
        string bb1 = Convert.ToString(totalFrames2);
        string file_name_4 = "C:\\temp\\Video2_Img" + bb1 + ".jpg";
        video.Source = BitmapSource.Create(
            Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, Image.Bits, Image.Width * Image.BytesPerPixel);
        BitmapSource image4 = BitmapSource.Create(
            Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, Image.Bits, Image.Width * Image.BytesPerPixel);
        image4.Save(file_name_4, Coding4Fun.Kinect.Wpf.ImageFormat.Jpeg);
        if (totalFrames2 == stopFrameNumber)
        {
            lastFrameWithMotion = totalFrames;
            stopFrameNumber += 100;
        }
    }
}
In most setups I have used, the skeletal tracking event area has a check for if (skeleton != null); all you need to do is reset your trigger once a null skeleton is received. The SDK will send a skeleton frame every time the event is fired, so:
if (skeleton != null)
{
    // do image taking here
}
else
{
    // reset image counter
}
I would try something like this. Create a bool class variable named SkeletonInFrame and initialize it to false. Every time SkeletonFrameReady fires, set this bool to true. When you process a color frame, only process it if this variable is true. Then, after you process a color frame, set the variable back to false. This should help you stop processing frames when you are no longer receiving skeleton events.
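A minimal sketch of that flag, spliced into the question's handlers (everything outside the flag logic elided):
private bool SkeletonInFrame = false;

void nui_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    // ...existing skeleton drawing code...
    SkeletonInFrame = true; // a skeleton event arrived for this frame
}

void nui_ColorFrameReady2(object sender, ImageFrameReadyEventArgs e)
{
    if (!SkeletonInFrame)
        return; // no skeleton since the last color frame, so skip saving
    // ...existing photo-saving code...
    SkeletonInFrame = false; // require a fresh skeleton event before the next save
}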