Extracting Text from Camera in OpenCV (C#)

I wrote some code for finding text from a camera. I first wrote code for finding text in an image and succeeded, but I can't find text from the camera.
In addition, I wrote this code in a class library, so I can't test it easily.
Thank you in advance.
public string GetImageText(string imgPath)
{
    pathImage = imgPath;
    CvCapture cap;
    //cap = CvCapture.FromFile(imgPath);   // this worked for a still image
    cap = CvCapture.FromFile(imgPath);
    cap = CvCapture.FromCamera(CaptureDevice.Any);
    frame = cap.QueryFrame();
    //frame = new IplImage(imgPath, LoadMode.Color);
    if (frame != null)
    {
        IplImage img1 = new IplImage(frame.Size, BitDepth.U8, 1);
        IplConvKernel element = Cv.CreateStructuringElementEx(21, 3, 10, 2, ElementShape.Rect, null);
        aimg = new IplImage(frame.Size, BitDepth.U8, 1);
        IplImage temp = aimg.Clone();
        IplImage dest = aimg.Clone();
        frame.CvtColor(aimg, ColorConversion.RgbaToGray);
        bimg = aimg.Clone();
        Cv.Smooth(aimg, aimg, SmoothType.Gaussian);
        Cv.MorphologyEx(aimg, temp, dest, element, MorphologyOperation.TopHat, 1);
        Cv.Threshold(dest, aimg, 128, 255, ThresholdType.Binary | ThresholdType.Otsu);
        Cv.Smooth(aimg, dest, SmoothType.Median);
        Cv.Dilate(dest, dest, element, 2);
        Cv.ReleaseImage(temp);
        Cv.ReleaseImage(dest);
        IplImage labelImage = new IplImage(frame.Size, CvBlobLib.DepthLabel, 1);
        labelImage = new IplImage(frame.Size, BitDepth.U8, 1);
        frame = new IplImage(frame.Size, BitDepth.U8, 1);
        blob = new CvBlobs();
        text.Clear();
        CvBlobLib.Label(labelImage, blob);
        CvBlobLib.FilterByArea(blob, 6, 10);
        IplImage imgtemp = frame.Clone();
        foreach (var item in blob)
        {
            item.Value.SetImageRoiToBlob(bimg);
            double ratio = (double)item.Value.Rect.Width / item.Value.Rect.Height;
            double angle = (double)item.Value.Angle();
            if (ratio > 3.5 && ratio < 5.4 && angle > -15 && angle < 15)
            {
                IplImage texttemp = new IplImage(new CvSize(140, 27), bimg.Depth, bimg.NChannels);
                //texttemp.Flip(null, FlipMode.X);
                Cv.Resize(bimg, texttemp);
                text.Add(texttemp);
                frame.Rectangle(item.Value.Rect, new CvScalar(0, 0, 255), 2, LineType.Link4);
                //frame.Flip(null, FlipMode.X);
            }
        }
        textList.Clear();
    }
    return pathImage;
}

I don't have the reputation needed to comment, so this will have to be in the form of an answer. It looks like pathImage never gets a new value after all the processing to find the text - it keeps the value of imgPath, so you just return the same value you passed in.
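Since the method's only output is its return value, one option is to save the cropped candidate regions and return the path of the last one saved, so the caller gets something the processing actually produced. A minimal sketch, assuming text is the List<IplImage> filled in the loop above; the Path.ChangeExtension naming is just illustrative:
int i = 0;
foreach (IplImage region in text)
{
    // e.g. "input.region0.png" next to the input file (hypothetical naming)
    pathImage = System.IO.Path.ChangeExtension(imgPath, "region" + i + ".png");
    Cv.SaveImage(pathImage, region);
    i++;
}
return pathImage; // now reflects the processing instead of echoing imgPath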

Related

Add margin to barcode image using C#

I'm creating a Code39 barcode image with C# and I need to add a margin to this image.
This is the first image, which I created.
But it should be like this.
The code is complicated. It creates the barcode bars, the code string, and the title string.
public byte[] Code39(string code, int barSize, bool showCodeString, string title, string fontFile)
{
    // Create the output stream...
    MemoryStream ms = new MemoryStream();
    FontFamilyName = "Free 3 of 9"; //ConfigurationSettings.AppSettings["BarCodeFontFamily"];
    FontFileName = fontFile; //@"C:\Documents and Settings\narottam.sharma\Desktop\Barcode\WSBarCode\Code39Font\FREE3OF9.TTF"; //ConfigurationSettings.AppSettings["BarCodeFontFile"];
    FontSize = barSize;
    ShowCodeString = showCodeString;
    if (title + "" != "")
        Title = title;
    Bitmap objBitmap = GenerateBarcode(code);
    objBitmap.Save(ms, ImageFormat.Png);
    // ...and return the bytes.
    return ms.GetBuffer();
}
public Bitmap GenerateBarcode(string barCode)
{
    int bcodeWidth = 0;
    int bcodeHeight = 0;
    // Get the image container...
    Bitmap bcodeBitmap = CreateImageContainer(barCode, ref bcodeWidth, ref bcodeHeight);
    Graphics objGraphics = Graphics.FromImage(bcodeBitmap);
    // Fill the background
    objGraphics.FillRectangle(new SolidBrush(Color.White), new Rectangle(0, 0, bcodeWidth, bcodeHeight));
    int vpos = 0;
    // Draw the title string
    if (_titleString != null)
    {
        objGraphics.DrawString(_titleString, _titleFont, new SolidBrush(Color.Black), XCentered((int)_titleSize.Width, bcodeWidth), vpos);
        vpos += (((int)_titleSize.Height) + _itemSepHeight);
    }
    // Draw the barcode
    objGraphics.DrawString(barCode, Code39Font, new SolidBrush(Color.Black), XCentered((int)_barCodeSize.Width, bcodeWidth), vpos);
    // Draw the barcode string
    if (_showCodeString)
    {
        vpos += (((int)_barCodeSize.Height));
        objGraphics.DrawString(barCode, _codeStringFont, new SolidBrush(Color.Black), XCentered((int)_codeStringSize.Width, bcodeWidth), vpos);
    }
    // Return the image...
    return bcodeBitmap;
}
private Bitmap CreateImageContainer(string barCode, ref int bcodeWidth, ref int bcodeHeight)
{
    Graphics objGraphics;
    // Create a temporary bitmap...
    Bitmap tmpBitmap = new Bitmap(1, 1, PixelFormat.Format32bppArgb);
    objGraphics = Graphics.FromImage(tmpBitmap);
    // Calculate the size of the barcode items...
    if (_titleString != null)
    {
        _titleSize = objGraphics.MeasureString(_titleString, _titleFont);
        bcodeWidth = (int)_titleSize.Width;
        bcodeHeight = (int)_titleSize.Height + _itemSepHeight;
    }
    _barCodeSize = objGraphics.MeasureString(barCode, Code39Font);
    bcodeWidth = Max(bcodeWidth, (int)_barCodeSize.Width);
    bcodeHeight += (int)_barCodeSize.Height;
    if (_showCodeString)
    {
        _codeStringSize = objGraphics.MeasureString(barCode, _codeStringFont);
        bcodeWidth = Max(bcodeWidth, (int)_codeStringSize.Width);
        bcodeHeight += (_itemSepHeight + (int)_codeStringSize.Height);
    }
    // Dispose of temporary objects...
    objGraphics.Dispose();
    tmpBitmap.Dispose();
    return (new Bitmap(bcodeWidth, bcodeHeight, PixelFormat.Format32bppArgb));
}
You can modify the objGraphics.FillRectangle(new SolidBrush(Color.White), new Rectangle(0, 0, bcodeWidth, bcodeHeight)); call in GenerateBarcode and make the width greater:
int margin = 20; // use whatever margin width you need
objGraphics.FillRectangle(new SolidBrush(Color.White), new Rectangle(margin, 0, bcodeWidth + margin, bcodeHeight));
// Draw additional text to the left
objGraphics.DrawString(ADDITIONAL_TEXT, _titleFont, new SolidBrush(Color.Black), new RectangleF(0, 0, margin, bcodeHeight)); // you can modify the rectangle area as needed
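Note that widening only the fill rectangle won't be enough on its own: the bitmap returned by CreateImageContainer is sized exactly to the measured content, so the margin also has to be added to the container or it will be clipped. A minimal sketch under that assumption, with _margin as a hypothetical field shared by both methods:
// In CreateImageContainer - grow the container by the margin on both sides:
return new Bitmap(bcodeWidth + 2 * _margin, bcodeHeight, PixelFormat.Format32bppArgb);

// In GenerateBarcode - fill the enlarged area and shift each DrawString right:
objGraphics.FillRectangle(new SolidBrush(Color.White),
    new Rectangle(0, 0, bcodeWidth + 2 * _margin, bcodeHeight));
objGraphics.DrawString(barCode, Code39Font, new SolidBrush(Color.Black),
    _margin + XCentered((int)_barCodeSize.Width, bcodeWidth), vpos);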

Reading Datastream sharpDX Error all values are 0

I followed this solution for my project: How to create bitmap from Surface (SharpDX)
I don't have enough reputation to comment, so I'm opening a new question here.
My project is basically in Direct2D. I have a Surface buffer and a swap chain. I want to copy my buffer into a DataStream and read its values into a bitmap, then save it to disk (like a screen capture), but my code won't work: all the byte values are 0 (which is black), which doesn't make sense since my image is fully white with a bit of blue.
Here is my code:
SwapChainDescription description = new SwapChainDescription()
{
    ModeDescription = new ModeDescription(this.Width, this.Height, new Rational(60, 1), Format.B8G8R8A8_UNorm),
    SampleDescription = new SampleDescription(1, 0),
    Usage = Usage.RenderTargetOutput,
    BufferCount = 1,
    SwapEffect = SwapEffect.Discard,
    IsWindowed = true,
    OutputHandle = this.Handle
};
Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.Debug | DeviceCreationFlags.BgraSupport, description, out device, out swapChain);

SharpDX.DXGI.Device dxgiDevice = device.QueryInterface<SharpDX.DXGI.Device>();
SharpDX.DXGI.Adapter dxgiAdapter = dxgiDevice.Adapter;
SharpDX.Direct2D1.Device d2dDevice = new SharpDX.Direct2D1.Device(dxgiDevice);
d2dContext = new SharpDX.Direct2D1.DeviceContext(d2dDevice, SharpDX.Direct2D1.DeviceContextOptions.None);
SharpDX.Direct3D11.DeviceContext d3DeviceContext = new SharpDX.Direct3D11.DeviceContext(device);

properties = new BitmapProperties(
    new SharpDX.Direct2D1.PixelFormat(SharpDX.DXGI.Format.B8G8R8A8_UNorm, SharpDX.Direct2D1.AlphaMode.Premultiplied),
    96, 96);
Surface backBuffer = swapChain.GetBackBuffer<Surface>(0);
d2dTarget = new SharpDX.Direct2D1.Bitmap(d2dContext, backBuffer, properties);
d2dContext.Target = d2dTarget;

playerBitmap = this.LoadBitmapFromContentFile(@"C:\Users\ndesjardins\Desktop\wave.png");
//System.Drawing.Bitmap bitmapCanva = new System.Drawing.Bitmap(1254, 735);

d2dContext.BeginDraw();
d2dContext.Clear(SharpDX.Color.White);
d2dContext.DrawBitmap(playerBitmap, new SharpDX.RectangleF(0, 0, playerBitmap.Size.Width, playerBitmap.Size.Height), 1f, SharpDX.Direct2D1.BitmapInterpolationMode.NearestNeighbor);
SharpDX.Direct2D1.SolidColorBrush brush = new SharpDX.Direct2D1.SolidColorBrush(d2dContext, SharpDX.Color.Green);
d2dContext.DrawRectangle(new SharpDX.RectangleF(200, 200, 100, 100), brush);
d2dContext.EndDraw();
swapChain.Present(1, PresentFlags.None);

Texture2D backBuffer3D = backBuffer.QueryInterface<SharpDX.Direct3D11.Texture2D>();
Texture2DDescription desc = backBuffer3D.Description;
desc.CpuAccessFlags = CpuAccessFlags.Read;
desc.Usage = ResourceUsage.Staging;
desc.OptionFlags = ResourceOptionFlags.None;
desc.BindFlags = BindFlags.None;
var texture = new Texture2D(device, desc);
d3DeviceContext.CopyResource(backBuffer3D, texture);

byte[] data = null;
using (Surface surface = texture.QueryInterface<Surface>())
{
    DataStream dataStream;
    var map = surface.Map(SharpDX.DXGI.MapFlags.Read, out dataStream);
    int lines = (int)(dataStream.Length / map.Pitch);
    data = new byte[surface.Description.Width * surface.Description.Height * 4];
    dataStream.Position = 0;
    int dataCounter = 0;
    // width of the surface - 4 bytes per pixel
    int actualWidth = surface.Description.Width * 4;
    for (int y = 0; y < lines; y++)
    {
        for (int x = 0; x < map.Pitch; x++)
        {
            if (x < actualWidth)
            {
                data[dataCounter++] = dataStream.Read<byte>();
            }
            else
            {
                dataStream.Read<byte>();
            }
        }
    }
    dataStream.Dispose();
    surface.Unmap();
    int width = surface.Description.Width;
    int height = surface.Description.Height;
    byte[] bytewidth = BitConverter.GetBytes(width);
    byte[] byteheight = BitConverter.GetBytes(height);
    Array.Copy(bytewidth, 0, data, 0, 4);
    Array.Copy(byteheight, 0, data, 4, 4);
}
Do you have any idea why the byte array returned at the end is full of 0 when it should be mostly 255? All I drew to the back buffer was a bitmap image and a rectangle shape. The Array.Copy calls add a width and height header to the byte array so that I can create a bitmap out of it later.
I answered in a comment but the formatting was horrible, so apologies!
https://gamedev.stackexchange.com/a/112978/29920 This looks promising, but as you said in reply to my comment, it was from some time ago and I'm pulling this out of thin air; if it doesn't work, either someone with more current knowledge will have to answer or I'll have to grab some source code and try it myself.
SharpDX.Direct2D1.Bitmap dxbmp = new SharpDX.Direct2D1.Bitmap(renderTarget,
    new SharpDX.Size2(bmpWidth, bmpHeight),
    new BitmapProperties(renderTarget.PixelFormat));
dxbmp.CopyFromMemory(bmpBits, bmpWidth * 4);
This looks kind of like what you need. I'm assuming bmpBits here is either a byte array or a memory stream; either of those could then be saved off, or at least give you something to look at to see whether you're actually getting pixel data.
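If you want to check quickly whether the copied bytes contain real pixel data, you can also wrap the BGRA array in a System.Drawing.Bitmap and save it to disk. A minimal sketch, assuming data holds width * height * 4 tightly packed BGRA bytes (i.e. before the width/height header is copied over the first 8 bytes):
static void SaveBgraToPng(byte[] data, int width, int height, string path)
{
    using (var bmp = new System.Drawing.Bitmap(width, height,
        System.Drawing.Imaging.PixelFormat.Format32bppArgb))
    {
        var bounds = new System.Drawing.Rectangle(0, 0, width, height);
        var locked = bmp.LockBits(bounds,
            System.Drawing.Imaging.ImageLockMode.WriteOnly,
            System.Drawing.Imaging.PixelFormat.Format32bppArgb);
        // Copy row by row in case the Bitmap stride differs from width * 4.
        for (int y = 0; y < height; y++)
        {
            System.Runtime.InteropServices.Marshal.Copy(
                data, y * width * 4,
                IntPtr.Add(locked.Scan0, y * locked.Stride), width * 4);
        }
        bmp.UnlockBits(locked);
        bmp.Save(path, System.Drawing.Imaging.ImageFormat.Png);
    }
}
If the saved PNG is also black, the problem is in the copy from the staging texture; if it shows your white scene, the bytes are fine and the problem is further downstream.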

drawing a rectangle for a bitmap clone

I have a bitmap which I clone, specifying a rectangle. The current rectangle has fixed width and height values which I've used to check that region for a QR code, and I noticed it checks the top-left corner. How would I alter this to also check the top-right, bottom-right, and bottom-left corners with the same size (width and height)?
Bitmap result = fullImg.Clone(new System.Drawing.Rectangle(0, 0, 375, 375), fullImg.PixelFormat);
Any help is greatly appreciated.
for (int pg = 0; pg < inputDocument.PageCount; pg++)
{
    string workGif = workingFilename.Replace(".pdf", string.Format(".{0}.gif", pg + 1));
    GhostscriptWrapper.GeneratePageThumb(workingFilename, workGif, pg + 1, 300, 300); // size (last two params) does not seem to have any effect
    using (var fullImg = new Bitmap(workGif))
    {
        Bitmap result = fullImg.Clone(new System.Drawing.Rectangle(0, 0, 375, 375), fullImg.PixelFormat);
        string QRinfo = Process(result);
        MessageBox.Show(QRinfo);
        string[] qcode = QRinfo.Split('/');
        string gid = qcode[qcode.Count() - 1];
        Guid pgGuid = new Guid(gid);
    }
}
The Process method for the QR decoding:
public string Process(Bitmap bitmap)
{
    var reader = new com.google.zxing.qrcode.QRCodeReader();
    try
    {
        LuminanceSource source = new RGBLuminanceSource(bitmap, bitmap.Width, bitmap.Height);
        var binarizer = new HybridBinarizer(source);
        var binBitmap = new BinaryBitmap(binarizer);
        return reader.decode(binBitmap).Text;
    }
    catch (Exception e)
    {
        return e.Message;
    }
}
If the QR codes are ALWAYS in the corners, you can load the Bitmap into a PictureBox and then rotate it using the RotateFlip method:
Bitmap bp = new Bitmap("myImage.jpg");
pictureBox1.Image = bp;
bp.RotateFlip(RotateFlipType.Rotate90FlipNone);
pictureBox1.Invalidate();
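Alternatively, rather than rotating, you can compute the clone rectangle for each corner directly from the image size. A minimal sketch, assuming the 375x375 window and the Process method from the question (the image must be at least 375 px in each dimension):
using (var fullImg = new Bitmap(workGif))
{
    var corners = new[]
    {
        new System.Drawing.Rectangle(0, 0, 375, 375),                                      // top left
        new System.Drawing.Rectangle(fullImg.Width - 375, 0, 375, 375),                    // top right
        new System.Drawing.Rectangle(0, fullImg.Height - 375, 375, 375),                   // bottom left
        new System.Drawing.Rectangle(fullImg.Width - 375, fullImg.Height - 375, 375, 375)  // bottom right
    };
    foreach (var rect in corners)
    {
        using (Bitmap candidate = fullImg.Clone(rect, fullImg.PixelFormat))
        {
            string qrInfo = Process(candidate);
            // Process returns the exception message on failure, so decide here
            // what counts as a successful decode for your codes.
        }
    }
}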

EmguCV SURF with cam?

I am new to Emgu CV. I would like to use SURF to detect more than one pattern with a camera, like in this video. For now, as a starting point, I'm trying to get it working with a single pattern.
I examined Emgu CV's SURF example. When I try to merge that code into the camera capture example, I get an error at runtime. I searched further but did not find any code example.
Can you suggest a code snippet or a well-explained tutorial?
Thanks very much in advance.
The code I am working on is below:
...........................................
FrameRaw = capture.QueryFrame();
CamImageBox.Image = FrameRaw;
Run(FrameRaw);
...........................................
private void Run(Image<Bgr, byte> TempImage)
{
    Image<Gray, Byte> modelImage = new Image<Gray, byte>("sample.jpg");
    Image<Gray, Byte> observedImage = TempImage.Convert<Gray, Byte>();
    //Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
    Stopwatch watch;
    HomographyMatrix homography = null;
    SURFDetector surfCPU = new SURFDetector(500, false);
    VectorOfKeyPoint modelKeyPoints;
    VectorOfKeyPoint observedKeyPoints;
    Matrix<int> indices;
    Matrix<float> dist;
    Matrix<byte> mask;

    if (GpuInvoke.HasCuda)
    {
        GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
        // extract features from the object image
        using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
        using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
        using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
        using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
        {
            modelKeyPoints = new VectorOfKeyPoint();
            surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
            watch = Stopwatch.StartNew();

            // extract features from the observed image
            using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
            using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
            using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
            using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
            using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
            {
                observedKeyPoints = new VectorOfKeyPoint();
                surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);
                matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);
                indices = new Matrix<int>(gpuMatchIndices.Size);
                dist = new Matrix<float>(indices.Size);
                gpuMatchIndices.Download(indices);
                gpuMatchDist.Download(dist);

                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                }
                watch.Stop();
            }
        }
    }
    else
    {
        // extract features from the object image
        modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
        //MKeyPoint[] kpts = modelKeyPoints.ToArray();
        Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);
        watch = Stopwatch.StartNew();

        // extract features from the observed image
        observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
        Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
        BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
        matcher.Add(modelDescriptors);

        int k = 2;
        indices = new Matrix<int>(observedDescriptors.Rows, k);
        dist = new Matrix<float>(observedDescriptors.Rows, k);
        matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

        mask = new Matrix<byte>(dist.Rows, 1);
        mask.SetValue(255);
        Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

        int nonZeroCount = CvInvoke.cvCountNonZero(mask);
        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
            if (nonZeroCount >= 4)
                homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
        }
        watch.Stop();
    }

    // Draw the matched keypoints
    Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
        indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

    #region draw the projected region on the image
    if (homography != null)
    {
        // draw a rectangle along the projected model
        Rectangle rect = modelImage.ROI;
        PointF[] pts = new PointF[] {
            new PointF(rect.Left, rect.Bottom),
            new PointF(rect.Right, rect.Bottom),
            new PointF(rect.Right, rect.Top),
            new PointF(rect.Left, rect.Top)};
        homography.ProjectPoints(pts);
        result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
    }
    #endregion

    //ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
}
I found the SURF tutorial you used, but I don't see why it should cause an error. Have you been able to execute the tutorial code by itself, without the complication of GPU acceleration?
Moreover, what error occurred, exactly?

Kinect - Detecting when a Human exits the frame

What I'm trying to do is take the Kinect skeletal tracking sample and save x photos, but only when a human goes by. I have gotten it to work, except that once it detects a human it records x photos even after the person has left the Kinect's field of view. Does anyone know how to make it start recording once a person enters the frame, and stop once they leave?
Variables
Runtime nui;
int totalFrames = 0;
int totalFrames2 = 0;
int lastFrames = 0;
int lastFrameWithMotion = 0;
int stopFrameNumber = 100;
DateTime lastTime = DateTime.MaxValue;
Entering/Exiting the Frame
void nui_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    SkeletonFrame skeletonFrame = e.SkeletonFrame;
    int iSkeleton = 0;
    ++totalFrames;
    string bb1 = Convert.ToString(totalFrames);
    //Uri uri1 = new Uri("C:\\Research\\Kinect\\Proposal_Skeleton\\Skeleton_Img" + bb1 + ".png");
    Uri uri1 = new Uri("C:\\temp\\Skeleton_Img" + bb1 + ".png");
    Brush[] brushes = new Brush[6];
    brushes[0] = new SolidColorBrush(Color.FromRgb(255, 0, 0));
    brushes[1] = new SolidColorBrush(Color.FromRgb(0, 255, 0));
    brushes[2] = new SolidColorBrush(Color.FromRgb(64, 255, 255));
    brushes[3] = new SolidColorBrush(Color.FromRgb(255, 255, 64));
    brushes[4] = new SolidColorBrush(Color.FromRgb(255, 64, 255));
    brushes[5] = new SolidColorBrush(Color.FromRgb(128, 128, 255));
    skeleton.Children.Clear();
    foreach (SkeletonData data in skeletonFrame.Skeletons)
    {
        if (SkeletonTrackingState.Tracked == data.TrackingState)
        {
            // Draw bones
            Brush brush = brushes[iSkeleton % brushes.Length];
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.Spine, JointID.ShoulderCenter, JointID.Head));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.ShoulderCenter, JointID.ShoulderLeft, JointID.ElbowLeft, JointID.WristLeft, JointID.HandLeft));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.ShoulderCenter, JointID.ShoulderRight, JointID.ElbowRight, JointID.WristRight, JointID.HandRight));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.HipLeft, JointID.KneeLeft, JointID.AnkleLeft, JointID.FootLeft));
            skeleton.Children.Add(getBodySegment(data.Joints, brush, JointID.HipCenter, JointID.HipRight, JointID.KneeRight, JointID.AnkleRight, JointID.FootRight));
            // Draw joints
            foreach (Joint joint in data.Joints)
            {
                Point jointPos = getDisplayPosition(joint);
                Line jointLine = new Line();
                jointLine.X1 = jointPos.X - 3;
                jointLine.X2 = jointLine.X1 + 6;
                jointLine.Y1 = jointLine.Y2 = jointPos.Y;
                jointLine.Stroke = jointColors[joint.ID];
                jointLine.StrokeThickness = 6;
                skeleton.Children.Add(jointLine);
            }
            //ExportToPng(uri1, skeleton);
            nui.VideoFrameReady += new EventHandler<ImageFrameReadyEventArgs>(nui_ColorFrameReady2);
        }
        iSkeleton++;
    } // for each skeleton
}
Actual Code
void nui_ColorFrameReady2(object sender, ImageFrameReadyEventArgs e)
{
    // 32 bits per pixel, RGBA image
    PlanarImage Image = e.ImageFrame.Image;
    int deltaFrames = totalFrames - lastFrameWithMotion;
    if (totalFrames2 <= stopFrameNumber & deltaFrames > 300)
    {
        ++totalFrames2;
        string bb1 = Convert.ToString(totalFrames2);
        //string file_name_3 = "C:\\Research\\Kinect\\Proposal\\Depth_Img" + bb1 + ".jpg";
        string file_name_4 = "C:\\temp\\Video2_Img" + bb1 + ".jpg";
        video.Source = BitmapSource.Create(
            Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, Image.Bits, Image.Width * Image.BytesPerPixel);
        BitmapSource image4 = BitmapSource.Create(
            Image.Width, Image.Height, 96, 96, PixelFormats.Bgr32, null, Image.Bits, Image.Width * Image.BytesPerPixel);
        image4.Save(file_name_4, Coding4Fun.Kinect.Wpf.ImageFormat.Jpeg);
        if (totalFrames2 == stopFrameNumber)
        {
            lastFrameWithMotion = totalFrames;
            stopFrameNumber += 100;
        }
    }
}
In most setups I have used, the skeletal tracking event handler includes a check for if (skeleton != null); all you need to do is reset your trigger once a null skeleton is received. The SDK sends a skeleton frame every time the event fires, so:
if (skeleton != null)
{
    // do the image taking here
}
else
{
    // reset the image counter
}
I would try something like this: create a bool class variable named SkeletonInFrame and initialize it to false. Every time SkeletonFrameReady fires, set this bool to true. When you process a color frame, only process it if this variable is true, and after processing set it back to false. This should stop you from processing frames when you are no longer receiving skeleton events.
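A minimal sketch of that flag wired into the handlers from the question (skeletonInFrame is the new field; the existing bodies of both handlers stay as they are):
bool skeletonInFrame = false;

void nui_SkeletonFrameReady(object sender, SkeletonFrameReadyEventArgs e)
{
    // ...existing skeleton drawing code...
    skeletonInFrame = true; // a tracked skeleton event means someone is in view
}

void nui_ColorFrameReady2(object sender, ImageFrameReadyEventArgs e)
{
    if (!skeletonInFrame)
        return; // no skeleton since the last color frame, so stop saving photos

    // ...existing photo-saving code...

    skeletonInFrame = false; // consume the flag; the next skeleton event re-arms it
}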
