I've been searching the internet and I haven't had any luck. I'm using an Xbox Kinect with the Kinect SDK v1.0. I want to take the raw depth data and convert it into a text document so I can use the depth data. I found something on this site, but it was for Beta2 and I need to use v1.0. Any help is appreciated, but I am new to coding, so sample code would be best.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using System.Diagnostics;
using System.IO;
namespace DepthTextStream
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
}
const float MaxDepthDistance = 4095; // max value returned
const float MinDepthDistance = 850; // min value returned
const float MaxDepthDistanceOffset = MaxDepthDistance - MinDepthDistance;
private void Window_Loaded(object sender, RoutedEventArgs e)
{
kinectSensorChooser1.KinectSensorChanged += new DependencyPropertyChangedEventHandler(kinectSensorChooser1_KinectSensorChanged);
}
void kinectSensorChooser1_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e)
{
var oldSensor = (KinectSensor)e.OldValue;
//stop the old sensor
if (oldSensor != null)
{
oldSensor.Stop();
oldSensor.AudioSource.Stop();
}
//get the new sensor
var newSensor = (KinectSensor)e.NewValue;
if (newSensor == null)
{
return;
}
//turn on features that you need
newSensor.DepthStream.Enable(DepthImageFormat.Resolution320x240Fps30);
newSensor.SkeletonStream.Enable();
//sign up for events if you want to get at API directly
newSensor.AllFramesReady += new EventHandler<AllFramesReadyEventArgs>(newSensor_AllFramesReady);
try
{
newSensor.Start();
}
catch (System.IO.IOException)
{
//this happens if another app is using the Kinect
kinectSensorChooser1.AppConflictOccurred();
}
}
void newSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
short[] depthData;
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) //create a new frame every time one is ready
{
//assign a value to depthData
depthData = new short[depthFrame.PixelDataLength];
}
}
private void SaveDepthData(short[] depthData)
{
//initialize a StreamWriter
StreamWriter sw = new StreamWriter(@"C:/Example.txt");
//search the depth data and add it to the file
for (int i = 0; i < depthData.Length; i++)
{
sw.WriteLine(depthData[i] + "\n"); //\n for a new line
}
//dispose of sw
sw.Close();
}
private void Window_Closing(object sender, System.ComponentModel.CancelEventArgs e)
{
StopKinect(kinectSensorChooser1.Kinect);
}
private void StopKinect(KinectSensor sensor)
{
if (sensor != null)
{
if (sensor.IsRunning)
{
//stop sensor
sensor.Stop();
//stop audio if not null
if (sensor.AudioSource != null)
{
sensor.AudioSource.Stop();
}
}
}
}
}
}
This is pretty simple using version 1.5.0.1, which is practically the same as version 1.0, so it will work there too. All you need to complete this is A) a short[] to hold the depth data, B) a DepthImageFrame to move the data into the array, and C) a StreamWriter to save the data.
Add a short[] to store your depth data, and inside your DepthFrameReady (or AllFramesReady) event handler, "use" a DepthImageFrame like this:
short[] depthData;
...
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) //create a new frame every time one is ready
{
//assign a value to depthData
depthData = new short[depthFrame.PixelDataLength];
}
Then you can copy the depth data from each frame into depthData using DepthImageFrame.CopyPixelDataTo:
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame()) //create a new frame every time one is ready
{
//assign a value to depthData
depthData = new short[depthFrame.PixelDataLength];
//add raw depth data to depthData
depthFrame.CopyPixelDataTo(depthData);
}
Then we can write a method to save our data using a StreamWriter.
private void SaveDepthData(short[] depthData)
{
//initialize a StreamWriter
StreamWriter sw = new StreamWriter(@"C:/Example.txt");
//search the depth data and add it to the file
for (int i = 0; i < depthData.Length; i++)
{
sw.WriteLine(depthData[i] + "\n"); //\n for a new line
}
//dispose of sw
sw.Close();
}
...
SaveDepthData(depthData);
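Putting the pieces together, the frame handler could look roughly like the sketch below (an illustrative outline based on the snippets above, not tested code; note that writing a text file for every frame at 30 fps would be very slow, so in practice you would guard the save with a flag or a button press):
short[] depthData;
void newSensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
    using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
    {
        if (depthFrame == null)
            return; //frames can be skipped, so always check for null
        depthData = new short[depthFrame.PixelDataLength];
        depthFrame.CopyPixelDataTo(depthData);
    }
    //save only when you actually need a snapshot
    SaveDepthData(depthData);
}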
Hope this helps!
I am currently trying to display and play an animated GIF in WPF, without much luck. I have tried all the solutions found here, including the one in the question. Here is what I currently have:
<Window x:Class="Sooooothing.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:Attached="clr-namespace:Sooooothing"
Title="MainWindow" Height="327.861" Width="525">
<Grid>
<MediaElement x:Name="gif" MediaEnded="myGif_MediaEnded" UnloadedBehavior="Manual"
Source="Images\7c6516d73fc8643abf1df969fcc1a72c.gif"
LoadedBehavior="Play" Stretch="None"/>
</Grid>
</Window>
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
namespace Sooooothing
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
//gif.Play();
}
private void myGif_MediaEnded(object sender, RoutedEventArgs e)
{
gif.Position = new TimeSpan(0, 0, 1);
gif.Play();
}
}
}
When the application starts, all I get is the white background of the window. At first I thought it was the GIF not loading, but I let it sit for a couple of minutes without any change.
Here is my solution
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
namespace Sooooothing
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
GifBitmapDecoder decoder2;
int ImageNumber = 0;
bool mouseIn = true;
double opasity = 0;
List<Thread> threadList = new List<Thread>();
bool programClosed = false;
public MainWindow()
{
InitializeComponent();
Classes.DataHouse.load(); //I'm storing all my gif links in this List<string> so I can
// loop through them with buttons on the window
#region thread gif frame changer
Uri myUri = new Uri(Classes.DataHouse.images[ImageNumber], UriKind.RelativeOrAbsolute);
decoder2 = new GifBitmapDecoder(myUri, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
// Decode the gif using GifBitmapDecoder, then start a thread to loop through
// all the frames and put them one after another into an image on your window
BitmapSource bitmapSource2;
int frameCount = decoder2.Frames.Count;
Thread th = new Thread(() => {
while (!programClosed)
{
for (int i = 0; i < frameCount; i++)
{
try{
this.Dispatcher.Invoke(new Action(delegate()
{
frameCount = decoder2.Frames.Count;
if (frameCount >= i)
{
bitmapSource2 = decoder2.Frames[i];
image.Source = bitmapSource2;
}
}));
}
catch
{
//the thread was probably aborted
}
System.Threading.Thread.Sleep(100);
}
}
});
threadList.Add(th);
th.Start();
#endregion
}
private void Window_MouseDown(object sender, MouseButtonEventArgs e)
{
if (e.ChangedButton == MouseButton.Left)
this.DragMove();
}
private void img_forward_MouseLeftButtonDown(object sender, MouseButtonEventArgs e)
{
if (ImageNumber+1 >= Classes.DataHouse.images.Count)
ImageNumber = 0;
else
ImageNumber++;
Uri myUri = new Uri(Classes.DataHouse.images[ImageNumber], UriKind.RelativeOrAbsolute);
decoder2 = new GifBitmapDecoder(myUri, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
}
private void Grid_MouseEnter(object sender, MouseEventArgs e)
{
mouseIn = true;
Thread th = new Thread(() =>
{
while (opasity < 100 && mouseIn)
{
System.Threading.Thread.Sleep(25);
opasity++;
try
{
this.Dispatcher.Invoke(new Action(delegate()
{
img_forward.Opacity = opasity / 100;
img_exit.Opacity = opasity / 100;
img_backward.Opacity = opasity / 100;
}));
}
catch
{
//the thread was probably aborted
}
}
});
threadList.Add(th);
th.Start();
}
private void Grid_MouseLeave(object sender, MouseEventArgs e)
{
mouseIn = false;
Thread th = new Thread(() =>
{
while (opasity > 0 && !mouseIn)
{
System.Threading.Thread.Sleep(25);
opasity--;
try{
this.Dispatcher.Invoke(new Action(delegate()
{
img_forward.Opacity = opasity / 100;
img_exit.Opacity = opasity / 100;
img_backward.Opacity = opasity / 100;
}));
}
catch
{
//the thread was probably aborted
}
}
});
threadList.Add(th);
th.Start();
}
private void img_backward_MouseLeftButtonDown(object sender, MouseButtonEventArgs e)
{
if (ImageNumber - 1 < 0)
ImageNumber = Classes.DataHouse.images.Count-1;
else
ImageNumber--;
Uri myUri = new Uri(Classes.DataHouse.images[ImageNumber], UriKind.RelativeOrAbsolute);
decoder2 = new GifBitmapDecoder(myUri, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
}
private void img_exit_MouseLeftButtonDown(object sender, MouseButtonEventArgs e)
{
foreach (Thread th in threadList)
{
while(th.ThreadState == ThreadState.Running)
th.Abort();
}
programClosed = true;
this.Close();
}
}
}
Unfortunately, animated GIF images were somewhat of an oversight when Microsoft developed WPF, so there is no 'built in' functionality that handles this. There are several working examples found online, but you should still be aware that code needs to be run on the UI thread to animate these images.
Therefore, certain situations, such as trying to show a 'busy' GIF during a long-running load, will not work, because the UI thread will already be busy.
With that disclaimer out of the way, you can find some working code that will do what you want in Thomas Levesque's GitHub project at the link below:
thomaslevesque/WpfAnimatedGif
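For example, the typical usage (as I recall from that project's documentation; install the WpfAnimatedGif NuGet package first, and the GIF path below is just the one from your question) is an attached property on a regular Image element instead of a MediaElement:
<Window x:Class="Sooooothing.MainWindow"
        xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
        xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
        xmlns:gif="http://wpfanimatedgif.codeplex.com"
        Title="MainWindow" Height="327" Width="525">
    <Grid>
        <!-- WpfAnimatedGif drives the frame animation for you -->
        <Image gif:ImageBehavior.AnimatedSource="Images/7c6516d73fc8643abf1df969fcc1a72c.gif" />
    </Grid>
</Window>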
Using a Thread
public MainWindow()
{
InitializeComponent();
Uri myUri = new Uri(@"Images\2b3601fe2b62e87481bd301da52a2182.gif", UriKind.RelativeOrAbsolute);
GifBitmapDecoder decoder2 = new GifBitmapDecoder(myUri, BitmapCreateOptions.PreservePixelFormat, BitmapCacheOption.Default);
BitmapSource bitmapSource2;
int frameCount = decoder2.Frames.Count;
Thread th = new Thread(() => {
while (true)
{
for (int i = 0; i < frameCount; i++)
{
this.Dispatcher.Invoke(new Action(delegate()
{
bitmapSource2 = decoder2.Frames[i];
image.Source = bitmapSource2;
}));
System.Threading.Thread.Sleep(100);
}
}
});
th.Start();
}
This only works with physical files that aren't compiled as Resources. Try changing your image's Build Action to Content (or something similar) instead of Resource, and set Copy to Output Directory if needed. I managed to get a sample working that way. The key is that you must reference a physical file in some folder, or an internet URL.
For more info, check this out: https://social.msdn.microsoft.com/Forums/vstudio/en-US/93d50a97-0d8d-4b18-992e-cd3200693337/how-to-use-an-animated-gif?forum=wpf
OK, so I started writing an app following the How-To-Kinect series from Microsoft, and everything was going well until I wrote this:
using (InfraredFrame IRFrame = args.FrameReference.AcquiredFrame())
{
}
For some reason it keeps saying that args does not exist in the current context, and I have no idea why. Here is my full code:
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices.WindowsRuntime;
using Windows.Foundation;
using Windows.Foundation.Collections;
using Windows.UI.Xaml;
using Windows.UI.Xaml.Controls;
using Windows.UI.Xaml.Controls.Primitives;
using Windows.UI.Xaml.Data;
using Windows.UI.Xaml.Input;
using Windows.UI.Xaml.Media;
using Windows.UI.Xaml.Navigation;
using Windows.UI.Xaml.Media.Imaging;
using WindowsPreview.Kinect;
// The Blank Page item template is documented at http://go.microsoft.com/fwlink/?LinkId=234238
namespace KinectApp1
{
/// <summary>
/// An empty page that can be used on its own or navigated to within a Frame.
/// </summary>
public sealed partial class MainPage : Page
{
public MainPage()
{
this.InitializeComponent();
this.Loaded += MainPage_Loaded;
}
/*Getting the Kinect Sensor*/
KinectSensor Sensor;
/*Getting the Infared Reader*/
InfraredFrameReader IRReader;
/*This is IR Data Form*/
ushort[] IRData;
/*Converting the Data (Buffer) */
byte[] IRDataConverted;
/*Writing the Bitmap Image Described in XAML*/
WriteableBitmap IRBitmap;
void MainPage_Loaded(object sender, RoutedEventArgs e)
{
/*Get the sensor in the loaded frame*/
Sensor = KinectSensor.GetDefault();
/*Get the reader from the Source on the Sensor*/
IRReader = Sensor.InfraredFrameSource.OpenReader();
/*Frame Description for the Infrared and see how big they are*/
FrameDescription FD = Sensor.InfraredFrameSource.FrameDescription;
IRData = new ushort[FD.LengthInPixels];
IRDataConverted = new byte[FD.LengthInPixels * 4];
IRBitmap = new WriteableBitmap(FD.Width, FD.Height);
Image.Source = IRBitmap;
/*Start Sensor*/
Sensor.Open();
/*Subscribe to the event off the Reader*/
IRReader.FrameArrived += IRReader_FrameArrived;
}
void IRReader_FrameArrived(InfraredFrameReader sender, InfraredFrameArrivedEventArgs e)
{
using (InfraredFrame IRFrame = args.FrameReference.AcquiredFrame())
{
if (IRFrame != null)
{
IRFrame.CopyFrameDataToArray(IRData);
for (int i = 0; i < IRData.Length; i++)
{
byte intensity = (byte)(IRData[i]>>8);
IRDataConverted[i*4] = intensity;
IRDataConverted[i*4 + 1] = intensity;
IRDataConverted[i*4 + 2] = intensity;
IRDataConverted[i*4 + 3] = 255;
}
IRDataConverted.CopyTo(IRBitmap.PixelBuffer);
IRBitmap.Invalidate();
}
}
}
}
}
Can anyone kindly explain why this has happened? I am pretty confused.
Thanks in advance.
P.S. This is the video series I was following:
http://www.microsoft.com/en-us/kinectforwindows/develop/how-to-videos.aspx
Well, it's already been answered, so I'm not sure what else to do apart from post it as an answer.
As Preston Guillot said in the comments:
"There is no variable named args in scope in the IRReader_FrameArrived method. You have a parameter of type InfraredFrameArrivedEventArgs named e that I'm guessing you meant to use"
I am trying to create a recorder for the audio coming out of the sound card, and this is my progress so far. The problem is that the recorded audio file is very large; a single song can reach hundreds of megabytes when saved.
Here's my code:
using NAudio.CoreAudioApi;
using NAudio.Wave;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.IO;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace Record_From_Soundcard
{
public partial class frmMain : Form
{
private WaveFileWriter writer;
private WasapiLoopbackCapture waveInSel;
public frmMain()
{
InitializeComponent();
}
private void frmMain_Load(object sender, EventArgs e)
{
MMDeviceEnumerator deviceEnum = new MMDeviceEnumerator();
MMDeviceCollection deviceCol = deviceEnum.EnumerateAudioEndPoints(DataFlow.Render, DeviceState.Active);
cboAudioDrivers.DataSource = deviceCol.ToList();
}
private void btnStopRecord_Click(object sender, EventArgs e)
{
waveInSel.StopRecording();
writer.Close();
}
private void btnStartRecord_Click(object sender, EventArgs e)
{
using (SaveFileDialog _sfd = new SaveFileDialog())
{
_sfd.Filter = "Mp3 File (*.mp3)|*.mp3";
if (_sfd.ShowDialog() == System.Windows.Forms.DialogResult.OK)
{
MMDevice _device = (MMDevice)cboAudioDrivers.SelectedItem;
waveInSel = new WasapiLoopbackCapture(_device);
writer = new WaveFileWriter(_sfd.FileName, waveInSel.WaveFormat);
waveInSel.DataAvailable += (n, m) =>
{
writer.Write(m.Buffer, 0, m.BytesRecorded);
};
waveInSel.StartRecording();
}
}
}
}
}
Can anyone help me compress the audio when saving it?
Maybe it should be added in this part:
waveInSel.DataAvailable += (n, m) =>
{
writer.Write(m.Buffer, 0, m.BytesRecorded);
};
Thanks in advance.. ;)
Try this, using the NAudio DLL:
using NAudio.Wave;
using NAudio.Wave.SampleProviders;
using NAudio.Lame;
private void WaveToMP3(int bitRate = 128)
{
    string waveFileName = Application.StartupPath + @"\Temporal\mix.wav";
    string mp3FileName = Application.StartupPath + @"\Grabaciones\" + DateTime.Now.ToString("dd-MM-yyyy.HH.mm.ss") + ".mp3";
    using (var reader = new AudioFileReader(waveFileName))
    using (var writer = new LameMP3FileWriter(mp3FileName, reader.WaveFormat, bitRate))
    {
        reader.CopyTo(writer);
    }
}
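To use it with the recorder in the question, one approach (a sketch only; it assumes you capture to a temporary WAV path first and choose the MP3 name afterwards) is to call it after stopping the capture:
private void btnStopRecord_Click(object sender, EventArgs e)
{
    waveInSel.StopRecording();
    writer.Close();   //finish the temporary WAV file
    WaveToMP3(128);   //then encode it to MP3 with NAudio.Lame
}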
You can't make an MP3 file by saving a WAV file with a .MP3 extension (which is what you are doing here). You will need to select an encoder available on your machine, and pass the audio through that. I go into some detail about how to do this with NAudio in this article.
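If you want to skip the intermediate WAV entirely, NAudio.Lame's LameMP3FileWriter can be used in place of the WaveFileWriter so the loopback data is encoded as it arrives. A rough, untested sketch (mp3Writer is a new LameMP3FileWriter field you would add; depending on the NAudio.Lame version you may need to convert the WASAPI loopback's IEEE-float samples to 16-bit PCM before writing):
MMDevice _device = (MMDevice)cboAudioDrivers.SelectedItem;
waveInSel = new WasapiLoopbackCapture(_device);
mp3Writer = new LameMP3FileWriter(_sfd.FileName, waveInSel.WaveFormat, 128); //128 kbps
waveInSel.DataAvailable += (n, m) => mp3Writer.Write(m.Buffer, 0, m.BytesRecorded);
waveInSel.RecordingStopped += (n, m) => mp3Writer.Dispose();
waveInSel.StartRecording();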
I am working on a project which involves extracting features from the color and depth frames of a Kinect camera. The problem I am facing is that whenever I try displaying two images, the UI hangs. When I tried debugging, the depthFrame and colorFrame were coming up as null. If I enable only the color stream, then both colorImage and featureImage1 are displayed properly, and if I enable only the depth stream, it works as it should. But when I enable them both, the UI hangs. I have no idea what is causing the problem. I have the following code for my Kinect application. What is the cause of this problem and how can I fix it?
Config: Windows 8 Pro 64-bit, 2 GHz Core 2 Duo, Visual Studio 2012 Ultimate, EmguCV 2.4.0.
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using Emgu.CV;
using Emgu.CV.WPF;
using Emgu.CV.Structure;
using Emgu.Util;
namespace features
{
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
}
private Image<Bgra, Byte> cvColorImage;
private Image<Gray, Int16> cvDepthImage;
private int colorWidth = 640;
private int colorHeight = 480;
private int depthWidth = 640;
private int depthHeight = 480;
private static readonly int Bgr32BytesPerPixel = (PixelFormats.Bgr32.BitsPerPixel + 7) / 8;
private byte[] colorPixels;
private byte[] depthPixels;
private short[] rawDepthData;
private bool first = true;
private bool firstDepth = true;
Image<Bgra, byte> image2;
private void Window_Loaded(object sender, RoutedEventArgs e)
{
kinectSensorChooser.KinectSensorChanged += new DependencyPropertyChangedEventHandler(kinectSensorChooser_KinectSensorChanged);
}
void kinectSensorChooser_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e)
{
KinectSensor oldSensor = (KinectSensor)e.OldValue;
KinectStop(oldSensor);
KinectSensor _sensor = (KinectSensor)e.NewValue;
_sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);
_sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
_sensor.DepthFrameReady += new EventHandler<DepthImageFrameReadyEventArgs>(_sensor_DepthFrameReady);
_sensor.ColorFrameReady += new EventHandler<ColorImageFrameReadyEventArgs>(_sensor_ColorFrameReady);
try
{
_sensor.Start();
}
catch
{
kinectSensorChooser.AppConflictOccurred();
}
}
void KinectStop(KinectSensor sensor)
{
if (sensor != null)
{
sensor.Stop();
}
}
private void Window_Closed(object sender, EventArgs e)
{
KinectStop(kinectSensorChooser.Kinect);
}
void _sensor_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null) return;
if (first)
{
this.colorPixels = new Byte[colorFrame.PixelDataLength];
first = false;
}
colorFrame.CopyPixelDataTo(this.colorPixels); //raw data in bgrx format
processColor();
}
}
void _sensor_DepthFrameReady(object sender, DepthImageFrameReadyEventArgs e)
{
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (depthFrame == null) return;
if (firstDepth)
{
this.rawDepthData = new short[depthFrame.PixelDataLength];
firstDepth = false;
}
depthFrame.CopyPixelDataTo(rawDepthData);
processDepth();
}
}
private void processColor(){...}
private void processDepth(){...}
}
}
The processDepth function is as follows. I am just making an Image from the RAW depth data.
private void processDepth() {
GCHandle pinnedArray = GCHandle.Alloc(this.rawDepthData, GCHandleType.Pinned);
IntPtr pointer = pinnedArray.AddrOfPinnedObject();
cvDepthImage = new Image<Gray, Int16>(depthWidth, depthHeight, depthWidth << 1, pointer);
pinnedArray.Free();
depthImage.Source = BitmapSourceConvert.ToBitmapSource(cvDepthImage.Not().Bitmap);
}
The processColor function is as follows. Here just for the sake of it, I am trying to display the cloned image instead of extracting features, just to check the lag. When both streams are enabled (color and depth) the following function displays the colorImage properly, but as soon as I uncomment the commented lines, the UI hangs.
private void processColor() {
GCHandle handle = GCHandle.Alloc(this.colorPixels, GCHandleType.Pinned);
Bitmap image = new Bitmap(colorWidth, colorHeight, colorWidth<<2, System.Drawing.Imaging.PixelFormat.Format32bppRgb, handle.AddrOfPinnedObject());
handle.Free();
cvColorImage = new Image<Bgra, byte>(image);
image.Dispose();
BitmapSource src = BitmapSourceConvert.ToBitmapSource(cvColorImage.Bitmap);
colorImage.Source = src;
//image2 = new Image<Bgra, byte>(cvColorImage.ToBitmap()); //uncomment and it hangs
//featureImage1.Source = BitmapSourceConvert.ToBitmapSource(image2.Bitmap); //uncomment and it hangs
}
I see code that does a lot of work in the event handlers. I am almost certain those handlers are called on the GUI thread. I suggest you move that work to a background thread routine. Don't forget that updating the window's controls (depthImage and colorImage) must then be done by marshalling back to the UI thread, for example with Dispatcher.BeginInvoke.
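A rough sketch of that idea for the color stream (illustrative only; BuildColorBitmapSource is a hypothetical helper standing in for the EmguCV conversion done in processColor, and Freeze() is what allows the bitmap to be handed from the worker thread back to the UI thread):
void _sensor_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
    byte[] pixels;
    using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
    {
        if (colorFrame == null) return;
        pixels = new byte[colorFrame.PixelDataLength];
        colorFrame.CopyPixelDataTo(pixels);
    }
    //do the heavy EmguCV work off the UI thread
    Task.Run(() =>
    {
        BitmapSource src = BuildColorBitmapSource(pixels); //hypothetical helper wrapping the EmguCV code
        src.Freeze(); //makes the bitmap usable across threads
        Dispatcher.BeginInvoke(new Action(() => colorImage.Source = src));
    });
}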
I have a WPF app which provides a GUI for serial communication. I want to keep triggering the device and recording the values until the user says stop. I have a global bool variable STOP which I set to false when the user presses Stop, which in turn should terminate the while loop that triggers the device continuously. The problem is that once the user clicks the Run button, which starts the while loop, the WPF app freezes and doesn't accept the Stop button click.
Can someone give me an idea of what is happening here, or any suggestion on how to implement it differently?
Here is my code:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using System.IO.Ports;
using Keyence.data_class;
using Microsoft.Win32;
using System.IO;
using Keyence;
using System.Threading;
namespace Keyence
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public static string filePath;
public static string fileName;
public static int baudrate = 9600;
public static string thresHold = "60";
public bool STOP = true;
public static char[] buffer = new char[256];
public static string x, y, match;
public static string value;
public static SerialPort port = new SerialPort("COM4", baudrate, Parity.None, 8, StopBits.One);
public MainWindow()
{
InitializeComponent();
//port.DataReceived
//SerialPort port = new SerialPort(COM, baudrate, Parity.None, 8, StopBits.One);
port.Open();
port.NewLine = "\r";
//*******display camera types*****
string[] cameraTypes = new string[] { "CVseries100 ", "CVseries500" , "CVseries700" };
foreach (string cam in cameraTypes)
{
camera_type.Items.Add(cam);
}
//*******select your trigger mode( default is set to NONE)*******
string[] triggerModes = new string[] { "triggerModeNone", "triggerModeDATA", "triggerModeRepeat" };
foreach(string trigger in triggerModes)
{
trigger_mode.Items.Add(trigger);
}
//*****put a default value for thresHold********
threshold_value.Text = thresHold;
//*******add values to the baudrate dropdown********
baud_rate.Items.Add("9600");
baud_rate.Items.Add("19200");
baud_rate.Items.Add("38400");
port.DataReceived += new SerialDataReceivedEventHandler(port_DataReceived);
}
public static void port_DataReceived(object sender, SerialDataReceivedEventArgs e)
{
SerialPort sp = (SerialPort)sender;
value = sp.ReadTo("\r");
string[] values = value.Split(new char[] { ',' });
x = values[0];
y = values[1];
match = values[2];
}
//public static void get_data()
//{
// port.Write("T");
//}
#region encapsulates all the individuals functions when the user changes settings ( UI interaction functions)
private void get_sample_Click(object sender, RoutedEventArgs e)
{
//write code to read x,y,threshold values in the text boxes
port.Write("T");
//MainWindow.get_data();
x_value.Text = string.Format("{0}", x);
y_value.Text = string.Format("{0}", y);
thresholdvalue.Text = string.Format("{0}", match);
}
#region the method opens a savefileDialog(dataFile) and lets user save the file in which data is stored
public void data_file_Click(object sender, RoutedEventArgs e)
{
SaveFileDialog dataFile = new SaveFileDialog();
dataFile.DefaultExt = ".txt";
dataFile.InitialDirectory = Environment.GetFolderPath(Environment.SpecialFolder.MyDocuments);
dataFile.RestoreDirectory = true;
dataFile.Title = "select or create a new file";
DateTime currentTime = DateTime.Now;
dataFile.Filter = " Text Document |*.txt | Excel |*.xls";
string datepatt = @"M-d-yy_hh-mm_Data";
string saveDT = currentTime.ToString(datepatt);
dataFile.FileName = saveDT;
Nullable<bool> result = dataFile.ShowDialog();
if (result == true)
{
filePath = dataFile.FileName;
fileName = dataFile.SafeFileName;
}
System.IO.FileStream fs = (System.IO.FileStream)dataFile.OpenFile();
using (StreamWriter Write = new StreamWriter(fs))
{
Write.WriteLine("Wafer Placement Data\n\n");
Write.WriteLine("File Name : {0}\n", fileName);
Write.WriteLine("Port Name : COM4\n");
Write.WriteLine("Threshold : {0}\n", threshold_value.Text);
Write.WriteLine("Date : {0}\n\n", Convert.ToString(DateTime.Now));
//************Print Header to the file******************
Write.WriteLine("\tX\tY\tcorrelation\tDate\n");
}
}
#endregion
#region function called when the threshold value is changed
private void threshold_value_TextChanged(object sender, TextChangedEventArgs e)
{
thresHold = threshold_value.Text;
}
#endregion
#region function to write individual data points on the window for checking
private void writeSample(double X, double Y, double threshold)
{
FileStream file = new FileStream(filePath, FileMode.Append, FileAccess.ReadWrite);
using (StreamWriter Writer = new StreamWriter(file))
{
Writer.WriteLine(DateTime.Now);
Writer.WriteLine("\t X \t Y\t threshold% ");
}
}
#endregion
private void run_button_Click(object sender, RoutedEventArgs e)
{
do
{
port.Write("T");
FileStream file = new FileStream(filePath, FileMode.Append, FileAccess.Write);
Thread.Sleep(500);
using (StreamWriter Writer = new StreamWriter(file))
{
Writer.WriteLine("\t{0}\t{1}\t{2}\t{3}", x, y, match, Convert.ToString(DateTime.Now));
}
port.DiscardInBuffer();
} while (STOP);
}
private void stop_button_Click(object sender, RoutedEventArgs e)
{
STOP = false;
}
private void command_TextChanged(object sender, TextChangedEventArgs e)
{
string C = command.Text;
port.WriteLine(C);
}
#endregion
}
}
You need to run your serial port code on a separate thread. In your implementation the GUI thread is busy with the serial port loop, which is why your window is frozen. The following article explains exactly what you need:
http://www.ryanvice.net/wpf-3-5/using-backgroundworker-with-wpf/
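For example, a minimal sketch of the polling loop moved onto a BackgroundWorker (illustrative only; it reuses the port, filePath, x, y and match fields from your code and needs using System.ComponentModel;):
private BackgroundWorker worker;
private void run_button_Click(object sender, RoutedEventArgs e)
{
    worker = new BackgroundWorker { WorkerSupportsCancellation = true };
    worker.DoWork += (s, args) =>
    {
        while (!worker.CancellationPending)
        {
            port.Write("T"); //trigger the device
            Thread.Sleep(500); //sleeping here no longer blocks the UI
            using (StreamWriter writer = new StreamWriter(filePath, true)) //append
            {
                writer.WriteLine("\t{0}\t{1}\t{2}\t{3}", x, y, match, DateTime.Now);
            }
            port.DiscardInBuffer();
        }
    };
    worker.RunWorkerAsync();
}
private void stop_button_Click(object sender, RoutedEventArgs e)
{
    if (worker != null) worker.CancelAsync();
}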