OpenNI Face Detection in .NET Windows Forms Application - C#

I am developing a Windows Forms application in C#. I have to add face recognition functionality, and for that purpose I am using the OpenNI library. The hardware for video capture is the Xtion PRO LIVE.
I have successfully installed it and was able to run the sample code. That code continuously records video and nothing else. I have modified it so that after pressing a Capture button, it saves the current picture to the hard drive (that part works fine).
Now what I want to accomplish is to detect a face by its facial landmarks, so that I can verify a person's image against the saved images in a database. How can I do this with OpenNI?
Here is my code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using OpenNI;
using System.Threading;
using System.Drawing.Imaging;
namespace CameraApp
{
public partial class MainWindow : Form
{
public MainWindow()
{
InitializeComponent();
this.context = Context.CreateFromXmlFile(SAMPLE_XML_FILE, out scriptNode);
this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
if (this.depth == null)
{
throw new Exception("Viewer must have a depth node!");
}
this.histogram = new int[this.depth.DeviceMaxDepth];
MapOutputMode mapMode = this.depth.MapOutputMode;
this.bitmap = new Bitmap((int)mapMode.XRes, (int)mapMode.YRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
this.shouldRun = true;
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
protected override void OnPaint(PaintEventArgs e)
{
base.OnPaint(e);
lock (this)
{
e.Graphics.DrawImage(this.bitmap,
this.panelView.Location.X,
this.panelView.Location.Y,
this.panelView.Size.Width,
this.panelView.Size.Height);
}
}
protected override void OnPaintBackground(PaintEventArgs pevent)
{
//Don't allow the background to paint
}
protected override void OnClosing(CancelEventArgs e)
{
this.shouldRun = false;
this.readerThread.Join();
base.OnClosing(e);
}
protected override void OnKeyPress(KeyPressEventArgs e)
{
if (e.KeyChar == 27)
{
Close();
}
base.OnKeyPress(e);
}
private unsafe void CalcHist(DepthMetaData depthMD)
{
// reset
for (int i = 0; i < this.histogram.Length; ++i)
this.histogram[i] = 0;
ushort* pDepth = (ushort*)depthMD.DepthMapPtr.ToPointer();
int points = 0;
for (int y = 0; y < depthMD.YRes; ++y)
{
for (int x = 0; x < depthMD.XRes; ++x, ++pDepth)
{
ushort depthVal = *pDepth;
if (depthVal != 0)
{
this.histogram[depthVal]++;
points++;
}
}
}
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] += this.histogram[i - 1];
}
if (points > 0)
{
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] = (int)(256 * (1.0f - (this.histogram[i] / (float)points)));
}
}
}
private unsafe void ReaderThread()
{
DepthMetaData depthMD = new DepthMetaData();
while (this.shouldRun)
{
try
{
this.context.WaitOneUpdateAll(this.depth);
}
catch (Exception)
{
}
this.depth.GetMetaData(depthMD);
CalcHist(depthMD);
lock (this)
{
Rectangle rect = new Rectangle(0, 0, this.bitmap.Width, this.bitmap.Height);
BitmapData data = this.bitmap.LockBits(rect, ImageLockMode.WriteOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
//ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
//// set pixels
//for (int y = 0; y < depthMD.YRes; ++y)
//{
// byte* pDest = (byte*)data.Scan0.ToPointer() + y * data.Stride;
// for (int x = 0; x < depthMD.XRes; ++x, ++pDepth, pDest += 3)
// {
// byte pixel = (byte)this.histogram[*pDepth];
// pDest[0] = 0;
// pDest[1] = pixel;
// pDest[2] = pixel;
// }
//}
// This will point to the depth image.
ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
// This is meant to point to the RGB image. (Note: as written it
// re-reads the depth map pointer; a DepthGenerator exposes no RGB
// data, so a separate ImageGenerator node would be needed for color.)
RGB24Pixel* pRGB =
(RGB24Pixel*)this.depth.DepthMapPtr.ToPointer();
// Go over the depth and RGB image and set the bitmaps
// we're copying to based on our depth & RGB values.
for (int y = 0; y < depthMD.YRes; ++y)
{
// Assuming that the size of each data frame is
// 640x480.
// Scan line by line (480 lines), each line
// consists of 640 pointers.
byte* pDest_Depth =
(byte*)data.Scan0.ToPointer() + y *
data.Stride;
byte* pDest_Rgb = (byte*)data.Scan0.ToPointer()
+ y * data.Stride;
for (int x = 0; x < depthMD.XRes; ++x,
++pDepth, pDest_Depth += 3,
++pRGB, pDest_Rgb += 3)
{
// Change the color of the bitmap
// based on depth value.
byte pixel = (byte)this.histogram[*pDepth];
pDest_Depth[0] = 0;
pDest_Depth[1] = pixel;
pDest_Depth[2] = pixel;
// Get the RGB values to generate
// a whole RGB image.
byte red = pRGB->Red;
byte green = pRGB->Green;
byte blue = pRGB->Blue;
// Get depth information.
ushort depthVal = *pDepth;
}
}
this.bitmap.UnlockBits(data);
}
this.Invalidate();
}
}
private readonly string SAMPLE_XML_FILE = @"C:/Program Files/Microsoft Visual Studio 10.0/Microsoft Visual Studio 2010 Ultimate - ENU/OpenNI/Data/SamplesConfig.xml";
private Context context;
private ScriptNode scriptNode;
private DepthGenerator depth;
private Thread readerThread;
private bool shouldRun;
private Bitmap bitmap;
private int[] histogram;
private void button1_Click(object sender, EventArgs e)
{
this.readerThread.Abort();
this.bitmap.Save("D:\\Screenshot.jpeg", System.Drawing.Imaging.ImageFormat.Jpeg);
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
}
}
Any kind of help will be appreciated: any tutorial, link, anything!
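For what it's worth, OpenNI itself does not ship a face or landmark detector; the usual approach is to pair the OpenNI capture with an OpenCV-based detector such as the Haar cascades exposed by Emgu CV (the wrapper that appears in the later questions on this page). Below is a minimal sketch, assuming Emgu CV 2.4+ and a local copy of OpenCV's haarcascade_frontalface_default.xml; both are assumptions, not part of the original setup:
using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;

public static class FaceDetector // hypothetical helper, not part of the question's code
{
    // Returns the bounding rectangles of faces found in a captured bitmap.
    public static Rectangle[] DetectFaces(Bitmap frame)
    {
        using (var cascade = new CascadeClassifier("haarcascade_frontalface_default.xml"))
        using (var image = new Image<Bgr, byte>(frame))
        using (var gray = image.Convert<Gray, byte>())
        {
            // scale factor 1.1, 4 neighbors, minimum face size 20x20 pixels
            return cascade.DetectMultiScale(gray, 1.1, 4, new Size(20, 20), Size.Empty);
        }
    }
}
The returned rectangles could then be used to crop the saved capture before comparing it against the database images; actual identity verification would still need a recognition step (for example Emgu CV's EigenObjectRecognizer) on top of this detection pass.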

Related

How can I send the byte array for Bitmap to the main page in image processing?

I am trying to send bytes to alldata.AddRange(), but I want to do that line by line. What I mean is, for example: I have an RGB view of 640 * 360; the width of the view is 640. I want to take 640 * 3 = 1920 bytes (one line), make it gray, and send it back to the function (alldata.AddRange). If I send 360 such lines, I should get the whole image. How can I do that?
EDIT: I changed the code just a little. Maybe it can be thought of as sending data between classes through arrays, where I need to send the data in parts, rather than as an image processing problem.
Here is the code for Form1:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
public FilterInfoCollection devices;
public VideoCaptureDevice camera;
private void Form1_Load(object sender, EventArgs e)
{
devices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
foreach (FilterInfo item in devices)
{
comboBox1.Items.Add(item.Name);
}
camera = new VideoCaptureDevice();
comboBox1.SelectedIndexChanged += comboBox1_SelectedIndexChanged;
}
private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
try
{
if (camera.IsRunning == false)
{
camera = new VideoCaptureDevice(devices[comboBox1.SelectedIndex].MonikerString);
camera.NewFrame += Camera_NewFrame;
camera.Start();
}
}
catch (Exception exc)
{
MessageBox.Show(exc.Message + "");
}
}
public void Camera_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
{
List<byte> alldata = new List<byte>();
//byte[] line = new byte[360];
Bitmap image = (Bitmap)eventArgs.Frame.Clone();
byte[] maindata = new byte[image.Height*image.Width*4];
int count = 0;
if(btnapplyWasClicked == true)
{
for (int i = 0; i < image.Height; i++)
{
for (int j = 0; j < image.Width; j++)
{
Color color = image.GetPixel(j, i);
maindata[count] = color.R;
maindata[count + 1] = color.G;
maindata[count + 2] = color.B;
maindata[count + 3] = color.A;
count = count + 4;
for (int k = 1; k <= 360; k++)
{
if (maindata[(count + 4) * k] == maindata[2560 * k])
{
dnm2510img.Gray.GrayFilter(maindata, 2560 * k);
}
}
}
}
//alldata.AddRange(maindata);
}
}
private bool btnapplyWasClicked = false;
//private bool button1WasClicked = false;
//private bool GeriALWasClicked = false;
private void btnapply_Click(object sender, EventArgs e)
{
btnapplyWasClicked = true;
}
private void button1_Click(object sender, EventArgs e)
{
//button1WasClicked = true;
}
}
}
Here is the code for Grayscale:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public class Gray
{
public static byte[] GrayFilter(byte[] data,int width)
{
List<byte> alldataa = new List<byte>();
for (int i = 0; i < width; i++)
{
int temp =((data[i]+data[i+1]+data[i+2]+data[i+3]) / 4);
data[i] = (byte)temp;
data[i+1] = (byte)temp;
data[i+2] = (byte)temp;
data[i + 3] = (byte)temp;
}
//alldataa.AddRange(data);
return data;
}
}
}
This is how you convert a 24 bpp bitmap to grayscale and output it to a linear array:
public static unsafe byte[] ToBgr24To8Mono(Bitmap source)
{
var width = source.Width;
var height = source.Height;
var sourceData = source.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, source.PixelFormat);
var sourceStride = sourceData.Stride;
var sourcePtr = (byte*)sourceData.Scan0;
var targetArray = new byte[width * height];
try
{
Parallel.For(0, height, y =>
{
var sourceRow = sourcePtr + y * sourceStride;
var targetRow = y * width;
for (int x = 0; x < width; x++)
{
var sourceIndex = (sourceRow + x * 3);
var value = (byte) (sourceIndex[0] * 0.11f + sourceIndex[1] * 0.59f + sourceIndex[2] * 0.3f);
targetArray[targetRow + x] = value;
}
});
}
finally
{
source.UnlockBits(sourceData);
}
return targetArray;
}
If you want to use a 32bit image as input, change x * 3 to x * 4. The parallel loop can be switched to a regular loop if you wish.
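Spelled out, a sketch of that 32 bpp variant looks like this (same weights and layout as the method above, with the parallel loop swapped for a regular one, as the answer permits, and the alpha byte ignored):
public static unsafe byte[] ToBgra32To8Mono(Bitmap source)
{
    var width = source.Width;
    var height = source.Height;
    var sourceData = source.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, source.PixelFormat);
    var sourcePtr = (byte*)sourceData.Scan0;
    var targetArray = new byte[width * height];
    try
    {
        for (int y = 0; y < height; y++)
        {
            var sourceRow = sourcePtr + y * sourceData.Stride;
            var targetRow = y * width;
            for (int x = 0; x < width; x++)
            {
                // 4 bytes per pixel (B, G, R, A); the alpha byte is skipped.
                var sourceIndex = sourceRow + x * 4;
                targetArray[targetRow + x] = (byte)(sourceIndex[0] * 0.11f + sourceIndex[1] * 0.59f + sourceIndex[2] * 0.3f);
            }
        }
    }
    finally
    {
        source.UnlockBits(sourceData);
    }
    return targetArray;
}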

Cluster overlapping circles?

I am trying to cluster (group) every circle that is connected to the others through an uninterrupted chain of overlaps. How could I do that (preferably in a fairly efficient way)?
(I have messed around trying to write some recursive functions but haven't gotten anything to work.)
I have created a VS project to visualize the problem.
Download here:
It generates random circles.
How the clustering currently works (it only looks at which circles overlap that specific circle, not everything that is connected):
How it should look if it's working (separate clusters for all connected circles):
CODE: (C#)
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
// Cluster overlapping circles
// Patrik Fröhler
// www.patan77.com
// 2017-08-14
namespace circleGroup
{
struct circle // the circle "object"
{
public float[] pos;
public int radius;
public Color color;
public int id;
public float x
{
get { return pos[0]; }
set { pos[0] = value; }
}
public float y
{
get { return pos[1]; }
set { pos[1] = value; }
}
}
public partial class Form1 : Form
{
DB _DB = new DB(); // "Global Database"
public Form1()
{
InitializeComponent();
}
private static circle createCircle(float _x = 0, float _y = 0, int _radius = 1, Color? _color = null, int _id = -1) // creates a circle
{
circle tmpCircle = new circle() { pos = new float[2], x = _x, y = _y, radius = _radius, id = _id };
tmpCircle.color = _color ?? Color.Black;
return (tmpCircle);
}
private circle[] genRngCircles(int _n) // generates an array of random circles
{
Random rng = new Random();
circle tmpC;
circle[] tmpCarr = new circle[_n];
for (int i = 0; i < _n; i++)
{
tmpC = createCircle();
tmpC.radius = rng.Next(10, 75);
tmpC.x = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.y = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.color = Color.FromArgb(127, rng.Next(0, 255), rng.Next(0, 255), rng.Next(0, 255));
tmpC.id = i;
tmpCarr[i] = tmpC;
}
return tmpCarr;
}
private void drawCircle(circle _circle, Graphics _g) // draws one circle
{
SolidBrush sb = new SolidBrush(_circle.color);
_g.FillEllipse(sb, (_circle.x - _circle.radius), (_circle.y - _circle.radius), (_circle.radius * 2), (_circle.radius * 2));
sb.Dispose();
}
private void drawString(float[] _pos, string _text, Graphics _g) // draws text
{
StringFormat sf = new StringFormat();
sf.LineAlignment = StringAlignment.Center;
sf.Alignment = StringAlignment.Center;
Font font = new Font("Arial", 12);
SolidBrush sb = new SolidBrush(Color.Black);
float x = _pos[0];
float y = _pos[1];
_g.DrawString(_text, font, sb, x, y, sf);
font.Dispose();
sb.Dispose();
}
private void drawCircleArr(circle[] _circleArr, Graphics _g)// draws an array of circles
{
_g.Clear(panel1.BackColor);
for (int i = 0; i < _circleArr.Length; i++)
{
drawCircle(_circleArr[i], _g);
drawString(_circleArr[i].pos, _circleArr[i].id.ToString(), _g);
}
}
static double mDistance<T>(T[] _p0, T[] _p1) // gets euclidean distance between two points of arbitrary numbers of dimensions
{
double[] p0 = new double[] { Convert.ToDouble(_p0[0]), Convert.ToDouble(_p0[1]) };
double[] p1 = new double[] { Convert.ToDouble(_p1[0]), Convert.ToDouble(_p1[1]) };
double tmp = 0;
double tmpTotal = 0;
for (int i = 0; i < _p0.Length; i++)
{
tmp = (p0[i] - p1[i]);
tmpTotal += (tmp * tmp);
}
double output = Math.Sqrt(tmpTotal);
return (output);
}
private bool overlap(circle _c0, circle _c1) // checks if two circles overlap
{
double dis = mDistance(_c0.pos, _c1.pos);
if (dis <= (_c0.radius + _c1.radius))
{
return (true);
}
return (false);
}
private Color avgColor(List<circle> _colorArr) // averages multiple colors together
{
float ia = 0;
float ir = 0;
float ig = 0;
float ib = 0;
for (int i = 0; i < _colorArr.Count; i++)
{
ia += _colorArr[i].color.A;
ir += _colorArr[i].color.R;
ig += _colorArr[i].color.G;
ib += _colorArr[i].color.B;
}
byte a = Convert.ToByte(Math.Round(ia / _colorArr.Count));
byte r = Convert.ToByte(Math.Round(ir / _colorArr.Count));
byte g = Convert.ToByte(Math.Round(ig / _colorArr.Count));
byte b = Convert.ToByte(Math.Round(ib / _colorArr.Count));
return (Color.FromArgb(a, r, g, b));
}
private void treeView(List<circle>[] _circleLArr) // Create Treeview
{
treeView1.Nodes.Clear();
for (int i = 0; i < _circleLArr.Length; i++)
{
treeView1.Nodes.Add(i.ToString());
for (int j = 0; j < _circleLArr[i].Count; j++)
{
treeView1.Nodes[i].Nodes.Add(_circleLArr[i][j].id.ToString());
}
}
treeView1.ExpandAll();
}
private void drawCircleClusters(List<circle>[] _circleLArr, Graphics _g) // draws the circle clusters
{
_g.Clear(panel1.BackColor);
circle tmpC;
Color tmpColor;
for (int i = 0; i < _circleLArr.Length; i++)
{
tmpColor = avgColor(_circleLArr[i]);
for (int j = 0; j < _circleLArr[i].Count; j++)
{
tmpC = _circleLArr[i][j];
tmpC.color = tmpColor;
drawCircle(tmpC, _g);
drawString(_circleLArr[i][j].pos, _circleLArr[i][j].id.ToString(), _g);
}
}
}
//----------------------------------------------------
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
/*
private circle[] recurOverlap(circle[] _circleArr) // recursive overlap test(not done/working)
{
List<circle> overlapArr = new List<circle>();
List<circle> dontOverlapArr = new List<circle>();
bool loop = true;
int n = 0;
while (loop)
{
if (overlap(_circleArr[0], _circleArr[n]))
{
overlapArr.Add(_circleArr[n]);
dontOverlapArr.Insert(0, _circleArr[n]);
circle[] dontArr = dontOverlapArr.ToArray();
recurOverlap(dontArr);
}
else
{
dontOverlapArr.Add(_circleArr[n]);
}
n++;
if (n >= _circleArr.Length)
{
loop = false;
}
}
if(_circleArr.Length <= 1)
{
return _circleArr;
}
else{
return overlapArr.ToArray();
}
}
private List<circle>[] clusterBrecur(circle[] _circleArr)
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
recurOverlap(_circleArr);
}
return (tmpLArr);
}*/
private void run() // Run function
{
treeView1.Nodes.Clear(); // clear tree view
_DB.g = panel1.CreateGraphics();// Create Panel Graphics to draw on
_DB.circleArr = genRngCircles(10); // Creates an array with random circles
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
clusterAbtn.Enabled = true; // enables the cluster button
}
private void clusterA() // clusterA function
{
_DB.circleClusters = simpleOverlap(_DB.circleArr); // runs cluster algorithm test A
treeView(_DB.circleClusters); // Creates the treeview
drawCircleClusters(_DB.circleClusters, _DB.g); // draws the circle clusters
}
private void clusterB()
{
}
private void clusterA_rClick()
{
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
}
private void runBtn_Click(object sender, EventArgs e) // run button click
{
run();
}
private void clusterAbtn_MouseUp(object sender, MouseEventArgs e)
{
switch (e.Button)
{
case MouseButtons.Left:
clusterA();
break;
case MouseButtons.Right:
clusterA_rClick();
break;
}
}
private void clusterBbtn_Click(object sender, EventArgs e) // clusterB button click
{
clusterB();
}
}
class DB // "Database"
{
public Graphics g;
public circle[] circleArr;
public List<circle>[] circleClusters;
}
}
The current "overlap function"
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
I made the following change to your code. It looks like it's working:
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<List<circle>> list = new List<List<circle>>();
//List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
//for (int i = 0; i < (_circleArr.Length); i++)
foreach (circle circle in _circleArr)
{
List<circle> cluster = null;
//tmpLArr[i] = new List<circle>();
//for (int j = 0; j < (_circleArr.Length); j++)
//{
// if (overlap(_circleArr[i], _circleArr[j]))
// {
// tmpLArr[i].Add(_circleArr[j]);
// }
//}
foreach(List<circle> cluster2 in list)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle, circle2))
{
cluster = cluster2;
goto label_001;
}
}
}
label_001:
if (cluster == null)
{
cluster = new List<circle>();
list.Add(cluster);
}
cluster.Add(circle);
}
bool flag = true;
for (int i = 0; i < list.Count; i += (flag ? 1 : 0))
{
flag = true;
List<circle> cluster = list[i];
for (int j = i + 1; j < list.Count; j++)
{
List<circle> cluster2 = list[j];
if (Intersects(cluster, cluster2))
{
cluster.AddRange(cluster2);
list.Remove(cluster2);
j--;
flag = false;
}
}
}
return list.ToArray();
//return (tmpLArr);
}
bool Intersects(List<circle> cluster1, List<circle> cluster2)
{
foreach (circle circle1 in cluster1)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle1, circle2))
{
return true;
}
}
}
return false;
}
I had to add one more method, bool Intersects(List<circle> cluster1, List<circle> cluster2). See if it helps.
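For comparison, the same connected-component grouping can be written with a small union-find (disjoint set) structure instead of merging lists after the fact. This is just a sketch built on the question's circle struct and overlap method, not the original poster's code:
private List<circle>[] unionFindClusters(circle[] _circleArr)
{
    // Each circle starts in its own cluster; parent[i] points toward
    // the representative of circle i's cluster.
    int[] parent = new int[_circleArr.Length];
    for (int i = 0; i < parent.Length; i++) parent[i] = i;

    // Find with path compression.
    Func<int, int> find = null;
    find = i => parent[i] == i ? i : (parent[i] = find(parent[i]));

    // Union every overlapping pair (same O(n^2) pair test as simpleOverlap).
    for (int i = 0; i < _circleArr.Length; i++)
        for (int j = i + 1; j < _circleArr.Length; j++)
            if (overlap(_circleArr[i], _circleArr[j]))
                parent[find(i)] = find(j);

    // Group the circles by their cluster representative.
    var groups = new Dictionary<int, List<circle>>();
    for (int i = 0; i < _circleArr.Length; i++)
    {
        int root = find(i);
        if (!groups.ContainsKey(root)) groups[root] = new List<circle>();
        groups[root].Add(_circleArr[i]);
    }
    return groups.Values.ToArray();
}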
I believe the function you are looking for is intersection. I have attached an article by Mike K which I believe will give you an idea of how to approach this in your own code.
C# circles intersections

Attempting to draw rectangles multithreaded

I am attempting to draw about 3600 points on a form. It is pretty slow using one thread, so I decided to use 4 threads for it.
In my code I divide the 3600 points between the 4 threads, and they are supposed to draw them. However, for some reason an ArgumentOutOfRangeException is being thrown.
I tried to debug my code but I couldn't find the mistake.
Here is the code:
(Ignore the class _3DPoint; it is just a point that has x, y, z values. When I draw them I only use the x, y values.)
Code for drawing the points:
public Graphics g; //g = this.CreateGraphics() in form1.Load()
public void drawrectangle(_3DPoint p)
{
float xCord = float.Parse(p.x.ToString());
float yCord = float.Parse(p.y.ToString());
Brush b = new SolidBrush(Color.White);
xCord = lsize * xCord + center.X;
yCord = lsize * yCord + 10 + center.Y;
g.FillRectangle(b, xCord, yCord, 2, 2);
}
lsize, center are just variables for aligning the points as I want them.
All of the multithread action code:
public List<_3DPoint[]> multiThreadsdata = new List<_3DPoint[]>();
public void handlemultithread(_3DPoint[] P)
{
g.Clear(Color.Black);
for (int i = 0; i < multiThreads.Length; i++)
{
multiThreadsdata.Add(new _3DPoint[P.Length / multiThreads.Length]);
}
for (int i = 0; i < multiThreads.Length; i++)
{
for (int j = (P.Length / multiThreads.Length) * (i); j < (P.Length / multiThreads.Length) * (i + 1); j++)
{
multiThreadsdata[i][j - ((P.Length / multiThreads.Length) * i)] = new _3DPoint(P[j]);
}
}
for (int i = 0; i < multiThreads.Length; i++)
{
multiThreads[i] = new Thread(() => drawPoints(multiThreadsdata[i]));
multiThreads[i].Start();
}
}
delegate void SetCallBackPoint(_3DPoint location);
public void drawPoints(_3DPoint[] locations)
{
for (int i = 0; i < locations.Length; i++)
{
if (this.InvokeRequired)
{
SetCallBackPoint e = new SetCallBackPoint(drawrectangle);
this.Invoke(e, new object[] { locations[i] });
}
else
{
drawrectangle(locations[i]);
}
}
}
P is a _3DPoint array that contains all the 3600 points.
multiThreads is a Thread[] containing 4 threads.
I get the exception in the handlemultithread method, in the third line of this for loop:
for (int i = 0; i < multiThreads.Length; i++)
{
multiThreads[i] = new Thread(() => drawPoints(multiThreadsdata[i])); // <- here.
multiThreads[i].Start();
}
I don't know what the problem is; my guess is that it is something with the multithreading, because I'm just a beginner at multithreading.
Thanks a bunch.
It is entirely possible to draw 3600 rectangles quickly on a form when you apply the suggestions in the comments.
If that doesn't give you enough time, you can consider creating Images on a single background thread and storing them in some sort of buffer until they are needed to be painted on the Graphics object of the Paint event of the form. That is only feasible if you can know upfront what needs to be painted on the next frame.
This example uses a simple BackgroundWorker to fill a ConcurrentQueue with images. The comments in the code explain what is going on.
// Usings needed for this snippet:
using System;
using System.Collections.Concurrent;
using System.ComponentModel;
using System.Diagnostics;
using System.Drawing;
using System.Threading;
using System.Windows.Forms;
public partial class Form1 : Form
{
static ConcurrentQueue<Image> buffer = new ConcurrentQueue<Image>();
static Random r = new Random();
public Form1()
{
InitializeComponent();
backgroundWorker1.RunWorkerAsync();
// this is already a great performance win ...
DoubleBuffered = true;
}
private void Form1_Paint(object sender, PaintEventArgs e)
{
Image img =null;
// get from buffer ..
if (!buffer.TryDequeue(out img))
{
// nothing available
// direct random
for (var x = 0; x < e.ClipRectangle.Width; x++)
{
for (var y = 0; y < e.ClipRectangle.Height; y++)
{
using (var pen = new Pen(new SolidBrush(Color.FromArgb(r.Next(255), r.Next(255), r.Next(255)))))
{
e.Graphics.DrawRectangle(pen, x, y, 1, 1);
}
}
}
}
else
{
// otherwise Draw the prepared image
e.Graphics.DrawImage(img,0,0);
Trace.WriteLine(buffer.Count);
img.Dispose();
}
}
private void button1_Click(object sender, EventArgs e)
{
// force a repaint of the Form
Invalidate();
}
private void backgroundWorker1_DoWork(object sender, DoWorkEventArgs e)
{
// as long as the form is not disposed
while (!IsDisposed)
{
// we keep 60 images in memory
if (buffer.Count < 60)
{
// bitmap
var bmp = new Bitmap(this.Width, this.Height);
var img = Graphics.FromImage(bmp);
// draw
for (int i = 0; i < 3600; i++)
{
using (var pen = new Pen(new SolidBrush(Color.FromArgb(r.Next(255), r.Next(255), r.Next(255)))))
{
img.DrawRectangle(pen, r.Next(Width),r.Next(Height), r.Next(Width), r.Next(Height));
}
}
// store the drawing in the buffer
buffer.Enqueue(bmp);
}
else
{
// simple and naive way to give other threads a bit of room
Thread.Sleep(0);
}
}
}
}
Keep in mind that when you have a CPU-heavy process, adding more threads will not magically make your methods run quicker. You might even make it worse: more threads compete for time on the CPU.
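As for the ArgumentOutOfRangeException itself: the lambda in new Thread(() => drawPoints(multiThreadsdata[i])) captures the loop variable i, not its value, so by the time a thread actually runs, i may already equal multiThreads.Length. Copying the counter into a per-iteration local is the standard fix; a minimal sketch against the question's loop:
for (int i = 0; i < multiThreads.Length; i++)
{
    int index = i; // each lambda now captures its own copy of the counter
    multiThreads[i] = new Thread(() => drawPoints(multiThreadsdata[index]));
    multiThreads[i].Start();
}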

Kinect 1.8 colorframe and depthframe not coordinated

My program has a problem with poor coordination between the depth and color images.
The player mask is not in the same place as the person (see the picture below).
void _AllFreamReady(object sender, AllFramesReadyEventArgs e)
{
using (ColorImageFrame _colorFrame = e.OpenColorImageFrame())
{
if (_colorFrame == null) // if the frame is empty, do nothing
{
return;
}
byte[] _pixels = new byte[_colorFrame.PixelDataLength]; // create a pixel array for one image frame, with the dimensions of the frame captured from the stream
_colorFrame.CopyPixelDataTo(_pixels); // copy the pixels into the array
int _stride = _colorFrame.Width * 4; // each pixel can have 4 values: Red, Green, Blue, or empty
image1.Source =
BitmapSource.Create(_colorFrame.Width, _colorFrame.Height,
96, 96, PixelFormats.Bgr32, null, _pixels, _stride);
if (_closing)
{
return;
}
using (DepthImageFrame _depthFrame = e.OpenDepthImageFrame())
{
if (_depthFrame == null)
{
return;
}
byte[] _pixelsdepth = _GenerateColoredBytes(_depthFrame,_pixels);
int _dstride = _depthFrame.Width * 4;
image3.Source =
BitmapSource.Create(_depthFrame.Width, _depthFrame.Height,
96, 96, PixelFormats.Bgr32, null, _pixelsdepth, _dstride);
}
}
}
private byte[] _GenerateColoredBytes(DepthImageFrame _depthFrame, byte[] _pixels)
{
short[] _rawDepthData = new short[_depthFrame.PixelDataLength];
_depthFrame.CopyPixelDataTo(_rawDepthData);
Byte[] _dpixels = new byte[_depthFrame.Height * _depthFrame.Width * 4];
const int _blueindex = 0;
const int _greenindex = 1;
const int _redindex = 2;
for (int _depthindex = 0, _colorindex = 0;
_depthindex < _rawDepthData.Length && _colorindex < _dpixels.Length;
_depthindex++, _colorindex += 4)
{
int _player = _rawDepthData[_depthindex] & DepthImageFrame.PlayerIndexBitmaskWidth;
if (_player > 0)
{
_dpixels[_colorindex + _redindex] = _pixels[_colorindex + _redindex];
_dpixels[_colorindex + _greenindex] = _pixels[_colorindex + _greenindex];
_dpixels[_colorindex + _blueindex] = _pixels[_colorindex + _blueindex];
};
}
return _dpixels;
}
RGB and depth data are not aligned. This is due to the positions of the depth sensor and the RGB camera in the Kinect case: they are different, so you cannot expect aligned images from different points of view.
However, your problem is quite common, and was solved by KinectSensor.MapDepthFrameToColorFrame, which was deprecated after SDK 1.6. Now what you need is the CoordinateMapper.MapDepthFrameToColorFrame method.
The Coordinate Mapping Basics-WPF C# Sample shows how to use this method. You can find some significant parts of the code in the following:
// Intermediate storage for the depth data received from the sensor
private DepthImagePixel[] depthPixels;
// Intermediate storage for the color data received from the camera
private byte[] colorPixels;
// Intermediate storage for the depth to color mapping
private ColorImagePoint[] colorCoordinates;
// Inverse scaling factor between color and depth
private int colorToDepthDivisor;
// Format we will use for the depth stream
private const DepthImageFormat DepthFormat = DepthImageFormat.Resolution320x240Fps30;
// Format we will use for the color stream
private const ColorImageFormat ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
//...
// Initialization
this.colorCoordinates = new ColorImagePoint[this.sensor.DepthStream.FramePixelDataLength];
this.depthWidth = this.sensor.DepthStream.FrameWidth;
this.depthHeight = this.sensor.DepthStream.FrameHeight;
int colorWidth = this.sensor.ColorStream.FrameWidth;
int colorHeight = this.sensor.ColorStream.FrameHeight;
this.colorToDepthDivisor = colorWidth / this.depthWidth;
this.sensor.AllFramesReady += this.SensorAllFramesReady;
//...
private void SensorAllFramesReady(object sender, AllFramesReadyEventArgs e)
{
// in the middle of shutting down, so nothing to do
if (null == this.sensor)
{
return;
}
bool depthReceived = false;
bool colorReceived = false;
using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
{
if (null != depthFrame)
{
// Copy the pixel data from the image to a temporary array
depthFrame.CopyDepthImagePixelDataTo(this.depthPixels);
depthReceived = true;
}
}
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (null != colorFrame)
{
// Copy the pixel data from the image to a temporary array
colorFrame.CopyPixelDataTo(this.colorPixels);
colorReceived = true;
}
}
if (true == depthReceived)
{
this.sensor.CoordinateMapper.MapDepthFrameToColorFrame(
DepthFormat,
this.depthPixels,
ColorFormat,
this.colorCoordinates);
// ...
int depthIndex = x + (y * this.depthWidth);
DepthImagePixel depthPixel = this.depthPixels[depthIndex];
// scale color coordinates to depth resolution
int X = colorImagePoint.X / this.colorToDepthDivisor;
int Y = colorImagePoint.Y / this.colorToDepthDivisor;
// depthPixel is the depth for the (X,Y) pixel in the color frame
}
}
I am working on this problem myself. I agree with VitoShadow that one solution is in the coordinate mapping, but there is a section not posted that handles the ratio between the mismatched depth and color screen resolutions (this.colorToDepthDivisor = colorWidth / this.depthWidth;). This is used with a shift of the data (this.playerPixelData[playerPixelIndex - 1] = opaquePixelValue;) to account for the mismatch.
Unfortunately, this can create a border around the masked image where the depth frame isn't stretched to the edge of the color frame. I am trying to avoid skeleton mapping and am optimizing my code by tracking depth data with Emgu CV to pass a point as the center of the ROI of the color frame. I am still working on it.

emgu cv CvInvoke.cvRemap hangs when trying to undistort from stereo calibration data

I'm trying to implement a stereo camera calibration app using emgu cv.
My problem is when I try to use CvInvoke.cvRemap to undistort an image the function just hangs. No errors or crashes, it just hangs and I've left it for 2 hours in case it was just being slow. Here's what I'm doing:
1. Capturing 10 pairs of chessboard samples (left and right), making sure FindChessboardCorners works on each. I'm not doing anything special to sync the cameras, just capturing them at the same time.
2. Generating a set of object points based off the chessboard used.
3. Doing a separate CalibrateCamera on the left and right images of each sample, using the object points from 2 and the image points from 1.
4. Doing a StereoCalibrate using the IntrinsicCameraParameters generated by CalibrateCamera in 3, the object points in 2, and the image points captured from the chessboards in 1.
5. Doing a StereoRectify using the IntrinsicCameraParameters from 3/4.
6. Generating mapx and mapy for both left and right from cvInitUndistortRectifyMap using the output from 5.
7. Attempting to cvRemap using mapx and mapy from 6 and fresh images captured from the cameras.
8. NEXT: Use StereoBM.FindStereoCorrespondence and PointCollection.ReprojectImageTo3D to generate a point cloud from my hopefully calibrated stereo data.
So when I get to step 7, cvRemap just hangs. I've gotten cvRemap to work capturing from a single camera, though, so I know the function works to some degree with my setup.
I've written a class to manage multiple cameras:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
class camData
{
public Capture capture;
public Image<Bgr, Byte> lastFrame;
public Image<Gray, Byte> lastFrameGray;
public bool lastChessboardFound;
public PointF[] lastChessboardCorners;
public Image<Gray, Byte>[] samplesGray;
public PointF[][] samplesChessboardCorners;
public Size cbDimensions;
public Size imageDimensions;
public int cursampleIndex = 0;
public ImageList sampleIcons;
private Image<Gray, Byte> _chessBoardDisplay;
private int _iconWidth = 160;
private int _icnonHeight = 90;
private int _numSamples = 0;
public int numSamples()
{
return _numSamples;
}
public void numSamples(int val)
{
_numSamples = val;
this.samplesGray = new Image<Gray, Byte>[val];
this.samplesChessboardCorners = new PointF[val][];
this.sampleIcons.ImageSize = new Size(_iconWidth, _icnonHeight);
Bitmap tmp = new Bitmap(_iconWidth, _icnonHeight);
this.sampleIcons.Images.Clear();
for (int c = 0; c < _numSamples; c++) this.sampleIcons.Images.Add(tmp);
}
public camData(int camIndex, int capWidth, int capHeight, int pcbWidth, int pcbHeight, int pNumSamples)
{
this.sampleIcons = new ImageList();
try
{
this.capture = new Capture(camIndex);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, capWidth);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, capHeight);
}
catch (Exception e)
{
MessageBox.Show(e.Message);
return;
}
this.imageDimensions = new Size(capWidth, capHeight);
this.cbDimensions = new Size(pcbWidth, pcbHeight);
this.numSamples(pNumSamples);
}
public Image<Gray, Byte> captureFrame()
{
this.lastFrame = this.capture.QueryFrame();
this.lastFrameGray = this.lastFrame.Convert<Gray, Byte>();
return this.lastFrameGray;
}
public int captureSample()
{
this.detectChessboard(true); // detectChessboard calls -> captureFrame
if (lastChessboardFound)
{
this.samplesGray[cursampleIndex] = this.lastFrameGray;
this.samplesChessboardCorners[cursampleIndex] = this.lastChessboardCorners;
this.sampleIcons.Images[this.cursampleIndex] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.cursampleIndex++;
if (this.cursampleIndex >= _numSamples) this.cursampleIndex = 0;
}
return cursampleIndex;
}
public void clearSamples()
{
this.cursampleIndex = 0;
this.numSamples(_numSamples);
}
public Image<Gray, Byte> detectChessboard(bool pDoCapture)
{
if (pDoCapture) this.captureFrame();
this.lastChessboardFound = CameraCalibration.FindChessboardCorners(this.lastFrameGray, this.cbDimensions, CALIB_CB_TYPE.ADAPTIVE_THRESH | CALIB_CB_TYPE.FILTER_QUADS, out this.lastChessboardCorners);
_chessBoardDisplay = this.lastFrameGray.Clone();
CameraCalibration.DrawChessboardCorners(this._chessBoardDisplay, this.cbDimensions, this.lastChessboardCorners, this.lastChessboardFound);
return this._chessBoardDisplay;
}
public void saveSampleImages(string pPath, string pID)
{
for(int ic = 0; ic < this._numSamples; ic++)
{
this.samplesGray[ic].Save(pPath + pID + ic.ToString() + ".bmp");
}
}
public void loadSampleImages(string pPath, string pID)
{
clearSamples();
for (int ic = 0; ic < this._numSamples; ic++)
{
this.lastFrameGray = new Image<Gray, byte>(new Bitmap(pPath + pID + ic.ToString() + ".bmp"));
this.detectChessboard(false);
this.samplesChessboardCorners[ic] = this.lastChessboardCorners;
this.sampleIcons.Images[ic] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.samplesGray[ic] = this.lastFrameGray;
}
}
}
}
And here's my form code with the rest of the calibration logic:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Runtime.InteropServices;
using Emgu.CV.Util;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
public partial class CaptureForm : Form
{
private static camData camLeft;
private static camData camRight;
private int _numSamples = 10; // Number of calibration samples
private int _imageWidth = 1280; // web cam resolution
private int _imageHeight = 720; // web cam resolution
private int _cbWidth = 9; // chessboard corner count
private int _cbHeight = 5; // chessboard corner count
// TODO: Test post calibration values, these will need to be loaded and saved
private static Matrix<double> _foundamentalMatrix;
private static Matrix<double> _essentialMatrix;
private static IntrinsicCameraParameters _inPramsLeft;
private static IntrinsicCameraParameters _inPramsRight;
private static ExtrinsicCameraParameters _outExtParamsStereo;
private Matrix<float> _mapxLeft;
private Matrix<float> _mapyLeft;
private Matrix<float> _mapxRight;
private Matrix<float> _mapyRight;
public CaptureForm()
{
InitializeComponent();
Run();
}
void Run()
{
camLeft = new camData(0, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
camRight = new camData(1, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
this.listViewLeft.LargeImageList = camLeft.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewLeft.Items.Add(curItem);
}
this.listViewRight.LargeImageList = camRight.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewRight.Items.Add(curItem);
}
Application.Idle += ProcessFrame;
}
void ProcessFrame(object sender, EventArgs e)
{
if (!checkBoxRectify.Checked)
{
if (this.checkBoxCapCB.Checked)
{
imageBoxLeft.Image = camLeft.detectChessboard(true);
imageBoxRight.Image = camRight.detectChessboard(true);
}
else
{
imageBoxLeft.Image = camLeft.captureFrame();
imageBoxRight.Image = camRight.captureFrame();
}
}
else
{
camLeft.captureFrame();
camRight.captureFrame();
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
CvInvoke.cvRemap(camRight.lastFrameGray.Ptr, imgRight.Ptr, _mapxRight.Ptr, _mapyRight.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
imageBoxLeft.Image = imgLeft;
imageBoxRight.Image = imgRight;
}
//checkBoxRectify
}
private void buttonCaptureSample_Click(object sender, EventArgs e)
{
camLeft.captureSample();
camRight.captureSample();
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonStereoCalibrate_Click(object sender, EventArgs e)
{
// We should have most of the data needed from the sampling with the camData objects
int numCorners = _cbWidth * _cbHeight;
// Calc intrisitcs / camera
_inPramsLeft = new IntrinsicCameraParameters();
_inPramsRight = new IntrinsicCameraParameters();
ExtrinsicCameraParameters[] outExtParamsLeft;
ExtrinsicCameraParameters[] outExtParamsRight;
//Matrix<double> foundamentalMatrix;
//Matrix<double> essentialMatrix;
outExtParamsLeft = new ExtrinsicCameraParameters[_numSamples];
outExtParamsRight = new ExtrinsicCameraParameters[_numSamples];
_outExtParamsStereo = new ExtrinsicCameraParameters();
// Building object points
// These are the points on the chessboard in local 3D coordinates
// Requires one set per sample; if the same calibration object (chessboard) is used for each sample then just use the same set of points for each sample
// Also setting sub-pixel analysis on samples
MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[_numSamples][];
for (int sc = 0; sc < _numSamples; sc++) // Samples count
{
// individual cam setup
outExtParamsLeft[sc] = new ExtrinsicCameraParameters();
outExtParamsRight[sc] = new ExtrinsicCameraParameters();
// Sub-pixel analysis
camLeft.samplesGray[sc].FindCornerSubPix(new PointF[][] { camLeft.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
camRight.samplesGray[sc].FindCornerSubPix(new PointF[][] { camRight.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
// Object points
objectPoints[sc] = new MCvPoint3D32f[numCorners];
for (int cc = 0; cc < numCorners; cc++) // chessboard corners count
{
objectPoints[sc][cc].x = cc / _cbWidth;
objectPoints[sc][cc].y = cc % _cbWidth;
objectPoints[sc][cc].z = 0.0f;
}
}
Size imageSize = new Size(_imageWidth, _imageHeight);
// Individual cam calibration
CameraCalibration.CalibrateCamera(objectPoints, camLeft.samplesChessboardCorners, imageSize, _inPramsLeft, CALIB_TYPE.DEFAULT, out outExtParamsLeft);
CameraCalibration.CalibrateCamera(objectPoints, camRight.samplesChessboardCorners, imageSize, _inPramsRight, CALIB_TYPE.DEFAULT, out outExtParamsRight);
// Stereo Cam calibration
CameraCalibration.StereoCalibrate(
objectPoints,
camLeft.samplesChessboardCorners,
camRight.samplesChessboardCorners,
_inPramsLeft,
_inPramsRight,
imageSize,
CALIB_TYPE.CV_CALIB_FIX_ASPECT_RATIO | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST | CALIB_TYPE.CV_CALIB_FIX_FOCAL_LENGTH,
new MCvTermCriteria(100, 0.001),
out _outExtParamsStereo,
out _foundamentalMatrix,
out _essentialMatrix
);
PrintIntrinsic(_inPramsLeft);
PrintIntrinsic(_inPramsRight);
}
private void listViewLeft_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void listViewRight_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void buttonSaveSamples_Click(object sender, EventArgs e)
{
camLeft.saveSampleImages(textBoxSavePath.Text, "left");
camRight.saveSampleImages(textBoxSavePath.Text, "right");
}
private void buttonLoadSamples_Click(object sender, EventArgs e)
{
camLeft.loadSampleImages(textBoxSavePath.Text, "left");
camRight.loadSampleImages(textBoxSavePath.Text, "right");
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonCapture_Click(object sender, EventArgs e)
{
}
private void buttonCaptureCurframe_Click(object sender, EventArgs e)
{
camLeft.captureFrame();
camRight.captureFrame();
camLeft.lastFrame.Save(textBoxSavePath.Text + "frameLeft" + ".bmp");
camLeft.lastFrameGray.Save(textBoxSavePath.Text + "frameLeftGray" + ".bmp");
camRight.lastFrame.Save(textBoxSavePath.Text + "frameRight" + ".bmp");
camRight.lastFrameGray.Save(textBoxSavePath.Text + "frameRightGray" + ".bmp");
}
public void StereoRectify(
IntrinsicCameraParameters intrinsicParam1,
IntrinsicCameraParameters intrinsicParam2,
Size imageSize,
ExtrinsicCameraParameters extrinsicParams,
out Matrix<double> R1,
out Matrix<double> R2,
out Matrix<double> P1,
out Matrix<double> P2,
out Matrix<double> Q,
STEREO_RECTIFY_TYPE flags,
double alpha,
Size newImageSize,
ref Rectangle validPixROI1,
ref Rectangle validPixROI2
)
{
R1 = new Matrix<double>(3, 3);
R2 = new Matrix<double>(3, 3);
P1 = new Matrix<double>(3, 4);
P2 = new Matrix<double>(3, 4);
Q = new Matrix<double>(4, 4);
CvInvoke.cvStereoRectify(
_inPramsLeft.IntrinsicMatrix.Ptr,
_inPramsRight.IntrinsicMatrix.Ptr,
_inPramsLeft.DistortionCoeffs.Ptr,
_inPramsRight.DistortionCoeffs.Ptr,
imageSize,
extrinsicParams.RotationVector.Ptr,
extrinsicParams.TranslationVector.Ptr,
R1.Ptr,
R2.Ptr,
P1.Ptr,
P2.Ptr,
Q.Ptr,
STEREO_RECTIFY_TYPE.DEFAULT,
alpha,
newImageSize,
ref validPixROI1,
ref validPixROI1);
}
public void InitUndistortRectifyMap(
IntrinsicCameraParameters intrinsicParam,
Matrix<double> R,
Matrix<double> newCameraMatrix,
out Matrix<float> mapx,
out Matrix<float> mapy
)
{
mapx = new Matrix<float>(new Size(_imageWidth, _imageHeight));
mapy = new Matrix<float>(new Size(_imageWidth, _imageHeight));
CvInvoke.cvInitUndistortRectifyMap(intrinsicParam.IntrinsicMatrix.Ptr, intrinsicParam.DistortionCoeffs.Ptr, R.Ptr, newCameraMatrix.Ptr, mapx.Ptr, mapy.Ptr);
}
private void buttonTestCalc_Click(object sender, EventArgs e)
{
// Stereo Rectify images
Matrix<double> R1;
Matrix<double> R2;
Matrix<double> P1;
Matrix<double> P2;
Matrix<double> Q;
Rectangle validPixROI1, validPixROI2;
validPixROI1 = new Rectangle();
validPixROI2 = new Rectangle();
StereoRectify(_inPramsLeft, _inPramsRight, new Size(_imageWidth, _imageHeight), _outExtParamsStereo, out R1, out R2, out P1, out P2, out Q, 0, 0, new Size(_imageWidth, _imageHeight), ref validPixROI1, ref validPixROI2);
//InitUndistortRectifyMap(_inPramsLeft, R1, P1, out _mapxLeft, out _mapyLeft);
//InitUndistortRectifyMap(_inPramsRight, R2, P2, out _mapxRight, out _mapyRight);
_inPramsLeft.InitUndistortMap(_imageWidth, _imageHeight, out _mapxLeft, out _mapyLeft);
_inPramsRight.InitUndistortMap(_imageWidth, _imageHeight, out _mapxRight, out _mapyRight);
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
// **** THIS IS WHERE I'M UP TO: no errors, it just hangs ****
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
// StereoBM stereoSolver = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0);
//stereoSolver.FindStereoCorrespondence(
}
public void PrintIntrinsic(IntrinsicCameraParameters CamIntrinsic)
{
// Prints the Intrinsic camera parameters to the command line
Console.WriteLine("Intrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamIntrinsic.IntrinsicMatrix.Height; i++)
{
for (j = 0; j < CamIntrinsic.IntrinsicMatrix.Width; j++)
{
outStr = outStr + CamIntrinsic.IntrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Distortion Coefficients: ");
outStr = "";
for (j = 0; j < CamIntrinsic.DistortionCoeffs.Height; j++)
{
outStr = outStr + CamIntrinsic.DistortionCoeffs.Data[j, 0].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
}
public void PrintExtrinsic(ExtrinsicCameraParameters CamExtrinsic)
{
// Prints the Extrinsic camera parameters to the command line
Console.WriteLine("Extrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamExtrinsic.ExtrinsicMatrix.Height; i++)
{
for (j = 0; j < CamExtrinsic.ExtrinsicMatrix.Width; j++)
{
outStr = outStr + CamExtrinsic.ExtrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Rotation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.RotationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.RotationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.RotationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Translation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.TranslationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.TranslationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.TranslationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
}
}
}
Thanks!
Your maps must be images instead of matrices.
Specifically, of "Gray, float" type.
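Applied to the question's code, that would mean declaring and allocating the four map fields as Image<Gray, float> instead of Matrix<float>. A minimal sketch of the change (untested; field names taken from the question):
// Maps as single-channel float images rather than matrices:
private Image<Gray, float> _mapxLeft;
private Image<Gray, float> _mapyLeft;
private Image<Gray, float> _mapxRight;
private Image<Gray, float> _mapyRight;

// ...allocated before calling cvInitUndistortRectifyMap:
_mapxLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
_mapyLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
_mapxRight = new Image<Gray, float>(_imageWidth, _imageHeight);
_mapyRight = new Image<Gray, float>(_imageWidth, _imageHeight);
// The .Ptr properties passed to cvInitUndistortRectifyMap and cvRemap stay the same.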
