WPF Redraw all Children UI - C#

All I need is to show a ToolTip on a Line.
At what point should I add children to the control? They need to be added after the model is bound.
I tried to do it in OnRender:
protected override void OnRender(DrawingContext context)
{
base.OnRender(context);
_planetsPos = new Dictionary<int, Point>();
Pen pen = new Pen((SolidColorBrush)(new BrushConverter().ConvertFrom("#000000")), 1.0);
context.DrawEllipse(Brushes.White, pen, new Point(CentreX, CentreY), Radius, Radius);
pen = new Pen((SolidColorBrush)(new BrushConverter().ConvertFrom("#00C800")), 1.0);
if (!_load && Planets.Count > 0)
{
_aspects = new Dictionary<PlanetModel, List<AspectUI>>();
for (int i = 0; i < Planets.Count; i++)
{
List<AspectUI> list = new List<AspectUI>();
for (int j = 0; j < Planets.Count; j++)
{
var aspect = new AspectUI(Planets[j]);
list.Add(aspect);
if (i != j)
{
this.Children.Add(aspect);
}
}
_aspects.Add(Planets[i], list);
}
_load = true;
}
for (int i = 0; i < Planets.Count; i++)
{
for (int j = 1; j < Planets.Count; j++)
{
if (i != j)
{
var delta = Math.Abs(Planets[i].Longitude - Planets[j].Longitude);
for (int k = 0; k < Orbs.Orbs.Count; k++)
{
var orb = Orbs.Orbs[k];
pen = new Pen((SolidColorBrush)(new BrushConverter().ConvertFrom("#" + orb.Color)), 1.0);
var obj = _aspects.Where(w => w.Key.Code == Planets[i].Code);
if (null != obj)
{
var aspectUI = obj.FirstOrDefault().Value[j];
int ind = this.Children.IndexOf(aspectUI);
if (ind > -1)
{
var child = (AspectUI)this.Children[ind];
SetAspectUIData(child, pen, i, j, delta, orb);
}
}
}
}
}
}
}
private void SetAspectUIData(AspectUI aspectUI, Pen pen, int i, int j, double delta, Orb orb)
{
double orbValue = (double)GetPropValue(orb, "Pl" + i.ToString());
if ((orb.Aspect <= delta && orb.Aspect + orbValue >= delta) ||
(orb.Aspect >= delta && orb.Aspect - orbValue <= delta))
{
aspectUI.SetAspectData(_planetsPos[Planets[i].Code], _planetsPos[Planets[j].Code], pen,
String.Format("{0} {1} - {2}", orb.Name, Planets[i].Name, Planets[j].Name));
aspectUI.Visibility = Visibility.Visible;
}
else
{
aspectUI.Visibility = Visibility.Hidden;
}
}
How do I update these controls? OnRender for AspectUI is not called. Here is the AspectUI class:
using ISweEph;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Media;
using System.Windows.Shapes;
using System.Windows.Threading;
namespace Zodiac.Controls
{
public class AspectUI : Canvas
{
PlanetModel _planet;
Point _startPoint;
Point _endPoint;
Pen _pen;
public AspectUI(PlanetModel planet)
{
_planet = planet;
}
public void SetAspectData(Point startPoint, Point endPoint, Pen pen, String toolTip)
{
_startPoint = startPoint;
_endPoint = endPoint;
_pen = pen;
ToolTip = toolTip;
}
protected override void OnRender(DrawingContext context)
{
base.OnRender(context);
context.DrawLine(_pen, _startPoint, _endPoint);
}
}
}
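One thing worth checking here (a hedged sketch, not part of the original code): WPF only calls OnRender again after an element's visuals have been invalidated, so after SetAspectData updates the line you would normally call InvalidateVisual() on the AspectUI, for example:
public void SetAspectData(Point startPoint, Point endPoint, Pen pen, String toolTip)
{
    _startPoint = startPoint;
    _endPoint = endPoint;
    _pen = pen;
    ToolTip = toolTip;
    // Ask WPF to schedule a new render pass for this element;
    // without this, OnRender is not called again after the initial layout.
    InvalidateVisual();
}
As a rule of thumb, creating the AspectUI children inside OnRender is usually avoided; they are typically added once when the model is bound (for example in a property-changed callback) and only updated afterwards.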

Related

Telerik Creating a StackedBarChart

I'm fairly new to WPF and MVVM in general. I'm trying to follow this tutorial to create a stacked bar chart in Telerik/WPF C#:
https://docs.telerik.com/devtools/winforms/knowledge-base/chartview-summary-labels-stacked-bars
But I'm unsure where to implement the "Custom Renderer and Labels" code and can't seem to get it to work. Should I declare a separate class or something? A rough step-by-step guide is all I need, thanks in advance.
This is the example code (I'm not sure where to put it):
public class CustomCartesianRenderer : CartesianRenderer
{
public CustomCartesianRenderer(CartesianArea area)
: base(area)
{ }
protected override void InitializeSeriesLabels()
{
base.InitializeSeriesLabels();
IDictionary<object, List<double?>> summaryValues = new Dictionary<object, List<double?>>();
for (int i = 0; i < this.Area.Series.Count; i++)
{
BarSeries barSeries = this.Area.Series[i] as BarSeries;
if (barSeries == null)
{
continue;
}
for (int j = 0; j < barSeries.DataPoints.Count; j++)
{
CategoricalDataPoint dp = (CategoricalDataPoint)barSeries.DataPoints[j];
if (!summaryValues.ContainsKey(dp.Category))
{
summaryValues.Add(dp.Category, new List<double?>() { dp.Value });
}
else
{
summaryValues[dp.Category].Add(dp.Value);
}
}
}
string lastSeriesName = this.Area.Series[this.Area.Series.Count - 1].Name;
for (int i = 0; i < this.DrawParts.Count; i++)
{
BarLabelElementDrawPart labelPart = this.DrawParts[i] as BarLabelElementDrawPart;
if (labelPart != null && labelPart.Element.Name == lastSeriesName)
{
CustomBarLabelElementDrawPart customLabelPart = new CustomBarLabelElementDrawPart((BarSeries)labelPart.Element, this);
customLabelPart.SummaryValues = summaryValues;
this.DrawParts[i] = customLabelPart;
}
}
}
}
public class CustomBarLabelElementDrawPart : BarLabelElementDrawPart
{
private IDictionary<object, List<double?>> summaryValues;
public CustomBarLabelElementDrawPart(BarSeries series, IChartRenderer renderer)
: base(series, renderer)
{ }
public IDictionary<object, List<double?>> SummaryValues
{
get
{
return this.summaryValues;
}
set
{
this.summaryValues = value;
}
}
public override void Draw()
{
Graphics graphics = this.Renderer.Surface as Graphics;
RadGdiGraphics radGraphics = new RadGdiGraphics(graphics);
foreach (DataPointElement dataPointElement in this.Element.Children)
{
CategoricalDataPoint categoricalDataPoint = dataPointElement.DataPoint as CategoricalDataPoint;
if (!this.summaryValues.ContainsKey(categoricalDataPoint.Category))
{
continue;
}
double? sum = this.summaryValues[categoricalDataPoint.Category].Sum();
string summaryText = string.Format("Sum: {0}", sum);
RadRect slot = categoricalDataPoint.LayoutSlot;
RectangleF barBounds = new RectangleF((float)(this.OffsetX + slot.X), (float)(this.OffsetY + slot.Y), (float)slot.Width, (float)slot.Height);
float realHeight = barBounds.Height * dataPointElement.HeightAspectRatio;
barBounds.Y += barBounds.Height - realHeight;
barBounds.Height = realHeight;
barBounds = this.AdjustBarDataPointBounds(dataPointElement, barBounds);
barBounds.Width = Math.Max(barBounds.Width, 1f);
object state = radGraphics.SaveState();
int horizontalTranslate = (int)(barBounds.X + barBounds.Width / 2);
int verticalTranslate = (int)(barBounds.Y + barBounds.Height / 2);
float angle = (float)this.Element.LabelRotationAngle % 360f;
if (angle != 0)
{
radGraphics.TranslateTransform(horizontalTranslate, verticalTranslate);
radGraphics.RotateTransform(angle);
radGraphics.TranslateTransform(-horizontalTranslate, -verticalTranslate);
}
Size desiredSize = TextRenderer.MeasureText(summaryText, dataPointElement.Font);
FillPrimitiveImpl fill = new FillPrimitiveImpl(dataPointElement, null);
fill.PaintFill(radGraphics, 0, Size.Empty, barBounds);
BorderPrimitiveImpl border = new BorderPrimitiveImpl(dataPointElement, null);
border.PaintBorder(radGraphics, 0, Size.Empty, barBounds);
using (Brush brush = new SolidBrush(dataPointElement.ForeColor))
{
RectangleF drawRectangle = new RectangleF();
drawRectangle.X = barBounds.X + dataPointElement.Padding.Left + (barBounds.Width - desiredSize.Width) /2;
drawRectangle.Y = barBounds.Y + dataPointElement.Padding.Top - desiredSize.Height;
drawRectangle.Width = barBounds.Width - dataPointElement.Padding.Right;
drawRectangle.Height = barBounds.Height - dataPointElement.Padding.Bottom;
StringFormat format = new StringFormat();
graphics.DrawString(summaryText, dataPointElement.Font, brush, drawRectangle, format);
}
if (angle != 0)
{
radGraphics.ResetTransform();
}
radGraphics.RestoreState(state);
}
base.Draw();
}
private RectangleF AdjustBarDataPointBounds(DataPointElement point, RectangleF bounds)
{
RectangleF barBounds = bounds;
if (point.BorderBoxStyle == BorderBoxStyle.SingleBorder || point.BorderBoxStyle == BorderBoxStyle.OuterInnerBorders)
{
barBounds.X += point.BorderWidth - (int)((point.BorderWidth - 1f) / 2f);
barBounds.Width -= point.BorderWidth;
barBounds.Y += point.BorderWidth - (int)((point.BorderWidth - 1f) / 2f);
barBounds.Height -= point.BorderWidth;
}
else if (point.BorderBoxStyle == BorderBoxStyle.FourBorders)
{
barBounds.Y += 1;
barBounds.Height -= 1;
barBounds.X += 1;
barBounds.Width -= 1;
}
if (((CartesianRenderer)this.Renderer).Area.Orientation == System.Windows.Forms.Orientation.Horizontal)
{
barBounds.X--;
}
return barBounds;
}
}
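In case it helps, a hedged sketch of how such a renderer is usually plugged in for RadChartView (WinForms), following the pattern the linked KB article is built around and assuming the CreateRenderer event exposed by RadChartView; the control name radChartView1 and the form wiring are assumptions, not something from the question:
// Both classes above go into your project as ordinary top-level classes.
// The custom renderer is then substituted in the chart's CreateRenderer event,
// hooked up before the chart is first painted (e.g. in the form constructor).
public partial class Form1 : Form
{
    public Form1()
    {
        InitializeComponent();
        this.radChartView1.CreateRenderer += this.RadChartView1_CreateRenderer;
    }
    private void RadChartView1_CreateRenderer(object sender, ChartViewCreateRendererEventArgs e)
    {
        // Replace the default cartesian renderer with the custom one.
        e.Renderer = new CustomCartesianRenderer((CartesianArea)e.Area);
    }
}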

How can I send the byte array for Bitmap to the main page in image processing?

I am trying to send bytes to alldata.AddRange(), but I want to do it line by line. What I mean is, for example, I have an RGB view of 640 * 360; the width of the view is 640. I want to take one line of the view (640 * 3 = 1920 bytes), make it gray, and send it back to the function (alldata.AddRange). If I send 360 such lines, I should get the whole image. How can I do that?
EDIT: I changed the code just a little. It may be better to think of this as sending data between classes through arrays, where the data needs to be sent in parts, rather than as an image processing problem.
Here is the code for Form1:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
public FilterInfoCollection devices;
public VideoCaptureDevice camera;
private void Form1_Load(object sender, EventArgs e)
{
devices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
foreach (FilterInfo item in devices)
{
comboBox1.Items.Add(item.Name);
}
camera = new VideoCaptureDevice();
comboBox1.SelectedIndexChanged += comboBox1_SelectedIndexChanged;
}
private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
try
{
if (camera.IsRunning == false)
{
camera = new VideoCaptureDevice(devices[comboBox1.SelectedIndex].MonikerString);
camera.NewFrame += Camera_NewFrame;
camera.Start();
}
}
catch (Exception exc)
{
MessageBox.Show(exc.Message + "");
}
}
public void Camera_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
{
List<byte> alldata = new List<byte>();
//byte[] line = new byte[360];
Bitmap image = (Bitmap)eventArgs.Frame.Clone();
byte[] maindata = new byte[image.Height*image.Width*4];
int count = 0;
if(btnapplyWasClicked == true)
{
for (int i = 0; i < image.Height; i++)
{
for (int j = 0; j < image.Width; j++)
{
Color color = image.GetPixel(j, i);
maindata[count] = color.R;
maindata[count + 1] = color.G;
maindata[count + 2] = color.B;
maindata[count + 3] = color.A;
count = count + 4;
for (int k = 1; k <= 360; k++)
{
if (maindata[(count + 4) * k] == maindata[2560 * k])
{
dnm2510img.Gray.GrayFilter(maindata, 2560 * k);
}
}
}
}
//alldata.AddRange(maindata);
}
}
private bool btnapplyWasClicked = false;
//private bool button1WasClicked = false;
//private bool GeriALWasClicked = false;
private void btnapply_Click(object sender, EventArgs e)
{
btnapplyWasClicked = true;
}
private void button1_Click(object sender, EventArgs e)
{
//button1WasClicked = true;
}
}
}
Here is the code for Grayscale:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public class Gray
{
public static byte[] GrayFilter(byte[] data,int width)
{
List<byte> alldataa = new List<byte>();
for (int i = 0; i < width; i++)
{
int temp =((data[i]+data[i+1]+data[i+2]+data[i+3]) / 4);
data[i] = (byte)temp;
data[i+1] = (byte)temp;
data[i+2] = (byte)temp;
data[i + 3] = (byte)temp;
}
//alldataa.AddRange(data);
return data;
}
}
}
This is how you convert a 24 bpp bitmap to grayscale and output it to a linear array:
public static unsafe byte[] ToBgr24To8Mono(Bitmap source)
{
var width = source.Width;
var height = source.Height;
var sourceData = source.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, source.PixelFormat);
var sourceStride = sourceData.Stride;
var sourcePtr = (byte*)sourceData.Scan0;
var targetArray = new byte[width * height];
try
{
Parallel.For(0, height, y =>
{
var sourceRow = sourcePtr + y * sourceStride;
var targetRow = y * width;
for (int x = 0; x < width; x++)
{
var sourceIndex = (sourceRow + x * 3);
var value = (byte) (sourceIndex[0] * 0.11f + sourceIndex[1] * 0.59f + sourceIndex[2] * 0.3f);
targetArray[targetRow + x] = value;
}
});
}
finally
{
source.UnlockBits(sourceData);
}
return targetArray;
}
If you want to use a 32-bit image as input, change x * 3 to x * 4. The parallel loop can be switched to a regular loop if you wish.
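If you then want to look at the result, a hedged sketch of turning that linear gray array back into an 8bpp Bitmap (plain GDI+ calls; the helper name BuildGrayBitmap is made up here, and it needs System.Runtime.InteropServices for Marshal.Copy):
public static Bitmap BuildGrayBitmap(byte[] gray, int width, int height)
{
    var bmp = new Bitmap(width, height, PixelFormat.Format8bppIndexed);
    // 8bpp indexed images need an explicit grayscale palette.
    var palette = bmp.Palette;
    for (int i = 0; i < 256; i++)
        palette.Entries[i] = Color.FromArgb(i, i, i);
    bmp.Palette = palette;
    var data = bmp.LockBits(new Rectangle(0, 0, width, height),
        ImageLockMode.WriteOnly, PixelFormat.Format8bppIndexed);
    try
    {
        // Copy row by row because the bitmap stride can be wider than the width.
        for (int y = 0; y < height; y++)
            Marshal.Copy(gray, y * width, IntPtr.Add(data.Scan0, y * data.Stride), width);
    }
    finally
    {
        bmp.UnlockBits(data);
    }
    return bmp;
}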

Cluster overlapping circles?

I am trying to cluster (group) every circle that is connected to the others through an uninterrupted chain of overlaps. How could I do that (preferably in a reasonably efficient way)?
(I have messed around trying to write some recursive functions but haven't gotten anything to work.)
I have created a VS project to visualize the problem.
Download here:
Generates random circles.
How the clustering currently works:
(it only looks at which circles overlap that specific circle, not everything that is connected)
How it should look if it is working:
(separate clusters for all connected circles)
CODE: (C#)
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
// Cluster overlapping circles
// Patrik Fröhler
// www.patan77.com
// 2017-08-14
namespace circleGroup
{
struct circle // the circle "object"
{
public float[] pos;
public int radius;
public Color color;
public int id;
public float x
{
get { return pos[0]; }
set { pos[0] = value; }
}
public float y
{
get { return pos[1]; }
set { pos[1] = value; }
}
}
public partial class Form1 : Form
{
DB _DB = new DB(); // "Global Database"
public Form1()
{
InitializeComponent();
}
private static circle createCircle(float _x = 0, float _y = 0, int _radius = 1, Color? _color = null, int _id = -1) // creates a circle
{
circle tmpCircle = new circle() { pos = new float[2], x = _x, y = _y, radius = _radius, id = _id };
tmpCircle.color = _color ?? Color.Black;
return (tmpCircle);
}
private circle[] genRngCircles(int _n) // generates an array of random circles
{
Random rng = new Random();
circle tmpC;
circle[] tmpCarr = new circle[_n];
for (int i = 0; i < _n; i++)
{
tmpC = createCircle();
tmpC.radius = rng.Next(10, 75);
tmpC.x = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.y = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.color = Color.FromArgb(127, rng.Next(0, 255), rng.Next(0, 255), rng.Next(0, 255));
tmpC.id = i;
tmpCarr[i] = tmpC;
}
return tmpCarr;
}
private void drawCircle(circle _circle, Graphics _g) // draws one circle
{
SolidBrush sb = new SolidBrush(_circle.color);
_g.FillEllipse(sb, (_circle.x - _circle.radius), (_circle.y - _circle.radius), (_circle.radius * 2), (_circle.radius * 2));
sb.Dispose();
}
private void drawString(float[] _pos, string _text, Graphics _g) // draws text
{
StringFormat sf = new StringFormat();
sf.LineAlignment = StringAlignment.Center;
sf.Alignment = StringAlignment.Center;
Font font = new Font("Arial", 12);
SolidBrush sb = new SolidBrush(Color.Black);
float x = _pos[0];
float y = _pos[1];
_g.DrawString(_text, font, sb, x, y, sf);
font.Dispose();
sb.Dispose();
}
private void drawCircleArr(circle[] _circleArr, Graphics _g)// draws an array of circles
{
_g.Clear(panel1.BackColor);
for (int i = 0; i < _circleArr.Length; i++)
{
drawCircle(_circleArr[i], _g);
drawString(_circleArr[i].pos, _circleArr[i].id.ToString(), _g);
}
}
static double mDistance<T>(T[] _p0, T[] _p1) // gets euclidean distance between two points of arbitrary numbers of dimensions
{
double[] p0 = new double[] { Convert.ToDouble(_p0[0]), Convert.ToDouble(_p0[1]) };
double[] p1 = new double[] { Convert.ToDouble(_p1[0]), Convert.ToDouble(_p1[1]) };
double tmp = 0;
double tmpTotal = 0;
for (int i = 0; i < _p0.Length; i++)
{
tmp = (p0[i] - p1[i]);
tmpTotal += (tmp * tmp);
}
double output = Math.Sqrt(tmpTotal);
return (output);
}
private bool overlap(circle _c0, circle _c1) // checks if two circles overlap
{
double dis = mDistance(_c0.pos, _c1.pos);
if (dis <= (_c0.radius + _c1.radius))
{
return (true);
}
return (false);
}
private Color avgColor(List<circle> _colorArr) // averages multiple colors together
{
float ia = 0;
float ir = 0;
float ig = 0;
float ib = 0;
for (int i = 0; i < _colorArr.Count; i++)
{
ia += _colorArr[i].color.A;
ir += _colorArr[i].color.R;
ig += _colorArr[i].color.G;
ib += _colorArr[i].color.B;
}
byte a = Convert.ToByte(Math.Round(ia / _colorArr.Count));
byte r = Convert.ToByte(Math.Round(ir / _colorArr.Count));
byte g = Convert.ToByte(Math.Round(ig / _colorArr.Count));
byte b = Convert.ToByte(Math.Round(ib / _colorArr.Count));
return (Color.FromArgb(a, r, g, b));
}
private void treeView(List<circle>[] _circleLArr) // Create Treeview
{
treeView1.Nodes.Clear();
for (int i = 0; i < _circleLArr.Length; i++)
{
treeView1.Nodes.Add(i.ToString());
for (int j = 0; j < _circleLArr[i].Count; j++)
{
treeView1.Nodes[i].Nodes.Add(_circleLArr[i][j].id.ToString());
}
}
treeView1.ExpandAll();
}
private void drawCircleClusters(List<circle>[] _circleLArr, Graphics _g) // draws the circle clusters
{
_g.Clear(panel1.BackColor);
circle tmpC;
Color tmpColor;
for (int i = 0; i < _circleLArr.Length; i++)
{
tmpColor = avgColor(_circleLArr[i]);
for (int j = 0; j < _circleLArr[i].Count; j++)
{
tmpC = _circleLArr[i][j];
tmpC.color = tmpColor;
drawCircle(tmpC, _g);
drawString(_circleLArr[i][j].pos, _circleLArr[i][j].id.ToString(), _g);
}
}
}
//----------------------------------------------------
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
/*
private circle[] recurOverlap(circle[] _circleArr) // recursive overlap test(not done/working)
{
List<circle> overlapArr = new List<circle>();
List<circle> dontOverlapArr = new List<circle>();
bool loop = true;
int n = 0;
while (loop)
{
if (overlap(_circleArr[0], _circleArr[n]))
{
overlapArr.Add(_circleArr[n]);
dontOverlapArr.Insert(0, _circleArr[n]);
circle[] dontArr = dontOverlapArr.ToArray();
recurOverlap(dontArr);
}
else
{
dontOverlapArr.Add(_circleArr[n]);
}
n++;
if (n >= _circleArr.Length)
{
loop = false;
}
}
if(_circleArr.Length <= 1)
{
return _circleArr;
}
else{
return overlapArr.ToArray();
}
}
private List<circle>[] clusterBrecur(circle[] _circleArr)
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
recurOverlap(_circleArr);
}
return (tmpLArr);
}*/
private void run() // Run function
{
treeView1.Nodes.Clear(); // clear tree view
_DB.g = panel1.CreateGraphics();// Create Panel Graphics to draw on
_DB.circleArr = genRngCircles(10); // Creates an array with random circles
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
clusterAbtn.Enabled = true; // enables the cluster button
}
private void clusterA() // clusterA function
{
_DB.circleClusters = simpleOverlap(_DB.circleArr); // runs cluster algorithm test A
treeView(_DB.circleClusters); // Creates the treeview
drawCircleClusters(_DB.circleClusters, _DB.g); // draws the circle clusters
}
private void clusterB()
{
}
private void clusterA_rClick()
{
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
}
private void runBtn_Click(object sender, EventArgs e) // run button click
{
run();
}
private void clusterAbtn_MouseUp(object sender, MouseEventArgs e)
{
switch (e.Button)
{
case MouseButtons.Left:
clusterA();
break;
case MouseButtons.Right:
clusterA_rClick();
break;
}
}
private void clusterBbtn_Click(object sender, EventArgs e) // clusterB button click
{
clusterB();
}
}
class DB // "Database"
{
public Graphics g;
public circle[] circleArr;
public List<circle>[] circleClusters;
}
}
The current "overlap function"
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
I made the following change to your code. It looks like it is working.
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<List<circle>> list = new List<List<circle>>();
//List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
//for (int i = 0; i < (_circleArr.Length); i++)
foreach (circle circle in _circleArr)
{
List<circle> cluster = null;
//tmpLArr[i] = new List<circle>();
//for (int j = 0; j < (_circleArr.Length); j++)
//{
// if (overlap(_circleArr[i], _circleArr[j]))
// {
// tmpLArr[i].Add(_circleArr[j]);
// }
//}
foreach(List<circle> cluster2 in list)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle, circle2))
{
cluster = cluster2;
goto label_001;
}
}
}
label_001:
if (cluster == null)
{
cluster = new List<circle>();
list.Add(cluster);
}
cluster.Add(circle);
}
bool flag = true;
for (int i = 0; i < list.Count; i += (flag ? 1 : 0))
{
flag = true;
List<circle> cluster = list[i];
for (int j = i + 1; j < list.Count; j++)
{
List<circle> cluster2 = list[j];
if (Intersects(cluster, cluster2))
{
cluster.AddRange(cluster2);
list.Remove(cluster2);
j--;
flag = false;
}
}
}
return list.ToArray();
//return (tmpLArr);
}
bool Intersects(List<circle> cluster1, List<circle> cluster2)
{
foreach (circle circle1 in cluster1)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle1, circle2))
{
return true;
}
}
}
return false;
}
I had to add one more method, bool Intersects(List<circle> cluster1, List<circle> cluster2). See if it helps.
I believe the function you are looking for is intersection. I have attached an article by Mike K which I believe will give you an idea of how to approach this in your own code.
C# circles intersections
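For the record, another common way to get the connected groups is to treat the circles as graph nodes and take connected components with a disjoint-set (union-find). A hedged sketch reusing the circle struct and the overlap() test from the question (the method name clusterUnionFind is made up):
private List<circle>[] clusterUnionFind(circle[] _circleArr)
{
    int n = _circleArr.Length;
    int[] parent = new int[n];
    for (int i = 0; i < n; i++) parent[i] = i;
    // Find with path compression.
    Func<int, int> find = null;
    find = i => parent[i] == i ? i : (parent[i] = find(parent[i]));
    // Union every overlapping pair.
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
            if (overlap(_circleArr[i], _circleArr[j]))
                parent[find(i)] = find(j);
    // Group the circles by their root; each group is one cluster.
    var groups = new Dictionary<int, List<circle>>();
    for (int i = 0; i < n; i++)
    {
        int root = find(i);
        if (!groups.ContainsKey(root)) groups[root] = new List<circle>();
        groups[root].Add(_circleArr[i]);
    }
    return groups.Values.ToArray();
}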

I have Image Compositing (white/alpha mask) working in OpenTK/C#, except every pass through scales the output

So here's the problem I've been trying to solve. Given:
a) A product image. Expected to be full color
b) A mask of that image: RGBA(0,0,0,0) means ignore, RGBA(255,255,255,255) means replace
c) A composite image: This is composited with the mask
The idea is to composite the mask with the composite image, so that all white pixels become the composite pixels while the transparent pixels remain transparent. This is finally overlaid on top of the product image, effectively transforming only the pixels in the mask region.
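In other words, the intended per-pixel rule is roughly the following (an illustrative CPU-side sketch only; the actual work is done with OpenGL blending below, and the method name is made up):
// Where the mask is transparent, keep the product pixel;
// where the mask is (white and) opaque, take the composite pixel.
Color BlendPixel(Color product, Color mask, Color composite)
{
    return mask.A == 0 ? product : composite;
}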
I have this working perfectly, except for one small problem. Every pass through my Composite function seems to shrink the output by a scale of 0.5.
There's a bit of code in my solution, so I'll post what I think is necessary but feel free to ask for more.
Here's my Composite methods:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using OpenTK.Platform;
using EnableCap = OpenTK.Graphics.OpenGL.EnableCap;
using GL = OpenTK.Graphics.OpenGL.GL;
using HintMode = OpenTK.Graphics.OpenGL.HintMode;
using HintTarget = OpenTK.Graphics.OpenGL.HintTarget;
using MatrixMode = OpenTK.Graphics.OpenGL.MatrixMode;
using PixelFormat = System.Drawing.Imaging.PixelFormat;
using PixelType = OpenTK.Graphics.OpenGL.PixelType;
using TextureUnit = OpenTK.Graphics.OpenGL.TextureUnit;
using Utilities = OpenTK.Platform.Utilities;
namespace SpriteSheetMaker
{
public static class ImageBlender
{
static DebugLogger logger = DebugLogger.GetInstance(@"debug.txt");
//Mask should be white where replacing, and transparent elsewhere
public static TexturedPolygon Composite(TexturedPolygon __baseImage, TextureBlendItem item, int level = 1)
{
var oldScale = OpenGLHelpers.Scale.Clone();
var oldSize = OpenGLHelpers.Canvas.Size;
var newSize = __baseImage.Texture2D.Texture2D.Size;
logger.WriteLine("Composite Requested. Mask Details: " + __baseImage.Rotation + " --- " + __baseImage.Scale + " --- " + __baseImage.Translation);
OpenGLHelpers.ClearScreen();
var _baseImage = __baseImage.Clone();
_baseImage.Texture2D.Texture2D.Save("composite_test_base" + level + ".png");
var _mask = item.Mask.Clone();
var _composite = item.Composite.Clone();
logger.WriteLine("Composite Requested. Mask Details: " + __baseImage.Rotation + " --- " + __baseImage.Scale + " --- " + __baseImage.Translation);
_mask.ResetTransform();
_composite.ResetTransform();
_baseImage.ResetTransform();
GL.Enable(EnableCap.Blend);
// render the mask
_mask.Draw();
//Blend the composite
GL.Disable(EnableCap.Blend);
GL.Enable(EnableCap.Blend);
GL.BlendEquation(BlendEquationMode.Min);
_composite.Draw();
GL.Disable(EnableCap.Blend); ;
//Not sure exactly what this does
GL.Enable(EnableCap.Blend);
GL.BlendEquation(BlendEquationMode.FuncAdd);
//Grab the composite and save it into a variable, because we are clearing the screen now
var bmp = OpenGLHelpers.GrabScreenshot(oldSize);
var _composite_2 = new BaseTextureImage(bmp).GetDrawable();
_composite_2.ResetTransform();
//Now we have the composited mask, we can simply draw it over the original image
OpenGLHelpers.ClearScreen();
_baseImage.Draw();
_composite_2.Draw();
bmp = OpenGLHelpers.GrabScreenshot(oldSize);
// OpenGLHelpers.Canvas.Resize(oldSize);
return new BaseTextureImage(bmp).GetDrawable();
}
public static TexturedPolygon Composite(TexturedPolygon _baseImage, List<TextureBlendItem> blends)
{
TexturedPolygon rtn = _baseImage.Clone();
bool doScaleFix = true;
int idx = 0;
foreach(var blend in blends)
{
var c = Composite(rtn, blend, (idx+1));
c.Texture2D.Texture2D.Save("composite_test_" + idx + ".png");
c.Scale = _baseImage.Scale;
c.Rotation = _baseImage.Rotation;
c.Translation = _baseImage.Translation;
rtn = c;
doScaleFix = false;
idx++;
}
return rtn;
}
}
}
Here is the TexturedPolygon class
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using OpenTK.Platform;
using EnableCap = OpenTK.Graphics.OpenGL.EnableCap;
using GL = OpenTK.Graphics.OpenGL.GL;
using HintMode = OpenTK.Graphics.OpenGL.HintMode;
using HintTarget = OpenTK.Graphics.OpenGL.HintTarget;
using MatrixMode = OpenTK.Graphics.OpenGL.MatrixMode;
using PixelFormat = System.Drawing.Imaging.PixelFormat;
using PixelType = OpenTK.Graphics.OpenGL.PixelType;
using TextureUnit = OpenTK.Graphics.OpenGL.TextureUnit;
using Utilities = OpenTK.Platform.Utilities;
using System.Drawing.Imaging;
using System.Reflection;
namespace SpriteSheetMaker
{
public class TexturedPolygon : BasePolygon
{
public BaseTextureImage Texture2D { get; set; }
public TexturedPolygon(BaseTextureImage texture, List<Vector3> pts) : base(pts)
{
Texture2D = texture;
}
public new TexturedPolygon Clone()
{
TexturedPolygon polygon = new TexturedPolygon(Texture2D, Vertices);
polygon.FillColor = this.FillColor.Clone();
polygon.EdgeColor = this.EdgeColor.Clone();
polygon.EdgeWidth = this.EdgeWidth;
polygon.Translation = this.Translation.Clone();
polygon.Rotation = this.Rotation.Clone();
polygon.Scale = this.Scale.Clone();
return polygon;
}
public TexturedPolygon(List<Vector3> pts) : base(pts)
{
}
public override void _draw()
{
if (Texture2D == null)
{
GL.ClearColor(Color.Transparent);
GL.Enable(EnableCap.Blend);
GL.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.OneMinusSrcAlpha);
this._basePolygonDraw();
return;
}
var bb = NoTransformBoundingBox();
GL.ClearColor(Color.Transparent);
GL.BindTexture(TextureTarget.Texture2D, Texture2D.TextureID);
GL.Enable(EnableCap.Texture2D);
GL.Enable(EnableCap.Blend);
GL.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.OneMinusSrcAlpha);
GL.Begin(PrimitiveType.Polygon);
for(int i = 0; i < Vertices.Count; i++)
{
var pt = Vertices[i];
var fillColor = GetVertexFillColor(i);
var alpha = fillColor.Alpha;
GL.Color4(1.0, 1.0, 1.0, alpha);
var texX = (pt.X - (float)bb.Left) / (bb.Right - (float)bb.Left);
var texY = (pt.Y - (float)bb.Top) / (bb.Bottom - (float)bb.Top);
GL.TexCoord2(texX, texY);
GL.Vertex2(pt.X, pt.Y);
}
GL.End();
// GL.Disable(EnableCap.Texture2D);
}
}
}
Here is the BasePolygon class:
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using OpenTK.Platform;
using EnableCap = OpenTK.Graphics.OpenGL.EnableCap;
using GL = OpenTK.Graphics.OpenGL.GL;
using HintMode = OpenTK.Graphics.OpenGL.HintMode;
using HintTarget = OpenTK.Graphics.OpenGL.HintTarget;
using MatrixMode = OpenTK.Graphics.OpenGL.MatrixMode;
using PixelFormat = System.Drawing.Imaging.PixelFormat;
using PixelType = OpenTK.Graphics.OpenGL.PixelType;
using TextureUnit = OpenTK.Graphics.OpenGL.TextureUnit;
using Utilities = OpenTK.Platform.Utilities;
namespace SpriteSheetMaker
{
public class BasePolygon : BaseTexture
{
protected List<Vector3> Vertices = new List<Vector3>();
public GeometryColor FillColor = new GeometryColor();
public GeometryColor EdgeColor = new GeometryColor();
public float EdgeWidth = 1.0f;
public List<Vector3> CurrentVertices
{
get
{
return Vertices.ToList();
}
}
Point[] _points
{
get
{
return Vertices.Select(x => x.ToPoint()).ToArray();
}
}
protected override List<Vector3> GetPolygonPoints()
{
List<Vector3> rtn = new List<Vector3>();
foreach(var pt in Vertices)
{
rtn.Add(pt.Clone());
}
return rtn;
}
protected void AlignToOrigin()
{
var ctr = NoTransformCenter();
for(int i = 0; i < Vertices.Count; i++)
{
var pt = Vertices[i];
pt = pt.Subtract(ctr);
Vertices[i] = pt;
}
Translation = ctr.Clone();
}
protected void AlignToCenter()
{
var ctr = NoTransformCenter();
for (int i = 0; i < Vertices.Count; i++)
{
var pt = Vertices[i];
pt = pt.Add(Translation);
Vertices[i] = pt;
}
Translation = Vector3.ZERO;
}
public static BasePolygon ClonePolygon(BasePolygon poly)
{
return poly.Clone();
}
public BasePolygon Clone()
{
BasePolygon polygon = new BasePolygon();
polygon.Vertices = this.CurrentVertices;
polygon.FillColor = this.FillColor.Clone();
polygon.EdgeColor = this.EdgeColor.Clone();
polygon.EdgeWidth = this.EdgeWidth;
polygon.Translation = this.Translation.Clone();
polygon.Rotation = this.Rotation.Clone();
polygon.Scale = this.Scale.Clone();
return polygon;
}
public BasePolygon(List<Vector3> pts)
{
AddVertexes(pts);
AlignToOrigin();
}
public BasePolygon(params Vector3[] pts)
{
AddVertexes(pts.ToList());
AlignToOrigin();
}
protected void AddVertex(Vector3 pt)
{
Vertices.Add(pt.Clone());
}
protected void AddVertexes(IEnumerable<Vector3> pts)
{
pts.ToList().ForEach(x =>
{
AddVertex(x);
});
}
protected void RemoveVertex(Vector3 pt)
{
Vertices.Remove(pt);
}
protected void RemoveVertexes(IEnumerable<Vector3> pts)
{
pts.ToList().ForEach(x =>
{
RemoveVertex(x);
});
}
public override void _draw()
{
this._basePolygonDraw();
}
public ColorLibrary.sRGB GetVertexFillColor(int index)
{
var pct = ((float)index + 1.0f) / (float)Vertices.Count;
var colorIdx = (int)((FillColor.Colors.Count - 1.0f) * pct);
return FillColor.Colors[colorIdx];
}
public ColorLibrary.sRGB GetVertexEdgeColor(int index)
{
var pct = ((float)index + 1.0f) / (float)Vertices.Count;
var colorIdx = (int)Math.Round((float)(EdgeColor.Colors.Count - 1.0f) * pct);
return EdgeColor.Colors[colorIdx];
}
protected void _basePolygonDraw()
{
this.wireframe();
this.fill();
}
private void wireframe()
{
GL.BindTexture(TextureTarget.Texture2D, 0);
GL.Hint(HintTarget.LineSmoothHint, HintMode.Nicest);
GL.Hint(HintTarget.PolygonSmoothHint, HintMode.Nicest);
var pts = Vertices;
ColorLibrary.sRGB color1;
ColorLibrary.sRGB color2;
GL.Begin(PrimitiveType.Lines);
for (var i = 0; i < pts.Count; i++)
{
var idx2 = (i + 1) % pts.Count;
color1 = GetVertexEdgeColor(i);
color2 = GetVertexEdgeColor(idx2);
var a = pts[i];
var b = pts[idx2];
GL.Color4(color1.R, color1.G, color1.B, color1.Alpha);
GL.Vertex3(a.OpenTKVector);
GL.Color4(color2.R, color2.G, color2.B, color2.Alpha);
GL.Vertex3(b.OpenTKVector);
}
GL.End();
}
private void fill()
{
GL.Hint(HintTarget.LineSmoothHint, HintMode.Nicest);
GL.Hint(HintTarget.PolygonSmoothHint, HintMode.Nicest);
GL.BindTexture(TextureTarget.Texture2D, 0);
var pts = Vertices;
ColorLibrary.sRGB color;
GL.Begin(PrimitiveType.Polygon);
for(int i = 0; i < pts.Count; i++)
{
color = GetVertexFillColor(i);
var pt = pts[i];
GL.Color4(color.R, color.G, color.B, color.Alpha);
GL.Vertex3(pt.OpenTKVector);
}
GL.End();
}
}
}
and here is the BaseTexture class
using ....
namespace SpriteSheetMaker
{
public abstract class BaseTexture
{
protected static Random rnd = new Random();
public Vector3 Translation = new Vector3(0, 0, 0);
public Vector3 Scale = new Vector3(1, 1, 1);
public Vector3 Rotation = new Vector3(0, 0, 0);
public Vector3 Velocity = new Vector3(0, 0, 0);
public void ResetTransform()
{
Translation = Vector3.ZERO;
Scale = Vector3.ONE;
Rotation = Vector3.ZERO;
}
private Guid _guid = Guid.NewGuid();
public Guid Guid
{
get
{
return _guid;
}
private set
{
_guid = value;
}
}
public bool DrawBounds = false;
public enum CollisionDirection
{
None = 0,
Up = 1,
Right = 2,
Down = 3,
Left = 4
}
public RectangleF NoTransformBoundingBox()
{
var poly = GetPolygonPoints();
var rect = Utility.PointsToRectangle(poly);
return rect;
}
protected abstract List<Vector3> GetPolygonPoints();
public Vector3 NoTransformCenter()
{
var poly = GetPolygonPoints();
var ctr = Vector3.Average(poly);
return ctr;
}
protected virtual RectangleF NonRotatedBoundingBox()
{
var polyPts = GetPolygonPoints();
foreach (var p in polyPts)
{
p.X = (Translation.X + Scale.X * p.X);
p.Y = (Translation.Y + Scale.Y * p.Y);
}
var rect = Utility.PointsToRectangle(polyPts);
return rect;
}
public RectangleF BoundingBox()
{
if(this.Rotation.Z == 0)
{
return NonRotatedBoundingBox();
}
return RotatedBoundingBox();
}
private RectangleF RotatedBoundingBox()
{
var polyPts = GetPolygonPoints();
foreach(var p in polyPts)
{
p.X = Translation.X + Scale.X*p.X;
p.Y = Translation.Y + Scale.Y*p.Y;
}
var ctr = this.Center();
var theta = this.Rotation.Z;
var ptsRotated = Utility.RotatePoints(polyPts, ctr, theta);
var bb = Utility.GetBoundsFromPoints(ptsRotated);
var rotated = new RectangleF(bb.Left, bb.Top, bb.Width, bb.Height);
return rotated;
}
public bool IsColliding(BaseTexture tex)
{
var bb = this.BoundingBox();
var bb2 = tex.BoundingBox();
return RectangleF.Intersect(bb, bb2) != RectangleF.Empty;
}
public CollisionDirection CollisionSide(BaseTexture tex)
{
var isColliding = IsColliding(tex);
if (!isColliding)
{
return CollisionDirection.None;
}
var ctrThis = this.Center();
var ctrThat = tex.Center();
var angleTo = ctrThis.AngleTo(ctrThat);
var pi = Math.PI;
var two_pi = 2*pi;
var pi_over_2 = pi/2.0;
var pi_over_4 = pi/4.0;
//0 to 45
if (angleTo >= 0 && angleTo <= pi_over_4)
{
return CollisionDirection.Right;
}
//45 to 135
else if (angleTo >= pi_over_4 && angleTo <= (pi_over_4) + (pi_over_2))
{
return CollisionDirection.Down;
}
//135 to 225
else if (angleTo >= (pi_over_4) + (pi_over_2) && angleTo <= (pi) + (pi_over_4))
{
return CollisionDirection.Left;
}
//225 to 315
else if (angleTo >= (pi) + (pi_over_4) && angleTo <= two_pi - pi_over_4)
{
return CollisionDirection.Up;
}
//315 to 360
else if (angleTo >= two_pi - pi_over_4 && angleTo <= two_pi)
{
return CollisionDirection.Right;
}
else
{
return CollisionDirection.None;
}
}
public Vector3 Center()
{
return NoTransformCenter().Add(this.Translation);
}
public float NoTransformRadius
{
get
{
var bb = NoTransformBoundingBox();
return (float)(Math.Max(bb.Width, bb.Height));
}
}
public float Radius
{
get
{
var bb = BoundingBox();
return (float)(Math.Max(bb.Width, bb.Height));
}
}
public abstract void _draw();
protected void _draw_bounds()
{
var bb = BoundingBox();
var ctr = Utility.RectangleCenter(bb);
var ctr2 = this.Center();
RectanglePolygon rec = RectanglePolygon.Create(ctr, bb.Width, bb.Height);
var fillColor = new ColorLibrary.sRGB(1,1,1);
fillColor.Alpha = 0.0;
rec.FillColor.SetSolid(fillColor);
rec.EdgeColor.SetSolid(new ColorLibrary.sRGB(1,0,0));
rec.Draw();
}
public virtual void Draw()
{
GL.ClearColor(Color.Transparent);
if (this.DrawBounds)
{
this._draw_bounds();
}
var ctr = NoTransformCenter();
OpenGLHelpers.RotationPivot = ctr;
OpenGLHelpers.AddTransform(Rotation,Scale,Translation);
OpenGLHelpers.ApplyTransforms();
_draw();
OpenGLHelpers.SubtractTransform(Rotation,Scale,Translation);
GL.PopMatrix();
// GL.Flush();
}
public override bool Equals(object obj)
{
var o = obj as BaseTexture;
return o.Guid == Guid;
}
}
}
If you need any more code, like the methods inside of OpenGLHelpers, I can provide that. Just keep in mind that generally my drawing code works. I didn't set any projection here, so that's why I reset the transforms. The translation and rotation are zero for now anyway, so it's really just to reset the scale. My base drawing method is normalized from [0,1], and the scale is what stretches the image to whatever dimensions desired.
Also, here is what I mean by the scale getting messed up. That red flask-looking shape is supposed to completely cover the inside of the flask, and the flask itself also got shrunk down! So it double shrunk...
That shows the iterations through 4 pass-throughs. The smiley faces were another mask/composite combo added on.
Base Image:
Mask 1:
Composite 1:
Thank you to whoever helps
I got it working!
So there were two problems:
1. I was using -0.5 to 0.5 as my domain range, when -1 to 1 was more appropriate.
2. I also had miscellaneous issues, such as needing to make sure all of my textures were actually in the viewport, and needing to resize the glControl to fit the product texture.
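For reference, a hedged sketch of a projection setup that matches the -1 to 1 range, using the same legacy fixed-function GL calls as the code above (where exactly this belongs in OpenGLHelpers, and the glControl name, are assumptions):
// Run whenever the control is (re)sized, before drawing a frame.
GL.Viewport(0, 0, glControl.Width, glControl.Height);
GL.MatrixMode(MatrixMode.Projection);
GL.LoadIdentity();
// Map the full normalized device range, not -0.5 to 0.5.
GL.Ortho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
GL.MatrixMode(MatrixMode.Modelview);
GL.LoadIdentity();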

emgu cv CvInvoke.cvRemap hangs when trying to undistort from stereo calibration data

I'm trying to implement a stereo camera calibration app using emgu cv.
My problem is that when I try to use CvInvoke.cvRemap to undistort an image, the function just hangs. No errors or crashes, it just hangs, and I've left it for 2 hours in case it was just being slow. Here's what I'm doing:
1. Capturing 10 pairs of chessboard samples (left and right), making sure FindChessboardCorners works on each. I'm not doing anything special to sync the cameras, just capturing them at the same time.
2. Generating a set of object points based on the chessboard used.
3. Doing a separate CalibrateCamera on the left and right images of each sample, using the object points from 2 and the image points from 1.
4. Doing a StereoCalibrate using the IntrinsicCameraParameters generated by CalibrateCamera in 3, the object points in 2, and the image points captured from the chessboards in 1.
5. Doing a StereoRectify using the IntrinsicCameraParameters from 3/4.
6. Generating mapx and mapy for both left and right from cvInitUndistortRectifyMap using the output from 5.
7. Attempting to cvRemap using mapx and mapy from 6 and fresh images captured from the cameras.
8. NEXT: Use StereoBM.FindStereoCorrespondence and PointCollection.ReprojectImageTo3D to generate a point cloud from my hopefully calibrated stereo data.
So when I get to step 7, cvRemap just hangs. I've gotten cvRemap to work capturing from a single camera, though, so I know the function is working to some degree with my setup.
I've written a class to manage multiple cameras:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
class camData
{
public Capture capture;
public Image<Bgr, Byte> lastFrame;
public Image<Gray, Byte> lastFrameGray;
public bool lastChessboardFound;
public PointF[] lastChessboardCorners;
public Image<Gray, Byte>[] samplesGray;
public PointF[][] samplesChessboardCorners;
public Size cbDimensions;
public Size imageDimensions;
public int cursampleIndex = 0;
public ImageList sampleIcons;
private Image<Gray, Byte> _chessBoardDisplay;
private int _iconWidth = 160;
private int _icnonHeight = 90;
private int _numSamples = 0;
public int numSamples()
{
return _numSamples;
}
public void numSamples(int val)
{
_numSamples = val;
this.samplesGray = new Image<Gray, Byte>[val];
this.samplesChessboardCorners = new PointF[val][];
this.sampleIcons.ImageSize = new Size(_iconWidth, _icnonHeight);
Bitmap tmp = new Bitmap(_iconWidth, _icnonHeight);
this.sampleIcons.Images.Clear();
for (int c = 0; c < _numSamples; c++) this.sampleIcons.Images.Add(tmp);
}
public camData(int camIndex, int capWidth, int capHeight, int pcbWidth, int pcbHeight, int pNumSamples)
{
this.sampleIcons = new ImageList();
try
{
this.capture = new Capture(camIndex);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, capWidth);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, capHeight);
}
catch (Exception e)
{
MessageBox.Show(e.Message);
return;
}
this.imageDimensions = new Size(capWidth, capHeight);
this.cbDimensions = new Size(pcbWidth, pcbHeight);
this.numSamples(pNumSamples);
}
public Image<Gray, Byte> captureFrame()
{
this.lastFrame = this.capture.QueryFrame();
this.lastFrameGray = this.lastFrame.Convert<Gray, Byte>();
return this.lastFrameGray;
}
public int captureSample()
{
this.detectChessboard(true); // detectChessboard calls -> captureFrame
if (lastChessboardFound)
{
this.samplesGray[cursampleIndex] = this.lastFrameGray;
this.samplesChessboardCorners[cursampleIndex] = this.lastChessboardCorners;
this.sampleIcons.Images[this.cursampleIndex] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.cursampleIndex++;
if (this.cursampleIndex >= _numSamples) this.cursampleIndex = 0;
}
return cursampleIndex;
}
public void clearSamples()
{
this.cursampleIndex = 0;
this.numSamples(_numSamples);
}
public Image<Gray, Byte> detectChessboard(bool pDoCapture)
{
if (pDoCapture) this.captureFrame();
this.lastChessboardFound = CameraCalibration.FindChessboardCorners(this.lastFrameGray, this.cbDimensions, CALIB_CB_TYPE.ADAPTIVE_THRESH | CALIB_CB_TYPE.FILTER_QUADS, out this.lastChessboardCorners);
_chessBoardDisplay = this.lastFrameGray.Clone();
CameraCalibration.DrawChessboardCorners(this._chessBoardDisplay, this.cbDimensions, this.lastChessboardCorners, this.lastChessboardFound);
return this._chessBoardDisplay;
}
public void saveSampleImages(string pPath, string pID)
{
for(int ic = 0; ic < this._numSamples; ic++)
{
this.samplesGray[ic].Save(pPath + pID + ic.ToString() + ".bmp");
}
}
public void loadSampleImages(string pPath, string pID)
{
clearSamples();
for (int ic = 0; ic < this._numSamples; ic++)
{
this.lastFrameGray = new Image<Gray, byte>(new Bitmap(pPath + pID + ic.ToString() + ".bmp"));
this.detectChessboard(false);
this.samplesChessboardCorners[ic] = this.lastChessboardCorners;
this.sampleIcons.Images[ic] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.samplesGray[ic] = this.lastFrameGray;
}
}
}
}
And here's my form code with the rest of the calibration logic:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Runtime.InteropServices;
using Emgu.CV.Util;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
public partial class CaptureForm : Form
{
private static camData camLeft;
private static camData camRight;
private int _numSamples = 10; // Number of calibration samples
private int _imageWidth = 1280; // web cam resolution
private int _imageHeight = 720; // web cam resolution
private int _cbWidth = 9; // chessboard corner count
private int _cbHeight = 5; // chessboard corner count
// TODO: Test post calibration values, these will need to be loaded and saved
private static Matrix<double> _foundamentalMatrix;
private static Matrix<double> _essentialMatrix;
private static IntrinsicCameraParameters _inPramsLeft;
private static IntrinsicCameraParameters _inPramsRight;
private static ExtrinsicCameraParameters _outExtParamsStereo;
private Matrix<float> _mapxLeft;
private Matrix<float> _mapyLeft;
private Matrix<float> _mapxRight;
private Matrix<float> _mapyRight;
public CaptureForm()
{
InitializeComponent();
Run();
}
void Run()
{
camLeft = new camData(0, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
camRight = new camData(1, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
this.listViewLeft.LargeImageList = camLeft.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewLeft.Items.Add(curItem);
}
this.listViewRight.LargeImageList = camRight.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewRight.Items.Add(curItem);
}
Application.Idle += ProcessFrame;
}
void ProcessFrame(object sender, EventArgs e)
{
if (!checkBoxRectify.Checked)
{
if (this.checkBoxCapCB.Checked)
{
imageBoxLeft.Image = camLeft.detectChessboard(true);
imageBoxRight.Image = camRight.detectChessboard(true);
}
else
{
imageBoxLeft.Image = camLeft.captureFrame();
imageBoxRight.Image = camRight.captureFrame();
}
}
else
{
camLeft.captureFrame();
camRight.captureFrame();
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
CvInvoke.cvRemap(camRight.lastFrameGray.Ptr, imgRight.Ptr, _mapxRight.Ptr, _mapyRight.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
imageBoxLeft.Image = imgLeft;
imageBoxRight.Image = imgRight;
}
//checkBoxRectify
}
private void buttonCaptureSample_Click(object sender, EventArgs e)
{
camLeft.captureSample();
camRight.captureSample();
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonStereoCalibrate_Click(object sender, EventArgs e)
{
// We should have most of the data needed from the sampling with the camData objects
int numCorners = _cbWidth * _cbHeight;
// Calc intrinsics / camera
_inPramsLeft = new IntrinsicCameraParameters();
_inPramsRight = new IntrinsicCameraParameters();
ExtrinsicCameraParameters[] outExtParamsLeft;
ExtrinsicCameraParameters[] outExtParamsRight;
//Matrix<double> foundamentalMatrix;
//Matrix<double> essentialMatrix;
outExtParamsLeft = new ExtrinsicCameraParameters[_numSamples];
outExtParamsRight = new ExtrinsicCameraParameters[_numSamples];
_outExtParamsStereo = new ExtrinsicCameraParameters();
// Building object points
// These are the points on the chessboard in local 3D coordinates
// Requires one set per sample; if the same calibration object (chessboard) is used for each sample then just use the same set of points for each sample
// Also setting sub-pixel analysis on samples
MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[_numSamples][];
for (int sc = 0; sc < _numSamples; sc++) // Samples count
{
// individual cam setup
outExtParamsLeft[sc] = new ExtrinsicCameraParameters();
outExtParamsRight[sc] = new ExtrinsicCameraParameters();
// Sub-pixel analysis
camLeft.samplesGray[sc].FindCornerSubPix(new PointF[][] { camLeft.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
camRight.samplesGray[sc].FindCornerSubPix(new PointF[][] { camRight.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
// Object points
objectPoints[sc] = new MCvPoint3D32f[numCorners];
for (int cc = 0; cc < numCorners; cc++) // chessboard corners count
{
objectPoints[sc][cc].x = cc / _cbWidth;
objectPoints[sc][cc].y = cc % _cbWidth;
objectPoints[sc][cc].z = 0.0f;
}
}
Size imageSize = new Size(_imageWidth, _imageHeight);
// Individual cam calibration
CameraCalibration.CalibrateCamera(objectPoints, camLeft.samplesChessboardCorners, imageSize, _inPramsLeft, CALIB_TYPE.DEFAULT, out outExtParamsLeft);
CameraCalibration.CalibrateCamera(objectPoints, camRight.samplesChessboardCorners, imageSize, _inPramsRight, CALIB_TYPE.DEFAULT, out outExtParamsRight);
// Stereo Cam calibration
CameraCalibration.StereoCalibrate(
objectPoints,
camLeft.samplesChessboardCorners,
camRight.samplesChessboardCorners,
_inPramsLeft,
_inPramsRight,
imageSize,
CALIB_TYPE.CV_CALIB_FIX_ASPECT_RATIO | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST | CALIB_TYPE.CV_CALIB_FIX_FOCAL_LENGTH,
new MCvTermCriteria(100, 0.001),
out _outExtParamsStereo,
out _foundamentalMatrix,
out _essentialMatrix
);
PrintIntrinsic(_inPramsLeft);
PrintIntrinsic(_inPramsRight);
}
private void listViewLeft_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void listViewRight_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void buttonSaveSamples_Click(object sender, EventArgs e)
{
camLeft.saveSampleImages(textBoxSavePath.Text, "left");
camRight.saveSampleImages(textBoxSavePath.Text, "right");
}
private void buttonLoadSamples_Click(object sender, EventArgs e)
{
camLeft.loadSampleImages(textBoxSavePath.Text, "left");
camRight.loadSampleImages(textBoxSavePath.Text, "right");
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonCapture_Click(object sender, EventArgs e)
{
}
private void buttonCaptureCurframe_Click(object sender, EventArgs e)
{
camLeft.captureFrame();
camRight.captureFrame();
camLeft.lastFrame.Save(textBoxSavePath.Text + "frameLeft" + ".bmp");
camLeft.lastFrameGray.Save(textBoxSavePath.Text + "frameLeftGray" + ".bmp");
camRight.lastFrame.Save(textBoxSavePath.Text + "frameRight" + ".bmp");
camRight.lastFrameGray.Save(textBoxSavePath.Text + "frameRightGray" + ".bmp");
}
public void StereoRectify(
IntrinsicCameraParameters intrinsicParam1,
IntrinsicCameraParameters intrinsicParam2,
Size imageSize,
ExtrinsicCameraParameters extrinsicParams,
out Matrix<double> R1,
out Matrix<double> R2,
out Matrix<double> P1,
out Matrix<double> P2,
out Matrix<double> Q,
STEREO_RECTIFY_TYPE flags,
double alpha,
Size newImageSize,
ref Rectangle validPixROI1,
ref Rectangle validPixROI2
)
{
R1 = new Matrix<double>(3, 3);
R2 = new Matrix<double>(3, 3);
P1 = new Matrix<double>(3, 4);
P2 = new Matrix<double>(3, 4);
Q = new Matrix<double>(4, 4);
CvInvoke.cvStereoRectify(
_inPramsLeft.IntrinsicMatrix.Ptr,
_inPramsRight.IntrinsicMatrix.Ptr,
_inPramsLeft.DistortionCoeffs.Ptr,
_inPramsRight.DistortionCoeffs.Ptr,
imageSize,
extrinsicParams.RotationVector.Ptr,
extrinsicParams.TranslationVector.Ptr,
R1.Ptr,
R2.Ptr,
P1.Ptr,
P2.Ptr,
Q.Ptr,
STEREO_RECTIFY_TYPE.DEFAULT,
alpha,
newImageSize,
ref validPixROI1,
ref validPixROI1);
}
public void InitUndistortRectifyMap(
IntrinsicCameraParameters intrinsicParam,
Matrix<double> R,
Matrix<double> newCameraMatrix,
out Matrix<float> mapx,
out Matrix<float> mapy
)
{
mapx = new Matrix<float>(new Size(_imageWidth, _imageHeight));
mapy = new Matrix<float>(new Size(_imageWidth, _imageHeight));
CvInvoke.cvInitUndistortRectifyMap(intrinsicParam.IntrinsicMatrix.Ptr, intrinsicParam.DistortionCoeffs.Ptr, R.Ptr, newCameraMatrix.Ptr, mapx.Ptr, mapy.Ptr);
}
private void buttonTestCalc_Click(object sender, EventArgs e)
{
// Stereo Rectify images
Matrix<double> R1;
Matrix<double> R2;
Matrix<double> P1;
Matrix<double> P2;
Matrix<double> Q;
Rectangle validPixROI1, validPixROI2;
validPixROI1 = new Rectangle();
validPixROI2 = new Rectangle();
StereoRectify(_inPramsLeft, _inPramsRight, new Size(_imageWidth, _imageHeight), _outExtParamsStereo, out R1, out R2, out P1, out P2, out Q, 0, 0, new Size(_imageWidth, _imageHeight), ref validPixROI1, ref validPixROI2);
//InitUndistortRectifyMap(_inPramsLeft, R1, P1, out _mapxLeft, out _mapyLeft);
//InitUndistortRectifyMap(_inPramsRight, R2, P2, out _mapxRight, out _mapyRight);
_inPramsLeft.InitUndistortMap(_imageWidth, _imageHeight, out _mapxLeft, out _mapyLeft);
_inPramsRight.InitUndistortMap(_imageWidth, _imageHeight, out _mapxRight, out _mapyRight);
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
// **** THIS IS WHERE IM UP TO, no errors, it just hangs ****
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
// StereoBM stereoSolver = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0);
//stereoSolver.FindStereoCorrespondence(
}
public void PrintIntrinsic(IntrinsicCameraParameters CamIntrinsic)
{
// Prints the Intrinsic camera parameters to the command line
Console.WriteLine("Intrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamIntrinsic.IntrinsicMatrix.Height; i++)
{
for (j = 0; j < CamIntrinsic.IntrinsicMatrix.Width; j++)
{
outStr = outStr + CamIntrinsic.IntrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Distortion Coefficients: ");
outStr = "";
for (j = 0; j < CamIntrinsic.DistortionCoeffs.Height; j++)
{
outStr = outStr + CamIntrinsic.DistortionCoeffs.Data[j, 0].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
}
public void PrintExtrinsic(ExtrinsicCameraParameters CamExtrinsic)
{
// Prints the Extrinsic camera parameters to the command line
Console.WriteLine("Extrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamExtrinsic.ExtrinsicMatrix.Height; i++)
{
for (j = 0; j < CamExtrinsic.ExtrinsicMatrix.Width; j++)
{
outStr = outStr + CamExtrinsic.ExtrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Rotation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.RotationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.RotationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.RotationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Translation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.TranslationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.TranslationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.TranslationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
}
}
}
Thanks!
Your maps must be images instead of matrices.
Specifically, of "Gray, float" type.
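Concretely, a hedged sketch against the same field names used in the question (only the map types change, from Matrix<float> to Image<Gray, float>; the cvInitUndistortRectifyMap and cvRemap calls stay as they were):
// Maps as single-channel float images instead of Matrix<float>.
Image<Gray, float> mapxLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
Image<Gray, float> mapyLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
CvInvoke.cvInitUndistortRectifyMap(
    _inPramsLeft.IntrinsicMatrix.Ptr,
    _inPramsLeft.DistortionCoeffs.Ptr,
    R1.Ptr,
    P1.Ptr,
    mapxLeft.Ptr,
    mapyLeft.Ptr);
CvInvoke.cvRemap(
    camLeft.lastFrameGray.Ptr,
    imgLeft.Ptr,
    mapxLeft.Ptr,
    mapyLeft.Ptr,
    (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS,
    new MCvScalar(0));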
