How to invoke the Load_Form function without using a button - C#

I am trying to create a GUI that shows three small images on the app. When a new image appears in the folder, it has to be shown in the rightmost image box. To do that, I am using a timer with a 1000 ms interval to invoke the Load_Form function. However, after some time (approximately 3 min 20 s), the program throws an "Out of memory" or "Parameter is not valid" exception. Is there another way to update the image boxes automatically (when there is a new image, show it)? The code is here, thank you in advance:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.IO;
using System.Drawing.Drawing2D;
using System.Timers;
namespace test1
{
public partial class Form1 : Form
{
string actCode = null;
string pathLogNames = @"D:\LOG\Right";
string[] imPaths;
public Form1()
{
InitializeComponent();
//timer1.Start();
}
private void pictureBox1_Click(object sender, EventArgs e)
{
}
string[] getFiles(string path, string text, string fileExtension)
{
try
{
string searchingText = text;
searchingText = "*" + text + "*";
string[] filesArray = Directory.GetFiles(path, searchingText, SearchOption.AllDirectories);
List<string> filesList = new List<string>(filesArray);
List<string> newFilesList = new List<string>();
foreach (string file in filesList)
{
if (file.Contains(fileExtension) == true)
{
newFilesList.Add(file);
}
}
string[] files = newFilesList.ToArray();
return files;
}
catch
{
string[] files = new string[0];
return files;
}
}
string[] viewerPaths;
int pathsNumber;
Image actImage;
Image actImage1;
Image actImage2;
//int actIndex;
double imageDefaultZoom;
public bool mouseFlag = false;
//Point startPoint = new Point();
public void setPaths(string[] paths, double defZoom)
{
viewerPaths = paths;
pathsNumber = viewerPaths.Length;
imageDefaultZoom = defZoom;
int total = viewerPaths.Length;
if (pathsNumber > 0)
{
actImage = Image.FromFile(viewerPaths[total - 3]);
Bitmap b = new Bitmap(actImage);
Image i = resizeImage(b, new Size(100, 100));
pictureBox1.Image = i;
actImage1 = Image.FromFile(viewerPaths[total - 2]);
Bitmap b1 = new Bitmap(actImage1);
Image i1 = resizeImage(b1, new Size(100, 100));
pictureBox2.Image = i1;
actImage2 = Image.FromFile(viewerPaths[total - 1]);
Bitmap b2 = new Bitmap(actImage2);
Image i2 = resizeImage(b2, new Size(100, 100));
pictureBox3.Image = i2;
}
}
private static System.Drawing.Image resizeImage(System.Drawing.Image imgToResize, Size size)
{
//Get the image current width
int sourceWidth = imgToResize.Width;
//Get the image current height
int sourceHeight = imgToResize.Height;
float nPercent = 0;
float nPercentW = 0;
float nPercentH = 0;
//Calulate width with new desired size
nPercentW = ((float)size.Width / (float)sourceWidth);
//Calculate height with new desired size
nPercentH = ((float)size.Height / (float)sourceHeight);
if (nPercentH < nPercentW)
nPercent = nPercentH;
else
nPercent = nPercentW;
//New Width
int destWidth = (int)(sourceWidth * nPercent);
//New Height
int destHeight = (int)(sourceHeight * nPercent);
Bitmap b = new Bitmap(destWidth, destHeight);
Graphics g = Graphics.FromImage((System.Drawing.Image)b);
g.InterpolationMode = InterpolationMode.HighQualityBicubic;
// Draw image with new width and height
g.DrawImage(imgToResize, 0, 0, destWidth, destHeight);
g.Dispose();
return (System.Drawing.Image)b;
}
private void onChanged(object sender, FileSystemEventArgs e)
{
//Dispose();
imPaths = getFiles(pathLogNames, actCode, ".tif");
setPaths(imPaths, 0.5);
}
private void Form1_Load(object sender, EventArgs e)
{
imPaths = getFiles(pathLogNames, actCode, ".tif");
setPaths(imPaths, 0.5);
}
}
}
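The "Out of memory" / "Parameter is not valid" pattern usually comes from Image.FromFile and intermediate Bitmap objects that are never disposed while the timer keeps reloading the same files. Below is a minimal sketch of one possible fix, assuming the control and helper names from the code above; the FileSystemWatcher wiring and the LoadThumb helper are illustrative assumptions, not part of the original question:
// Sketch only: dispose the previous images before assigning new ones,
// and let a FileSystemWatcher trigger the reload instead of a 1000 ms timer.
private FileSystemWatcher watcher;   // hypothetical field
private void StartWatching()
{
    watcher = new FileSystemWatcher(pathLogNames);
    watcher.IncludeSubdirectories = true;
    watcher.Created += (s, e) => this.BeginInvoke(new Action(ReloadImages));
    watcher.EnableRaisingEvents = true;
}
private void ReloadImages()
{
    string[] paths = getFiles(pathLogNames, actCode, ".tif");
    if (paths.Length < 3) return;
    // Release whatever the picture boxes currently hold.
    if (pictureBox1.Image != null) pictureBox1.Image.Dispose();
    if (pictureBox2.Image != null) pictureBox2.Image.Dispose();
    if (pictureBox3.Image != null) pictureBox3.Image.Dispose();
    pictureBox1.Image = LoadThumb(paths[paths.Length - 3]);
    pictureBox2.Image = LoadThumb(paths[paths.Length - 2]);
    pictureBox3.Image = LoadThumb(paths[paths.Length - 1]);
}
private Image LoadThumb(string path)
{
    // Load the full-size file, build the 100x100 thumbnail, and release the original immediately.
    using (Image full = Image.FromFile(path))
    {
        return resizeImage(full, new Size(100, 100));
    }
}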

Related

How can I send the byte array for Bitmap to the main page in image processing?

I am trying to send bytes to alldata.AddRange(), but I want to do it line by line. For example, I have an RGB view of 640 * 360, so the width of the view is 640. I want to take one line of 640 * 3 = 1920 bytes, convert it to gray, and send it back to alldata.AddRange(). If I send 360 such lines, I should get the whole image. How can I do that?
EDIT: I changed the code a little. Maybe this can be thought of as sending data between classes through arrays, in parts, rather than as an image processing problem.
Here is the code for Form1:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
public FilterInfoCollection devices;
public VideoCaptureDevice camera;
private void Form1_Load(object sender, EventArgs e)
{
devices = new FilterInfoCollection(FilterCategory.VideoInputDevice);
foreach (FilterInfo item in devices)
{
comboBox1.Items.Add(item.Name);
}
camera = new VideoCaptureDevice();
comboBox1.SelectedIndexChanged += comboBox1_SelectedIndexChanged;
}
private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
try
{
if (camera.IsRunning == false)
{
camera = new VideoCaptureDevice(devices[comboBox1.SelectedIndex].MonikerString);
camera.NewFrame += Camera_NewFrame;
camera.Start();
}
}
catch (Exception exc)
{
MessageBox.Show(exc.Message + "");
}
}
public void Camera_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
{
List<byte> alldata = new List<byte>();
//byte[] line = new byte[360];
Bitmap image = (Bitmap)eventArgs.Frame.Clone();
byte[] maindata = new byte[image.Height*image.Width*4];
int count = 0;
if(btnapplyWasClicked == true)
{
for (int i = 0; i < image.Height; i++)
{
for (int j = 0; j < image.Width; j++)
{
Color color = image.GetPixel(j, i);
maindata[count] = color.R;
maindata[count + 1] = color.G;
maindata[count + 2] = color.B;
maindata[count + 3] = color.A;
count = count + 4;
for (int k = 1; k <= 360; k++)
{
if (maindata[(count + 4) * k] == maindata[2560 * k])
{
dnm2510img.Gray.GrayFilter(maindata, 2560 * k);
}
}
}
}
//alldata.AddRange(maindata);
}
}
private bool btnapplyWasClicked = false;
//private bool button1WasClicked = false;
//private bool GeriALWasClicked = false;
private void btnapply_Click(object sender, EventArgs e)
{
btnapplyWasClicked = true;
}
private void button1_Click(object sender, EventArgs e)
{
//button1WasClicked = true;
}
}
}
Here is the code for Grayscale:
using AForge.Video.DirectShow;
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace dnm2510img
{
public class Gray
{
public static byte[] GrayFilter(byte[] data,int width)
{
List<byte> alldataa = new List<byte>();
for (int i = 0; i < width; i++)
{
int temp =((data[i]+data[i+1]+data[i+2]+data[i+3]) / 4);
data[i] = (byte)temp;
data[i+1] = (byte)temp;
data[i+2] = (byte)temp;
data[i + 3] = (byte)temp;
}
//alldataa.AddRange(data);
return data;
}
}
}
This is how you convert a 24 bpp bitmap to grayscale and output it to a linear array:
public static unsafe byte[] ToBgr24To8Mono(Bitmap source)
{
var width = source.Width;
var height = source.Height;
var sourceData = source.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, source.PixelFormat);
var sourceStride = sourceData.Stride;
var sourcePtr = (byte*)sourceData.Scan0;
var targetArray = new byte[width * height];
try
{
Parallel.For(0, height, y =>
{
var sourceRow = sourcePtr + y * sourceStride;
var targetRow = y * width;
for (int x = 0; x < width; x++)
{
var sourceIndex = (sourceRow + x * 3);
var value = (byte) (sourceIndex[0] * 0.11f + sourceIndex[1] * 0.59f + sourceIndex[2] * 0.3f);
targetArray[targetRow + x] = value;
}
});
}
finally
{
source.UnlockBits(sourceData);
}
return targetArray;
}
If you want to use a 32bit image as input, change x * 3 to x * 4. The parallel loop can be switched to a regular loop if you wish.
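As a rough usage sketch (assuming 24 bpp frames from the camera and that unsafe code is enabled in the project), the method could be called from the NewFrame handler like this:
public void Camera_NewFrame(object sender, AForge.Video.NewFrameEventArgs eventArgs)
{
    // Clone the frame, convert it to a linear grayscale array, then release the clone.
    using (Bitmap frame = (Bitmap)eventArgs.Frame.Clone())
    {
        byte[] gray = ToBgr24To8Mono(frame);
        // gray[y * frame.Width + x] is the gray value of pixel (x, y).
    }
}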

How can I execute an aspx from a URL returning a ContentType of image?

This function returns a resized and centered image.
I would like to execute it like thumb.aspx?image=test.jpg&width=100&height=50&needToFill=true to get a ContentType = "image/jpeg"
public static System.Drawing.Image FixedSize(Image image, int Width, int Height, bool needToFill)
{
int sourceWidth = image.Width;
int sourceHeight = image.Height;
int sourceX = 0;
int sourceY = 0;
double destX = 0;
double destY = 0;
double nScale = 0;
double nScaleW = 0;
double nScaleH = 0;
nScaleW = ((double)Width / (double)sourceWidth);
nScaleH = ((double)Height / (double)sourceHeight);
if (!needToFill)
{
nScale = Math.Min(nScaleH, nScaleW);
}
else
{
nScale = Math.Max(nScaleH, nScaleW);
destY = (Height - sourceHeight * nScale) / 2;
destX = (Width - sourceWidth * nScale) / 2;
}
if (nScale > 1)
nScale = 1;
int destWidth = (int)Math.Round(sourceWidth * nScale);
int destHeight = (int)Math.Round(sourceHeight * nScale);
System.Drawing.Bitmap bmPhoto = null;
try
{
bmPhoto = new System.Drawing.Bitmap(destWidth + (int)Math.Round(2 * destX), destHeight + (int)Math.Round(2 * destY));
}
catch (Exception ex)
{
throw new ApplicationException(string.Format("destWidth:{0}, destX:{1}, destHeight:{2}, desxtY:{3}, Width:{4}, Height:{5}",
destWidth, destX, destHeight, destY, Width, Height), ex);
}
using (System.Drawing.Graphics grPhoto = System.Drawing.Graphics.FromImage(bmPhoto))
{
grPhoto.InterpolationMode = InterpolationMode.HighQualityBicubic;
grPhoto.CompositingQuality = CompositingQuality.HighQuality;
grPhoto.SmoothingMode = SmoothingMode.HighQuality;
Rectangle to = new System.Drawing.Rectangle((int)Math.Round(destX), (int)Math.Round(destY), destWidth, destHeight);
Rectangle from = new System.Drawing.Rectangle(sourceX, sourceY, sourceWidth, sourceHeight);
grPhoto.DrawImage(image, to, from, System.Drawing.GraphicsUnit.Pixel);
return bmPhoto;
}
}
Can I just add this in somehow?
void Page_Load(Object sender, EventArgs e){
You can use an HTTP Handler to satisfy such requirement. An ASP.NET HTTP handler is the process (frequently referred to as the "endpoint") that runs in response to a request made to an ASP.NET Web application. To learn more, take a look at HTTP Handlers and HTTP Modules Overview
ASHX Example
To process a request like this: http://localhost:19610/ImageHandler.ashx?width=200&height=200
Add a new Generic Handler (.ashx) to project and name it ImageHandler.ashx
Write code to get parameters from query string and perform processing and return suitable response:
using System;
using System.Collections.Generic;
using System.Web;
using System.Drawing;
namespace WebApplication1 /*use your application namespace*/
{
public class ImageHandler: IHttpHandler
{
public void ProcessRequest(HttpContext context)
{
int width = 0;
int.TryParse(context.Request.QueryString["width"], out width);
var height = 0;
int.TryParse(context.Request.QueryString["height"], out height);
if (width <= 0) width = 100;
if (height <= 0) height = 100;
using (var image = new Bitmap(width, height))
{
using (var g = Graphics.FromImage(image))
g.Clear(Color.Red);
byte[] buffer =
(byte[])new ImageConverter().ConvertTo(image, typeof(byte[]));
context.Response.ContentType = "image/bmp";
context.Response.OutputStream.Write(buffer, 0, buffer.Length);
}
}
public bool IsReusable { get { return false; } }
}
}
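The handler above just returns a solid red bitmap to demonstrate the plumbing. Here is a hedged sketch of how the question's FixedSize method might be wired into the same handler; the image folder, query-string names and JPEG output are assumptions, and FixedSize must be reachable from the handler class:
public void ProcessRequest(HttpContext context)
{
    int width, height;
    int.TryParse(context.Request.QueryString["width"], out width);
    int.TryParse(context.Request.QueryString["height"], out height);
    bool needToFill;
    bool.TryParse(context.Request.QueryString["needToFill"], out needToFill);
    if (width <= 0) width = 100;
    if (height <= 0) height = 50;
    // Resolve the requested file inside the application; Path.GetFileName blocks "../" tricks.
    string fileName = context.Request.QueryString["image"];
    string path = context.Server.MapPath("~/images/" + System.IO.Path.GetFileName(fileName));
    using (var source = System.Drawing.Image.FromFile(path))
    using (var resized = FixedSize(source, width, height, needToFill))
    {
        context.Response.ContentType = "image/jpeg";
        resized.Save(context.Response.OutputStream, System.Drawing.Imaging.ImageFormat.Jpeg);
    }
}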
ASPX Example
If for any reason you want to use an aspx page instead, you can create an aspx file without any code-behind file like this:
<%@ Page Title="Home Page" Language="C#" %>
<script language="C#" runat="server">
protected void Page_Load(object sender, EventArgs e)
{
int width = 0;
int.TryParse(Request.QueryString["width"], out width);
var height = 0;
int.TryParse(Request.QueryString["height"], out height);
if (width <= 0) width = 100;
if (height <= 0) height = 100;
using (var image = new System.Drawing.Bitmap(width, height))
{
using (var g = System.Drawing.Graphics.FromImage(image))
g.Clear(System.Drawing.Color.Red);
byte[] buffer =
(byte[])new System.Drawing.ImageConverter().ConvertTo(image, typeof(byte[]));
Response.ContentType = "image/bmp";
Response.OutputStream.Write(buffer, 0, buffer.Length);
}
}
</script>

Change alpha coefficient using LockBits

I have written a function which changes the alpha coefficient of an image. I use SetPixel and GetPixel, which are very slow. I found out that the LockBits method is faster. How can I do it with LockBits?
Here is my current code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace WindowsFormsApplication2
{
public partial class Form1 : Form
{
private static Image Tran(Image s,int alpha)
{
int x = 0, y = 0;
Bitmap tImage = new Bitmap(s);
for (x = 0; x < tImage.Width; x++)
{
for (y = 0; y < tImage.Height; y++)
{
tImage.SetPixel(x, y, Color.FromArgb(alpha, tImage.GetPixel(x, y).R, tImage.GetPixel(x, y).G, tImage.GetPixel(x, y).B));
}
}
return tImage;
}
public Form1()
{
InitializeComponent();
trackBar1.TickStyle = TickStyle.Both;
trackBar1.Orientation = Orientation.Vertical;
trackBar1.Minimum = 0;
trackBar1.Maximum = 255;
trackBar1.Height = 101;
trackBar1.Value = 255;
pictureBox1.Image = Image.FromFile("C:\\Users\\rati\\Desktop\\water.jpg");
pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
}
private void trackBar1_Scroll(object sender, EventArgs e)
{
pictureBox1.Image = ChangeAlpha(pictureBox1.Image, trackBar1.Value);
textBox1.Text = trackBar1.Value.ToString();
}
}
}
You can change the opacity of your image by drawing it in a new bitmap using a new ColorMatrix and assigning a float value between 0 and 1 to its Matrix33 as its new alpha value:
public Image ChangeAlpha(Image img, int value)
{
if (value < 0 || value > 255)
throw new Exception("value must be between 0 and 255");
Bitmap bmp = new Bitmap(img.Width, img.Height); // Determining Width and Height of Source Image
Graphics graphics = Graphics.FromImage(bmp);
ColorMatrix colormatrix = new ColorMatrix();
colormatrix.Matrix33 = value / 255f;
ImageAttributes imgAttribute = new ImageAttributes();
imgAttribute.SetColorMatrix(colormatrix, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
graphics.DrawImage(img, new Rectangle(0, 0, bmp.Width, bmp.Height), 0, 0, img.Width, img.Height, GraphicsUnit.Pixel, imgAttribute);
graphics.Dispose(); // Releasing all resource used by graphics
return bmp;
}
And here is a sample usage:
private void button1_Click(object sender, EventArgs e)
{
int opacityvalue = 127;
var img = ChangeAlpha(Image.FromFile(@"d:\1.png"), opacityvalue);
img.Save(@"d:\2.png");
}
Don't forget to add using System.Drawing; and using System.Drawing.Imaging;.
You can see before and after calling the function with value=127 below:
EDIT
If you want to see the result in a PictureBox, you should pay attention to using 2 different picture boxes, one for the original image and one for the changed image:
private void trackBar1_Scroll(object sender, EventArgs e)
{
this.pictureBox2.Image = ChangeAlpha(this.pictureBox1.Image, this.trackBar1.Value);
}
As I see in your code, when you change the opacity you set the result as the image of pictureBox1 again and then apply the opacity to that result. In other words, you apply opacity to an image that already had opacity applied, over and over again.
public Form1()
{
...
originalImage = new Bitmap("C:\\Users\\rati\\Desktop\\water.jpg");
pictureBox1.Image = originalImage;
...
}
// Add an extra field to the Form class.
Bitmap originalImage;
void trackBar1_Scroll(object sender, EventArgs e)
{
pictureBox1.Image = ChangeAlpha((byte)trackBar1.Value);
textBox1.Text = trackBar1.Value.ToString();
}
Image ChangeAlpha(byte alpha)
{
Bitmap bmp = new Bitmap(originalImage);
Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
BitmapData bmpData = bmp.LockBits(rect, ImageLockMode.ReadWrite, bmp.PixelFormat);
IntPtr ptr = bmpData.Scan0;
int bytes = Math.Abs(bmpData.Stride) * bmp.Height;
byte[] rgbValues = new byte[bytes];
Marshal.Copy(ptr, rgbValues, 0, bytes);
// Set every fourth value to alpha. A 32bpp bitmap will change transparency.
for (int counter = 3; counter < rgbValues.Length; counter += 4)
rgbValues[counter] = alpha;
// You can also try parallelizing the loop.
//int length = rgbValues.Length / 4;
//Parallel.For(3, length, counter => rgbValues[counter * 4 - 1] = alpha);
Marshal.Copy(rgbValues, 0, ptr, bytes);
bmp.UnlockBits(bmpData);
return bmp;
}
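One caveat to hedge: the loop above only changes transparency if the locked bitmap really has 4 bytes per pixel. An image loaded from a JPEG usually comes back as Format24bppRgb, in which case locking with bmp.PixelFormat exposes no alpha byte to overwrite. A small sketch (my own assumption, not part of the answer above) of forcing a 32 bpp copy before the LockBits loop:
Bitmap bmp32 = new Bitmap(originalImage.Width, originalImage.Height, PixelFormat.Format32bppArgb);
using (Graphics g = Graphics.FromImage(bmp32))
{
    // Redraw the source onto an ARGB surface so every pixel has an alpha byte to modify.
    g.DrawImage(originalImage, new Rectangle(0, 0, bmp32.Width, bmp32.Height));
}
// bmp32 can now be passed through the LockBits loop shown above.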

Named argument specifications must appear after all fixed arguments have been specified

I'm working on image processing in C# and I have two major errors:
Error: Named argument specifications must appear after all fixed arguments have been specified
Error: 'System.Drawing.Size' is a 'type' but is used like a 'variable'
This is my code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.Util;
using Emgu.CV.CvEnum;
using Emgu.CV.GPU;
using Emgu.CV.UI;
namespace SNAKE_C_Sharp
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void imageBox1_Click(object sender, EventArgs e)
{
}
private void Form1_Load(object sender, EventArgs e)
{
}
private void button1_Click(object sender, EventArgs e)
{
using (OpenFileDialog dialog = new OpenFileDialog())
{
dialog.Filter = "(*.*)|*.*";
if (dialog.ShowDialog() == DialogResult.OK)
{
pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
Image image = Image.FromFile(dialog.FileName);
pictureBox1.Image = image;
}
}
}
private void button2_Click(object sender, EventArgs e)
{
this.Close();
}
struct parameter
{
public double alpha { get; set; }
public double beta { get; set; }
public double gamma { get; set; }
};
unsafe private void button3_Click(object sender, EventArgs e)
{
{
int length = 1000;
MCvPoint2D64f* contour;
MCvPoint2D64f center = new MCvPoint2D64f();
var snake_param = new List<parameter>();
snake_param.Add(new parameter { alpha = 0.1, beta = 0.1, gamma = 0.1, });
//Image src_img = pictureBox1.Image;
IntPtr dst_img = new IntPtr();
//IntPtr src_img = Emgu.CV.CvInvoke.cvLoadImage("pictureBox1.Image", Emgu.CV.CvEnum.LOAD_IMAGE_TYPE.CV_LOAD_IMAGE_COLOR);
Bitmap bitmapp = new Bitmap("pictureBox1.Image");
Image<Bgr, byte> image = new Image<Bgr, byte>(bitmapp);
center.x = image.Width;
center.y = image.Height;
int i;
for (i = 0; i < length; i++)
{
contour[i].x = (int)(center.x * Math.Cos(2 * Math.PI * i / length) + center.x);
contour[i].y = (int)(center.y * Math.Sin(2 * Math.PI * i / length) + center.y);
}
for (i = 0; i < length - 1; i++)
{
CvInvoke.cvLine(dst_img, contour[i], contour[i + 1], new MCvScalar(255, 0, 0), 2, lineType: LINE_TYPE.EIGHT_CONNECTED,0);
}
CvInvoke.cvLine(dst_img, contour[length - 1], contour[0], new MCvScalar(255, 0, 0), 2, lineType: LINE_TYPE.EIGHT_CONNECTED, 0);
IntPtr src_img = image.Ptr;
CvInvoke.cvSnakeImage(src_img, contour, length, snake_param[1].alpha, snake_param[2].beta, snake_param[3].gamma, 1.0f, contour[i], System.Drawing.Size(15, 15), new MCvTermCriteria(1, 0.0), true);
CvInvoke.cvCvtColor(src_img, dst_img, COLOR_CONVERSION.GRAY2RGB);
for (i = 0; i < length - 1; i++)
{
CvInvoke.cvLine(dst_img, contour[i], contour[i + 1], new MCvScalar(255, 0, 0), 2, lineType: LINE_TYPE.EIGHT_CONNECTED, 0);
}
CvInvoke.cvLine(dst_img, contour[length - 1], contour[0], new MCvScalar(255, 0, 0), 2, lineType: LINE_TYPE.EIGHT_CONNECTED, 0);
pictureBox2.SizeMode = PictureBoxSizeMode.StretchImage;
Bitmap bitmappbb = new Bitmap("dst_img");
Image<Bgr, byte> imagee = new Image<Bgr, byte>(bitmapp);
pictureBox2.Image = bitmappbb;
}
}
private void imageBox1_Click_1(object sender, EventArgs e)
{
}
private void panAndZoomPictureBox1_Click(object sender, EventArgs e)
{
}
private void imageBox1_Click_2(object sender, EventArgs e)
{
}
}
}
How can I fix the above errors?
This is one of the calls that caused error 1:
CvInvoke.cvLine(dst_img, contour[i], contour[i + 1], new MCvScalar(255, 0, 0), 2, lineType: LINE_TYPE.EIGHT_CONNECTED,0);
I'll make it more readable...
CvInvoke.cvLine(
dst_img,
contour[i],
contour[i + 1],
new MCvScalar(255, 0, 0),
2,
lineType: LINE_TYPE.EIGHT_CONNECTED,
0
);
See how the 2nd-to-last argument uses a name (lineType:) but is followed by an unnamed argument? The compiler has no way to know which parameter that final 0 is meant for.
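One way to make that call compile, assuming the final positional parameter of cvLine is the shift argument (check the exact parameter name in your Emgu CV version), is to name every argument that follows the first named one:
CvInvoke.cvLine(
    dst_img,
    contour[i],
    contour[i + 1],
    new MCvScalar(255, 0, 0),
    2,
    lineType: LINE_TYPE.EIGHT_CONNECTED,
    shift: 0   // once one argument is named, the remaining ones must be named too (or drop the names)
);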
The 2nd error is, as @LajosArpad stated, that you need to add a new in front of your use of System.Drawing.Size(..).
Instead of
CvInvoke.cvSnakeImage(src_img, contour, length, snake_param[1].alpha, snake_param[2].beta, snake_param[3].gamma, 1.0f, contour[i], System.Drawing.Size(15, 15), new MCvTermCriteria(1, 0.0), true);
you need this:
CvInvoke.cvSnakeImage(src_img, contour, length, snake_param[1].alpha, snake_param[2].beta, snake_param[3].gamma, 1.0f, contour[i], new System.Drawing.Size(15, 15), new MCvTermCriteria(1, 0.0), true);
This should fix the second error. Without the new keyword you do not have a System.Drawing.Size instance.
EDIT:
I am not going to test your code, nor read it line by line, so I need more information about your first error to give you the solution. Can you tell me on which line the exception is thrown?
Also, might I suggest that you pay more attention to the indentation of your code, as it is difficult to read in such an unstructured form. It is not impossible to read, but most of us (including myself) will not read it.
I fixed the last error and here is my new code:
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, System.EventArgs e)
{
using (OpenFileDialog dialog = new OpenFileDialog())
{
dialog.Filter = "JPEG|*.jpg|PNG|*.PNG";
if (dialog.ShowDialog() == DialogResult.OK)
{
pictureBox1.SizeMode = PictureBoxSizeMode.StretchImage;
Image image = Image.FromFile(dialog.FileName);
pictureBox1.Image = image;
}
}
}
private void button2_Click(object sender, EventArgs e)
{
this.Close();
}
private void Form1_Load(object sender, EventArgs e)
{
}
struct parameter
{
public float alpha { get; set; }
public float beta { get; set; }
public float gamma { get; set; }
};
unsafe private void button3_Click(object sender, EventArgs e)
{
{
int length = 1000;
Point *contour;
Point center = new Point();
var snake_param = new List<parameter>();
snake_param.Add(new parameter { alpha= 0.1f , beta = 0.1f, gamma= 0.1f, });
IntPtr dst_img= new IntPtr();
Bitmap bitmap = new Bitmap("pictureBox1.Image");
Image<Bgr, byte> image = new Image<Bgr, byte>(bitmap);
center.X = image.Width;
center.Y = image.Height;
int i;
for (i = 0; i < length; i++)
{
contour[i].X = (int)(center.X * Math.Cos(2 * Math.PI * i / length) + center.X);
contour[i].Y = (int)(center.Y * Math.Sin(2 * Math.PI * i / length) + center.Y);
}
LINE_TYPE lignetype = new LINE_TYPE();
for (i = 0; i < length - 1; i++)
{
CvInvoke.cvLine(
dst_img,
contour[i],
contour[i + 1],
new MCvScalar(255,0,0),
2,
LINE_TYPE.EIGHT_CONNECTED,
0 );
}
CvInvoke.cvLine
(
dst_img,
contour[length - 1],
contour[0],
new MCvScalar(255,0,0),
2,
LINE_TYPE.EIGHT_CONNECTED,
0
);
IntPtr ctr =new IntPtr();
//public void PixelToInkSpace(
//IntPtr a
//ref Point contour
//);
IntPtr src_img = image.Ptr;
CvInvoke.cvSnakeImage(
src_img,
contour[i],
length,
snake_param.[1].alfa,
snake_param[2].beta,
snake_param[3].gamma,
1,
new System.Drawing.Size(15, 15),
new MCvTermCriteria(1,0.0),
1);
CvInvoke.cvCvtColor(
src_img,
dst_img,
COLOR_CONVERSION.GRAY2RGB );
for (i = 0; i < length - 1; i++)
{
CvInvoke.cvLine(
dst_img,
contour[i],
contour[i + 1],
new MCvScalar(255,0,0),
2,
LINE_TYPE.EIGHT_CONNECTED,
0 );
}
CvInvoke.cvLine(
dst_img,
contour[length - 1],
contour[0],
new MCvScalar(255,0,0),
2,
LINE_TYPE.EIGHT_CONNECTED,
0);
pictureBox2.SizeMode = PictureBoxSizeMode.StretchImage;
Bitmap bitmappbb = new Bitmap("dst_img");
Image<Bgr, byte> imagee = new Image<Bgr, byte>(bitmappbb);
pictureBox2.Image = bitmappbb;
}
}
}
}
But my error now is different. As I'm translating my code from C++ to C#, I discovered that the cvSnakeImage signature is:
public static void cvSnakeImage(
IntPtr image,
IntPtr points,
int length,
float[] alpha,
float[] beta,
float[] gamma,
int coeffUsage,
Size win,
MCvTermCriteria criteria,
bool calcGradient
)
I didn't find a way to convert the variable "contour" of type "Point" to "IntPtr",
nor a way to pass alpha, beta and gamma as float[].
@Timothy Walters
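A hedged sketch of one common interop approach for both problems, matching the cvSnakeImage signature quoted above: pin a managed Point[] with GCHandle to get an IntPtr, and pass each coefficient as a one-element float[]. The pinning pattern is standard .NET interop, but I have not verified it against this exact Emgu CV version:
// Requires: using System.Runtime.InteropServices;
Point[] contour = new Point[length];
for (int i = 0; i < length; i++)
{
    contour[i].X = (int)(center.X * Math.Cos(2 * Math.PI * i / length) + center.X);
    contour[i].Y = (int)(center.Y * Math.Sin(2 * Math.PI * i / length) + center.Y);
}
// Pin the array so the GC cannot move it, then hand its address to the native call.
GCHandle handle = GCHandle.Alloc(contour, GCHandleType.Pinned);
try
{
    IntPtr contourPtr = handle.AddrOfPinnedObject();
    CvInvoke.cvSnakeImage(
        src_img,
        contourPtr,
        length,
        new float[] { 0.1f },   // alpha
        new float[] { 0.1f },   // beta
        new float[] { 0.1f },   // gamma
        1,                      // coeffUsage
        new System.Drawing.Size(15, 15),
        new MCvTermCriteria(1, 0.0),
        true);
}
finally
{
    handle.Free();              // always release the pin
}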

emgu cv CvInvoke.cvRemap hangs when trying to undistort from stereo calibration data

I'm trying to implement a stereo camera calibration app using Emgu CV.
My problem is that when I try to use CvInvoke.cvRemap to undistort an image, the function just hangs. No errors or crashes, it just hangs, and I've left it for 2 hours in case it was just being slow. Here's what I'm doing:
1. Capturing 10 pairs of chessboard samples (left and right), making sure FindChessboardCorners works on each. I'm not doing anything special to sync the cameras, just capturing them at the same time.
2. Generating a set of object points based on the chessboard used.
3. Doing a separate CalibrateCamera on the left and right images of each sample using the object points from 2 and the image points from 1.
4. Doing a StereoCalibrate using the IntrinsicCameraParameters generated by CalibrateCamera in 3, the object points in 2, and the image points captured from the chessboards in 1.
5. Doing a StereoRectify using the IntrinsicCameraParameters from 3/4.
6. Generating mapx and mapy for both left and right from cvInitUndistortRectifyMap using the output from 5.
7. Attempting to cvRemap using mapx and mapy from 6 and fresh images captured from the cameras.
8. NEXT: Use StereoBM.FindStereoCorrespondence and PointCollection.ReprojectImageTo3D to generate a point cloud from my hopefully calibrated stereo data.
So when I get to step 7, cvRemap just hangs. I've gotten cvRemap to work capturing from a single camera though, so I know the function is working to some degree with my setup.
I've written a class to manage multiple cameras:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Drawing;
using System.Drawing.Drawing2D;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
class camData
{
public Capture capture;
public Image<Bgr, Byte> lastFrame;
public Image<Gray, Byte> lastFrameGray;
public bool lastChessboardFound;
public PointF[] lastChessboardCorners;
public Image<Gray, Byte>[] samplesGray;
public PointF[][] samplesChessboardCorners;
public Size cbDimensions;
public Size imageDimensions;
public int cursampleIndex = 0;
public ImageList sampleIcons;
private Image<Gray, Byte> _chessBoardDisplay;
private int _iconWidth = 160;
private int _icnonHeight = 90;
private int _numSamples = 0;
public int numSamples()
{
return _numSamples;
}
public void numSamples(int val)
{
_numSamples = val;
this.samplesGray = new Image<Gray, Byte>[val];
this.samplesChessboardCorners = new PointF[val][];
this.sampleIcons.ImageSize = new Size(_iconWidth, _icnonHeight);
Bitmap tmp = new Bitmap(_iconWidth, _icnonHeight);
this.sampleIcons.Images.Clear();
for (int c = 0; c < _numSamples; c++) this.sampleIcons.Images.Add(tmp);
}
public camData(int camIndex, int capWidth, int capHeight, int pcbWidth, int pcbHeight, int pNumSamples)
{
this.sampleIcons = new ImageList();
try
{
this.capture = new Capture(camIndex);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_WIDTH, capWidth);
this.capture.SetCaptureProperty(CAP_PROP.CV_CAP_PROP_FRAME_HEIGHT, capHeight);
}
catch (Exception e)
{
MessageBox.Show(e.Message);
return;
}
this.imageDimensions = new Size(capWidth, capHeight);
this.cbDimensions = new Size(pcbWidth, pcbHeight);
this.numSamples(pNumSamples);
}
public Image<Gray, Byte> captureFrame()
{
this.lastFrame = this.capture.QueryFrame();
this.lastFrameGray = this.lastFrame.Convert<Gray, Byte>();
return this.lastFrameGray;
}
public int captureSample()
{
this.detectChessboard(true); // detectChessboard calls -> captureFrame
if (lastChessboardFound)
{
this.samplesGray[cursampleIndex] = this.lastFrameGray;
this.samplesChessboardCorners[cursampleIndex] = this.lastChessboardCorners;
this.sampleIcons.Images[this.cursampleIndex] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.cursampleIndex++;
if (this.cursampleIndex >= _numSamples) this.cursampleIndex = 0;
}
return cursampleIndex;
}
public void clearSamples()
{
this.cursampleIndex = 0;
this.numSamples(_numSamples);
}
public Image<Gray, Byte> detectChessboard(bool pDoCapture)
{
if (pDoCapture) this.captureFrame();
this.lastChessboardFound = CameraCalibration.FindChessboardCorners(this.lastFrameGray, this.cbDimensions, CALIB_CB_TYPE.ADAPTIVE_THRESH | CALIB_CB_TYPE.FILTER_QUADS, out this.lastChessboardCorners);
_chessBoardDisplay = this.lastFrameGray.Clone();
CameraCalibration.DrawChessboardCorners(this._chessBoardDisplay, this.cbDimensions, this.lastChessboardCorners, this.lastChessboardFound);
return this._chessBoardDisplay;
}
public void saveSampleImages(string pPath, string pID)
{
for(int ic = 0; ic < this._numSamples; ic++)
{
this.samplesGray[ic].Save(pPath + pID + ic.ToString() + ".bmp");
}
}
public void loadSampleImages(string pPath, string pID)
{
clearSamples();
for (int ic = 0; ic < this._numSamples; ic++)
{
this.lastFrameGray = new Image<Gray, byte>(new Bitmap(pPath + pID + ic.ToString() + ".bmp"));
this.detectChessboard(false);
this.samplesChessboardCorners[ic] = this.lastChessboardCorners;
this.sampleIcons.Images[ic] = this.lastFrameGray.ToBitmap(_iconWidth, _icnonHeight);
this.samplesGray[ic] = this.lastFrameGray;
}
}
}
}
And here's my form code with the rest of the calibration logic:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Runtime.InteropServices;
using Emgu.CV.Util;
using Emgu.CV;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;
using Emgu.CV.VideoSurveillance;
namespace Capture2Cams
{
public partial class CaptureForm : Form
{
private static camData camLeft;
private static camData camRight;
private int _numSamples = 10; // Number of calibration samples
private int _imageWidth = 1280; // web cam resolution
private int _imageHeight = 720; // web cam resolution
private int _cbWidth = 9; // chessboard corner count
private int _cbHeight = 5; // chessboard corner count
// TODO: Test post calibration values, these will need to be loaded and saved
private static Matrix<double> _foundamentalMatrix;
private static Matrix<double> _essentialMatrix;
private static IntrinsicCameraParameters _inPramsLeft;
private static IntrinsicCameraParameters _inPramsRight;
private static ExtrinsicCameraParameters _outExtParamsStereo;
private Matrix<float> _mapxLeft;
private Matrix<float> _mapyLeft;
private Matrix<float> _mapxRight;
private Matrix<float> _mapyRight;
public CaptureForm()
{
InitializeComponent();
Run();
}
void Run()
{
camLeft = new camData(0, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
camRight = new camData(1, _imageWidth, _imageHeight, _cbWidth, _cbHeight, _numSamples);
this.listViewLeft.LargeImageList = camLeft.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewLeft.Items.Add(curItem);
}
this.listViewRight.LargeImageList = camRight.sampleIcons;
for (int c = 0; c < _numSamples; c++)
{
ListViewItem curItem = new ListViewItem();
curItem.ImageIndex = c;
curItem.Text = "Sample" + c.ToString();
this.listViewRight.Items.Add(curItem);
}
Application.Idle += ProcessFrame;
}
void ProcessFrame(object sender, EventArgs e)
{
if (!checkBoxRectify.Checked)
{
if (this.checkBoxCapCB.Checked)
{
imageBoxLeft.Image = camLeft.detectChessboard(true);
imageBoxRight.Image = camRight.detectChessboard(true);
}
else
{
imageBoxLeft.Image = camLeft.captureFrame();
imageBoxRight.Image = camRight.captureFrame();
}
}
else
{
camLeft.captureFrame();
camRight.captureFrame();
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
CvInvoke.cvRemap(camRight.lastFrameGray.Ptr, imgRight.Ptr, _mapxRight.Ptr, _mapyRight.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
imageBoxLeft.Image = imgLeft;
imageBoxRight.Image = imgRight;
}
//checkBoxRectify
}
private void buttonCaptureSample_Click(object sender, EventArgs e)
{
camLeft.captureSample();
camRight.captureSample();
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonStereoCalibrate_Click(object sender, EventArgs e)
{
// We should have most of the data needed from the sampling with the camData objects
int numCorners = _cbWidth * _cbHeight;
// Calc intrisitcs / camera
_inPramsLeft = new IntrinsicCameraParameters();
_inPramsRight = new IntrinsicCameraParameters();
ExtrinsicCameraParameters[] outExtParamsLeft;
ExtrinsicCameraParameters[] outExtParamsRight;
//Matrix<double> foundamentalMatrix;
//Matrix<double> essentialMatrix;
outExtParamsLeft = new ExtrinsicCameraParameters[_numSamples];
outExtParamsRight = new ExtrinsicCameraParameters[_numSamples];
_outExtParamsStereo = new ExtrinsicCameraParameters();
// Building object points
// These are the points on the cessboard in local 3d coordinates
// Requires one set per sample, if the same calibration object (chessboard) is used for each sample then just use the same set of points for each sample
// Also setting sub pixel analasys on samples
MCvPoint3D32f[][] objectPoints = new MCvPoint3D32f[_numSamples][];
for (int sc = 0; sc < _numSamples; sc++) // Samples count
{
// indivual cam setup
outExtParamsLeft[sc] = new ExtrinsicCameraParameters();
outExtParamsRight[sc] = new ExtrinsicCameraParameters();
// Sub pixel analasys
camLeft.samplesGray[sc].FindCornerSubPix(new PointF[][] { camLeft.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
camRight.samplesGray[sc].FindCornerSubPix(new PointF[][] { camRight.samplesChessboardCorners[sc] }, new Size(10, 10), new Size(-1, -1), new MCvTermCriteria(300, 0.01));
// Object points
objectPoints[sc] = new MCvPoint3D32f[numCorners];
for (int cc = 0; cc < numCorners; cc++) // chessboard corners count
{
objectPoints[sc][cc].x = cc / _cbWidth;
objectPoints[sc][cc].y = cc % _cbWidth;
objectPoints[sc][cc].z = 0.0f;
}
}
Size imageSize = new Size(_imageWidth, _imageHeight);
// Indivual cam camibration
CameraCalibration.CalibrateCamera(objectPoints, camLeft.samplesChessboardCorners, imageSize, _inPramsLeft, CALIB_TYPE.DEFAULT, out outExtParamsLeft);
CameraCalibration.CalibrateCamera(objectPoints, camRight.samplesChessboardCorners, imageSize, _inPramsRight, CALIB_TYPE.DEFAULT, out outExtParamsRight);
// Stereo Cam calibration
CameraCalibration.StereoCalibrate(
objectPoints,
camLeft.samplesChessboardCorners,
camRight.samplesChessboardCorners,
_inPramsLeft,
_inPramsRight,
imageSize,
CALIB_TYPE.CV_CALIB_FIX_ASPECT_RATIO | CALIB_TYPE.CV_CALIB_ZERO_TANGENT_DIST | CALIB_TYPE.CV_CALIB_FIX_FOCAL_LENGTH,
new MCvTermCriteria(100, 0.001),
out _outExtParamsStereo,
out _foundamentalMatrix,
out _essentialMatrix
);
PrintIntrinsic(_inPramsLeft);
PrintIntrinsic(_inPramsRight);
}
private void listViewLeft_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void listViewRight_ItemSelectionChanged(object sender, ListViewItemSelectionChangedEventArgs e)
{
}
private void buttonSaveSamples_Click(object sender, EventArgs e)
{
camLeft.saveSampleImages(textBoxSavePath.Text, "left");
camRight.saveSampleImages(textBoxSavePath.Text, "right");
}
private void buttonLoadSamples_Click(object sender, EventArgs e)
{
camLeft.loadSampleImages(textBoxSavePath.Text, "left");
camRight.loadSampleImages(textBoxSavePath.Text, "right");
this.listViewLeft.Refresh();
this.listViewRight.Refresh();
}
private void buttonCapture_Click(object sender, EventArgs e)
{
}
private void buttonCaptureCurframe_Click(object sender, EventArgs e)
{
camLeft.captureFrame();
camRight.captureFrame();
camLeft.lastFrame.Save(textBoxSavePath.Text + "frameLeft" + ".bmp");
camLeft.lastFrameGray.Save(textBoxSavePath.Text + "frameLeftGray" + ".bmp");
camRight.lastFrame.Save(textBoxSavePath.Text + "frameRight" + ".bmp");
camRight.lastFrameGray.Save(textBoxSavePath.Text + "frameRightGray" + ".bmp");
}
public void StereoRectify(
IntrinsicCameraParameters intrinsicParam1,
IntrinsicCameraParameters intrinsicParam2,
Size imageSize,
ExtrinsicCameraParameters extrinsicParams,
out Matrix<double> R1,
out Matrix<double> R2,
out Matrix<double> P1,
out Matrix<double> P2,
out Matrix<double> Q,
STEREO_RECTIFY_TYPE flags,
double alpha,
Size newImageSize,
ref Rectangle validPixROI1,
ref Rectangle validPixROI2
)
{
R1 = new Matrix<double>(3, 3);
R2 = new Matrix<double>(3, 3);
P1 = new Matrix<double>(3, 4);
P2 = new Matrix<double>(3, 4);
Q = new Matrix<double>(4, 4);
CvInvoke.cvStereoRectify(
_inPramsLeft.IntrinsicMatrix.Ptr,
_inPramsRight.IntrinsicMatrix.Ptr,
_inPramsLeft.DistortionCoeffs.Ptr,
_inPramsRight.DistortionCoeffs.Ptr,
imageSize,
extrinsicParams.RotationVector.Ptr,
extrinsicParams.TranslationVector.Ptr,
R1.Ptr,
R2.Ptr,
P1.Ptr,
P2.Ptr,
Q.Ptr,
STEREO_RECTIFY_TYPE.DEFAULT,
alpha,
newImageSize,
ref validPixROI1,
ref validPixROI1);
}
public void InitUndistortRectifyMap(
IntrinsicCameraParameters intrinsicParam,
Matrix<double> R,
Matrix<double> newCameraMatrix,
out Matrix<float> mapx,
out Matrix<float> mapy
)
{
mapx = new Matrix<float>(new Size(_imageWidth, _imageHeight));
mapy = new Matrix<float>(new Size(_imageWidth, _imageHeight));
CvInvoke.cvInitUndistortRectifyMap(intrinsicParam.IntrinsicMatrix.Ptr, intrinsicParam.DistortionCoeffs.Ptr, R.Ptr, newCameraMatrix.Ptr, mapx.Ptr, mapy.Ptr);
}
private void buttonTestCalc_Click(object sender, EventArgs e)
{
// Stereo Rectify images
Matrix<double> R1;
Matrix<double> R2;
Matrix<double> P1;
Matrix<double> P2;
Matrix<double> Q;
Rectangle validPixROI1, validPixROI2;
validPixROI1 = new Rectangle();
validPixROI2 = new Rectangle();
StereoRectify(_inPramsLeft, _inPramsRight, new Size(_imageWidth, _imageHeight), _outExtParamsStereo, out R1, out R2, out P1, out P2, out Q, 0, 0, new Size(_imageWidth, _imageHeight), ref validPixROI1, ref validPixROI2);
//InitUndistortRectifyMap(_inPramsLeft, R1, P1, out _mapxLeft, out _mapyLeft);
//InitUndistortRectifyMap(_inPramsRight, R2, P2, out _mapxRight, out _mapyRight);
_inPramsLeft.InitUndistortMap(_imageWidth, _imageHeight, out _mapxLeft, out _mapyLeft);
_inPramsRight.InitUndistortMap(_imageWidth, _imageHeight, out _mapxRight, out _mapyRight);
Image<Gray, byte> imgLeft = camLeft.lastFrameGray.Clone();
Image<Gray, byte> imgRight = camRight.lastFrameGray.Clone();
// **** THIS IS WHERE IM UP TO, no errors, it just hangs ****
CvInvoke.cvRemap(camLeft.lastFrameGray.Ptr, imgLeft.Ptr, _mapxLeft.Ptr, _mapyLeft.Ptr, (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
// StereoBM stereoSolver = new StereoBM(Emgu.CV.CvEnum.STEREO_BM_TYPE.BASIC, 0);
//stereoSolver.FindStereoCorrespondence(
}
public void PrintIntrinsic(IntrinsicCameraParameters CamIntrinsic)
{
// Prints the Intrinsic camera parameters to the command line
Console.WriteLine("Intrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamIntrinsic.IntrinsicMatrix.Height; i++)
{
for (j = 0; j < CamIntrinsic.IntrinsicMatrix.Width; j++)
{
outStr = outStr + CamIntrinsic.IntrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Distortion Coefficients: ");
outStr = "";
for (j = 0; j < CamIntrinsic.DistortionCoeffs.Height; j++)
{
outStr = outStr + CamIntrinsic.DistortionCoeffs.Data[j, 0].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
}
public void PrintExtrinsic(ExtrinsicCameraParameters CamExtrinsic)
{
// Prints the Extrinsic camera parameters to the command line
Console.WriteLine("Extrinsic Matrix:");
string outStr = "";
int i = 0;
int j = 0;
for (i = 0; i < CamExtrinsic.ExtrinsicMatrix.Height; i++)
{
for (j = 0; j < CamExtrinsic.ExtrinsicMatrix.Width; j++)
{
outStr = outStr + CamExtrinsic.ExtrinsicMatrix.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Rotation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.RotationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.RotationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.RotationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
Console.WriteLine("Translation Vector: ");
outStr = "";
for (i = 0; i < CamExtrinsic.TranslationVector.Height; i++)
{
for (j = 0; j < CamExtrinsic.TranslationVector.Width; j++)
{
outStr = outStr + CamExtrinsic.TranslationVector.Data[i, j].ToString();
outStr = outStr + " ";
}
Console.WriteLine(outStr);
outStr = "";
}
}
}
}
Thanks!
Your maps must be images instead of matrices.
Specifically, of "Gray, float" type.
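Following that suggestion, here is a hedged sketch of how the map generation and the remap call might look with Image<Gray, float> maps instead of Matrix<float>. The field names mirror the question, R1/P1 come from the StereoRectify call above, and I have not verified this against this exact Emgu CV version:
// Maps as images rather than matrices, per the answer above.
Image<Gray, float> mapxLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
Image<Gray, float> mapyLeft = new Image<Gray, float>(_imageWidth, _imageHeight);
CvInvoke.cvInitUndistortRectifyMap(
    _inPramsLeft.IntrinsicMatrix.Ptr,
    _inPramsLeft.DistortionCoeffs.Ptr,
    R1.Ptr,
    P1.Ptr,
    mapxLeft.Ptr,
    mapyLeft.Ptr);
Image<Gray, byte> undistorted = camLeft.lastFrameGray.Clone();
CvInvoke.cvRemap(
    camLeft.lastFrameGray.Ptr,
    undistorted.Ptr,
    mapxLeft.Ptr,
    mapyLeft.Ptr,
    (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS,
    new MCvScalar(0));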
