Color detection in C#

I want to make a color detection bot: when I click the button on my form, the mouse cursor should move to the Facebook login button and click it. First of all, I created a bitmap in Paint and saved it in the project resources as bmpLogin; it is a 24-bit bitmap of the Facebook login button.
private void btn_login_Click(object sender, EventArgs e)
{
    this.BackgroundImage = Screenshot(); // method which takes a screenshot
    Point location;
    bool success = FindBitmap(Properties.Resources.bmpLogin, Screenshot(), out location);
    if (success == false)
    {
        MessageBox.Show("Couldn't find the login button");
        return;
    }
    Cursor.Position = location; // bitmap location within the screenshot
    MouseCLick(); // mouse click event works successfully
}
/// <summary>
/// Find the location of a bitmap within another bitmap and return whether it was successfully found
/// </summary>
/// <param name="bmpNeedle"> The image we want to find </param>
/// <param name="bmpHaystack"> Where we want to search for the image </param>
/// <param name="location"> Where we found the image </param>
/// <returns> Whether the bmpNeedle was found successfully </returns>
private Boolean FindBitmap(Bitmap bmpNeedle, Bitmap bmpHaystack, out Point location)
{
    for (int outerX = 0; outerX < bmpHaystack.Width; outerX++)
    {
        for (int outerY = 0; outerY < bmpHaystack.Height; outerY++)
        {
            for (int innerX = 0; innerX < bmpNeedle.Width; innerX++)
            {
                for (int innerY = 0; innerY < bmpNeedle.Height; innerY++)
                {
                    Color cNeedle = bmpNeedle.GetPixel(innerX, innerY);
                    Color cHaystack = bmpHaystack.GetPixel(innerX + outerX, innerY + outerY);
                    if (cNeedle.R != cHaystack.R || cNeedle.G != cHaystack.G || cNeedle.B != cHaystack.B)
                    {
                        goto Notfound;
                    }
                }
            }
            location = new Point(outerX, outerY);
            return true;
        Notfound:
            continue;
        }
    }
    location = Point.Empty;
    return false;
}
This code works perfectly, but when I change the if statement as shown below, the bot doesn't work. Where is my logical error?
private Boolean FindBitmap(Bitmap bmpNeedle, Bitmap bmpHaystack, out Point location)
{
    for (int outerX = 0; outerX < bmpHaystack.Width; outerX++)
    {
        for (int outerY = 0; outerY < bmpHaystack.Height; outerY++)
        {
            for (int innerX = 0; innerX < bmpNeedle.Width; innerX++)
            {
                for (int innerY = 0; innerY < bmpNeedle.Height; innerY++)
                {
                    Color cNeedle = bmpNeedle.GetPixel(innerX, innerY);
                    Color cHaystack = bmpHaystack.GetPixel(innerX + outerX, innerY + outerY);
                    if (cNeedle.R == cHaystack.R && cNeedle.G == cHaystack.G && cNeedle.B == cHaystack.B)
                    {
                        location = new Point(outerX, outerY);
                        return true;
                    }
                }
            }
        }
    }
    location = Point.Empty;
    return false;
}

In your first code sample, the algorithm only returns a location after every pixel of the needle has matched at that position. In your second code sample, the algorithm returns the location of the first single pixel that matches.
It's not enough just to match one pixel. For example, say your full image is:
1 2 3 4
1 a a b b
2 a a c a
3 a b a a
And you're searching for:
1 2
1 a b
2 a c
Your second block of code looks first at 1,1 and says, brilliant, they are both a, job done, and merrily returns the wrong answer. However, your first block of code looks at 1,1, which matches, so it then goes to 1,2, sees that the needle is b whereas the haystack is a, and the goto takes it to Notfound, and it searches the next position of the haystack.
As already pointed out, it's a good demonstration of what's wrong with goto. See here GOTO still considered harmful? for a full discussion on the topic.
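As an aside, the same slow GetPixel search can be written without the goto by moving the inner comparison into its own helper. A minimal sketch (the names MatchesAt and FindBitmapNoGoto are mine, and the outer loops also stop early so the needle can never run past the haystack edge):
private static bool MatchesAt(Bitmap bmpNeedle, Bitmap bmpHaystack, int offsetX, int offsetY)
{
    for (int x = 0; x < bmpNeedle.Width; x++)
    {
        for (int y = 0; y < bmpNeedle.Height; y++)
        {
            Color cNeedle = bmpNeedle.GetPixel(x, y);
            Color cHaystack = bmpHaystack.GetPixel(offsetX + x, offsetY + y);
            // first mismatch rules out this candidate position
            if (cNeedle.R != cHaystack.R || cNeedle.G != cHaystack.G || cNeedle.B != cHaystack.B)
                return false;
        }
    }
    return true; // every needle pixel matched
}
private static bool FindBitmapNoGoto(Bitmap bmpNeedle, Bitmap bmpHaystack, out Point location)
{
    // <= so that candidate positions never push the needle outside the haystack
    for (int outerX = 0; outerX <= bmpHaystack.Width - bmpNeedle.Width; outerX++)
    {
        for (int outerY = 0; outerY <= bmpHaystack.Height - bmpNeedle.Height; outerY++)
        {
            if (MatchesAt(bmpNeedle, bmpHaystack, outerX, outerY))
            {
                location = new Point(outerX, outerY);
                return true;
            }
        }
    }
    location = Point.Empty;
    return false;
}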
There are two other problems with the original approach. The first was already mentioned: if your search image sits in the bottom right, you're going to go out of bounds when you compare it to the larger image. Using the simple example above, if your search start point is at 4,3, then as you go to compare the second column of the smaller image you're looking at 5,3, which doesn't exist. The second problem is that GetPixel is very slow, so for a large search image this code is going to take a while, and it can't be multi-threaded. A faster but more complicated implementation is to use LockBits, which is much quicker to access. A sample implementation is below.
static void Main(string[] args)
{
    Bitmap haystack = (Bitmap)Image.FromFile(@"c:\temp\haystack.bmp");
    Bitmap needle = (Bitmap)Image.FromFile(@"c:\temp\needle.bmp");
    Point location;
    if (FindBitmap(haystack, needle, out location) == false)
    {
        Console.WriteLine("Didn't find it");
    }
    else
    {
        using (Graphics g = Graphics.FromImage(haystack))
        {
            Brush b = new SolidBrush(Color.Red);
            Pen pen = new Pen(b, 2);
            g.DrawRectangle(pen, new Rectangle(location, new Size(needle.Width, needle.Height)));
        }
        haystack.Save(@"c:\temp\found.bmp");
        Console.WriteLine("Found it @ {0}, {1}", location.X, location.Y);
    }
    Console.ReadKey();
}
public class LockedBitmap : IDisposable
{
    private BitmapData _data = null;
    private Bitmap _sourceBitmap = null;
    private byte[] _pixelData = null;
    public readonly int BytesPerPixel, WidthInBytes, Height, Width, ScanWidth;

    public LockedBitmap(Bitmap bitmap)
    {
        _sourceBitmap = bitmap;
        _data = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadOnly, bitmap.PixelFormat);
        BytesPerPixel = Bitmap.GetPixelFormatSize(bitmap.PixelFormat) / 8;
        ScanWidth = _data.Stride;
        int byteCount = _data.Stride * bitmap.Height;
        _pixelData = new byte[byteCount];
        Marshal.Copy(_data.Scan0, _pixelData, 0, byteCount);
        WidthInBytes = bitmap.Width * BytesPerPixel;
        Height = bitmap.Height;
        Width = bitmap.Width;
    }

    [MethodImpl(MethodImplOptions.AggressiveInlining)]
    public void GetPixel(int x, int y, out int r, out int g, out int b)
    {
        int realY = y * ScanWidth;
        int realX = x * BytesPerPixel;
        b = (int)_pixelData[realY + realX];
        g = (int)_pixelData[realY + realX + 1];
        r = (int)_pixelData[realY + realX + 2];
    }

    public void Dispose()
    {
        Dispose(true);
        GC.SuppressFinalize(this);
    }

    protected virtual void Dispose(bool disposing)
    {
        if (!disposing) return;
        if (_sourceBitmap != null && _data != null)
            _sourceBitmap.UnlockBits(_data);
    }
}
private static bool FindBitmap(Bitmap haystack, Bitmap needle, out Point location)
{
    using (LockedBitmap haystackToSearch = new LockedBitmap(haystack))
    using (LockedBitmap needleToSearch = new LockedBitmap(needle))
    {
        for (int outerY = 0; outerY <= (haystackToSearch.Height - needleToSearch.Height); outerY++)
        {
            for (int outerX = 0; outerX <= (haystackToSearch.Width - needleToSearch.Width); outerX++)
            {
                bool isMatched = true;
                for (int innerY = 0; innerY < needleToSearch.Height; innerY++)
                {
                    for (int innerX = 0; innerX < needleToSearch.Width; innerX++)
                    {
                        int needleR, needleG, needleB;
                        int haystackR, haystackG, haystackB;
                        haystackToSearch.GetPixel(outerX + innerX, outerY + innerY, out haystackR, out haystackG, out haystackB);
                        needleToSearch.GetPixel(innerX, innerY, out needleR, out needleG, out needleB);
                        isMatched = isMatched && needleR == haystackR && haystackG == needleG && haystackB == needleB;
                        if (!isMatched)
                            break;
                    }
                    if (!isMatched)
                        break;
                }
                if (isMatched)
                {
                    location = new Point(outerX, outerY);
                    return true;
                }
            }
        }
    }
    location = new Point();
    return false;
}


Image not (correctly?) resized on BatchUpdateRequest; doesn't fit initial space

I am updating a Google Document using BatchRequest, and in particular I want to update some images.
My issue is: if I don't resize (crop the white space around) my images, they are displayed correctly in my doc, but when I resize them, Google Docs does not seem able to adapt them automatically to the initial size/area.
Here is my code:
Image resizing:
public static Bitmap Crop(Bitmap bmp)
{
int w = bmp.Width;
int h = bmp.Height;
Func<int, bool> allWhiteRow = row =>
{
for (int i = 0; i < w; ++i)
if (bmp.GetPixel(i, row).R != 255)
return false;
return true;
};
Func<int, bool> allWhiteColumn = col =>
{
for (int i = 0; i < h; ++i)
if (bmp.GetPixel(col, i).R != 255)
return false;
return true;
};
int topmost = 0;
for (int row = 0; row < h; ++row)
{
if (allWhiteRow(row))
topmost = row;
else break;
}
int bottommost = 0;
for (int row = h - 1; row >= 0; --row)
{
if (allWhiteRow(row))
bottommost = row;
else break;
}
int leftmost = 0, rightmost = 0;
for (int col = 0; col < w; ++col)
{
if (allWhiteColumn(col))
leftmost = col;
else
break;
}
for (int col = w - 1; col >= 0; --col)
{
if (allWhiteColumn(col))
rightmost = col;
else
break;
}
if (rightmost == 0) rightmost = w; // As reached left
if (bottommost == 0) bottommost = h; // As reached top.
int croppedWidth = rightmost - leftmost;
int croppedHeight = bottommost - topmost;
if (croppedWidth == 0) // No border on left or right
{
leftmost = 0;
croppedWidth = w;
}
if (croppedHeight == 0) // No border on top or bottom
{
topmost = 0;
croppedHeight = h;
}
try
{
var target = new Bitmap(croppedWidth, croppedHeight);
using (Graphics g = Graphics.FromImage(target))
{
g.DrawImage(bmp,
new RectangleF(0, 0, croppedWidth, croppedHeight),
new RectangleF(leftmost, topmost, croppedWidth, croppedHeight),
GraphicsUnit.Pixel);
}
return target;
}
catch (Exception ex)
{
throw new Exception(
string.Format("Values are topmost={0} btm={1} left={2} right={3} croppedWidth={4} croppedHeight={5}", topmost, bottommost, leftmost, rightmost, croppedWidth, croppedHeight),
ex);
}
}
(I also tried to resize it using the Clone method, but I can't see any difference.)
crop call: (original is a PNG)
Bitmap originalBmp = (Bitmap)Bitmap.FromStream(str);
Bitmap cropedBmp = ImageUtilities.Crop(originalBmp);
and my code to update images in doc:
BatchUpdateDocumentRequest batchUpdateRequest = new BatchUpdateDocumentRequest {
Requests = new List<Google.Apis.Docs.v1.Data.Request>()
};
var request = new Google.Apis.Docs.v1.Data.Request {
ReplaceImage = new ReplaceImageRequest() {
ImageObjectId = imageObjectId,
Uri = imgUrl,
ImageReplaceMethod = "CENTER_CROP"
}
};
batchUpdateRequest.Requests.Add(request);
DocumentsResource.BatchUpdateRequest updateRequest =
_docSercive.Documents.BatchUpdate(batchUpdateRequest, doc.DocumentId);
BatchUpdateDocumentResponse updateResponse = updateRequest.Execute();
All this results in an image which is correctly resized, but not correctly inserted in the doc.
For instance:
Here is the generated image:
[![Resized generated image][1]][1]
and here is how it is displayed in the doc:
[![how it is displayed in the doc after replacement][2]][2]
Any idea?
Thanks!
[1]: https://i.stack.imgur.com/VZF7M.png
[2]: https://i.stack.imgur.com/iQ9lX.png

Cluster overlapping circles?

I am trying to cluster (group) every circle that is connected to the others through an unbroken chain of overlaps. How could I do that, preferably in a reasonably efficient way?
(I have messed around trying to write some recursive functions but haven't gotten anything to work.)
I have created a VS project to visualize the problem.
Download here:
Generates Random circles.
How the clustering currently works:
(it only looks at which circles overlap that specific circle, not at everything that is connected)
How it should look if it's working:
(separate clusters for all connecting circles)
CODE: (C#)
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
// Cluster overlapping circles
// Patrik Fröhler
// www.patan77.com
// 2017-08-14
namespace circleGroup
{
struct circle // the circle "object"
{
public float[] pos;
public int radius;
public Color color;
public int id;
public float x
{
get { return pos[0]; }
set { pos[0] = value; }
}
public float y
{
get { return pos[1]; }
set { pos[1] = value; }
}
}
public partial class Form1 : Form
{
DB _DB = new DB(); // "Global Database"
public Form1()
{
InitializeComponent();
}
private static circle createCircle(float _x = 0, float _y = 0, int _radius = 1, Color? _color = null, int _id = -1) // creates a circle
{
circle tmpCircle = new circle() { pos = new float[2], x = _x, y = _y, radius = _radius, id = _id };
tmpCircle.color = _color ?? Color.Black;
return (tmpCircle);
}
private circle[] genRngCircles(int _n) // generates an array of random circles
{
Random rng = new Random();
circle tmpC;
circle[] tmpCarr = new circle[_n];
for (int i = 0; i < _n; i++)
{
tmpC = createCircle();
tmpC.radius = rng.Next(10, 75);
tmpC.x = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.y = rng.Next(tmpC.radius, (512 - tmpC.radius));
tmpC.color = Color.FromArgb(127, rng.Next(0, 255), rng.Next(0, 255), rng.Next(0, 255));
tmpC.id = i;
tmpCarr[i] = tmpC;
}
return tmpCarr;
}
private void drawCircle(circle _circle, Graphics _g) // draws one circle
{
SolidBrush sb = new SolidBrush(_circle.color);
_g.FillEllipse(sb, (_circle.x - _circle.radius), (_circle.y - _circle.radius), (_circle.radius * 2), (_circle.radius * 2));
sb.Dispose();
}
private void drawString(float[] _pos, string _text, Graphics _g) // draws text
{
StringFormat sf = new StringFormat();
sf.LineAlignment = StringAlignment.Center;
sf.Alignment = StringAlignment.Center;
Font font = new Font("Arial", 12);
SolidBrush sb = new SolidBrush(Color.Black);
float x = _pos[0];
float y = _pos[1];
_g.DrawString(_text, font, sb, x, y, sf);
font.Dispose();
sb.Dispose();
}
private void drawCircleArr(circle[] _circleArr, Graphics _g)// draws an array of circles
{
_g.Clear(panel1.BackColor);
for (int i = 0; i < _circleArr.Length; i++)
{
drawCircle(_circleArr[i], _g);
drawString(_circleArr[i].pos, _circleArr[i].id.ToString(), _g);
}
}
static double mDistance<T>(T[] _p0, T[] _p1) // gets euclidean distance between two points of arbitrary numbers of dimensions
{
double[] p0 = new double[] { Convert.ToDouble(_p0[0]), Convert.ToDouble(_p0[1]) };
double[] p1 = new double[] { Convert.ToDouble(_p1[0]), Convert.ToDouble(_p1[1]) };
double tmp = 0;
double tmpTotal = 0;
for (int i = 0; i < _p0.Length; i++)
{
tmp = (p0[i] - p1[i]);
tmpTotal += (tmp * tmp);
}
double output = Math.Sqrt(tmpTotal);
return (output);
}
private bool overlap(circle _c0, circle _c1) // checks if two circles overlap
{
double dis = mDistance(_c0.pos, _c1.pos);
if (dis <= (_c0.radius + _c1.radius))
{
return (true);
}
return (false);
}
private Color avgColor(List<circle> _colorArr) // averages multiple colors together
{
float ia = 0;
float ir = 0;
float ig = 0;
float ib = 0;
for (int i = 0; i < _colorArr.Count; i++)
{
ia += _colorArr[i].color.A;
ir += _colorArr[i].color.R;
ig += _colorArr[i].color.G;
ib += _colorArr[i].color.B;
}
byte a = Convert.ToByte(Math.Round(ia / _colorArr.Count));
byte r = Convert.ToByte(Math.Round(ir / _colorArr.Count));
byte g = Convert.ToByte(Math.Round(ig / _colorArr.Count));
byte b = Convert.ToByte(Math.Round(ib / _colorArr.Count));
return (Color.FromArgb(a, r, g, b));
}
private void treeView(List<circle>[] _circleLArr) // Create Treeview
{
treeView1.Nodes.Clear();
for (int i = 0; i < _circleLArr.Length; i++)
{
treeView1.Nodes.Add(i.ToString());
for (int j = 0; j < _circleLArr[i].Count; j++)
{
treeView1.Nodes[i].Nodes.Add(_circleLArr[i][j].id.ToString());
}
}
treeView1.ExpandAll();
}
private void drawCircleClusters(List<circle>[] _circleLArr, Graphics _g) // draws the circle clusters
{
_g.Clear(panel1.BackColor);
circle tmpC;
Color tmpColor;
for (int i = 0; i < _circleLArr.Length; i++)
{
tmpColor = avgColor(_circleLArr[i]);
for (int j = 0; j < _circleLArr[i].Count; j++)
{
tmpC = _circleLArr[i][j];
tmpC.color = tmpColor;
drawCircle(tmpC, _g);
drawString(_circleLArr[i][j].pos, _circleLArr[i][j].id.ToString(), _g);
}
}
}
//----------------------------------------------------
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
/*
private circle[] recurOverlap(circle[] _circleArr) // recursive overlap test(not done/working)
{
List<circle> overlapArr = new List<circle>();
List<circle> dontOverlapArr = new List<circle>();
bool loop = true;
int n = 0;
while (loop)
{
if (overlap(_circleArr[0], _circleArr[n]))
{
overlapArr.Add(_circleArr[n]);
dontOverlapArr.Insert(0, _circleArr[n]);
circle[] dontArr = dontOverlapArr.ToArray();
recurOverlap(dontArr);
}
else
{
dontOverlapArr.Add(_circleArr[n]);
}
n++;
if (n >= _circleArr.Length)
{
loop = false;
}
}
if(_circleArr.Length <= 1)
{
return _circleArr;
}
else{
return overlapArr.ToArray();
}
}
private List<circle>[] clusterBrecur(circle[] _circleArr)
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
recurOverlap(_circleArr);
}
return (tmpLArr);
}*/
private void run() // Run function
{
treeView1.Nodes.Clear(); // clear tree view
_DB.g = panel1.CreateGraphics();// Create Panel Graphics to draw on
_DB.circleArr = genRngCircles(10); // Creates an array with random circles
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
clusterAbtn.Enabled = true; // enables the cluster button
}
private void clusterA() // clusterA function
{
_DB.circleClusters = simpleOverlap(_DB.circleArr); // runs cluster algorithm test A
treeView(_DB.circleClusters); // Creates the treeview
drawCircleClusters(_DB.circleClusters, _DB.g); // draws the circle clusters
}
private void clusterB()
{
}
private void clusterA_rClick()
{
drawCircleArr(_DB.circleArr, _DB.g); // Draws the random circles
}
private void runBtn_Click(object sender, EventArgs e) // run button click
{
run();
}
private void clusterAbtn_MouseUp(object sender, MouseEventArgs e)
{
switch (e.Button)
{
case MouseButtons.Left:
clusterA();
break;
case MouseButtons.Right:
clusterA_rClick();
break;
}
}
private void clusterBbtn_Click(object sender, EventArgs e) // clusterB button click
{
clusterB();
}
}
class DB // "Database"
{
public Graphics g;
public circle[] circleArr;
public List<circle>[] circleClusters;
}
}
The current "overlap function"
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
for (int i = 0; i < (_circleArr.Length); i++)
{
tmpLArr[i] = new List<circle>();
for (int j = 0; j < (_circleArr.Length); j++)
{
if (overlap(_circleArr[i], _circleArr[j]))
{
tmpLArr[i].Add(_circleArr[j]);
}
}
}
return (tmpLArr);
}
I made the following change to your code. It looks like it's working:
private List<circle>[] simpleOverlap(circle[] _circleArr) // test what circles overlaps
{
List<List<circle>> list = new List<List<circle>>();
//List<circle>[] tmpLArr = new List<circle>[_circleArr.Length];
//for (int i = 0; i < (_circleArr.Length); i++)
foreach (circle circle in _circleArr)
{
List<circle> cluster = null;
//tmpLArr[i] = new List<circle>();
//for (int j = 0; j < (_circleArr.Length); j++)
//{
// if (overlap(_circleArr[i], _circleArr[j]))
// {
// tmpLArr[i].Add(_circleArr[j]);
// }
//}
foreach(List<circle> cluster2 in list)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle, circle2))
{
cluster = cluster2;
goto label_001;
}
}
}
label_001:
if (cluster == null)
{
cluster = new List<circle>();
list.Add(cluster);
}
cluster.Add(circle);
}
bool flag = true;
for (int i = 0; i < list.Count; i += (flag ? 1 : 0))
{
flag = true;
List<circle> cluster = list[i];
for (int j = i + 1; j < list.Count; j++)
{
List<circle> cluster2 = list[j];
if (Intersects(cluster, cluster2))
{
cluster.AddRange(cluster2);
list.Remove(cluster2);
j--;
flag = false;
}
}
}
return list.ToArray();
//return (tmpLArr);
}
bool Intersects(List<circle> cluster1, List<circle> cluster2)
{
foreach (circle circle1 in cluster1)
{
foreach (circle circle2 in cluster2)
{
if (overlap(circle1, circle2))
{
return true;
}
}
}
return false;
}
I had to add one more method, bool Intersects(List<circle> cluster1, List<circle> cluster2). See if it helps.
I believe the function you are looking for is intersection. I have attached an article by Mike K which I believe will give you an idea of how to approach this in your own code.
C# circles intersections
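In case it helps, the intersection test itself is just a distance check. A minimal sketch using the circle struct from the question (CirclesIntersect is my own name for it; comparing squared distances avoids the square root):
// Two circles overlap (or just touch) when the distance between their centres
// is no more than the sum of their radii.
private static bool CirclesIntersect(circle c0, circle c1)
{
    float dx = c0.x - c1.x;
    float dy = c0.y - c1.y;
    float radiusSum = c0.radius + c1.radius;
    return (dx * dx + dy * dy) <= (radiusSum * radiusSum);
}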

Taking Screenshot & Comparing Images

I'm using the functions below to take a screenshot of the user's screen and compare that screenshot to an image, bmpTest, which is in the resources. If it finds something matching bmpTest, it returns its location.
My problem is that this algorithm is a little slow and takes about 10-15 seconds to scan the image and give a result. Does anyone know another method that does the same thing but faster, maybe with some percentage of similarity? I just couldn't find one on the internet.
private Bitmap Screenshot()
{
    // this is where we will store a snapshot of the screen
    Bitmap bmpScreenshot = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height);
    // creates a graphics object so we can draw the screen in the bitmap (bmpScreenshot)
    Graphics g = Graphics.FromImage(bmpScreenshot);
    // copy from screen into the bitmap we created
    g.CopyFromScreen(0, 0, 0, 0, Screen.PrimaryScreen.Bounds.Size);
    //g.CopyFromScreen(205, 179, 660, 241, new Size(455, 62));
    // return the screenshot
    return bmpScreenshot;
}
private bool FindBitmap(Bitmap bmpNeedle, Bitmap bmpHaystack, out Point location)
{
    for (int outerX = 0; outerX < bmpHaystack.Width - bmpNeedle.Width; outerX++)
    {
        for (int outerY = 0; outerY < bmpHaystack.Height - bmpNeedle.Height; outerY++)
        {
            for (int innerX = 0; innerX < bmpNeedle.Width; innerX++)
            {
                for (int innerY = 0; innerY < bmpNeedle.Height; innerY++)
                {
                    Color cNeedle = bmpNeedle.GetPixel(innerX, innerY);
                    Color cHaystack = bmpHaystack.GetPixel(innerX + outerX, innerY + outerY);
                    if (cNeedle.R != cHaystack.R || cNeedle.G != cHaystack.G || cNeedle.B != cHaystack.B)
                    {
                        goto notFound;
                    }
                }
            }
            location = new Point(outerX, outerY);
            return true;
        notFound:
            continue;
        }
    }
    location = Point.Empty;
    return false;
}
private void button8_Click_1(object sender, EventArgs e)
{
    // takes a snapshot of the screen
    Bitmap bmpScreenshot = Screenshot();
    // find the login button and check if it exists
    Point location;
    bool success = FindBitmap(Properties.Resources.bmpTest, bmpScreenshot, out location);
    // check if it found the bitmap
    if (success == false)
    {
        MessageBox.Show("Not Found");
        return;
    }
    else
    {
        MessageBox.Show("Image Found");
    }
}
This occurs because every GetPixel call on a bitmap object performs a lock, which is what's causing your performance issue.
You can get better performance by handling the lock yourself: lock the entire bitmap once (in your case, both bitmaps) and then iterate through its pixels.
Check out the code example by Vano Maisuradze
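A minimal sketch of that idea, assuming both bitmaps are the same size and are locked as 32bpp ARGB, with a hypothetical tolerance parameter for the "% of similarity" part. It only compares two equally sized bitmaps, so the outer search loop over the haystack would still be needed (requires System.Drawing.Imaging and System.Runtime.InteropServices):
// Locks both bitmaps once, copies the raw pixel bytes out, and compares them.
// 'tolerance' is how far apart two channel values may be and still count as equal.
private static bool PixelsRoughlyEqual(Bitmap a, Bitmap b, int tolerance)
{
    Rectangle rect = new Rectangle(0, 0, a.Width, a.Height);
    BitmapData dataA = a.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData dataB = b.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        int byteCount = dataA.Stride * a.Height;
        byte[] bytesA = new byte[byteCount];
        byte[] bytesB = new byte[byteCount];
        Marshal.Copy(dataA.Scan0, bytesA, 0, byteCount);
        Marshal.Copy(dataB.Scan0, bytesB, 0, byteCount);
        for (int i = 0; i < byteCount; i++)
        {
            if (Math.Abs(bytesA[i] - bytesB[i]) > tolerance)
                return false; // a channel differs by more than the allowed tolerance
        }
        return true;
    }
    finally
    {
        a.UnlockBits(dataA);
        b.UnlockBits(dataB);
    }
}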

OpenNI Face Detection in .NET Windows Form Application

I am developing a Windows Forms application in C#. I have to add face recognition functionality, and for that purpose I am using the OpenNI library. The hardware for video capture is an Xtion PRO LIVE.
I have successfully installed it and I was able to run the sample code. This code continuously records video and nothing else. I have modified it so that after pressing a Capture button it saves the current picture to the hard drive (that part is fine).
Now what I want to accomplish is to detect a face by facial landmarks so that I can verify a person's image against the saved images in a database. How can I do this with OpenNI?
Here is my code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using OpenNI;
using System.Threading;
using System.Drawing.Imaging;
namespace CameraApp
{
public partial class MainWindow : Form
{
public MainWindow()
{
InitializeComponent();
this.context = Context.CreateFromXmlFile(SAMPLE_XML_FILE, out scriptNode);
this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
if (this.depth == null)
{
throw new Exception("Viewer must have a depth node!");
}
this.histogram = new int[this.depth.DeviceMaxDepth];
MapOutputMode mapMode = this.depth.MapOutputMode;
this.bitmap = new Bitmap((int)mapMode.XRes, (int)mapMode.YRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
this.shouldRun = true;
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
protected override void OnPaint(PaintEventArgs e)
{
base.OnPaint(e);
lock (this)
{
e.Graphics.DrawImage(this.bitmap,
this.panelView.Location.X,
this.panelView.Location.Y,
this.panelView.Size.Width,
this.panelView.Size.Height);
}
}
protected override void OnPaintBackground(PaintEventArgs pevent)
{
//Don't allow the background to paint
}
protected override void OnClosing(CancelEventArgs e)
{
this.shouldRun = false;
this.readerThread.Join();
base.OnClosing(e);
}
protected override void OnKeyPress(KeyPressEventArgs e)
{
if (e.KeyChar == 27)
{
Close();
}
base.OnKeyPress(e);
}
private unsafe void CalcHist(DepthMetaData depthMD)
{
// reset
for (int i = 0; i < this.histogram.Length; ++i)
this.histogram[i] = 0;
ushort* pDepth = (ushort*)depthMD.DepthMapPtr.ToPointer();
int points = 0;
for (int y = 0; y < depthMD.YRes; ++y)
{
for (int x = 0; x < depthMD.XRes; ++x, ++pDepth)
{
ushort depthVal = *pDepth;
if (depthVal != 0)
{
this.histogram[depthVal]++;
points++;
}
}
}
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] += this.histogram[i - 1];
}
if (points > 0)
{
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] = (int)(256 * (1.0f - (this.histogram[i] / (float)points)));
}
}
}
private unsafe void ReaderThread()
{
DepthMetaData depthMD = new DepthMetaData();
while (this.shouldRun)
{
try
{
this.context.WaitOneUpdateAll(this.depth);
}
catch (Exception)
{
}
this.depth.GetMetaData(depthMD);
CalcHist(depthMD);
lock (this)
{
Rectangle rect = new Rectangle(0, 0, this.bitmap.Width, this.bitmap.Height);
BitmapData data = this.bitmap.LockBits(rect, ImageLockMode.WriteOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
//ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
//// set pixels
//for (int y = 0; y < depthMD.YRes; ++y)
//{
// byte* pDest = (byte*)data.Scan0.ToPointer() + y * data.Stride;
// for (int x = 0; x < depthMD.XRes; ++x, ++pDepth, pDest += 3)
// {
// byte pixel = (byte)this.histogram[*pDepth];
// pDest[0] = 0;
// pDest[1] = pixel;
// pDest[2] = pixel;
// }
//}
// This will point to the depth image.
ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
// This will point to the RGB image.
RGB24Pixel* pRGB =
(RGB24Pixel*)this.depth.DepthMapPtr.ToPointer();
// Go over the depth and RGB image and set the bitmaps
// we're copying to based on our depth & RGB values.
for (int y = 0; y < depthMD.YRes; ++y)
{
// Assuming that the size of each data frame is
// 640x480.
// Scan line by line (480 lines), each line
// consists of 640 pointers.
byte* pDest_Depth =
(byte*)data.Scan0.ToPointer() + y *
data.Stride;
byte* pDest_Rgb = (byte*)data.Scan0.ToPointer()
+ y * data.Stride;
for (int x = 0; x < depthMD.XRes; ++x,
++pDepth, pDest_Depth += 3,
++pRGB, pDest_Rgb += 3)
{
// Change the color of the bitmap
// based on depth value.
byte pixel = (byte)this.histogram[*pDepth];
pDest_Depth[0] = 0;
pDest_Depth[1] = pixel;
pDest_Depth[2] = pixel;
// Get the RGB values to generate
// a whole RGB image.
byte red = pRGB->Red;
byte green = pRGB->Green;
byte blue = pRGB->Blue;
// Get depth information.
ushort depthVal = *pDepth;
}
}
this.bitmap.UnlockBits(data);
}
this.Invalidate();
}
}
private readonly string SAMPLE_XML_FILE = @"C:/Program Files/Microsoft Visual Studio 10.0/Microsoft Visual Studio 2010 Ultimate - ENU/OpenNI/Data/SamplesConfig.xml";
private Context context;
private ScriptNode scriptNode;
private DepthGenerator depth;
private Thread readerThread;
private bool shouldRun;
private Bitmap bitmap;
private int[] histogram;
private void button1_Click(object sender, EventArgs e)
{
this.readerThread.Abort();
this.bitmap.Save("D:\\Screenshot.jpeg", System.Drawing.Imaging.ImageFormat.Jpeg);
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
}
}
Any kind of help will be appreciated: any tutorial, link, anything!

DrawImage fails to position sliced images correctly

For a couple of days now I've tried to figure out why my nine-slice code does not work as expected. As far as I can see, there seems to be an issue with the Graphics.DrawImage method, which handles my nine-slice images incorrectly. So my problem is how to compensate for the incorrect scaling that is performed when running my code on the Compact Framework. I might add that this code works perfectly when running in the full framework environment. The problem only occurs when scaling the image to a larger image, not the other way around. Here is the snippet:
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
/// <summary>
/// Initializes a new instance of the NineSliceBitmapSnippet class.
/// </summary>
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image too small for slicing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
So, my question is: does anybody know how I could compensate for the incorrect scaling?
/Dan
After some more trial and error I've finally found a solution to my problem. The scaling problems have always been in the top-center, right-center, bottom-center and left-center slices, since they're always stretched in only one direction according to the logic of nine-slice scaling. If I apply a temporary square stretch to those slices before applying the correct stretch, the final bitmap will be correct. Once again, the problem is only visible in the .NET Compact Framework on a Windows CE device (Smart Device). Here's a snippet with code adjusting for the bug in CF. My only concern now is that the slices that get square-stretched will take much more memory due to the correction code. On the other hand, this step only lasts a short time, so I might get away with it. ;)
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap Scale(Size size)
{
if (m_OriginalBitmap != null)
{
return ScaleSingleBitmap(size);
}
return null;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
bool isWidthAffectedByVerticalStretch = (y == 1 && (x == 0 || x == 2) && destination.Height > source.Height);
bool isHeightAffectedByHorizontalStretch = (x == 1 && (y == 0 || y == 2) && destination.Width > source.Width);
if (isHeightAffectedByHorizontalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Horizontal);
}
else if (isWidthAffectedByVerticalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Vertical);
}
else
{
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
}
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
private void BypassDrawImageError(Graphics graphics, Rectangle destination, Rectangle source, Orientation orientationAdjustment)
{
Size adjustedSize = Size.Empty;
switch (orientationAdjustment)
{
case Orientation.Horizontal:
adjustedSize = new Size(destination.Width, destination.Width);
break;
case Orientation.Vertical:
adjustedSize = new Size(destination.Height, destination.Height);
break;
default:
break;
}
using (Bitmap quadScaledBitmap = new Bitmap(adjustedSize.Width, adjustedSize.Height))
{
using (Graphics tempGraphics = Graphics.FromImage(quadScaledBitmap))
{
tempGraphics.Clear(Color.Fuchsia);
tempGraphics.DrawImage(m_OriginalBitmap, new Rectangle(0, 0, adjustedSize.Width, adjustedSize.Height), source, GraphicsUnit.Pixel);
}
graphics.DrawImage(quadScaledBitmap, destination, new Rectangle(0, 0, quadScaledBitmap.Width, quadScaledBitmap.Height), GraphicsUnit.Pixel);
}
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image too small for slicing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
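For completeness, a minimal usage sketch of the class above; the file paths and target size are just placeholders:
// Load a source bitmap, nine-slice scale it to a new size and save the result.
using (Bitmap original = new Bitmap(@"C:\temp\button.png"))
{
    NineSliceBitmapSnippet snippet = new NineSliceBitmapSnippet(original);
    using (Bitmap stretched = snippet.Scale(new Size(200, 60)))
    {
        stretched.Save(@"C:\temp\button_stretched.png", System.Drawing.Imaging.ImageFormat.Png);
    }
}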
