The sharpened image becomes dark.
Why do you think this is the case?
Where should I look for problems?
Relevant Source Code
Sharpening Code
public static Bitmap FftSharpen(Bitmap image, double[,] mask)
{
if (image.PixelFormat == PixelFormat.Format8bppIndexed)
{
Bitmap imageClone = (Bitmap)image.Clone();
double[,] maskClone = (double[,])mask.Clone();
Complex[,] cPaddedImage = ImageDataConverter.ToComplex(imageClone);
Complex[,] cPaddedMask = ImageDataConverter.ToComplex(maskClone);
Complex[,] cConvolved = Convolution.Convolve(cPaddedImage, cPaddedMask);
return ImageDataConverter.ToBitmap(cConvolved);
}
else
{
throw new Exception("not a grayscale image");
}
}
WinForms code
public SharpeningFilterForm()
{
InitializeComponent();
//Obtain image and kernel
Bitmap inputImage = Grayscale.ToGrayscale(Bitmap.FromFile(lenaPath) as Bitmap);
double[,] numericalKernel = new double[,] {
{ -1, -1, -1, },
{ -1, 9, -1, },
{ -1, -1, -1, },
};
//Padding operation
Bitmap inputImageCopy = (Bitmap)inputImage.Clone();
int maxWidth = (int)Math.Max(inputImageCopy.Width, numericalKernel.GetLength(0));
int maxHeight = (int)Math.Max(inputImageCopy.Height, numericalKernel.GetLength(1));
Bitmap paddedInputImage = ImagePadder.Pad(inputImageCopy, maxWidth, maxHeight);
double[,] paddedNumericalKernel = ImagePadder.Pad(numericalKernel, maxWidth, maxHeight);
//Sharpening
Bitmap sharpened = SharpenFilter.FftSharpen(paddedInputImage, paddedNumericalKernel);
//Displaying
inputImagePictureBox.Image = inputImage;
maskPictureBox.Image = ImageDataConverter.ToBitmap(numericalKernel);
paddedImagePictureBox.Image = paddedInputImage;
paddedMaskPictureBox.Image = ImageDataConverter.ToBitmap(paddedNumericalKernel);
filteredPictureBox.Image = sharpened as Image;
}
Convolution.cs
public static partial class Convolution
{
public static Complex[,] Convolve(Complex[,] image1, Complex[,] mask1)
{
Complex[,] image = (Complex[,])image1.Clone();
Complex[,] mask = (Complex[,])mask1.Clone();
Complex[,] convolve = null;
int imageWidth = image.GetLength(0);
int imageHeight = image.GetLength(1);
int maskWidth = mask.GetLength(0);
int maskHeight = mask.GetLength(1);
if (imageWidth == maskWidth && imageHeight == maskHeight)
{
Complex[,] ftForImage = FourierTransform.ForwardFFT(image);
Complex[,] ftForMask = FourierTransform.ForwardFFT(mask);
Complex[,] fftImage = ftForImage;
Complex[,] fftKernel = ftForMask;
Complex[,] fftConvolved = new Complex[imageWidth, imageHeight];
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
fftConvolved[i, j] = fftImage[i, j] * fftKernel[i, j];
}
}
Complex[,] ftForConv = FourierTransform.InverseFFT(fftConvolved);
convolve = ftForConv;
Rescale(convolve);
convolve = FourierShifter.ShiftFft(convolve);
}
else
{
throw new Exception("padding needed");
}
return convolve;
}
private static void Rescale(Complex[,] convolve)
{
int imageWidth = convolve.GetLength(0);
int imageHeight = convolve.GetLength(1);
double maxAmp = 0.0;
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
maxAmp = Math.Max(maxAmp, convolve[i, j].Magnitude);
}
}
double scale = 255.0 / maxAmp;
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
convolve[i, j] = new Complex(convolve[i, j].Real * scale, convolve[i, j].Imaginary * scale);
maxAmp = Math.Max(maxAmp, convolve[i, j].Magnitude);
}
}
}
}
Related
I store an image with the ".raw" extension in a two-dimensional byte array and convert it to a bitmap. I want to show this in a picture box, but when I run the code below, I get an error that the parameter is wrong.
Width and height are obtained from the information provided by the header file.
I wonder what I'm doing wrong.
string filename = @"test.raw";
byte[] rawBytes = File.ReadAllBytes(filename);
int bytePixel = 2;
int width = samples*bytePixel;
int height = lines;
byte[,] rawData = new byte[height, width];
int counter = new int();
for(int i = 0; i < height; i++)
{
for(int j = 0; j < width; j++, counter++)
{
rawData[i, j] = rawBytes[counter];
}
}
Bitmap bitmapImage = new Bitmap(width, height, PixelFormat.Format16bppGrayScale);
BitmapData bitmapImageData = bitmapImage.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.WriteOnly, PixelFormat.Format16bppGrayScale);
unsafe
{
byte* pointer = (byte*)bitmapImageData.Scan0.ToPointer();
for(int y = 0; y < height; y++)
{
for(int x = 0; x < width; x++, pointer++)
{
*pointer = rawData[y, x];
}
}
}
bitmapImage.UnlockBits(bitmapImageData);
pictureBox1.Image = bitmapImage;
Please give me some advice.
I can't figure out what's wrong, but if you just want to see the byte array result on screen, this function will write a BMP file from an IntPtr. Hope it helps.
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
var fs = new FileStream("test.bmp", FileMode.Open);
Bitmap bitmap = new Bitmap(800, 600, PixelFormat.Format16bppGrayScale);
BitmapData bitmapdata = bitmap.LockBits(new Rectangle(0, 0, 800, 600), ImageLockMode.WriteOnly, PixelFormat.Format16bppGrayScale);
unsafe
{
byte* p = (byte*)bitmapdata.Scan0.ToPointer();
for (int i = 0; i < 600; i++)
{
for (int j = 0; j < 800; j++)
{
*p = (byte)(i * j); p++;
}
}
}
FileSaveBMP($"{DateTime.Now.ToString("yyyyMMddHHmmssfff")}.bmp", bitmapdata.Scan0, new CRect() { Width = 800, Height = 600 }, 800);
bitmap.UnlockBits(bitmapdata);
//pictureBox1.Image = bitmap;
}
private unsafe void FileSaveBMP(string sFile, IntPtr ptr, CRect rect, int w, int p_nByte = 1)
{
FileStream fs = new FileStream(sFile, FileMode.Create, FileAccess.Write);
BinaryWriter bw = new BinaryWriter(fs);
bw.Write(Convert.ToUInt16(0x4d42));
if (p_nByte == 1)
{
if ((Int64)rect.Width * (Int64)rect.Height > Int32.MaxValue) bw.Write(Convert.ToUInt32(54 + 1024 + p_nByte * 1000 * 1000));
else bw.Write(Convert.ToUInt32(54 + 1024 + p_nByte * (Int64)rect.Width * (Int64)rect.Height));
}
else if (p_nByte == 3)
{
if ((Int64)rect.Width * (Int64)rect.Height > Int32.MaxValue) bw.Write(Convert.ToUInt32(54 + p_nByte * 1000 * 1000));//uint bfSize = br.ReadUInt32();
else bw.Write(Convert.ToUInt32(54 + p_nByte * (Int64)rect.Width * (Int64)rect.Height));//uint bfSize = br.ReadUInt32();
}
//image size: bw.Write(); bmfh.bfSize = sizeof(14byte) + nSizeHdr + rect.right * rect.bottom;
bw.Write(Convert.ToUInt16(0)); //reserved // br.ReadUInt16();
bw.Write(Convert.ToUInt16(0)); //reserved //br.ReadUInt16();
if (p_nByte == 1)
bw.Write(Convert.ToUInt32(1078));
else if (p_nByte == 3)
bw.Write(Convert.ToUInt32(54));//uint bfOffBits = br.ReadUInt32();
bw.Write(Convert.ToUInt32(40));// uint biSize = br.ReadUInt32();
bw.Write(Convert.ToInt32(rect.Width));// nWidth = br.ReadInt32();
bw.Write(Convert.ToInt32(rect.Height));// nHeight = br.ReadInt32();
bw.Write(Convert.ToUInt16(1));// a = br.ReadUInt16();
bw.Write(Convert.ToUInt16(8 * p_nByte)); //byte // nByte = br.ReadUInt16() / 8;
bw.Write(Convert.ToUInt32(0)); //compress //b = br.ReadUInt32();
if ((Int64)rect.Width * (Int64)rect.Height > Int32.MaxValue) bw.Write(Convert.ToUInt32(1000 * 1000));// b = br.ReadUInt32();
else bw.Write(Convert.ToUInt32((Int64)rect.Width * (Int64)rect.Height));// b = br.ReadUInt32();
bw.Write(Convert.ToInt32(0));//a = br.ReadInt32();
bw.Write(Convert.ToInt32(0));// a = br.ReadInt32();
bw.Write(Convert.ToUInt32(256)); //color //b = br.ReadUInt32();
bw.Write(Convert.ToUInt32(256)); //import // b = br.ReadUInt32();
if (p_nByte == 1)
{
for (int i = 0; i < 256; i++)
{
bw.Write(Convert.ToByte(i));
bw.Write(Convert.ToByte(i));
bw.Write(Convert.ToByte(i));
bw.Write(Convert.ToByte(255));
}
}
if (rect.Width % 4 != 0)
{
rect.Right += 4 - rect.Width % 4;
}
byte[] aBuf = new byte[p_nByte * rect.Width];
for (int i = rect.Height - 1; i >= 0; i--)
{
Marshal.Copy((IntPtr)((long)ptr + rect.Left + ((long)i + (long)rect.Top) * w * p_nByte), aBuf, 0, rect.Width * p_nByte);
bw.Write(aBuf);
}
bw.Close();
fs.Close();
}
}
public class CRect
{
public int Left
{
get; set;
}
public int Right
{
get; set;
}
public int Top
{
get; set;
}
public int Bottom
{
get; set;
}
public int Width
{
get; set;
}
public int Height
{
get; set;
}
}
The above code creates an image file like this.
How do I call the method FlipTextureVertically in MakePhoto?
The picture I currently take is upside down in Unity. I came across this texture-flipping code, but I do not know how to apply it.
I would really appreciate it if someone could help me out here!
public static Texture2D FlipTextureVertically(Texture2D original)
{
Texture2D flipped = new Texture2D(original.width, original.height, TextureFormat.ARGB32, false);
int xN = original.width;
int yN = original.height;
for (int i = 0; i < xN; i++)
{
for (int j = 0; j < yN; j++)
{
flipped.SetPixel(i, yN - j - 1, original.GetPixel(i, j));
}
}
flipped.Apply();
return flipped;
}
public string MakePhoto(bool openIt)
{
int resWidth = Screen.width;
int resHeight = Screen.height;
Texture2D screenShot = new Texture2D(resWidth, resHeight, TextureFormat.RGB24, false); //Create new texture
RenderTexture rt = new RenderTexture(resWidth, resHeight, 24);
// hide the info-text, if any
if (infoText)
{
infoText.text = string.Empty;
}
// render background and foreground cameras
if (backroundCamera && backroundCamera.enabled)
{
backroundCamera.targetTexture = rt;
backroundCamera.Render();
backroundCamera.targetTexture = null;
}
if (backroundCamera2 && backroundCamera2.enabled)
{
backroundCamera2.targetTexture = rt;
backroundCamera2.Render();
backroundCamera2.targetTexture = null;
}
if (foreroundCamera && foreroundCamera.enabled)
{
foreroundCamera.targetTexture = rt;
foreroundCamera.Render();
foreroundCamera.targetTexture = null;
}
// get the screenshot
RenderTexture prevActiveTex = RenderTexture.active;
RenderTexture.active = rt;
screenShot.ReadPixels(new Rect(0, 0, resWidth, resHeight), 0, 0);
// clean-up
RenderTexture.active = prevActiveTex;
Destroy(rt);
byte[] btScreenShot = screenShot.EncodeToJPG();
Destroy(screenShot);
// save the screenshot as jpeg file
string sDirName = Application.persistentDataPath + "/Screenshots";
if (!Directory.Exists(sDirName))
Directory.CreateDirectory (sDirName);
string sFileName = sDirName + "/" + string.Format ("{0:F0}", Time.realtimeSinceStartup * 10f) + ".jpg";
File.WriteAllBytes(sFileName, btScreenShot);
Debug.Log("Photo saved to: " + sFileName);
if (infoText)
{
infoText.text = "Saved to: " + sFileName;
}
// open file
if(openIt)
{
System.Diagnostics.Process.Start(sFileName);
}
return sFileName;
}
I don't really see why the screenshot should be upside down, but I guess you should call it e.g. after
screenShot.ReadPixels(new Rect(0, 0, resWidth, resHeight), 0, 0);
screenShot = FlipTextureVertically(screenShot);
but there might be more efficient ways of doing that.
E.g. not creating a new Texture2D but instead altering only the pixels in the one you already have, like
public static void FlipTextureVertically(Texture2D original)
{
var originalPixels = original.GetPixels();
var newPixels = new Color[originalPixels.Length];
var width = original.width;
var rows = original.height;
for (var x = 0; x < width; x++)
{
for (var y = 0; y < rows; y++)
{
newPixels[x + y * width] = originalPixels[x + (rows - y -1) * width];
}
}
original.SetPixels(newPixels);
original.Apply();
}
public static void FlipTextureHorizontally(Texture2D original)
{
var originalPixels = original.GetPixels();
var newPixels = new Color[originalPixels.Length];
var width = original.width;
var rows = original.height;
for (var x = 0; x < width; x++)
{
for (var y = 0; y < rows; y++)
{
newPixels[x + y * width] = originalPixels[(width - x - 1) + y * width];
}
}
original.SetPixels(newPixels);
original.Apply();
}
and use it like
screenShot.ReadPixels(new Rect(0, 0, resWidth, resHeight), 0, 0);
FlipTextureVertically(screenShot);
The reason your image is flipped is that you are switching the vertical pixels in your code.
public static Texture2D FlipTextureVertically(Texture2D original)
{
Texture2D flipped = new Texture2D(original.width, original.height, TextureFormat.ARGB32, false);
int xN = original.width;
int yN = original.height;
for (int i = 0; i < xN; i++)
{
for (int j = 0; j < yN; j++)
{
flipped.SetPixel(i, yN - j - 1, original.GetPixel(i, j));
}
}
flipped.Apply();
return flipped;
}
should be
public static Texture2D FlipTextureVertically(Texture2D original)
{
Texture2D flipped = new Texture2D(original.width, original.height, TextureFormat.ARGB32, false);
int xN = original.width;
int yN = original.height;
for (int i = 0; i < xN; i++)
{
for (int j = 0; j < yN; j++)
{
flipped.SetPixel(xN - i - 1, j, original.GetPixel(i, j));
}
}
flipped.Apply();
return flipped;
}
I have just written this method to crop transparent pixels from images.
It seems to work OK, but it is very slow because of GetPixel. Any ideas on how to make the algorithm logic quicker?
I know I can replace GetPixel with faster (but unsafe) access code, and I might do so; however, I am after ways to avoid doing a full scan. I want advice on how to make the logic behind this algorithm quicker.
public Bitmap CropTransparentPixels(Bitmap originalBitmap)
{
// Find the min/max transparent pixels
Point min = new Point(int.MaxValue, int.MaxValue);
Point max = new Point(int.MinValue, int.MinValue);
for (int x = 0; x < originalBitmap.Width; ++x)
{
for (int y = 0; y < originalBitmap.Height; ++y)
{
Color pixelColor = originalBitmap.GetPixel(x, y);
if (pixelColor.A == 255)
{
if (x < min.X) min.X = x;
if (y < min.Y) min.Y = y;
if (x > max.X) max.X = x;
if (y > max.Y) max.Y = y;
}
}
}
// Create a new bitmap from the crop rectangle
Rectangle cropRectangle = new Rectangle(min.X, min.Y, max.X - min.X, max.Y - min.Y);
Bitmap newBitmap = new Bitmap(cropRectangle.Width, cropRectangle.Height);
using (Graphics g = Graphics.FromImage(newBitmap))
{
g.DrawImage(originalBitmap, 0, 0, cropRectangle, GraphicsUnit.Pixel);
}
return newBitmap;
}
This is the method I ended up writing and it is much faster.
public static Bitmap CropTransparentPixels(this Bitmap bmp)
{
BitmapData bmData = null;
try
{
bmData = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
int scanline = bmData.Stride;
IntPtr Scan0 = bmData.Scan0;
Point top = new Point(), left = new Point(), right = new Point(), bottom = new Point();
bool complete = false;
unsafe
{
byte* p = (byte*)(void*)Scan0;
for (int y = 0; y < bmp.Height; y++)
{
for (int x = 0; x < bmp.Width; x++)
{
if (p[3] != 0)
{
top = new Point(x, y);
complete = true;
break;
}
p += 4;
}
if (complete)
break;
}
p = (byte*)(void*)Scan0;
complete = false;
for (int y = bmp.Height - 1; y >= 0; y--)
{
for (int x = 0; x < bmp.Width; x++)
{
if (p[x * 4 + y * scanline + 3] != 0)
{
bottom = new Point(x + 1, y + 1);
complete = true;
break;
}
}
if (complete)
break;
}
p = (byte*)(void*)Scan0;
complete = false;
for (int x = 0; x < bmp.Width; x++)
{
for (int y = 0; y < bmp.Height; y++)
{
if (p[x * 4 + y * scanline + 3] != 0)
{
left = new Point(x, y);
complete = true;
break;
}
}
if (complete)
break;
}
p = (byte*)(void*)Scan0;
complete = false;
for (int x = bmp.Width - 1; x >= 0; x--)
{
for (int y = 0; y < bmp.Height; y++)
{
if (p[x * 4 + y * scanline + 3] != 0)
{
right = new Point(x + 1, y + 1);
complete = true;
break;
}
}
if (complete)
break;
}
}
bmp.UnlockBits(bmData);
System.Drawing.Rectangle rectangle = new Rectangle(left.X, top.Y, right.X - left.X, bottom.Y - top.Y);
Bitmap b = new Bitmap(rectangle.Width, rectangle.Height);
Graphics g = Graphics.FromImage(b);
g.DrawImage(bmp, 0, 0, rectangle, GraphicsUnit.Pixel);
g.Dispose();
return b;
}
catch
{
try
{
bmp.UnlockBits(bmData);
}
catch { }
return null;
}
}
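A small usage sketch, just to show how the extension method would be called (the file names are hypothetical, and the method has to live in a static class for the extension syntax to work):
// Hypothetical usage of the extension method above.
Bitmap source = new Bitmap("input.png");
Bitmap cropped = source.CropTransparentPixels();
if (cropped != null)
{
    cropped.Save("cropped.png", System.Drawing.Imaging.ImageFormat.Png);
}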
I am using the following kernel:
double[,] kernel = new double[,] { { -1, -1, -1, },
{ -1, 9, -1, },
{ -1, -1, -1, }, };
The following code seems to be blurring the input image rather than sharpening it.
What could be the issue here?
Here is the entire VS2013 solution.
The original image,
The resulting blurred image,
I have written the following code to sharpen an image:
public static Bitmap FftSharpen(Bitmap image, double[,] mask)
{
if (image.PixelFormat == PixelFormat.Format8bppIndexed)
{
Bitmap imageClone = (Bitmap)image.Clone();
double[,] maskClone = (double[,])mask.Clone();
Complex[,] cPaddedImage = ImageDataConverter.ToComplex(imageClone);
Complex[,] cPaddedMask = ImageDataConverter.ToComplex(maskClone);
Complex[,] cConvolved = Convolution.Convolve(cPaddedImage, cPaddedMask);
return ImageDataConverter.ToBitmap(cConvolved);
}
else
{
throw new Exception("not a grayscale image");
}
}
P.S.
The following is my convolution code:
public static class Convolution
{
public static Complex[,] Convolve(Complex[,] image, Complex[,] mask)
{
Complex[,] convolve = null;
int imageWidth = image.GetLength(0);
int imageHeight = image.GetLength(1);
int maskWidth = mask.GetLength(0);
int maskHeight = mask.GetLength(1);
if (imageWidth == maskWidth && imageHeight == maskHeight)
{
FourierTransform ftForImage = new FourierTransform(image); ftForImage.ForwardFFT();
FourierTransform ftForMask = new FourierTransform(mask); ftForMask.ForwardFFT();
Complex[,] fftImage = ftForImage.FourierImageComplex;
Complex[,] fftKernel = ftForMask.FourierImageComplex;
Complex[,] fftConvolved = new Complex[imageWidth, imageHeight];
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
fftConvolved[i, j] = fftImage[i, j] * fftKernel[i, j];
}
}
FourierTransform ftForConv = new FourierTransform();
ftForConv.InverseFFT(fftConvolved);
convolve = ftForConv.GrayscaleImageComplex;
Rescale(convolve);
convolve = FourierShifter.FFTShift(convolve);
}
else
{
throw new Exception("padding needed");
}
return convolve;
}
//Rescale values between 0 and 255.
private static void Rescale(Complex[,] convolve)
{
int imageWidth = convolve.GetLength(0);
int imageHeight = convolve.GetLength(1);
double maxAmp = 0.0;
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
maxAmp = Math.Max(maxAmp, convolve[i, j].Magnitude);
}
}
double scale = 255.0 / maxAmp;
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
convolve[i, j] = new Complex(convolve[i, j].Real * scale, convolve[i, j].Imaginary * scale);
maxAmp = Math.Max(maxAmp, convolve[i, j].Magnitude);
}
}
}
}
Similarly to your other question, the kernel is obtained from an unsigned Bitmap, which results in the effective kernel
255 255 255
255 9 255
255 255 255
instead of the expected
-1 -1 -1
-1 9 -1
-1 -1 -1
A solution would again be to convert the bitmap to signed values. Alternatively, since the code provided in this question also allows the numerical kernel to be provided directly to FftSharpen, you could pad _numericalKernel with:
public class MatrixPadder
{
public static double[,] Pad(double[,] image, int newWidth, int newHeight)
{
int width = image.GetLength(0);
int height = image.GetLength(1);
/*
It is always guaranteed that,
width < newWidth
and
height < newHeight
*/
if ((width < newWidth && height < newHeight)
|| (width < newWidth && height == newHeight)
|| (width == newWidth && height < newHeight))
{
double[,] paddedImage = new double[newWidth, newHeight];
int startPointX = (int)Math.Ceiling((double)(newWidth - width) / (double)2) - 1;
int startPointY = (int)Math.Ceiling((double)(newHeight - height) / (double)2) - 1;
for (int y = startPointY; y < (startPointY + height); y++)
{
for (int x = startPointX; x < (startPointX + width); x++)
{
int xxx = x - startPointX;
int yyy = y - startPointY;
paddedImage[x, y] = image[xxx, yyy];
}
}
return paddedImage;
}
else if (width == newWidth && height == newHeight)
{
return image;
}
else
{
throw new Exception("Pad() -- threw an exception");
}
}
}
which you could call from filterButton_Click using:
if (_convolutionType == ConvolutionType.FFT)
{
double[,] paddedmask = MatrixPadder.Pad(_numericalKernel,
_paddedImage.Width,
_paddedImage.Height);
sharpened = SharpenFilter.FftSharpen(_paddedImage, paddedmask);
}
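If you go with the first option instead (converting the mask bitmap back to signed values), a minimal sketch could look like the following; the helper name ToSignedKernel and the assumption that -1 ended up stored as the byte 255 are mine, not part of the original code:
// Hypothetical helper: reinterpret an unsigned grayscale kernel bitmap as signed values,
// assuming bytes above 127 were originally negative (e.g. 255 was -1).
public static double[,] ToSignedKernel(Bitmap maskBitmap)
{
    double[,] kernel = new double[maskBitmap.Width, maskBitmap.Height];
    for (int y = 0; y < maskBitmap.Height; y++)
    {
        for (int x = 0; x < maskBitmap.Width; x++)
        {
            int value = maskBitmap.GetPixel(x, y).R; // grayscale, so R == G == B
            kernel[x, y] = value > 127 ? value - 256 : value; // reinterpret as signed
        }
    }
    return kernel;
}
The resulting double[,] could then be padded with MatrixPadder.Pad and passed to FftSharpen just like _numericalKernel above.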
Also, adjusting the Rescale function as shown in my other answer should then give you the desired sharpened image.
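That other answer is not reproduced here, so purely as an illustration of the kind of adjustment meant (my assumption, not necessarily what that answer does), one could normalize the real part of the result between its minimum and maximum instead of dividing by the largest magnitude:
// Illustrative alternative to Rescale: map the real part of the
// convolution result linearly onto 0..255 (assumes max > min).
private static void RescaleReal(Complex[,] convolve)
{
    int width = convolve.GetLength(0);
    int height = convolve.GetLength(1);
    double min = double.MaxValue, max = double.MinValue;
    for (int j = 0; j < height; j++)
    {
        for (int i = 0; i < width; i++)
        {
            min = Math.Min(min, convolve[i, j].Real);
            max = Math.Max(max, convolve[i, j].Real);
        }
    }
    double scale = 255.0 / (max - min);
    for (int j = 0; j < height; j++)
    {
        for (int i = 0; i < width; i++)
        {
            convolve[i, j] = new Complex((convolve[i, j].Real - min) * scale, 0);
        }
    }
}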
What?
I have an application that scans an image of my screen for a color code.
Problem!
This process takes too long because the entire screen is searched.
My Goal
I would like to limit the search to a region around the current mouse position.
But how do I do that?
Code
Here is my code:
Capturing the screen
private Bitmap CaptureScreen()
{
//Point a = new Point();
//a = Control.MousePosition;
Bitmap b = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height);
using (Graphics g = Graphics.FromImage(b))
{
g.CopyFromScreen(new Point(0, 0), new Point(0, 0), b.Size);
}
return b;
}
Search for Color Code
public Point GetPixelPosition(Color SearchColor, bool IgnoreAlphaChannel)
{
//Point a = new Point();
//a = Control.MousePosition;
_ColorFound = false;
Point PixelPt = new Point(0, 0);
using (Bitmap b = CaptureScreen())
{
for (int i = 0; i < b.Width; i++)
{
if (this._ColorFound)
break;
for (int j = 0; j < b.Height; j++)
{
if (this._ColorFound)
break;
Color tmpPixelColor = b.GetPixel(i, j);
if (((tmpPixelColor.A == SearchColor.A) || IgnoreAlphaChannel)
&& (tmpPixelColor.R == SearchColor.R)
&& (tmpPixelColor.G == SearchColor.G)
&& (tmpPixelColor.B == SearchColor.B)
)
{
PixelPt.X = i;
PixelPt.Y = j;
this._ColorFound = true;
}
}
}
}
return PixelPt;
}
I don't think your way of scanning is very efficient... but in this answer I'm aiming at doing exactly what you want, using your code (I haven't optimized anything):
public Point GetPixelPosition(Color SearchColor, bool IgnoreAlphaChannel, int pixelsToSearchAround)
{
Point mousePosition = Cursor.Position;
_ColorFound = false;
Point PixelPt = new Point(0, 0);
using (Bitmap b = CaptureScreen())
{
int minX = mousePosition.X - pixelsToSearchAround;
int maxX = mousePosition.X + pixelsToSearchAround;
int minY = mousePosition.Y - pixelsToSearchAround;
int maxY = mousePosition.Y + pixelsToSearchAround;
if(minX < 0) minX = 0;
if(minY < 0) minY = 0;
if(maxX > b.Width) maxX = b.Width;
if(maxY > b.Height) maxY = b.Height;
for (int i = minX; i < maxX; i++)
{
if (this._ColorFound)
break;
for (int j = minY; j < maxY; j++)
{
if (this._ColorFound)
break;
Color tmpPixelColor = b.GetPixel(i, j);
if (((tmpPixelColor.A == SearchColor.A) || IgnoreAlphaChannel)
&& (tmpPixelColor.R == SearchColor.R)
&& (tmpPixelColor.G == SearchColor.G)
&& (tmpPixelColor.B == SearchColor.B)
)
{
PixelPt.X = i;
PixelPt.Y = j;
this._ColorFound = true;
}
}
}
}
return PixelPt;
}
This should do what you are looking for, in a very unoptimized manner; it's not how I'd search for a pixel color on screen.
You'd use the third parameter to determine how many pixels around the cursor to search.
For further optimization, you could capture only the screen region you are actually interested in, but I'll leave that up to you (hint: instead of doing it in GetPixelPosition, you could do it in CaptureScreen, by modifying the arguments to g.CopyFromScreen instead of the loop bounds), as sketched below.
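A minimal sketch of that hint, assuming a new helper (CaptureScreenAround, not part of the original code) that replaces CaptureScreen and clamps the region to the primary screen bounds:
// Hypothetical variant of CaptureScreen: grab only a square region
// around a given point instead of the whole screen.
private Bitmap CaptureScreenAround(Point center, int pixelsAround)
{
    Rectangle bounds = Screen.PrimaryScreen.Bounds;
    int left = Math.Max(bounds.Left, center.X - pixelsAround);
    int top = Math.Max(bounds.Top, center.Y - pixelsAround);
    int right = Math.Min(bounds.Right, center.X + pixelsAround);
    int bottom = Math.Min(bounds.Bottom, center.Y + pixelsAround);
    Bitmap b = new Bitmap(right - left, bottom - top);
    using (Graphics g = Graphics.FromImage(b))
    {
        // The returned bitmap's (0, 0) corresponds to the screen point (left, top).
        g.CopyFromScreen(new Point(left, top), new Point(0, 0), b.Size);
    }
    return b;
}
Any pixel position found in that smaller bitmap then needs (left, top) added back to convert it to screen coordinates.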
Instead of limiting the region, you can improve the performance of the color checking method.
Don't use Bitmap.GetPixel! Use Bitmap.LockBits/UnlockBits instead.
public static unsafe Point GetPoint (Bitmap bmp, Color c) {
BitmapData bmd = bmp.LockBits (new Rectangle(0,0,bmp.Width,bmp.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
try {
int s = bmd.Stride;
int search = (c.A<<0x18)|(c.R<<0x10)|(c.G<<0x08)|c.B;
int* clr = (int*)(void*)bmd.Scan0;
int tmp;
int* row = clr;
for (int i = 0; i < bmp.Height; i++) {
int* col = row;
for (int j = 0; j < bmp.Width; j++) {
tmp = *col;
if(tmp == search) {
return new Point(j,i);
}
col++;
}
row += s>>0x02;
}
return new Point(-1,-1);
} finally {
bmp.UnlockBits (bmd);
}
}
This method returns (-1,-1) if the color cannot be found. You can adapt it to ignore the alpha-channel as well:
public static unsafe Point GetPoint (Bitmap bmp, Color c, bool ignoreAlpha = false) {
BitmapData bmd = bmp.LockBits (new Rectangle(0,0,bmp.Width,bmp.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
try {
int s = bmd.Stride;
int search = (c.A<<0x18)|(c.R<<0x10)|(c.G<<0x08)|c.B;
if(ignoreAlpha) {
search &= 0xffffff;
}
int* clr = (int*)(void*)bmd.Scan0;
int tmp;
int* row = clr;
for (int i = 0; i < bmp.Height; i++) {
int* col = row;
for (int j = 0; j < bmp.Width; j++) {
tmp = *col;
if(ignoreAlpha) {
tmp &= 0xffffff;
}
if(tmp == search) {
return new Point(j,i);
}
col++;
}
row += s>>0x02;
}
return new Point(-1,-1);
} finally {
bmp.UnlockBits (bmd);
}
}
The reason GetPixel is slower is that you don't process the pixels in batch: every call pays the full overhead of accessing the bitmap data just to read the single pixel you are querying. With LockBits/UnlockBits you lock the pixel data once and can then iterate over all pixels directly.
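For completeness, a small usage sketch combining the capture code from the question with GetPoint (assuming both methods are reachable from the calling code and the project is compiled with unsafe code enabled):
// Capture the screen once, then search it with the LockBits-based method.
using (Bitmap screen = CaptureScreen())
{
    Point hit = GetPoint(screen, Color.FromArgb(255, 0, 255, 0), ignoreAlpha: true);
    if (hit.X >= 0)
    {
        Console.WriteLine("Color found at " + hit);
    }
}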