I'm trying to scan 2 images (32bppArgb format), identify when there is a difference and store the difference block's bounds in a list of rectangles.
Suppose these are the images (a first and a second screenshot of the same screen).
I want to get the different rectangle bounds (the opened directory window in our case).
This is what I've done:
private unsafe List<Rectangle> CodeImage(Bitmap bmp, Bitmap bmp2)
{
List<Rectangle> rec = new List<Rectangle>();
bmData = bmp.LockBits(new System.Drawing.Rectangle(0, 0, 1920, 1080), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
bmData2 = bmp2.LockBits(new System.Drawing.Rectangle(0, 0, 1920, 1080), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
IntPtr scan0 = bmData.Scan0;
IntPtr scan02 = bmData2.Scan0;
int stride = bmData.Stride;
int stride2 = bmData2.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
int minX = int.MaxValue;
int minY = int.MaxValue;
int maxX = 0;
bool found = false;
for (int y = 0; y < nHeight; y++)
{
byte* p = (byte*)scan0.ToPointer();
p += y * stride;
byte* p2 = (byte*)scan02.ToPointer();
p2 += y * stride2;
for (int x = 0; x < nWidth; x++)
{
if (p[0] != p2[0] || p[1] != p2[1] || p[2] != p2[2] || p[3] != p2[3]) //found differences-began to store positions.
{
found = true;
if (x < minX)
minX = x;
if (x > maxX)
maxX = x;
if (y < minY)
minY = y;
}
else
{
if (found)
{
int height = getBlockHeight(stride, scan0, maxX, minY, scan02, stride2);
found = false;
Rectangle temp = new Rectangle(minX, minY, maxX - minX, height);
rec.Add(temp);
//x += minX;
y += height;
minX = int.MaxValue;
minY = int.MaxValue;
maxX = 0;
}
}
p += 4;
p2 += 4;
}
}
return rec;
}
public unsafe int getBlockHeight(int stride, IntPtr scan, int x, int y1, IntPtr scan02, int stride2) //a function to get an existing block height.
{
int height = 0;
for (int y = y1; y < 1080; y++) //only for example- in our case its 1080 height.
{
byte* p = (byte*)scan.ToPointer();
p += (y * stride) + (x * 4); //set the pointer to a specific potential point.
byte* p2 = (byte*)scan02.ToPointer();
p2 += (y * stride2) + (x * 4); //set the pointer to a specific potential point.
if (p[0] != p2[0] || p[1] != p2[1] || p[2] != p2[2] || p[3] != p2[3]) //still change on the height in the increasing **y** of the block.
height++;
}
return height;
}
This is actually how I call the method:
Bitmap a = Image.FromFile(@"C:\Users\itapi\Desktop\1.png") as Bitmap; //generates a 32bppArgb bitmap;
Bitmap b = Image.FromFile(@"C:\Users\itapi\Desktop\2.png") as Bitmap;
List<Rectangle> l1 = CodeImage(a, b);
int i = 0;
foreach (Rectangle rec in l1)
{
i++;
Bitmap tmp = b.Clone(rec, a.PixelFormat);
tmp.Save(i.ToString() + ".png");
}
But I'm not getting the exact rectangle; I'm getting only half of it, and sometimes even worse. I think something in the code's logic is wrong.
Code for @nico
private unsafe List<Rectangle> CodeImage(Bitmap bmp, Bitmap bmp2)
{
List<Rectangle> rec = new List<Rectangle>();
var bmData1 = bmp.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
var bmData2 = bmp2.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
int bytesPerPixel = 3;
IntPtr scan01 = bmData1.Scan0;
IntPtr scan02 = bmData2.Scan0;
int stride1 = bmData1.Stride;
int stride2 = bmData2.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
bool[] visited = new bool[nWidth * nHeight];
byte* base1 = (byte*)scan01.ToPointer();
byte* base2 = (byte*)scan02.ToPointer();
for (int y = 0; y < nHeight; y += 5)
{
byte* p1 = base1;
byte* p2 = base2;
for (int x = 0; x < nWidth; x += 5)
{
if (!ArePixelsEqual(p1, p2, bytesPerPixel) && !(visited[x + nWidth * y]))
{
// fill the different area
int minX = x;
int maxX = x;
int minY = y;
int maxY = y;
var pt = new Point(x, y);
Stack<Point> toBeProcessed = new Stack<Point> ();
visited[x + nWidth * y] = true;
toBeProcessed.Push(pt);
while (toBeProcessed.Count > 0)
{
var process = toBeProcessed.Pop();
var ptr1 = (byte*)scan01.ToPointer() + process.Y * stride1 + process.X * bytesPerPixel;
var ptr2 = (byte*) scan02.ToPointer() + process.Y * stride2 + process.X * bytesPerPixel;
//Check pixel equality
if (ArePixelsEqual(ptr1, ptr2, bytesPerPixel))
continue;
//This pixel is different
//Update the rectangle
if (process.X < minX) minX = process.X;
if (process.X > maxX) maxX = process.X;
if (process.Y < minY) minY = process.Y;
if (process.Y > maxY) maxY = process.Y;
Point n;
int idx;
//Put neighbors in stack
if (process.X - 1 >= 0)
{
n = new Point(process.X - 1, process.Y);
idx = n.X + nWidth * n.Y;
if (!visited[idx])
{
visited[idx] = true;
toBeProcessed.Push(n);
}
}
if (process.X + 1 < nWidth)
{
n = new Point(process.X + 1, process.Y);
idx = n.X + nWidth * n.Y;
if (!visited[idx])
{
visited[idx] = true;
toBeProcessed.Push(n);
}
}
if (process.Y - 1 >= 0)
{
n = new Point(process.X, process.Y - 1);
idx = n.X + nWidth * n.Y;
if (!visited[idx])
{
visited[idx] = true;
toBeProcessed.Push(n);
}
}
if (process.Y + 1 < nHeight)
{
n = new Point(process.X, process.Y + 1);
idx = n.X + nWidth * n.Y;
if (!visited[idx])
{
visited[idx] = true;
toBeProcessed.Push(n);
}
}
}
if (((maxX - minX + 1) > 5) & ((maxY - minY + 1) > 5))
rec.Add(new Rectangle(minX, minY, maxX - minX + 1, maxY - minY + 1));
}
p1 += 5 * bytesPerPixel;
p2 += 5 * bytesPerPixel;
}
base1 += 5 * stride1;
base2 += 5 * stride2;
}
bmp.UnlockBits(bmData1);
bmp2.UnlockBits(bmData2);
return rec;
}
I see a couple of problems with your code. If I understand it correctly, you
find a pixel that's different between the two images.
then you continue to scan from there to the right, until you find a position where both images are identical again.
then you scan from the last "different" pixel to the bottom, until you find a position where both images are identical again.
then you store that rectangle and start at the next line below it
Am I right so far?
Two obvious things can go wrong here:
If two rectangles have overlapping y-ranges, you're in trouble: You'll find the first rectangle fine, then skip to the bottom Y-coordinate, ignoring all the pixels left or right of the rectangle you just found.
Even if there is only one rectangle, you assume that every pixel on the rectangle's border is different, and all the other pixels are identical. If that assumption isn't valid, you'll stop searching too early, and only find parts of rectangles.
If your images come from a scanner or digital camera, or if they contain lossy compression (jpeg) artifacts, the second assumption will almost certainly be wrong. To illustrate this, here's what I get when I mark every identical pixel in the two jpg images you linked black, and every different pixel white:
What you see is not a rectangle. Instead, a lot of pixels around the rectangles you're looking for are different:
That's because of jpeg compression artifacts. But even if you used lossless source images, pixels at the borders might not form perfect rectangles, because of antialiasing or because the background just happens to have a similar color in that region.
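A per-channel tolerance in the comparison can absorb some of that noise (it won't fix the geometric issues). Here is a minimal sketch; the threshold value is arbitrary, chosen in the spirit of the ThresholdedDifference(15) filter used further below:
unsafe bool ArePixelsSimilar(byte* p1, byte* p2, int bytesPerPixel, int tolerance)
{
    // Treat two pixels as "equal" if no channel differs by more than the tolerance.
    for (int i = 0; i < bytesPerPixel; ++i)
        if (Math.Abs(p1[i] - p2[i]) > tolerance)
            return false;
    return true;
}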
You could try to improve your algorithm, but if you look at that border, you will find all kinds of ugly counterexamples to any geometric assumptions you'll make.
It would probably be better to implement this "the right way". Meaning:
Either implement a flood fill algorithm that erases different pixels (e.g. by setting them to identical or by storing a flag in a separate mask), then recursively checks the 4 neighboring pixels.
Or implement a connected component labeling algorithm that marks each different pixel with a temporary integer label, using clever data structures to keep track of which temporary labels are connected. If you're only interested in a bounding box, you don't even have to merge the temporary labels, just merge the bounding boxes of adjacent labeled areas (see the sketch after this list).
Connected component labeling is in general a bit faster, but is a bit trickier to get right than flood fill.
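To make the second option more concrete, here is a minimal sketch of bounding-box-only connected component labeling, assuming the per-pixel differences have already been collected into a bool[x, y] mask (the mask and the function name are illustrative, not part of your code):
static List<Rectangle> LabelBoundingBoxes(bool[,] diff)
{
    int w = diff.GetLength(0), h = diff.GetLength(1);
    var labels = new int[w, h];                      // 0 = background
    var parent = new List<int> { 0 };                // union-find parents; index 0 unused
    var boxes = new List<Rectangle> { Rectangle.Empty };

    int Find(int i) { while (parent[i] != i) i = parent[i] = parent[parent[i]]; return i; }
    void Union(int a, int b)
    {
        a = Find(a); b = Find(b);
        if (a == b) return;
        parent[b] = a;
        boxes[a] = Rectangle.Union(boxes[a], boxes[b]); // only the bounding boxes are merged
    }

    for (int y = 0; y < h; y++)
        for (int x = 0; x < w; x++)
        {
            if (!diff[x, y]) continue;
            int left = x > 0 ? labels[x - 1, y] : 0;
            int up = y > 0 ? labels[x, y - 1] : 0;
            int label;
            if (left == 0 && up == 0)
            {
                label = parent.Count;                // new temporary label
                parent.Add(label);
                boxes.Add(new Rectangle(x, y, 1, 1));
            }
            else
            {
                label = left != 0 ? left : up;
                if (left != 0 && up != 0) Union(left, up);
            }
            labels[x, y] = label;
            int root = Find(label);
            boxes[root] = Rectangle.Union(boxes[root], new Rectangle(x, y, 1, 1));
        }

    var result = new List<Rectangle>();
    for (int i = 1; i < parent.Count; i++)
        if (Find(i) == i)                            // one rectangle per root label
            result.Add(boxes[i]);
    return result;
}
Each provisional label gets its own bounding box; when two labels turn out to be connected, only their boxes are merged via the union-find parents, exactly as described above.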
One last advice: I would rethink your "no 3rd party libraries" policy if I were you. Even if your final product will contain no 3rd party libraries, development might be a lot faster if you used well-documented, well-tested, useful building blocks from a library, then replaced them one by one with your own code. (And who knows, you might even find an open source library with a suitable license that's so much faster than your own code that you'll stick with it in the end...)
ADD: In case you want to rethink your "no libraries" position: Here's a quick and simple implementation using AForge (which has a more permissive license than emgucv):
private static void ProcessImages()
{
// load images
var img1 = AForge.Imaging.Image.FromFile(@"compare1.jpg");
var img2 = AForge.Imaging.Image.FromFile(@"compare2.jpg");
// calculate absolute difference
var difference = new AForge.Imaging.Filters.ThresholdedDifference(15)
{OverlayImage = img1}
.Apply(img2);
// create and initialize the blob counter
var bc = new AForge.Imaging.BlobCounter();
bc.FilterBlobs = true;
bc.MinWidth = 5;
bc.MinHeight = 5;
// find blobs
bc.ProcessImage(difference);
// draw result
BitmapData data = img2.LockBits(
new Rectangle(0, 0, img2.Width, img2.Height),
ImageLockMode.ReadWrite, img2.PixelFormat);
foreach (var rc in bc.GetObjectsRectangles())
AForge.Imaging.Drawing.FillRectangle(data, rc, Color.FromArgb(128,Color.Red));
img2.UnlockBits(data);
img2.Save(@"compareResult.jpg");
}
The actual difference + blob detection part (without loading and result display) takes about 43ms for the second run (the first run takes longer, of course, due to JITting, cache, etc.)
Result (the rectangle is larger due to jpeg artifacts):
Here is a flood-fill based version of your code. It checks every pixel for difference. If it finds a different pixel, it runs an exploration to find the entire different area.
The code is only meant as an illustration. There are certainly some points that could be improved.
unsafe bool ArePixelsEqual(byte* p1, byte* p2, int bytesPerPixel)
{
for (int i = 0; i < bytesPerPixel; ++i)
if (p1[i] != p2[i])
return false;
return true;
}
private static unsafe List<Rectangle> CodeImage(Bitmap bmp, Bitmap bmp2)
{
if (bmp.PixelFormat != bmp2.PixelFormat || bmp.Width != bmp2.Width || bmp.Height != bmp2.Height)
throw new ArgumentException();
List<Rectangle> rec = new List<Rectangle>();
var bmData1 = bmp.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
var bmData2 = bmp2.LockBits(new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp2.PixelFormat);
int bytesPerPixel = Image.GetPixelFormatSize(bmp.PixelFormat) / 8;
IntPtr scan01 = bmData1.Scan0;
IntPtr scan02 = bmData2.Scan0;
int stride1 = bmData1.Stride;
int stride2 = bmData2.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
bool[] visited = new bool[nWidth * nHeight];
byte* base1 = (byte*)scan01.ToPointer();
byte* base2 = (byte*)scan02.ToPointer();
for (int y = 0; y < nHeight; y++)
{
byte* p1 = base1;
byte* p2 = base2;
for (int x = 0; x < nWidth; ++x)
{
if (!ArePixelsEqual(p1, p2, bytesPerPixel) && !(visited[x + nWidth * y]))
{
// fill the different area
int minX = x;
int maxX = x;
int minY = y;
int maxY = y;
var pt = new Point(x, y);
Stack<Point> toBeProcessed = new Stack<Point>();
visited[x + nWidth * y] = true;
toBeProcessed.Push(pt);
while (toBeProcessed.Count > 0)
{
var process = toBeProcessed.Pop();
var ptr1 = (byte*)scan01.ToPointer() + process.Y * stride1 + process.X * bytesPerPixel;
var ptr2 = (byte*)scan02.ToPointer() + process.Y * stride2 + process.X * bytesPerPixel;
//Check pixel equality
if (ArePixelsEqual(ptr1, ptr2, bytesPerPixel))
continue;
//This pixel is different
//Update the rectangle
if (process.X < minX) minX = process.X;
if (process.X > maxX) maxX = process.X;
if (process.Y < minY) minY = process.Y;
if (process.Y > maxY) maxY = process.Y;
Point n; int idx;
//Put neighbors in stack
if (process.X - 1 >= 0)
{
n = new Point(process.X - 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.X + 1 < nWidth)
{
n = new Point(process.X + 1, process.Y); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y - 1 >= 0)
{
n = new Point(process.X, process.Y - 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
if (process.Y + 1 < nHeight)
{
n = new Point(process.X, process.Y + 1); idx = n.X + nWidth * n.Y;
if (!visited[idx]) { visited[idx] = true; toBeProcessed.Push(n); }
}
}
rec.Add(new Rectangle(minX, minY, maxX - minX + 1, maxY - minY + 1));
}
p1 += bytesPerPixel;
p2 += bytesPerPixel;
}
base1 += stride1;
base2 += stride2;
}
bmp.UnlockBits(bmData1);
bmp2.UnlockBits(bmData2);
return rec;
}
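For reference, a possible way to call this version and visualize the result (the file paths are placeholders; both images must have the same size and pixel format):
var bmp1 = (Bitmap)Image.FromFile(@"C:\temp\1.png");
var bmp2 = (Bitmap)Image.FromFile(@"C:\temp\2.png");
List<Rectangle> differences = CodeImage(bmp1, bmp2);
using (var g = Graphics.FromImage(bmp2))
    foreach (var r in differences)
        g.DrawRectangle(Pens.Red, r); // outline each changed area on the second image
bmp2.Save(@"C:\temp\diff.png");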
You can achieve this easily using a flood fill segmentation algorithm.
First, a utility class to make fast bitmap access easier. This will help to encapsulate the complex pointer logic and make the code more readable:
class BitmapWithAccess
{
public Bitmap Bitmap { get; private set; }
public System.Drawing.Imaging.BitmapData BitmapData { get; private set; }
public BitmapWithAccess(Bitmap bitmap, System.Drawing.Imaging.ImageLockMode lockMode)
{
Bitmap = bitmap;
BitmapData = bitmap.LockBits(new Rectangle(Point.Empty, bitmap.Size), lockMode, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
}
public Color GetPixel(int x, int y)
{
unsafe
{
byte* dataPointer = MovePointer((byte*)BitmapData.Scan0, x, y);
return Color.FromArgb(dataPointer[3], dataPointer[2], dataPointer[1], dataPointer[0]);
}
}
public void SetPixel(int x, int y, Color color)
{
unsafe
{
byte* dataPointer = MovePointer((byte*)BitmapData.Scan0, x, y);
dataPointer[3] = color.A;
dataPointer[2] = color.R;
dataPointer[1] = color.G;
dataPointer[0] = color.B;
}
}
public void Release()
{
Bitmap.UnlockBits(BitmapData);
BitmapData = null;
}
private unsafe byte* MovePointer(byte* pointer, int x, int y)
{
return pointer + x * 4 + y * BitmapData.Stride;
}
}
Then a class representing a rectangle containing different pixels, to mark them in the resulting image. In general this class can also contain a list of Point instances (or a byte[,] map) to make indicating individual pixels in the resulting image possible:
class Segment
{
public int Left { get; set; }
public int Top { get; set; }
public int Right { get; set; }
public int Bottom { get; set; }
public Bitmap Bitmap { get; set; }
public Segment()
{
Left = int.MaxValue;
Right = int.MinValue;
Top = int.MaxValue;
Bottom = int.MinValue;
}
};
Then the steps of a simple algorithm are as follows:
find different pixels
use a flood-fill algorithm to find segments on the difference image
draw bounding rectangles for the segments found
The first step is the easiest one:
static Bitmap FindDifferentPixels(Bitmap i1, Bitmap i2)
{
var result = new Bitmap(i1.Width, i2.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
var ia1 = new BitmapWithAccess(i1, System.Drawing.Imaging.ImageLockMode.ReadOnly);
var ia2 = new BitmapWithAccess(i2, System.Drawing.Imaging.ImageLockMode.ReadOnly);
var ra = new BitmapWithAccess(result, System.Drawing.Imaging.ImageLockMode.ReadWrite);
for (int x = 0; x < i1.Width; ++x)
for (int y = 0; y < i1.Height; ++y)
{
var different = ia1.GetPixel(x, y) != ia2.GetPixel(x, y);
ra.SetPixel(x, y, different ? Color.White : Color.FromArgb(0, 0, 0, 0));
}
ia1.Release();
ia2.Release();
ra.Release();
return result;
}
And the second and the third steps are covered with the following three functions:
static List<Segment> Segmentize(Bitmap blackAndWhite)
{
var bawa = new BitmapWithAccess(blackAndWhite, System.Drawing.Imaging.ImageLockMode.ReadOnly);
var result = new List<Segment>();
bool[,] visitedPoints = new bool[blackAndWhite.Width, blackAndWhite.Height];
for (int x = 0;x < blackAndWhite.Width;++x)
for (int y = 0;y < blackAndWhite.Height;++y)
{
if (bawa.GetPixel(x, y).A != 0
&& !visitedPoints[x, y])
{
result.Add(BuildSegment(new Point(x, y), bawa, visitedPoints));
}
}
bawa.Release();
return result;
}
static Segment BuildSegment(Point startingPoint, BitmapWithAccess bawa, bool[,] visitedPoints)
{
var result = new Segment();
List<Point> toProcess = new List<Point>();
toProcess.Add(startingPoint);
while (toProcess.Count > 0)
{
Point p = toProcess.First();
toProcess.RemoveAt(0);
ProcessPoint(result, p, bawa, toProcess, visitedPoints);
}
return result;
}
static void ProcessPoint(Segment segment, Point point, BitmapWithAccess bawa, List<Point> toProcess, bool[,] visitedPoints)
{
for (int i = -1; i <= 1; ++i)
{
for (int j = -1; j <= 1; ++j)
{
int x = point.X + i;
int y = point.Y + j;
if (x < 0 || y < 0 || x >= bawa.Bitmap.Width || y >= bawa.Bitmap.Height)
continue;
if (bawa.GetPixel(x, y).A != 0 && !visitedPoints[x, y])
{
segment.Left = Math.Min(segment.Left, x);
segment.Right = Math.Max(segment.Right, x);
segment.Top = Math.Min(segment.Top, y);
segment.Bottom = Math.Max(segment.Bottom, y);
toProcess.Add(new Point(x, y));
visitedPoints[x, y] = true;
}
}
}
}
And the following program given your two images as arguments:
static void Main(string[] args)
{
Image ai1 = Image.FromFile(args[0]);
Image ai2 = Image.FromFile(args[1]);
Bitmap i1 = new Bitmap(ai1.Width, ai1.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
Bitmap i2 = new Bitmap(ai2.Width, ai2.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
using (var g1 = Graphics.FromImage(i1))
using (var g2 = Graphics.FromImage(i2))
{
g1.DrawImage(ai1, Point.Empty);
g2.DrawImage(ai2, Point.Empty);
}
var difference = FindDifferentPixels(i1, i2);
var segments = Segmentize(difference);
using (var g1 = Graphics.FromImage(i1))
{
foreach (var segment in segments)
{
g1.DrawRectangle(Pens.Red, new Rectangle(segment.Left, segment.Top, segment.Right - segment.Left, segment.Bottom - segment.Top));
}
}
i1.Save("result.png");
Console.WriteLine("Done.");
Console.ReadKey();
}
produces the following result:
As you can see there are more differences between the given images. You can filter the resulting segments with regard to their size for example to drop the small artefacts. Also there is of course much work to do in terms of error checking, design and performance.
One idea is to proceed as follows:
1) Rescale images to a smaller size (downsample)
2) Run the above algorithm on smaller images
3) Run the above algorithm on original images, but restricting yourself only to rectangles found in step 2)
This can be of course extended to a multi-level hierarchical approach (using more different image sizes, increasing accuracy with each step).
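For illustration, a hedged sketch of steps 1) and 2), reusing FindDifferentPixels and Segmentize from above and mapping the rough rectangles back to full resolution for step 3); the scale factor and the one-coarse-pixel padding are arbitrary choices:
static List<Rectangle> FindCoarseRectangles(Bitmap i1, Bitmap i2, int factor)
{
    // Run the segmentation on downscaled copies, then scale the segments back up
    // with a little padding to compensate for the lost precision.
    int w = i1.Width / factor, h = i1.Height / factor;
    var result = new List<Rectangle>();
    using (var s1 = new Bitmap(i1, w, h))
    using (var s2 = new Bitmap(i2, w, h))
    using (var difference = FindDifferentPixels(s1, s2))
    {
        foreach (var segment in Segmentize(difference))
        {
            var r = new Rectangle((segment.Left - 1) * factor, (segment.Top - 1) * factor,
                (segment.Right - segment.Left + 3) * factor, (segment.Bottom - segment.Top + 3) * factor);
            result.Add(Rectangle.Intersect(r, new Rectangle(0, 0, i1.Width, i1.Height)));
        }
    }
    return result;
}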
Ah an algorithm challenge. Like! :-)
There are other answers here using f.ex. floodfill that will work just fine. I just noticed that you wanted something fast, so let me propose a different idea. Unlike the other people, I haven't tested it; it shouldn't be too hard and should be quite fast, but I simply don't have the time at the moment to test it myself. If you do, please share the results. Also, note that it's not a standard algorithm, so there are probably some bugs here and there in my explanation (and no patents).
My idea is derived from the idea of mean adaptive thresholding but with a lot of important differences. I cannot find the link on Wikipedia anymore, or my code, so I'll do this off the top of my head. Basically you create a new (64-bit) buffer for both images and fill it with:
f(x,y) = colorvalue + f(x-1, y) + f(x, y-1) - f(x-1, y-1)
f(x,0) = colorvalue + f(x-1, 0)
f(0,y) = colorvalue + f(0, y-1)
The main trick is that you can calculate the sum value of a portion of the image fast, namely by:
g(x1,y1,x2,y2) = f(x2,y2)-f(x1-1,y2)-f(x2,y1-1)+f(x1-1,y1-1)
In other words, this will give the same result as:
result = 0;
for (x=x1; x<=x2; ++x)
for (y=y1; y<=y2; ++y)
result += f(x,y)
In our case this means that with only 4 integer operations this will get you a more-or-less unique number for the block in question. I'd say that's pretty awesome.
Now, in our case, we don't really care about the average value; we just care about some sort-of unique number. If the image changes, it should change - simple as that. As for colorvalue, usually some gray scale number is used for thresholding - instead, we'll be using the complete 24-bit RGB value. Because there are only so few compares, we can simply scan until we find a block that doesn't match.
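To make the buffer concrete, here is a minimal sketch of building f and evaluating g in C#, assuming a 32bpp bitmap and using the packed 24-bit RGB value as colorvalue (the names are illustrative):
static long[,] BuildIntegral(Bitmap bmp)
{
    // f is kept 1-based internally so the x == 0 / y == 0 border cases of the
    // recurrence fall out naturally from the zero row and column.
    int w = bmp.Width, h = bmp.Height;
    var f = new long[w + 1, h + 1];
    var data = bmp.LockBits(new Rectangle(0, 0, w, h),
        ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        unsafe
        {
            byte* basePtr = (byte*)data.Scan0;
            for (int y = 0; y < h; y++)
            {
                byte* p = basePtr + y * data.Stride;
                for (int x = 0; x < w; x++, p += 4)
                {
                    long color = p[0] | (p[1] << 8) | (p[2] << 16); // packed 24-bit BGR
                    f[x + 1, y + 1] = color + f[x, y + 1] + f[x + 1, y] - f[x, y];
                }
            }
        }
    }
    finally { bmp.UnlockBits(data); }
    return f;
}

// g(x1,y1,x2,y2): sum over the inclusive block, four lookups as described above.
static long BlockSum(long[,] f, int x1, int y1, int x2, int y2)
    => f[x2 + 1, y2 + 1] - f[x1, y2 + 1] - f[x2 + 1, y1] + f[x1, y1];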
The basic algorithm that I propose works as follows:
for (y=0; y<height;++y)
for (x=0; x<width; ++x)
if (src[x,y] != dst[x,y])
if (!IntersectsWith(x, y, foundBlocks))
FindBlock(foundBlocks);
Now, IntersectsWith can be something like a quad tree, or if there are only a few blocks, you can simply iterate through the blocks and check whether the point is within the bounds of a block. You can also update the x variable accordingly (I would). You can even balance things by re-building the buffer for f(x,y) if you have too many blocks (more precisely: merge found blocks back from dst into src, then rebuild the buffer).
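In that few-blocks case the check could be as small as the following sketch:
static bool IntersectsWith(int x, int y, List<Rectangle> foundBlocks)
{
    // Good enough for a handful of blocks; swap in a quad tree if the list grows large.
    foreach (var block in foundBlocks)
        if (block.Contains(x, y))
            return true;
    return false;
}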
FindBlocks is where it gets interesting. Using the formula for g that's now pretty easy:
int x1 = x; int y1 = y; int x2 = x; int y2 = y;
bool changes = true;
while (changes)
{
    changes = false;
    while (x1 > 0 && g(srcimage,x1-1,y1,x1-1,y2) != g(dstimage,x1-1,y1,x1-1,y2)) { --x1; changes = true; }
    while (y1 > 0 && g(srcimage,x1,y1-1,x2,y1-1) != g(dstimage,x1,y1-1,x2,y1-1)) { --y1; changes = true; }
    while (x2 < width-1 && g(srcimage,x2+1,y1,x2+1,y2) != g(dstimage,x2+1,y1,x2+1,y2)) { ++x2; changes = true; }
    while (y2 < height-1 && g(srcimage,x1,y2+1,x2,y2+1) != g(dstimage,x1,y2+1,x2,y2+1)) { ++y2; changes = true; }
}
That's it. Note that the complexity of the FindBlocks algorithm is O(x + y), which is pretty awesome for finding a 2D block IMO. :-)
As I said, let me know how it turns out.
Related
I have a Bitmap with various color patterns and I need to find the bounding rectangles of one given color (for example: red) within the Bitmap. I found some code to process images but I am unable to figure out how to achieve this.
Any help would be highly appreciated.
This is my code.
private void LockUnlockBitsExample(PaintEventArgs e)
{
// Create a new bitmap.
Bitmap bmp = new Bitmap("c:\\fakePhoto.jpg");
// Lock the bitmap's bits.
Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
System.Drawing.Imaging.BitmapData bmpData =
bmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadWrite,
bmp.PixelFormat);
// Get the address of the first line.
IntPtr ptr = bmpData.Scan0;
// Declare an array to hold the bytes of the bitmap.
int bytes = Math.Abs(bmpData.Stride) * bmp.Height;
byte[] rgbValues = new byte[bytes];
// Copy the RGB values into the array.
System.Runtime.InteropServices.Marshal.Copy(ptr, rgbValues, 0, bytes);
// Set every third value to 255. A 24bpp bitmap will look red.
for (int counter = 2; counter < rgbValues.Length; counter += 3)
rgbValues[counter] = 255;
// Copy the RGB values back to the bitmap
System.Runtime.InteropServices.Marshal.Copy(rgbValues, 0, ptr, bytes);
// Unlock the bits.
bmp.UnlockBits(bmpData);
// Draw the modified image.
e.Graphics.DrawImage(bmp, 0, 150);
}
Edit: The Bitmap contains solid color shapes, and multiple shapes with the same color can appear. I need to find the bounding rectangle of each shape.
Just like Paint fills a color with the bucket tool, I need the bounding rectangle of the filled area.
I can provide the x, y coordinates of a point on the Bitmap to find the bounding rectangle of that color.
You would do this just like any other code where you want to find the min or max value in a list. With the difference that you want to find both min and max in both X and Y dimensions. Ex:
public static Rectangle GetBounds(this Bitmap bmp, Color color)
{
int minX = int.MaxValue;
int minY = int.MaxValue;
int maxX = int.MinValue;
int maxY = int.MinValue;
for (int y = 0; y < bmp.Height; y++)
{
for (int x = 0; x < bmp.Width; x++)
{
var c = bmp.GetPixel(x, y);
if (color == c)
{
if (x < minX) minX = x;
if (x > maxX) maxX = x;
if (y < minY) minY = y;
if (y > maxY) maxY = y;
}
}
}
var width = maxX - minX;
var height = maxY - minY;
if (width <= 0 || height <= 0)
{
// Handle case where no color was found, or if color is a single row/column
return default;
}
return new Rectangle(minX, minY, width, height);
}
There are plenty of resources on how to use LockBits/pointers. So converting the code to use this instead of GetPixel is left as an exercise.
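For reference, the LockBits version might look roughly like this; it's a sketch that assumes 32bppArgb data (the bytes-per-pixel and channel order would differ for other formats) and uses Marshal from System.Runtime.InteropServices:
public static Rectangle GetBoundsFast(this Bitmap bmp, Color color)
{
    int minX = int.MaxValue, minY = int.MaxValue, maxX = int.MinValue, maxY = int.MinValue;
    var data = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height),
        ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        var buffer = new byte[data.Stride * data.Height];
        Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
        int target = color.ToArgb();
        for (int y = 0; y < bmp.Height; y++)
        {
            int row = y * data.Stride;
            for (int x = 0; x < bmp.Width; x++)
            {
                int i = row + x * 4; // B, G, R, A
                int argb = buffer[i] | (buffer[i + 1] << 8) | (buffer[i + 2] << 16) | (buffer[i + 3] << 24);
                if (argb != target) continue;
                if (x < minX) minX = x;
                if (x > maxX) maxX = x;
                if (y < minY) minY = y;
                if (y > maxY) maxY = y;
            }
        }
    }
    finally { bmp.UnlockBits(data); }
    if (maxX <= minX || maxY <= minY)
        return default; // nothing found, or only a single row/column (mirrors the GetPixel version above)
    return new Rectangle(minX, minY, maxX - minX, maxY - minY);
}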
If you are not concerned with the performance, and an exact color match is enough for you, then just scan the bitmap:
var bpp = Image.GetPixelFormatSize(bmp.PixelFormat) / 8;
var l = bmp.Width; var t = bmp.Height; var r = -1; var b = -1;
for (var y = 0; y < bmp.Height; y++)
    for (var x = 0; x < bmp.Width; x++)
    {
        var i = y * bmpData.Stride + x * bpp;
        if (rgbValues[i] == 0 && rgbValues[i + 1] == 0 && rgbValues[i + 2] == 255) // B, G, R of pure red
        {
            l = Math.Min(l, x); r = Math.Max(r, x);
            t = Math.Min(t, y); b = Math.Max(b, y);
        }
    }
if (r >= l) // at least one point was found
    return new Rectangle(l, t, r - l + 1, b - t + 1);
else
    return new Rectangle(0, 0, 0, 0); // nothing found
You can search for the first point of each shape that fills a different area on the Bitmap, read a single horizontal row to get the points of the given color, then loop vertically within the horizontal range to get the adjacent points.
Once you get all the points of each, you can calculate the bounding rectangle through the first and last points.
public static IEnumerable<Rectangle> GetColorRectangles(Bitmap src, Color color)
{
var rects = new List<Rectangle>();
var points = new List<Point>();
var srcRec = new Rectangle(0, 0, src.Width, src.Height);
var srcData = src.LockBits(srcRec, ImageLockMode.ReadOnly, src.PixelFormat);
var srcBuff = new byte[srcData.Stride * srcData.Height];
var pixSize = Image.GetPixelFormatSize(src.PixelFormat) / 8;
Marshal.Copy(srcData.Scan0, srcBuff, 0, srcBuff.Length);
src.UnlockBits(srcData);
Rectangle GetColorRectangle()
{
var curX = points.First().X;
var curY = points.First().Y + 1;
var maxX = points.Max(p => p.X);
for(var y = curY; y < src.Height; y++)
for(var x = curX; x <= maxX; x++)
{
var pos = (y * srcData.Stride) + (x * pixSize);
var blue = srcBuff[pos];
var green = srcBuff[pos + 1];
var red = srcBuff[pos + 2];
if (Color.FromArgb(red, green, blue).ToArgb().Equals(color.ToArgb()))
points.Add(new Point(x, y));
else
break;
}
var p1 = points.First();
var p2 = points.Last();
return new Rectangle(p1.X, p1.Y, p2.X - p1.X, p2.Y - p1.Y);
}
for (var y = 0; y < src.Height; y++)
{
for (var x = 0; x < src.Width; x++)
{
var pos = (y * srcData.Stride) + (x * pixSize);
var blue = srcBuff[pos];
var green = srcBuff[pos + 1];
var red = srcBuff[pos + 2];
if (Color.FromArgb(red, green, blue).ToArgb().Equals(color.ToArgb()))
{
var p = new Point(x, y);
if (!rects.Any(r => new Rectangle(r.X - 2, r.Y - 2,
r.Width + 4, r.Height + 4).Contains(p)))
points.Add(p);
}
}
if (points.Any())
{
var rect = GetColorRectangle();
rects.Add(rect);
points.Clear();
}
}
return rects;
}
Demo
private IEnumerable<Rectangle> shapesRects = Enumerable.Empty<Rectangle>();
private void pictureBox1_MouseClick(object sender, MouseEventArgs e)
{
var sx = 1f * pictureBox1.Image.Width / pictureBox1.ClientSize.Width;
var sy = 1f * pictureBox1.Image.Height / pictureBox1.ClientSize.Height;
var p = Point.Round(new PointF(e.X * sx, e.Y * sy));
var c = (pictureBox1.Image as Bitmap).GetPixel(p.X, p.Y);
shapesRects = GetColorRectangles(pictureBox1.Image as Bitmap, c);
pictureBox1.Invalidate();
}
private void pictureBox1_Paint(object sender, PaintEventArgs e)
{
if (shapesRects.Any())
using (var pen = new Pen(Color.Black, 2))
e.Graphics.DrawRectangles(pen, shapesRects.ToArray());
}
I am trying to split an image of handwritten digits into separate ones.
Consider I have this image:
I wrote some simple logic that could work, but it did encounter a problem:
private static void SplitImages()
{
//We're going to use this code once.. to split our own images into separate images.. can we do this somehow?
Bitmap testSplitImage = (Bitmap)Bitmap.FromFile("TestSplitImage.jpg");
int[][] imagePixels = new int[testSplitImage.Width][];
for(int i=0;i<imagePixels.Length;i++)
{
imagePixels[i] = new int[testSplitImage.Height];
}
for(int i=0;i<imagePixels.Length;i++)
{
for(int j=0;j<imagePixels[i].Length;j++)
{
Color c = testSplitImage.GetPixel(i, j);
imagePixels[i][j] = (c.R + c.G + c.B) / 3;
}
}
//let's start by getting the first height vector... and count how many of them is white..dunno..
int startColNumber = 0;
int endColNumber = 0;
bool isStart = false;
int imageNumber = 1;
for(int i=0;i<imagePixels.Length;i++)
{
int whiteNumbers = 0;
for(int j=0;j<imagePixels[i].Length;j++)
{
if (imagePixels[i][j] > 200)
{
//consider it white or not really relevant
whiteNumbers++;
}
}
if (whiteNumbers > testSplitImage.Height*95.0/100.0)
{
//let's consider that if a height vector has more than 95% white pixels.. it means that we can start checking for an image
//now if we started checking for the image.. we need to stop
if (isStart)
{
//consider the end of image.. so the end column should be here or we make it +1 at least
endColNumber = i + 1;
isStart = false;
}
}
else
{
if (!isStart)
{
isStart = true; //we will start checking for the image one row before that maybe?
startColNumber = i == 0 ? i : i - 1;
}
}
if (endColNumber > 0)
{
//we got a start and an end.. let's create a new image out of those pixels..hopefully this will work
Bitmap splittedImage = new Bitmap(endColNumber - startColNumber + 1, testSplitImage.Height);
int col = 0;
for(int k=startColNumber;k<=endColNumber;k++)
{
for (int l=0;l<testSplitImage.Height;l++)
{
int c = imagePixels[k][l];
splittedImage.SetPixel(col, l, Color.FromArgb(c, c, c));
}
col++;
}
splittedImage.Save($"Image{imageNumber++}.jpg");
endColNumber = 0;
}
whiteNumbers = 0;
}
}
I did get good results:
I did also get the three zeros:
However, I got this as one image also:
This is one sample of the images that need to be split (about 4,000 images in total), and it's one of the best and easiest ones. I am wondering if there's a way to improve my logic, or should I drop this approach and find another?
This code only works with monochrome (2 color, black and white) images.
public static class Processor
{
public static byte[] ToArray(this Bitmap bmp) // bitmap to byte array using lockbits
{
Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
BitmapData data = bmp.LockBits(rect, ImageLockMode.ReadWrite, bmp.PixelFormat);
IntPtr ptr = data.Scan0;
int numBytes = data.Stride * bmp.Height;
byte[] bytes = new byte[numBytes];
System.Runtime.InteropServices.Marshal.Copy(ptr, bytes, 0, numBytes);
bmp.UnlockBits(data);
return bytes;
}
public static int GetPixel(this byte[] array, int bpr, int x, int y) //find out if the given pixel is 0 or 1
{
int num = y * bpr + x / 8;
return (array[num] >> (7 - x % 8)) & 1;
}
public static List<Point> getDrawingPoints(this Point start, byte[] array, int width, int height) // get one 0 point (black point) and find all adjacent black points by traveling neighbors
{
List<Point> points = new List<Point>();
points.Add(start);
int BytePerRow = array.Length / height;
int counter = 0;
do
{
for (int i = Math.Max(0, points[counter].X - 1); i <= Math.Min(width - 1, points[counter].X + 1); i++)
for (int j = Math.Max(0, points[counter].Y - 1); j <= Math.Min(height - 1, points[counter].Y + 1); j++)
if (array.GetPixel(BytePerRow, i, j) == 0 && !points.Any(p => p.X == i && p.Y == j))
points.Add(new Point(i, j));
counter++;
} while (counter < points.Count);
return points;
}
public static Bitmap ToBitmap(this List<Point> points) // convert points to bitmap
{
int startX = points.OrderBy(p => p.X).First().X,
endX = points.OrderByDescending(p => p.X).First().X,
startY = points.OrderBy(p => p.Y).First().Y,
endY = points.OrderByDescending(p => p.Y).First().Y;
Bitmap bmp = new Bitmap(endX - startX + 1, endY - startY + 1);
Graphics g = Graphics.FromImage(bmp);
g.FillRectangle(new SolidBrush(Color.White), new Rectangle(0, 0, bmp.Width, bmp.Height));
for (int i = startY; i <= endY; i++)
for (int j = startX; j <= endX; j++)
if (points.Any(p => p.X == j && p.Y == i)) bmp.SetPixel(j - startX, i - startY, Color.Black);
return bmp;
}
}
And use it like this to get all numbers inside the main image:
List<Point> processed = new List<Point>();
Bitmap bmp = ((Bitmap)Bitmap.FromFile(SourceBitmapPath));
byte[] array = bmp.ToArray();
int BytePerRow = array.Length / bmp.Height;
int imgIndex = 1;
for (int i = 0; i < bmp.Width; i++)
for (int j = 0; j < bmp.Height; j++)
{
if (array.GetPixel(BytePerRow, i, j) == 0 && !processed.Any(p => p.X == i && p.Y == j))
{
List<Point> points = new Point(i, j).getDrawingPoints(array, bmp.Width, bmp.Height);
processed.AddRange(points);
Bitmap result = points.ToBitmap();
result.Save($"{imgIndex++}.bmp");
}
}
I'm using Paint and Save As with the monochrome BMP format to generate the source image.
I also tested it with this image:
which resulted in the following three images:
I've been experimenting with the image bicubic resampling algorithm present in the AForge framework with the idea of introducing something similar into my image processing solution. See the original algorithm here and interpolation kernel here
Unfortunately I've hit a wall. It looks to me like somehow I am calculating the sample destination position incorrectly, probably due to the algorithm being designed for Format24bppRgb images whereas I am using a Format32bppPArgb format.
Here's my code:
public Bitmap Resize(Bitmap source, int width, int height)
{
int sourceWidth = source.Width;
int sourceHeight = source.Height;
Bitmap destination = new Bitmap(width, height, PixelFormat.Format32bppPArgb);
destination.SetResolution(source.HorizontalResolution, source.VerticalResolution);
using (FastBitmap sourceBitmap = new FastBitmap(source))
{
using (FastBitmap destinationBitmap = new FastBitmap(destination))
{
double heightFactor = sourceWidth / (double)width;
double widthFactor = sourceHeight / (double)height;
// Coordinates of source points
double ox, oy, dx, dy, k1, k2;
int ox1, oy1, ox2, oy2;
// Width and height decreased by 1
int maxHeight = height - 1;
int maxWidth = width - 1;
for (int y = 0; y < height; y++)
{
// Y coordinates
oy = (y * widthFactor) - 0.5;
oy1 = (int)oy;
dy = oy - oy1;
for (int x = 0; x < width; x++)
{
// X coordinates
ox = (x * heightFactor) - 0.5f;
ox1 = (int)ox;
dx = ox - ox1;
// Destination color components
double r = 0;
double g = 0;
double b = 0;
double a = 0;
for (int n = -1; n < 3; n++)
{
// Get Y coefficient
k1 = Interpolation.BiCubicKernel(dy - n);
oy2 = oy1 + n;
if (oy2 < 0)
{
oy2 = 0;
}
if (oy2 > maxHeight)
{
oy2 = maxHeight;
}
for (int m = -1; m < 3; m++)
{
// Get X coefficient
k2 = k1 * Interpolation.BiCubicKernel(m - dx);
ox2 = ox1 + m;
if (ox2 < 0)
{
ox2 = 0;
}
if (ox2 > maxWidth)
{
ox2 = maxWidth;
}
Color color = sourceBitmap.GetPixel(ox2, oy2);
r += k2 * color.R;
g += k2 * color.G;
b += k2 * color.B;
a += k2 * color.A;
}
}
destinationBitmap.SetPixel(
x,
y,
Color.FromArgb(a.ToByte(), r.ToByte(), g.ToByte(), b.ToByte()));
}
}
}
}
source.Dispose();
return destination;
}
And the kernel which should represent the given equation on Wikipedia
public static double BiCubicKernel(double x)
{
if (x < 0)
{
x = -x;
}
double bicubicCoef = 0;
if (x <= 1)
{
bicubicCoef = (1.5 * x - 2.5) * x * x + 1;
}
else if (x < 2)
{
bicubicCoef = ((-0.5 * x + 2.5) * x - 4) * x + 2;
}
return bicubicCoef;
}
Here's the original image at 500px x 667px.
And the image resized to 400px x 543px.
Visually it appears that the image is over reduced and then the same pixels are repeatedly applied once we hit a particular point.
Can anyone give me some pointers here to solve this?
Note FastBitmap is a wrapper for Bitmap that uses LockBits to manipulate pixels in memory. It works well with everything else I apply it to.
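For readers who don't have that helper, a hypothetical minimal stand-in with the same GetPixel/SetPixel surface could look like this; it locks the data as 32bppArgb for simplicity, so the real FastBitmap used above may well differ in details:
public sealed class FastBitmap : IDisposable
{
    private readonly Bitmap bitmap;
    private readonly BitmapData data;

    public FastBitmap(Image source)
    {
        bitmap = (Bitmap)source;
        // Locking as 32bppArgb keeps the per-pixel layout at a fixed 4 bytes (B, G, R, A).
        data = bitmap.LockBits(new Rectangle(0, 0, bitmap.Width, bitmap.Height),
            ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
    }

    public unsafe Color GetPixel(int x, int y)
    {
        byte* p = (byte*)data.Scan0 + y * data.Stride + x * 4;
        return Color.FromArgb(p[3], p[2], p[1], p[0]);
    }

    public unsafe void SetPixel(int x, int y, Color color)
    {
        byte* p = (byte*)data.Scan0 + y * data.Stride + x * 4;
        p[0] = color.B; p[1] = color.G; p[2] = color.R; p[3] = color.A;
    }

    public void Dispose() => bitmap.UnlockBits(data);
}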
Edit
As per request here's the methods involved in ToByte
public static byte ToByte(this double value)
{
return Convert.ToByte(ImageMaths.Clamp(value, 0, 255));
}
public static T Clamp<T>(T value, T min, T max) where T : IComparable<T>
{
if (value.CompareTo(min) < 0)
{
return min;
}
if (value.CompareTo(max) > 0)
{
return max;
}
return value;
}
You are limiting your ox2 and oy2 to destination image dimensions, instead of source dimensions.
Change this:
// Width and height decreased by 1
int maxHeight = height - 1;
int maxWidth = width - 1;
to this:
// Width and height decreased by 1
int maxHeight = sourceHeight - 1;
int maxWidth = sourceWidth - 1;
Well, I've met a very strange thing, which might or might not be the source of the problem.
I've started trying to implement a convolution matrix myself and encountered strange behaviour. I was testing the code on a small 4x4 pixel image. The code is as follows:
var source = Bitmap.FromFile(@"C:\Users\Public\Pictures\Sample Pictures\Безымянный.png");
using (FastBitmap sourceBitmap = new FastBitmap(source))
{
for (int TY = 0; TY < 4; TY++)
{
for (int TX = 0; TX < 4; TX++)
{
Color color = sourceBitmap.GetPixel(TX, TY);
Console.Write(color.B.ToString().PadLeft(5));
}
Console.WriteLine();
}
}
Although I'm printing out only the blue channel value, it's still clearly incorrect.
On the other hand, your solution partially works, which makes the thing I've found kind of irrelevant. One more guess I have: what is your system's DPI?
From what I have found helpful, here are some links:
C++ implementation of bicubic interpolation on a matrix
C# implementation of bicubic interpolation, lacking the part about rescaling
Thread on gamedev.net which has an almost working solution
That's my answer so far, but I will try further.
I develop a screen sharing app and I would like to make it as efficient as possible, so I'm trying to send only the differences between the screenshots.
So, suppose we have this image for example: it's a 32bppArgb image with transparent parts around it.
I would like to store each one of the blocks here as a rectangle in a List and get their bounds. It may sound very complex, but actually it just requires a little logic.
This is my code so far:
private unsafe List<Rectangle> CodeImage(Bitmap bmp)
{
List<Rectangle> rec = new List<Rectangle>();
Bitmap bmpRes = new Bitmap(bmp.Width, bmp.Height);
BitmapData bmData = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
IntPtr scan0 = bmData.Scan0;
int stride = bmData.Stride;
int nWidth = bmp.Width;
int nHeight = bmp.Height;
int minX = int.MaxValue;
int minY = int.MaxValue;
int maxX = 0;
bool found = false;
for (int y = 0; y < bmp.Height; y++)
{
byte* p = (byte*)scan0.ToPointer();
p += y * stride;
for (int x = 0; x < bmp.Width; x++)
{
if (p[3] != 0) //Check if pixel is not transparent;
{
found = true;
if (x < minX)
minX = x;
if (x > maxX)
maxX = x;
if (y < minY)
minY = y;
}
else
{
if (found)
{
int height = getBlockHeight(stride, scan0, maxX, minY);
found = false;
Rectangle temp = new Rectangle(minX, minY, maxX - minX, height);
rec.Add(temp);
y += minY;
break;
}
}
p += 4;//add to the pointer 4 bytes;
}
}
return rec;
}
As you can see, I'm trying to scan the image by width and height, and when I find a non-transparent pixel I send it to the getBlockHeight function to get its height:
public unsafe int getBlockHeight(int stride, IntPtr scan, int x, int y1)
{
int height = 0;
for (int y = y1; y < 1080; y++)
{
byte* p = (byte*)scan.ToPointer();
p += (y * stride) + (x * 4);
if (p[3] != 0) //Check if pixel is not transparent;
{
height++;
}
}
return height;
}
But I'm just not getting the result... I think there's something wrong with the logic here. Can anyone shed some light on it? I know it requires a bit of time and thinking, but I would very much appreciate anyone who could help a little.
In your current algorithm, after successfully matching a rectangle, you increase y with its height and break out of the inner loop. This means you can only detect data for one rectangle per horizontal line.
If I were you I'd think about the following things, before jumping back into the code:
Save the complete image as a PNG file, and look at its size. Is further processing really required?
Are these rectangles accurate? Will there be scenarios in which you would be constantly sending the contents of the entire screen anyway?
If you're developing for Windows, you might be able to hook into the procedure that invalidates areas on the screen, in which case you wouldn't have to determine these rectangles yourself. I don't know about other OSes.
Also, I personally wouldn't try to solve the rectangle-detection algorithm with nested for-loops, but would go with something like this:
public void FindRectangles(Bitmap bitmap, Rectangle searchArea, List<Rectangle> results)
{
// Find the first non-transparent pixel within the search area.
// Ensure that it is the pixel with the lowest y-value possible
Point p;
if (!TryFindFirstNonTransparent(bitmap, searchArea, out p))
{
// No non-transparent pixels in this area
return;
}
// Find its rectangle within the area
Rectangle r = GetRectangle(bitmap, p, searchArea);
results.Add(r);
// No need to search above the rectangle we just found
Rectangle left = new Rectangle(searchArea.Left, r.Top, r.Left - searchArea.Left, searchArea.Bottom - r.Top);
Rectangle right = new Rectangle(r.Right, r.Top, searchArea.Right - r.Right, searchArea.Bottom - r.Top);
Rectangle bottom = new Rectangle(r.Left, r.Bottom, r.Width, searchArea.Bottom - r.Bottom);
FindRectangles(bitmap, left, results);
FindRectangles(bitmap, right, results);
FindRectangles(bitmap, bottom, results);
}
public Rectangle GetRectangle(Bitmap bitmap, Point p, Rectangle searchArea)
{
int right = searchArea.Right;
for (int x = p.X; x < searchArea.Right; x++)
{
if (IsTransparent(x, p.Y))
{
right = x - 1;
break;
}
}
int bottom = searchArea.Bottom;
for (int y = p.Y; y < searchArea.Bottom; y++)
{
if (IsTransparent(p.X, y))
{
bottom = y - 1;
break;
}
}
return new Rectangle(p.X, p.Y, right - p.X, bottom - p.Y);
}
This approach, when fully implemented, should give you a list of rectangles (although it will occasionally split a rectangle in two).
(Of course instead of providing the bitmap, you'd pass the pointer to the pixel data with some metadata instead)
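The two helpers referenced above aren't shown; here is a minimal GetPixel-based sketch that is slow but enough to exercise the recursion (the signatures are adapted to take the bitmap explicitly, and a LockBits version would follow the same shape):
static bool IsTransparent(Bitmap bitmap, int x, int y)
{
    return bitmap.GetPixel(x, y).A == 0;
}

static bool TryFindFirstNonTransparent(Bitmap bitmap, Rectangle searchArea, out Point p)
{
    // Scan row by row so the hit has the lowest possible y-value, as required above.
    for (int y = searchArea.Top; y < searchArea.Bottom; y++)
        for (int x = searchArea.Left; x < searchArea.Right; x++)
            if (!IsTransparent(bitmap, x, y))
            {
                p = new Point(x, y);
                return true;
            }
    p = Point.Empty;
    return false;
}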
Suppose I have a System.Drawing.Bitmap in 32bpp ARGB mode. It's a large bitmap, but it's mostly fully transparent pixels with a relatively small image somewhere in the middle.
What is a fast algorithm to detect the borders of the "real" image, so I can crop away all the transparent pixels from around it?
Alternatively, is there a function already in .Net that I can use for this?
The basic idea is to check every pixel of the image to find the top, left, right and bottom bounds of the image. To do this efficiently, don't use the GetPixel method, which is pretty slow. Use LockBits instead.
Here's the implementation I came up with:
static Bitmap TrimBitmap(Bitmap source)
{
Rectangle srcRect = default(Rectangle);
BitmapData data = null;
try
{
data = source.LockBits(new Rectangle(0, 0, source.Width, source.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
byte[] buffer = new byte[data.Height * data.Stride];
Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
int xMin = int.MaxValue;
int xMax = 0;
int yMin = int.MaxValue;
int yMax = 0;
for (int y = 0; y < data.Height; y++)
{
for (int x = 0; x < data.Width; x++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
if (x < xMin) xMin = x;
if (x > xMax) xMax = x;
if (y < yMin) yMin = y;
if (y > yMax) yMax = y;
}
}
}
if (xMax < xMin || yMax < yMin)
{
// Image is empty...
return null;
}
srcRect = Rectangle.FromLTRB(xMin, yMin, xMax + 1, yMax + 1);
}
finally
{
if (data != null)
source.UnlockBits(data);
}
Bitmap dest = new Bitmap(srcRect.Width, srcRect.Height);
Rectangle destRect = new Rectangle(0, 0, srcRect.Width, srcRect.Height);
using (Graphics graphics = Graphics.FromImage(dest))
{
graphics.DrawImage(source, destRect, srcRect, GraphicsUnit.Pixel);
}
return dest;
}
It can probably be optimized, but I'm not a GDI+ expert, so it's the best I can do without further research...
EDIT: actually, there's a simple way to optimize it, by not scanning some parts of the image:
scan left to right until you find a non-transparent pixel; store (x, y) into (xMin, yMin)
scan top to bottom until you find a non-transparent pixel (only for x >= xMin); store y into yMin
scan right to left until you find a non-transparent pixel (only for y >= yMin); store x into xMax
scan bottom to top until you find a non-transparent pixel (only for xMin <= x <= xMax); store y into yMax
EDIT2: here's an implementation of the approach above:
static Bitmap TrimBitmap(Bitmap source)
{
Rectangle srcRect = default(Rectangle);
BitmapData data = null;
try
{
data = source.LockBits(new Rectangle(0, 0, source.Width, source.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
byte[] buffer = new byte[data.Height * data.Stride];
Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
int xMin = int.MaxValue,
xMax = int.MinValue,
yMin = int.MaxValue,
yMax = int.MinValue;
bool foundPixel = false;
// Find xMin
for (int x = 0; x < data.Width; x++)
{
bool stop = false;
for (int y = 0; y < data.Height; y++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
xMin = x;
stop = true;
foundPixel = true;
break;
}
}
if (stop)
break;
}
// Image is empty...
if (!foundPixel)
return null;
// Find yMin
for (int y = 0; y < data.Height; y++)
{
bool stop = false;
for (int x = xMin; x < data.Width; x++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
yMin = y;
stop = true;
break;
}
}
if (stop)
break;
}
// Find xMax
for (int x = data.Width - 1; x >= xMin; x--)
{
bool stop = false;
for (int y = yMin; y < data.Height; y++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
xMax = x;
stop = true;
break;
}
}
if (stop)
break;
}
// Find yMax
for (int y = data.Height - 1; y >= yMin; y--)
{
bool stop = false;
for (int x = xMin; x <= xMax; x++)
{
byte alpha = buffer[y * data.Stride + 4 * x + 3];
if (alpha != 0)
{
yMax = y;
stop = true;
break;
}
}
if (stop)
break;
}
srcRect = Rectangle.FromLTRB(xMin, yMin, xMax + 1, yMax + 1);
}
finally
{
if (data != null)
source.UnlockBits(data);
}
Bitmap dest = new Bitmap(srcRect.Width, srcRect.Height);
Rectangle destRect = new Rectangle(0, 0, srcRect.Width, srcRect.Height);
using (Graphics graphics = Graphics.FromImage(dest))
{
graphics.DrawImage(source, destRect, srcRect, GraphicsUnit.Pixel);
}
return dest;
}
There won't be a significant gain if the non-transparent part is small of course, since it will still scan most of the pixels. But if it's big, only the rectangles around the non-transparent part will be scanned.
I would like to suggest a divide & conquer approach:
split the image in the middle (e.g. vertically)
check if there are non-transparent pixels on the cut line (if so, remember min/max for the bounding box; a helper for this test is sketched after this list)
split left half again vertically
if cut line contains non-transparent pixels -> update bounding box
if not, you can probably discard the leftmost half (I don't know the pictures)
continue with the left-right half (you stated that the image is somewhere in the middle) until you find the leftmost bound of the image
do the same for the right half
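A small helper for the cut-line test mentioned in the list above (GetPixel for brevity; a LockBits version would be the obvious next step):
static bool TryScanColumn(Bitmap bmp, int x, out int minY, out int maxY)
{
    // Returns true if column x contains any non-transparent pixel,
    // along with the vertical extent found on that cut line.
    minY = int.MaxValue;
    maxY = int.MinValue;
    for (int y = 0; y < bmp.Height; y++)
        if (bmp.GetPixel(x, y).A != 0)
        {
            if (y < minY) minY = y;
            if (y > maxY) maxY = y;
        }
    return maxY >= minY;
}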