public Rgb Jasnosc()
{
byte rob = (byte)(0.299 * this.r + 0.587 * this.g + 0.114 * this.b); // standard luminance (Rec. 601) weights
Rgb ret;
ret.r = rob;
ret.g = rob;
ret.b = rob;
return ret;
}
So I have this kind of structure, and I am using it to convert the loaded image to grayscale.
My problem is that I would like to use a similar method to perform a DCT on the image, but I have no idea how to do it. Here is the code I use to perform the conversion:
public static Bitmap Jasnosc(Bitmap bitmapaWe)
{
int wysokosc = bitmapaWe.Height;
int szerokosc = bitmapaWe.Width;
Bitmap bitmapaWy = new Bitmap(szerokosc, wysokosc, PixelFormat.Format24bppRgb);
// lock the full width x height of both bitmaps
BitmapData bmWeData = bitmapaWe.LockBits(new Rectangle(0, 0, szerokosc, wysokosc), ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
BitmapData bmWyData = bitmapaWy.LockBits(new Rectangle(0, 0, szerokosc, wysokosc), ImageLockMode.ReadWrite, PixelFormat.Format24bppRgb);
int strideWe = bmWeData.Stride;
int strideWy = bmWyData.Stride;
IntPtr scanWe = bmWeData.Scan0;
IntPtr scanWy = bmWyData.Scan0;
unsafe
{
for (int y = 0; y < wysokosc; y++)
{
byte* pWe = (byte*)(void*)scanWe + y * strideWe;
byte* pWy = (byte*)(void*)scanWy + y * strideWy;
for (int x = 0; x < szerokosc; x++)
{
((Rgb*)pWy)[x] = ((Rgb*)pWe)[x].Jasnosc();
}
}
}
bitmapaWy.UnlockBits(bmWyData);
bitmapaWe.UnlockBits(bmWeData);
return bitmapaWy;
}
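For reference, the Rgb struct itself is not shown in the question; a minimal sketch of what the code above appears to assume, i.e. three sequential bytes matching the B, G, R byte order of Format24bppRgb:

using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
public struct Rgb
{
    // Format24bppRgb stores each pixel as three bytes in B, G, R order
    public byte b;
    public byte g;
    public byte r;

    // the Jasnosc() (brightness) method from the top of the question is declared here
}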
I would suggest that you use EmguCV (a .NET wrapper for OpenCV) for such operations.
The library is designed with performance in mind and exposes a lot of useful computer-vision functions.
An example for DCT can be found in the Git repo:
/// <summary>
/// performs forward or inverse transform of 2D floating-point image
/// </summary>
/// <param name="type">Transformation flags</param>
/// <returns>The result of DCT</returns>
[ExposableMethod(Exposable = true, Category = "Discrete Transforms")]
public Image<TColor, Single> DCT(CvEnum.CV_DCT_TYPE type)
{
Image<TColor, Single> res = new Image<TColor, float>(Width, Height);
CvInvoke.cvDCT(Ptr, res.Ptr, type);
return res;
}
The DCT function of Emgu CV is declared as:
public static void Dct(
IInputArray src,
IOutputArray dst,
DctType flags
)
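A minimal sketch of how it might be called (hedged: it assumes a recent Emgu CV version where CvInvoke.Dct, DctType and the generic Image<TColor, TDepth> class are available, and "input.bmp" is a placeholder path). DCT works on single-channel floating-point data, and OpenCV's 2D DCT expects even-sized dimensions, so the image may need padding or cropping first:

using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Load (and implicitly convert) the image as single-channel float, as required by Dct.
using (Image<Gray, float> gray = new Image<Gray, float>("input.bmp"))
using (Image<Gray, float> coefficients = new Image<Gray, float>(gray.Width, gray.Height))
{
    CvInvoke.Dct(gray, coefficients, DctType.Forward);   // forward transform: pixels -> DCT coefficients
    // ... inspect, quantize or filter the coefficients here ...
    CvInvoke.Dct(coefficients, gray, DctType.Inverse);   // inverse transform: coefficients -> pixels
}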
I am developing a feature that involves saving a fingerprint; for this I have used the DigitalPersona U 4500 reader.
I downloaded and installed the DigitalPersona SDK and built a Windows Forms C# application.
I added the control named DigitalPersona Fingerprint Enrollment Control, and it does capture the fingerprint.
The objective is to visualize the fingerprint that has been placed on the reader and see it in detail. For this purpose I added a PictureBox to display it, and additionally included the following:
public DPFP.Sample Sample = new DPFP.Sample(); // instantiate the sample
DPFP.Capture.SampleConversion Convertor = new DPFP.Capture.SampleConversion();
Bitmap bitmap = null;
Convertor.ConvertToPicture(sample, ref bitmap);
PicBoxHuella.Image = bitmap;
With the previous code it should show the sample in the PictureBox, but it does not. I checked and found that the sample arrives as null.
I can't understand the null value; when I place a fingerprint on the reader, the sample should be captured. I would appreciate some guidance on the subject.
In the OnCaptured event, you get the image data from the Fiv objects in Data.Views.
Note: pbFingerprint is a PictureBox where the image will be displayed.
private void enrollment_OnCaptured(EnrollmentControl enrollmentControl, CaptureResult captureResult, int fingerPosition)
{
if (captureResult.ResultCode == Constants.ResultCode.DP_SUCCESS)
{
if (captureResult.Data != null)
{
foreach (Fid.Fiv fiv in captureResult.Data.Views)
{
pbFingerprint.Image = CreateBitmap(fiv.RawImage, fiv.Width, fiv.Height);
}
}
}
}
/// <summary>
/// Create a bitmap from raw 8-bit grayscale data in row/column format.
/// </summary>
/// <param name="bytes">Raw grayscale pixel data, one byte per pixel.</param>
/// <param name="width">Image width in pixels.</param>
/// <param name="height">Image height in pixels.</param>
/// <returns>A 24bpp Bitmap with the grayscale value replicated into all three channels.</returns>
private Bitmap CreateBitmap(byte[] bytes, int width, int height)
{
byte[] rgbBytes = new byte[bytes.Length * 3];
for (int i = 0; i <= bytes.Length - 1; i++)
{
rgbBytes[(i * 3)] = bytes[i];
rgbBytes[(i * 3) + 1] = bytes[i];
rgbBytes[(i * 3) + 2] = bytes[i];
}
Bitmap bmp = new Bitmap(width, height, PixelFormat.Format24bppRgb);
BitmapData data = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.WriteOnly, PixelFormat.Format24bppRgb);
for (int i = 0; i <= bmp.Height - 1; i++)
{
IntPtr p = new IntPtr(data.Scan0.ToInt64() + data.Stride * i);
System.Runtime.InteropServices.Marshal.Copy(rgbBytes, i * bmp.Width * 3, p, bmp.Width * 3);
}
bmp.UnlockBits(data);
return bmp;
}
I have simple code that tries to find a sequence of pixels on the screen.
How do I break out of the two loops in FindPixelSequence, without leaking resources, when the if (isEqual) condition occurs?
How can I improve the existing code?
namespace FindColor
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
PlayWithColor PWC = new PlayWithColor();
List<System.Drawing.Color> pixels { get; set; }
public MainWindow()
{
InitializeComponent();
pixels = new List<System.Drawing.Color>();
pixels.Add(System.Drawing.Color.FromArgb(0, 0, 0, 0));
pixels.Add(System.Drawing.Color.FromArgb(0, 153, 255, 0));
pixels.Add(System.Drawing.Color.FromArgb(0, 153, 255, 0));
pixels.Add(System.Drawing.Color.FromArgb(0, 128, 214, 0));
}
private void Button_Click(object sender, RoutedEventArgs e)
{
using (var bitmap = PWC.TakeScreen())
{
PWC.FindPixelSequence(bitmap, pixels);
}
}
}
class ColorEqualityComparer : IEqualityComparer<Color>
{
public bool Equals(Color b1, Color b2)
{
return b1.R == b2.R && b1.G == b2.G && b1.B == b2.B;
}
public int GetHashCode(Color obj)
{
throw new NotImplementedException();
}
}
public class PlayWithColor
{
[System.Runtime.InteropServices.DllImport("user32.dll")]
public static extern bool SetCursorPos(int x, int y);
private Bitmap bmpScreenshot { get; set; }
public Bitmap TakeScreen()
{
bmpScreenshot = new Bitmap(Screen.PrimaryScreen.Bounds.Width,
Screen.PrimaryScreen.Bounds.Height,
PixelFormat.Format32bppArgb);
var gfxScreenshot = Graphics.FromImage(bmpScreenshot);
int sourceX = Screen.PrimaryScreen.Bounds.X;
int sourceY = Screen.PrimaryScreen.Bounds.Y;
gfxScreenshot.CopyFromScreen(sourceX,
sourceY,
0,
0,
Screen.PrimaryScreen.Bounds.Size,
CopyPixelOperation.SourceCopy);
return bmpScreenshot;
}
public System.Drawing.Point? FindPixelSequence(Bitmap bitmap, List<Color> pixels)
{
Stopwatch watch = new Stopwatch();
List<Color> currentPixels;
watch.Start();
BitmapData data = bitmap.LockBits(new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height),
ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
unsafe
{
byte* ptrSrc = (byte*)data.Scan0;
for (int y = 0; y < data.Height; y = y + 1)
{
for (int x = 0; x < data.Width; x = x + 1)
{
currentPixels = new List<Color>();
for (int i = 0; i < pixels.Count; i++)
{
byte r0 = ptrSrc[2];
byte g0 = ptrSrc[1];
byte b0 = ptrSrc[0];
Color currentPixel = Color.FromArgb(0, r0, g0, b0);
ptrSrc += 4;
currentPixels.Add(currentPixel);
}
ptrSrc -= (4 * (pixels.Count - 1));
bool isEqual = currentPixels.SequenceEqual(pixels, new ColorEqualityComparer());
if (isEqual)
{
SetCursorPos(x, y);
//how return coords of x and y from there?
}
}
}
}
bitmap.UnlockBits(data);
watch.Stop();
Debug.WriteLine(watch.ElapsedMilliseconds);
}
}
}
For the unsafe code, surround it with a try/finally so the bitmap is always unlocked and anything you allocated is released:
1.
unsafe
{
Stopwatch watch = new Stopwatch();
watch.Start();
try
{
....
return new Point(x,y); // To return x,y corrdinates
}
finally
{
bitmap.UnlockBits(data);
watch.Stop();
}
}
2. To simplify the code, I would use a third-party image-processing library for C#, such as AForge.NET.
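Combining both points, here is a minimal sketch (not the original code; it assumes the same PlayWithColor class and 32bppArgb screenshots) of FindPixelSequence restructured so it returns the match coordinates and always unlocks the bitmap:

public System.Drawing.Point? FindPixelSequence(Bitmap bitmap, List<System.Drawing.Color> pixels)
{
    BitmapData data = bitmap.LockBits(
        new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height),
        ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        unsafe
        {
            for (int y = 0; y < data.Height; y++)
            {
                byte* row = (byte*)data.Scan0 + y * data.Stride;
                for (int x = 0; x <= data.Width - pixels.Count; x++)
                {
                    bool isEqual = true;
                    for (int i = 0; i < pixels.Count; i++)
                    {
                        byte* p = row + (x + i) * 4;   // 4 bytes per pixel: B, G, R, A
                        if (p[2] != pixels[i].R || p[1] != pixels[i].G || p[0] != pixels[i].B)
                        {
                            isEqual = false;
                            break;
                        }
                    }
                    if (isEqual)
                    {
                        SetCursorPos(x, y);
                        return new System.Drawing.Point(x, y);   // exits both loops at once
                    }
                }
            }
        }
        return null;   // sequence not found
    }
    finally
    {
        bitmap.UnlockBits(data);   // always executed, even on the early return
    }
}

Returning from inside the nested loops exits both at once, and the finally block guarantees UnlockBits runs, so nothing is left locked.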
I copied the AForge sample from here:
http://www.aforgenet.com/framework/features/template_matching.html
and hoped it would work with two Bitmaps as sources, as in the following code:
Bitmap findTemplate (Bitmap sourceImage, Bitmap template)
{
// create template matching algorithm's instance
// (set similarity threshold to x.y%, 1.0f = 100%)
ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching( 0.4f );
// find all matchings with specified above similarity
TemplateMatch[] matchings = tm.ProcessImage( sourceImage, template ); // throws "Unsupported pixel format of the source or template image."
// highlight found matchings
BitmapData data = sourceImage.LockBits(
new Rectangle( 0, 0, sourceImage.Width, sourceImage.Height ),
ImageLockMode.ReadWrite, sourceImage.PixelFormat );
foreach ( TemplateMatch m in matchings )
{
AForge.Imaging.Drawing.Rectangle( data, m.Rectangle, System.Drawing.Color.White );
// do something else with matching
}
sourceImage.UnlockBits( data );
return sourceImage;
}
But when calling TemplateMatch[] matchings = tm.P.... it gives the error mentioned above.
The template is generated this way:
Bitmap templatebitmap=(Bitmap)AForge.Imaging.Image.FromFile("template.jpg");
The source is generated with the Kinect webcam, where the PlanarImage is converted to a Bitmap (a method copied from somewhere, but it has been working up to now):
Bitmap PImageToBitmap(PlanarImage PImage)
{
Bitmap bmap = new Bitmap(
PImage.Width,
PImage.Height,
System.Drawing.Imaging.PixelFormat.Format32bppRgb);
BitmapData bmapdata = bmap.LockBits(
new Rectangle(0, 0, PImage.Width,
PImage.Height),
ImageLockMode.WriteOnly,
bmap.PixelFormat);
IntPtr ptr = bmapdata.Scan0;
Marshal.Copy(PImage.Bits,
0,
ptr,
PImage.Width *
PImage.BytesPerPixel *
PImage.Height);
bmap.UnlockBits(bmapdata);
return bmap;
}
So, is anybody able to help me find where my mistake might be?
Or maybe someone knows a better way to match a template with a Kinect?
The overall job is to detect a known object with the Kinect, in my case a rubber duck.
Thank you in advance.
Here's a solution using AForge.
It is slow (it takes around 5 seconds), but it works.
As usual, you need to download and install the AForge framework,
add using directives for the AForge namespaces,
and copy-paste the following to make it work:
System.Drawing.Bitmap sourceImage = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\1.jpg");
System.Drawing.Bitmap template = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\2.jpg");
// create template matching algorithm's instance
// (set similarity threshold to 92.5%)
ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching(0.921f);
// find all matchings with specified above similarity
TemplateMatch[] matchings = tm.ProcessImage(sourceImage, template);
// highlight found matchings
BitmapData data = sourceImage.LockBits(
new Rectangle(0, 0, sourceImage.Width, sourceImage.Height),
ImageLockMode.ReadWrite, sourceImage.PixelFormat);
foreach (TemplateMatch m in matchings)
{
Drawing.Rectangle(data, m.Rectangle, Color.White);
MessageBox.Show(m.Rectangle.Location.ToString());
// do something else with matching
}
sourceImage.UnlockBits(data);
So, I just implemented it myself. But it is very slow, so if anyone has an idea for improving it, feel free to criticize my code:
public class Position
{
public int bestRow { get; set; }
public int bestCol { get; set; }
public double bestSAD { get; set; }
public Position(int row, int col, double sad)
{
bestRow = row;
bestCol = col;
bestSAD = sad;
}
}
Position element_position = new Position(0, 0, double.PositiveInfinity);
Position ownSearch(Bitmap search, Bitmap template) {
Position position = new Position(0,0,double.PositiveInfinity);
double minSAD = double.PositiveInfinity;
// loop through the search image
for (int x = 0; x <= search.PhysicalDimension.Width - template.PhysicalDimension.Width; x++)
{
for (int y = 0; y <= search.PhysicalDimension.Height - template.PhysicalDimension.Height; y++)
{
position_label2.Content = "Running: X=" + x + " Y=" + y;
double SAD = 0.0;
// loop through the template image
for (int i = 0; i < template.PhysicalDimension.Width; i++)
{
for (int j = 0; j < template.PhysicalDimension.Height; j++)
{
int r = Math.Abs(search.GetPixel(x + i, y + j).R - template.GetPixel(i, j).R);
int g = Math.Abs(search.GetPixel(x + i, y + j).G - template.GetPixel(i, j).G);
int b = Math.Abs(search.GetPixel(x + i, y + j).B - template.GetPixel(i, j).B);
int a = template.GetPixel(i, j).A;
SAD = SAD + ((r + g + b)*a/255 );
}
}
// save the best found position
if (minSAD > SAD)
{
minSAD = SAD;
// give me VALUE_MAX
position.bestRow = x;
position.bestCol = y;
position.bestSAD = SAD;
}
}
}
return position;
}
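As a possible improvement (a hedged sketch, not part of the original post): the dominant cost in the loop above is Bitmap.GetPixel, so the same SAD search can be written against LockBits data instead. It reuses the Position class from above; both bitmaps are locked as 32bpp ARGB and the names are illustrative:

static Position OwnSearchFast(Bitmap search, Bitmap template)
{
    var best = new Position(0, 0, double.PositiveInfinity);
    var sRect = new Rectangle(0, 0, search.Width, search.Height);
    var tRect = new Rectangle(0, 0, template.Width, template.Height);
    BitmapData sData = search.LockBits(sRect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    BitmapData tData = template.LockBits(tRect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    try
    {
        unsafe
        {
            for (int y = 0; y <= search.Height - template.Height; y++)
            {
                for (int x = 0; x <= search.Width - template.Width; x++)
                {
                    double sad = 0.0;
                    for (int j = 0; j < template.Height; j++)
                    {
                        byte* sRow = (byte*)sData.Scan0 + (y + j) * sData.Stride + x * 4;
                        byte* tRow = (byte*)tData.Scan0 + j * tData.Stride;
                        for (int i = 0; i < template.Width; i++)
                        {
                            int b = Math.Abs(sRow[i * 4]     - tRow[i * 4]);
                            int g = Math.Abs(sRow[i * 4 + 1] - tRow[i * 4 + 1]);
                            int r = Math.Abs(sRow[i * 4 + 2] - tRow[i * 4 + 2]);
                            int a = tRow[i * 4 + 3];          // template alpha used as a weight
                            sad += (r + g + b) * a / 255.0;
                        }
                    }
                    if (sad < best.bestSAD)
                    {
                        best.bestRow = x;   // kept consistent with the original: bestRow = x, bestCol = y
                        best.bestCol = y;
                        best.bestSAD = sad;
                    }
                }
            }
        }
    }
    finally
    {
        search.UnlockBits(sData);
        template.UnlockBits(tData);
    }
    return best;
}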
For a couple of days now I've tried to figure out why my nine-slice code does not work as expected. As far as I can see, there seems to be an issue with the Graphics.DrawImage method which handles my nine slice images incorrectly. So my problem is how to compensate for the incorrect scaling that is performed when running my code on the compact framework. I might add that this code of course works perfectly when running in the full framework environment. The problem only occurs when scaling the image to a larger image not the other way around. Here is the snippet:
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
/// <summary>
/// Initializes a new instance of the NineSliceBitmapSnippet class.
/// </summary>
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image too small for slicing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
So, my question is, does anybody know how I could compensate for the incorrect scaling?
/Dan
After some more trial and error I've finally found a solution to my problem. The scaling problems have always been in the top-center, right-center, bottom-center and left-center slices, since they're always stretched in only one direction according to the logic of nine-slice scaling. If I apply a temporary square stretch to those slices before applying the correct stretch, the final bitmap comes out correct. Once again, the problem is only visible in the .NET Compact Framework on a Windows CE device (Smart Device). Here's a snippet with code adjusting for the bug in CF. My only concern now is that the slices that get square-stretched will take much more memory due to the correction code. On the other hand, this step only lasts a short time, so I might get away with it. ;)
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap Scale(Size size)
{
if (m_OriginalBitmap != null)
{
return ScaleSingleBitmap(size);
}
return null;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
bool isWidthAffectedByVerticalStretch = (y == 1 && (x == 0 || x == 2) && destination.Height > source.Height);
bool isHeightAffectedByHorizontalStretch = (x == 1 && (y == 0 || y == 2) && destination.Width > source.Width);
if (isHeightAffectedByHorizontalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Horizontal);
}
else if (isWidthAffectedByVerticalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Vertical);
}
else
{
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
}
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
private void BypassDrawImageError(Graphics graphics, Rectangle destination, Rectangle source, Orientation orientationAdjustment)
{
Size adjustedSize = Size.Empty;
switch (orientationAdjustment)
{
case Orientation.Horizontal:
adjustedSize = new Size(destination.Width, destination.Width);
break;
case Orientation.Vertical:
adjustedSize = new Size(destination.Height, destination.Height);
break;
default:
break;
}
using (Bitmap quadScaledBitmap = new Bitmap(adjustedSize.Width, adjustedSize.Height))
{
using (Graphics tempGraphics = Graphics.FromImage(quadScaledBitmap))
{
tempGraphics.Clear(Color.Fuchsia);
tempGraphics.DrawImage(m_OriginalBitmap, new Rectangle(0, 0, adjustedSize.Width, adjustedSize.Height), source, GraphicsUnit.Pixel);
}
graphics.DrawImage(quadScaledBitmap, destination, new Rectangle(0, 0, quadScaledBitmap.Width, quadScaledBitmap.Height), GraphicsUnit.Pixel);
}
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image too small for slicing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
So I've been sharing some thoughts on the above topic on my website about fast, unsafe pixel access. A gentleman gave me a rough example of how he'd do it in C++, but that doesn't help me in C# unless I can interop it, and the interop is fast as well. I found a class on the internet, written with the help of MSDN, to access pixels unsafely. The class is exceptionally fast, but it's not fast enough. Here's the class:
using System;
using System.Collections.Generic;
using System.Text;
using System.Drawing;
using System.Drawing.Imaging;
namespace DCOMProductions.Desktop.ScreenViewer {
public unsafe class UnsafeBitmap {
Bitmap bitmap;
// three elements used for MakeGreyUnsafe
int width;
BitmapData bitmapData = null;
Byte* pBase = null;
public UnsafeBitmap(Bitmap bitmap) {
this.bitmap = new Bitmap(bitmap);
}
public UnsafeBitmap(int width, int height) {
this.bitmap = new Bitmap(width, height, PixelFormat.Format32bppArgb);
}
public void Dispose() {
bitmap.Dispose();
}
public Bitmap Bitmap {
get {
return (bitmap);
}
}
private Point PixelSize {
get {
GraphicsUnit unit = GraphicsUnit.Pixel;
RectangleF bounds = bitmap.GetBounds(ref unit);
return new Point((int)bounds.Width, (int)bounds.Height);
}
}
public void LockBitmap() {
GraphicsUnit unit = GraphicsUnit.Pixel;
RectangleF boundsF = bitmap.GetBounds(ref unit);
Rectangle bounds = new Rectangle((int)boundsF.X,
(int)boundsF.Y,
(int)boundsF.Width,
(int)boundsF.Height);
// Figure out the number of bytes in a row
// This is rounded up to be a multiple of 4
// bytes, since a scan line in an image must always be a multiple of 4 bytes
// in length.
width = (int)boundsF.Width * sizeof(Pixel);
if (width % 4 != 0) {
width = 4 * (width / 4 + 1);
}
bitmapData =
bitmap.LockBits(bounds, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
pBase = (Byte*)bitmapData.Scan0.ToPointer();
}
public Pixel GetPixel(int x, int y) {
Pixel returnValue = *PixelAt(x, y);
return returnValue;
}
public void SetPixel(int x, int y, Pixel colour) {
Pixel* pixel = PixelAt(x, y);
*pixel = colour;
}
public void UnlockBitmap() {
bitmap.UnlockBits(bitmapData);
bitmapData = null;
pBase = null;
}
public Pixel* PixelAt(int x, int y) {
return (Pixel*)(pBase + y * width + x * sizeof(Pixel));
}
}
}
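For reference, the Pixel struct is not shown in the post; a minimal sketch of what UnsafeBitmap appears to assume, i.e. four bytes laid out to match the B, G, R, A memory order of Format32bppArgb:

using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
public struct Pixel {
    public byte Blue;    // Format32bppArgb stores bytes as B, G, R, A in memory
    public byte Green;
    public byte Red;
    public byte Alpha;
}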
Basically, what I am doing is copying the entire screen and comparing each pixel to an old copy. On a 1680x1050 bitmap, this takes approximately 300 milliseconds using the following code.
private Bitmap GetInvalidFrame(Bitmap frame) {
Stopwatch sp = new Stopwatch();
sp.Start();
if (m_FrameBackBuffer == null) {
return frame;
}
Int32 pixelsToRead = frame.Width * frame.Height;
Int32 x = 0, y = 0;
UnsafeBitmap unsafeBitmap = new UnsafeBitmap(frame);
UnsafeBitmap unsafeBuffBitmap = new UnsafeBitmap(m_FrameBackBuffer);
UnsafeBitmap retVal = new UnsafeBitmap(frame.Width, frame.Height);
unsafeBitmap.LockBitmap();
unsafeBuffBitmap.LockBitmap();
retVal.LockBitmap();
do {
for (x = 0; x < frame.Width; x++) {
Pixel newPixel = unsafeBitmap.GetPixel(x, y);
Pixel oldPixel = unsafeBuffBitmap.GetPixel(x, y);
if (newPixel.Alpha != oldPixel.Alpha || newPixel.Red != oldPixel.Red || newPixel.Green != oldPixel.Green || newPixel.Blue != oldPixel.Blue) {
retVal.SetPixel(x, y, newPixel);
}
else {
// Skip pixel
}
}
y++;
} while (y != frame.Height);
unsafeBitmap.UnlockBitmap();
unsafeBuffBitmap.UnlockBitmap();
retVal.UnlockBitmap();
sp.Stop();
System.Diagnostics.Debug.WriteLine(sp.Elapsed.Milliseconds.ToString());
sp.Reset();
return retVal.Bitmap;
}
Is there any method/means/approach by which I could speed this up to about 30ms? I can copy the screen in about 30ms using Graphics.CopyFromScreen(), so that produces approximately 30 frames each second. However, a program only runs as fast as its slowest part, so the 300ms delay in GetInvalidFrame slows this down to about 1-3 frames each second. This isn't good for meeting software.
Any advice, approaches, pointers in the right direction would be absolutely wonderful! Also, the code that is used to draw the bitmap on the client-side is below as well.
To comment on Dmitriy's answer/comment:
#region RootWorkItem
private ScreenClient m_RootWorkItem;
/// <summary>
/// Gets the RootWorkItem
/// </summary>
public ScreenClient RootWorkItem {
get {
if (m_RootWorkItem == null) {
m_RootWorkItem = new ScreenClient();
m_RootWorkItem.FrameRead += new EventHandler<FrameEventArgs>(RootWorkItem_FrameRead);
}
return m_RootWorkItem;
}
}
#endregion
private void RootWorkItem_FrameRead(Object sender, FrameEventArgs e) {
if (e.Frame != null) {
if (uxSurface.Image != null) {
Bitmap frame = (Bitmap)uxSurface.Image;
Graphics g = Graphics.FromImage(frame);
g.DrawImage(e.Frame, 0, 0); // Draw only updated pixels
uxSurface.Image = frame;
}
else {
uxSurface.Image = e.Frame; // Draw initial, full image
}
}
else {
uxSurface.Image = null;
}
}
An unsafe approach using integers instead of Pixels and a single loop:
private static Bitmap GetInvalidFrame(Bitmap oldFrame, Bitmap newFrame)
{
if (oldFrame.Size != newFrame.Size)
{
throw new ArgumentException();
}
Bitmap result = new Bitmap(oldFrame.Width, oldFrame.Height, oldFrame.PixelFormat);
Rectangle lockArea = new Rectangle(Point.Empty, oldFrame.Size);
PixelFormat format = PixelFormat.Format32bppArgb;
BitmapData oldData = oldFrame.LockBits(lockArea, ImageLockMode.ReadOnly, format);
BitmapData newData = newFrame.LockBits(lockArea, ImageLockMode.ReadOnly, format);
BitmapData resultData = result.LockBits(lockArea, ImageLockMode.WriteOnly, format);
int len = resultData.Height * Math.Abs(resultData.Stride) / 4;
unsafe
{
int* pOld = (int*)oldData.Scan0;
int* pNew = (int*)newData.Scan0;
int* pResult = (int*)resultData.Scan0;
for (int i = 0; i < len; i++)
{
int oldValue = *pOld++;
int newValue = *pNew++;
*pResult++ = oldValue != newValue ? newValue : 0 /* replace with 0xff << 24 if you need non-transparent black pixel */;
// *pResult++ = *pOld++ ^ *pNew++; // if you can use XORs.
}
}
oldFrame.UnlockBits(oldData);
newFrame.UnlockBits(newData);
result.UnlockBits(resultData);
return result;
}
I think you really can use XORed frames here and I hope that this can have better performance on both sides.
private static void XorFrames(Bitmap leftFrame, Bitmap rightFrame)
{
if (leftFrame.Size != rightFrame.Size)
{
throw new ArgumentException();
}
Rectangle lockArea = new Rectangle(Point.Empty, leftFrame.Size);
PixelFormat format = PixelFormat.Format32bppArgb;
BitmapData leftData = leftFrame.LockBits(lockArea, ImageLockMode.ReadWrite, format);
BitmapData rightData = rightFrame.LockBits(lockArea, ImageLockMode.ReadOnly, format);
int len = leftData.Height * Math.Abs(rightData.Stride) / 4;
unsafe
{
int* pLeft = (int*)leftData.Scan0;
int* pRight = (int*)rightData.Scan0;
for (int i = 0; i < len; i++)
{
*pLeft++ ^= *pRight++;
}
}
leftFrame.UnlockBits(leftData);
rightFrame.UnlockBits(rightData);
}
You can use this procedure on both sides in the following way:
On the server side you need to evaluate the difference between the old and new frames, send it to the client, and replace the old frame with the new one. The server code should look something like this:
XorFrames(oldFrame, newFrame); // oldFrame ^= newFrame
Send(oldFrame); // send XOR of two frames
oldFrame = newFrame;
On the client side you need to update your current frame with the XOR frame received from the server:
XorFrames((Bitmap)uxSurface.Image, e.Frame);
Here: Utilizing the GPU with C# mentions some libraries for using the GPU from C#.
Yes, you can do so by using unsafe code.
BitmapData d = l.LockBits(new Rectangle(0, 0, l.Width, l.Height), ImageLockMode.ReadOnly,l.PixelFormat);
IntPtr scan = d.Scan0;
unsafe
{
byte* p = (byte*)(void*)scan;
// process the pixel data via p here
}
Check out http://www.codeproject.com/KB/GDI-plus/csharpgraphicfilters11.aspx for some basic examples of this kind of stuff. My code is based on that.
Note:
One of the reasons this will be much faster than yours is that you are comparing each channel separately instead of comparing the entire pixel in one operation. Similarly, changing PixelAt to give you the raw pixel value to facilitate this would probably give you an improvement.
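A hedged illustration of that note (the function and pointer names are placeholders; it assumes pointers into locked 32bppArgb scan lines): treat each pixel as one 32-bit value so a single comparison covers all four channels at once.

// Compares one scan line of two locked 32bppArgb bitmaps, one uint per pixel.
static unsafe int CountChangedPixels(byte* pOldRow, byte* pNewRow, int widthInPixels)
{
    uint* oldPx = (uint*)pOldRow;
    uint* newPx = (uint*)pNewRow;
    int changed = 0;
    for (int x = 0; x < widthInPixels; x++)
    {
        if (oldPx[x] != newPx[x])    // one comparison covers A, R, G and B
            changed++;
    }
    return changed;
}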
Instead of checking each and every pixel, you can just perform a basic memory compare of the two bitmaps. In C, something like memcmp().
This would give you a much quicker test to tell whether the images are the same or not. Only when you know they are different do you need to resort to the more expensive code that helps you determine where they differ (if you even need to know that).
I am not a C# person though, so I don't know how easy it is to get access to the raw memory.
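A hedged sketch of that idea from C# (not part of the original answer): P/Invoke memcmp from msvcrt.dll and compare the locked pixel data of the two bitmaps in one call.

using System;
using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;

static class BitmapCompare
{
    [DllImport("msvcrt.dll", CallingConvention = CallingConvention.Cdecl)]
    static extern int memcmp(IntPtr b1, IntPtr b2, UIntPtr count);

    // Returns true if the two bitmaps are the same size and have identical pixel data.
    public static bool AreIdentical(Bitmap a, Bitmap b)
    {
        if (a.Size != b.Size)
            return false;

        Rectangle rect = new Rectangle(Point.Empty, a.Size);
        BitmapData da = a.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
        BitmapData db = b.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
        try
        {
            // Both bitmaps are locked with the same format and size, so the strides match.
            int byteCount = da.Stride * da.Height;
            return memcmp(da.Scan0, db.Scan0, (UIntPtr)(uint)byteCount) == 0;
        }
        finally
        {
            a.UnlockBits(da);
            b.UnlockBits(db);
        }
    }
}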
I was able to shave off about 60ms. I think this is going to require the GPU. I'm not seeing any solution utilizing the CPU, even by comparing more than one byte/pixel at a time, unless someone can whip up a code sample to show otherwise. It still sits at about 200-260ms, far too slow for 30fps.
private static BitmapData m_OldData;
private static BitmapData m_NewData;
private static unsafe Byte* m_OldPBase;
private static unsafe Byte* m_NewPBase;
private static unsafe Pixel* m_OldPixel;
private static unsafe Pixel* m_NewPixel;
private static Int32 m_X;
private static Int32 m_Y;
private static Stopwatch m_Watch = new Stopwatch();
private static GraphicsUnit m_GraphicsUnit = GraphicsUnit.Pixel;
private static RectangleF m_OldBoundsF;
private static RectangleF m_NewBoundsF;
private static Rectangle m_OldBounds;
private static Rectangle m_NewBounds;
private static Pixel m_TransparentPixel = new Pixel() { Alpha = 0x00, Red = 0, Green = 0, Blue = 0 };
private Bitmap GetInvalidFrame(Bitmap frame) {
if (m_FrameBackBuffer == null) {
return frame;
}
m_Watch.Start();
unsafe {
m_OldBoundsF = m_FrameBackBuffer.GetBounds(ref m_GraphicsUnit);
m_OldBounds = new Rectangle((Int32)m_OldBoundsF.X, (Int32)m_OldBoundsF.Y, (Int32)m_OldBoundsF.Width, (Int32)m_OldBoundsF.Height);
m_OldData = m_FrameBackBuffer.LockBits(m_OldBounds, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
m_NewBoundsF = m_FrameBackBuffer.GetBounds(ref m_GraphicsUnit);
m_NewBounds = new Rectangle((Int32)m_NewBoundsF.X, (Int32)m_NewBoundsF.Y, (Int32)m_NewBoundsF.Width, (Int32)m_NewBoundsF.Height);
m_NewData = frame.LockBits(m_NewBounds, ImageLockMode.ReadWrite, PixelFormat.Format32bppArgb);
m_OldPBase = (Byte*)m_OldData.Scan0.ToPointer();
m_NewPBase = (Byte*)m_NewData.Scan0.ToPointer();
do {
for (m_X = 0; m_X < frame.Width; m_X++) {
m_OldPixel = (Pixel*)(m_OldPBase + m_Y * m_OldData.Stride + 1 + m_X * sizeof(Pixel));
m_NewPixel = (Pixel*)(m_NewPBase + m_Y * m_NewData.Stride + 1 + m_X * sizeof(Pixel));
if (m_OldPixel->Alpha == m_NewPixel->Alpha // AccessViolationException accessing Property in get {}
|| m_OldPixel->Red == m_NewPixel->Red
|| m_OldPixel->Green == m_NewPixel->Green
|| m_OldPixel->Blue == m_NewPixel->Blue) {
// Set the transparent pixel
*m_NewPixel = m_TransparentPixel;
}
}
m_Y++; //Debug.WriteLine(String.Format("X: {0}, Y: {1}", m_X, m_Y));
} while (m_Y < frame.Height);
}
m_Y = 0;
m_Watch.Stop();
Debug.WriteLine("Time elapsed: " + m_Watch.ElapsedMilliseconds.ToString());
m_Watch.Reset();
return frame;
}