Related
I have a WPF application that takes a screenshot of the running HandBrake executable using a class called ScreenCapture that I copied from Stack Overflow.
public class ScreenCapture
{
// P/Invoke declarations for the Win32 APIs used to capture a window.
// Obtains a copy of the window region (non-rectangular windows); returns one of the region-type constants below.
[DllImport("user32.dll")]
static extern int GetWindowRgn(IntPtr hWnd, IntPtr hRgn);
//Region Flags - The return value specifies the type of the region that the function obtains. It can be one of the following values.
const int ERROR = 0;
const int NULLREGION = 1;
const int SIMPLEREGION = 2;
const int COMPLEXREGION = 3;
// Retrieves the bounding rectangle of a window in screen coordinates.
[DllImport("user32.dll")]
[return: MarshalAs(UnmanagedType.Bool)]
static extern bool GetWindowRect(HandleRef hWnd, out RECT lpRect);
// Creates a rectangular GDI region; the returned HRGN must eventually be freed with DeleteObject.
[DllImport("gdi32.dll")]
static extern IntPtr CreateRectRgn(int nLeftRect, int nTopRect, int nRightRect, int nBottomRect);
// Asks the window to render itself into the supplied device context.
[DllImport("user32.dll", SetLastError = true)]
[return: MarshalAs(UnmanagedType.Bool)]
static extern bool PrintWindow(IntPtr hwnd, IntPtr hDC, uint nFlags);
/// <summary>
/// Managed mirror of the native Win32 RECT structure (left, top, right, bottom
/// edges). Sequential layout so it marshals correctly through P/Invoke calls
/// such as GetWindowRect. Provides conversions to and from
/// System.Drawing.Rectangle. Now declares IEquatable&lt;RECT&gt; so generic
/// comparisons use the existing strongly-typed Equals without boxing.
/// </summary>
[StructLayout(LayoutKind.Sequential)]
public struct RECT : IEquatable<RECT>
{
    // Field order and types must match the native RECT exactly - do not reorder.
    public int Left, Top, Right, Bottom;

    public RECT(int left, int top, int right, int bottom)
    {
        Left = left;
        Top = top;
        Right = right;
        Bottom = bottom;
    }

    public RECT(System.Drawing.Rectangle r) : this(r.Left, r.Top, r.Right, r.Bottom) { }

    /// <summary>Left edge; setting it moves the rectangle horizontally, preserving its width.</summary>
    public int X
    {
        get { return Left; }
        set { Right -= (Left - value); Left = value; }
    }

    /// <summary>Top edge; setting it moves the rectangle vertically, preserving its height.</summary>
    public int Y
    {
        get { return Top; }
        set { Bottom -= (Top - value); Top = value; }
    }

    public int Height
    {
        get { return Bottom - Top; }
        set { Bottom = value + Top; }
    }

    public int Width
    {
        get { return Right - Left; }
        set { Right = value + Left; }
    }

    public System.Drawing.Point Location
    {
        get { return new System.Drawing.Point(Left, Top); }
        set { X = value.X; Y = value.Y; }
    }

    public System.Drawing.Size Size
    {
        get { return new System.Drawing.Size(Width, Height); }
        set { Width = value.Width; Height = value.Height; }
    }

    public static implicit operator System.Drawing.Rectangle(RECT r)
    {
        return new System.Drawing.Rectangle(r.Left, r.Top, r.Width, r.Height);
    }

    public static implicit operator RECT(System.Drawing.Rectangle r)
    {
        return new RECT(r);
    }

    public static bool operator ==(RECT r1, RECT r2)
    {
        return r1.Equals(r2);
    }

    public static bool operator !=(RECT r1, RECT r2)
    {
        return !r1.Equals(r2);
    }

    /// <summary>Strongly-typed equality: all four edges must match.</summary>
    public bool Equals(RECT r)
    {
        return r.Left == Left && r.Top == Top && r.Right == Right && r.Bottom == Bottom;
    }

    public override bool Equals(object obj)
    {
        if (obj is RECT)
            return Equals((RECT)obj);
        else if (obj is System.Drawing.Rectangle)
            return Equals(new RECT((System.Drawing.Rectangle)obj));
        return false;
    }

    public override int GetHashCode()
    {
        // Delegate to Rectangle so a RECT hashes like its Rectangle equivalent.
        return ((System.Drawing.Rectangle)this).GetHashCode();
    }

    public override string ToString()
    {
        return string.Format(System.Globalization.CultureInfo.CurrentCulture, "{{Left={0},Top={1},Right={2},Bottom={3}}}", Left, Top, Right, Bottom);
    }
}
/// <summary>
/// Captures the window identified by <paramref name="ihandle"/> into a 32bpp
/// ARGB bitmap via PrintWindow. If the window has a non-rectangular region,
/// the area outside it is cleared to transparent. Returns null if a GDI
/// device context could not be obtained from the bitmap.
/// </summary>
/// <param name="ihandle">Native window handle (HWND) to capture.</param>
/// <returns>The captured bitmap (caller owns and must dispose it), or null.</returns>
public Bitmap GetScreenshot(IntPtr ihandle)
{
    IntPtr hwnd = ihandle;//handle here
    RECT rc;
    GetWindowRect(new HandleRef(null, hwnd), out rc);
    Bitmap bmp = new Bitmap(rc.Right - rc.Left, rc.Bottom - rc.Top, PixelFormat.Format32bppArgb);
    Graphics gfxBmp = Graphics.FromImage(bmp);
    IntPtr hdcBitmap;
    try
    {
        hdcBitmap = gfxBmp.GetHdc();
    }
    catch
    {
        // FIX: the old code returned null here and leaked both GDI+ objects.
        gfxBmp.Dispose();
        bmp.Dispose();
        return null;
    }
    bool succeeded = PrintWindow(hwnd, hdcBitmap, 0);
    gfxBmp.ReleaseHdc(hdcBitmap);
    if (!succeeded)
    {
        // Capture failed: fill gray so the failure is visible to callers.
        // FIX: brush is now disposed (was leaked each failed capture).
        using (var gray = new SolidBrush(Color.Gray))
        {
            gfxBmp.FillRectangle(gray, new Rectangle(Point.Empty, bmp.Size));
        }
    }
    // NOTE(review): the HRGN returned by CreateRectRgn is never freed; a
    // DeleteObject P/Invoke should be added and called after Region.FromHrgn.
    // Flagged rather than fixed because DeleteObject is not declared in this class.
    IntPtr hRgn = CreateRectRgn(0, 0, 0, 0);
    GetWindowRgn(hwnd, hRgn);
    using (Region region = Region.FromHrgn(hRgn))
    {
        if (!region.IsEmpty(gfxBmp))
        {
            // Non-rectangular window: clear everything outside its region.
            gfxBmp.ExcludeClip(region);
            gfxBmp.Clear(Color.Transparent);
        }
    }
    gfxBmp.Dispose();
    return bmp;
}
/// <summary>
/// Saves the bitmap to disk in BMP format.
/// </summary>
/// <param name="filename">Destination path; overwritten if it already exists.</param>
/// <param name="bitmap">Bitmap to write; must not be null.</param>
public void WriteBitmapToFile(string filename, Bitmap bitmap)
{
    // Validate up front so callers get ArgumentException instead of an
    // unhelpful NullReferenceException/ExternalException from Save.
    if (bitmap == null)
        throw new ArgumentNullException("bitmap");
    if (string.IsNullOrEmpty(filename))
        throw new ArgumentException("A destination file name is required.", "filename");
    bitmap.Save(filename, ImageFormat.Bmp);
}
So when the button click handler below is called a screenshot of the handbrake window is taken.
I write it to the hard drive to make sure it's OK:
handbrake screen shot.
I create an instance of a CLR class library ClassLibrary1::Class1 and call the method "DoSomething" passing it the System.Drawing.Bitmap object.
/// <summary>
/// Finds every process whose main window title contains "HandBrake",
/// captures a screenshot of that window, writes it to handbrake.bmp, then
/// reloads the file and hands it to the ClassLibrary1 OpenCV wrapper.
/// </summary>
private void button4_Click(object sender, RoutedEventArgs e)
{
    string wName = "HandBrake";
    foreach (Process pList in Process.GetProcesses())
    {
        // Guard clause instead of nesting the whole body.
        if (!pList.MainWindowTitle.Contains(wName))
        {
            continue;
        }
        IntPtr hWnd = pList.MainWindowHandle;
        var sc = new ScreenCapture();
        SetForegroundWindow(hWnd);
        // FIX: the captured bitmap was never disposed; GetScreenshot can also
        // return null, which previously crashed WriteBitmapToFile.
        using (var bitmap = sc.GetScreenshot(hWnd))
        {
            if (bitmap == null)
            {
                continue;
            }
            sc.WriteBitmapToFile("handbrake.bmp", bitmap);
        }
        // Reload from disk (kept for parity with the saved file) and dispose
        // afterwards so the file handle and pixel memory are released.
        using (Bitmap image1 = (Bitmap)System.Drawing.Image.FromFile("handbrake.bmp", true))
        {
            ClassLibrary1.Class1 opencv = new ClassLibrary1.Class1();
            opencv.DoSomething(image1);
        }
    }
}
Inside DoSomething I attempt to convert the System.Drawing.Bitmap to the OpenCV class cv::Mat. I call cv::imwrite to make sure the bitmap is still OK; unfortunately, something's gone wrong: mangled HandBrake screenshot.
// Converts the managed Bitmap to a cv::Mat and writes it out with cv::imwrite.
//
// Fixes over the previous version:
//  * The bitmap is locked explicitly as 32bpp ARGB and wrapped as CV_8UC4.
//    The old code passed CV_8UC3 (24bpp) for a 32bpp capture, so every row
//    was read with the wrong pixel size - the "stretched/mangled" output.
//  * The pixel data is deep-copied (clone) BEFORE UnlockBits, so the Mat no
//    longer dereferences memory the bitmap may reclaim after unlocking.
//  * The heap-allocated cv::Mat* is gone, removing a memory leak.
void Class1::DoSomething(Bitmap ^mybitmap)
{
    Rectangle rect = Rectangle(0, 0, mybitmap->Width, mybitmap->Height);
    // Lock as 32bpp regardless of the source format so the stride and channel
    // count always match the CV_8UC4 wrapper below. ReadOnly: we never write back.
    Imaging::BitmapData^ bmpData = mybitmap->LockBits(rect, Imaging::ImageLockMode::ReadOnly, Imaging::PixelFormat::Format32bppArgb);
    cv::Mat imgOriginal;
    try
    {
        // Address of the first scan line.
        IntPtr ptr = bmpData->Scan0;
        // Zero-copy view over the locked bits, honoring the (possibly padded,
        // possibly negative) stride...
        cv::Mat wrapper(mybitmap->Height, mybitmap->Width, CV_8UC4, ptr.ToPointer(), std::abs(bmpData->Stride));
        // ...then a deep copy that stays valid after UnlockBits.
        imgOriginal = wrapper.clone();
    }
    finally { mybitmap->UnlockBits(bmpData); }//Remember to unlock!!!
    cv::imwrite("from_mat.bmp", imgOriginal);
}
Can anybody spot my error?
Since your image is stretched horizontally, I'm betting that you have the wrong pixel format selected. (It's not stretched vertically, nor skewed diagonally, so the stride is correct.) CV_8UC3 specifies 24 bits per pixel, but I think that your BMP file is using 32 bits per pixel.
Switch your pixel format to CV_8UC4, or better yet, read the number of bits per pixel from the image and select the correct CV format based on that.
Side note: Since you're doing sc.WriteBitmapToFile() followed by opencv.DoSomething(Image.FromFile()), the entire bit about how you're capturing the screenshot is irrelevant. You're reading the bitmap from a file; that's all that matters.
I'm currently working on creating an Ambilight for my computer monitor with C#, an arduino, and an Ikea Dioder. Currently the hardware portion runs flawlessly; however, I'm having a problem with detecting the average color of a section of screen.
I have two issues with the implementations that I'm using:
Performance - Both of these algorithms add a somewhat noticeable stutter to the screen. Nothing showstopping, but it's annoying while watching video.
No Fullscreen Game Support - When a game is in fullscreen mode both of these methods just return white.
/// <summary>
/// IColorProvider that reads the Direct3D 9 front buffer and averages the
/// colour at a fixed grid of sample points inside a centre box of the primary
/// screen. The Device and the precomputed sample offsets are static so the
/// expensive device creation happens only once across instances.
/// </summary>
public class DirectxColorProvider : IColorProvider
{
    private static Device d;
    private static Collection<long> colorPoints;

    public DirectxColorProvider()
    {
        PresentParameters present_params = new PresentParameters();
        if (d == null)
        {
            d = new Device(new Direct3D(), 0, DeviceType.Hardware, IntPtr.Zero, CreateFlags.SoftwareVertexProcessing, present_params);
        }
        if (colorPoints == null)
        {
            colorPoints = GetColorPoints();
        }
    }

    /// <summary>
    /// Captures the screen and returns the averaged sample as a 3-byte
    /// [r, g, b] array. (The previous version allocated a dead 4-byte array
    /// that avcs always replaced.)
    /// </summary>
    public byte[] GetColors()
    {
        using (var screen = this.CaptureScreen())
        {
            DataRectangle dr = screen.LockRectangle(LockFlags.None);
            using (var gs = dr.Data)
            {
                return avcs(gs, colorPoints);
            }
        }
    }

    /// <summary>Copies the front buffer into an offscreen A8R8G8B8 surface.</summary>
    private Surface CaptureScreen()
    {
        Surface s = Surface.CreateOffscreenPlain(d, Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height, Format.A8R8G8B8, Pool.Scratch);
        d.GetFrontBufferData(0, s);
        return s;
    }

    /// <summary>
    /// Averages the 32bpp pixels found at the given byte offsets of the locked
    /// surface stream. Returns [r, g, b].
    /// </summary>
    private static byte[] avcs(DataStream gs, Collection<long> positions)
    {
        byte[] bu = new byte[4];
        int r = 0;
        int g = 0;
        int b = 0;
        int i = 0;
        foreach (long pos in positions)
        {
            gs.Position = pos;
            gs.Read(bu, 0, 4); // little-endian 32bpp pixel: bu = [B, G, R, A]
            r += bu[2];
            g += bu[1];
            b += bu[0];
            i++;
        }
        byte[] result = new byte[3];
        if (i == 0)
        {
            // FIX: no sample points (degenerate box) previously threw
            // DivideByZeroException; return black instead.
            return result;
        }
        result[0] = (byte)(r / i);
        result[1] = (byte)(g / i);
        result[2] = (byte)(b / i);
        return result;
    }

    /// <summary>
    /// Precomputes byte offsets of sample points spaced 20px apart inside the
    /// centre box, assuming 4 bytes per pixel and a surface pitch equal to
    /// screen-width * 4 - TODO confirm the pitch assumption for widths the
    /// driver pads.
    /// </summary>
    private Collection<long> GetColorPoints()
    {
        const long offset = 20; // sample spacing in pixels
        const long Bpp = 4;     // bytes per pixel of A8R8G8B8
        var box = GetBox();
        // Renamed from "colorPoints" - the old local shadowed the static field.
        var points = new Collection<long>();
        for (var x = box.X; x < (box.X + box.Length); x += offset)
        {
            for (var y = box.Y; y < (box.Y + box.Height); y += offset)
            {
                long pos = (y * Screen.PrimaryScreen.Bounds.Width + x) * Bpp;
                points.Add(pos);
            }
        }
        return points;
    }

    /// <summary>
    /// Sample box anchored one third of the way into the (margin-adjusted)
    /// screen, sized two thirds of it in each direction.
    /// </summary>
    private ScreenBox GetBox()
    {
        var box = new ScreenBox();
        int m = 8; // margin in pixels
        box.X = (Screen.PrimaryScreen.Bounds.Width - m) / 3;
        box.Y = (Screen.PrimaryScreen.Bounds.Height - m) / 3;
        box.Length = box.X * 2;
        box.Height = box.Y * 2;
        return box;
    }

    /// <summary>Simple rectangle described by origin (X, Y) plus extents (Length, Height).</summary>
    private class ScreenBox
    {
        public long X { get; set; }
        public long Y { get; set; }
        public long Length { get; set; }
        public long Height { get; set; }
    }
}
You can find the file for the directX implmentation here.
/// <summary>
/// IColorProvider that captures the centre box of the primary screen with GDI
/// BitBlt and averages its pixels via IColorHelper. Note that GDI capture
/// cannot read fullscreen exclusive-mode games (it returns blank frames).
/// </summary>
public class GDIColorProvider : Form, IColorProvider
{
    private static Rectangle box;
    private readonly IColorHelper _colorHelper;

    public GDIColorProvider()
    {
        _colorHelper = new ColorHelper();
        box = _colorHelper.GetCenterBox();
    }

    /// <summary>Captures the centre box and returns its averaged colour bytes.</summary>
    public byte[] GetColors()
    {
        byte[] colors;
        IntPtr hSrce = GetDC(IntPtr.Zero);   // DC for the whole screen
        IntPtr hDest = CreateCompatibleDC(hSrce);
        IntPtr hBmp = CreateCompatibleBitmap(hSrce, box.Width, box.Height);
        IntPtr hOldBmp = SelectObject(hDest, hBmp);
        // FIX: copy box.Width x box.Height pixels FROM (box.X, box.Y) on the
        // screen TO (0, 0) in the memory DC. The old call passed the box
        // origin as the *destination* and shrank the extents by it, so the
        // wrong screen area (mostly uninitialized bitmap) was captured.
        bool b = BitBlt(hDest, 0, 0, box.Width, box.Height, hSrce, box.X, box.Y, CopyPixelOperation.SourceCopy);
        using (var bmp = Bitmap.FromHbitmap(hBmp))
        {
            colors = _colorHelper.AverageColors(bmp);
        }
        // Restore the original bitmap before deleting ours, then tear down.
        SelectObject(hDest, hOldBmp);
        DeleteObject(hBmp);
        DeleteDC(hDest);
        // FIX: a DC obtained via GetDC(IntPtr.Zero) must be released against
        // the same (null) window handle, not the desktop window handle.
        ReleaseDC(IntPtr.Zero, hSrce);
        return colors;
    }

    // P/Invoke declarations
    [DllImport("gdi32.dll")]
    static extern bool BitBlt(IntPtr hdcDest, int xDest, int yDest, int
    wDest, int hDest, IntPtr hdcSource, int xSrc, int ySrc, CopyPixelOperation rop);
    [DllImport("user32.dll")]
    static extern bool ReleaseDC(IntPtr hWnd, IntPtr hDc);
    [DllImport("gdi32.dll")]
    static extern IntPtr DeleteDC(IntPtr hDc);
    [DllImport("gdi32.dll")]
    static extern IntPtr DeleteObject(IntPtr hDc);
    [DllImport("gdi32.dll")]
    static extern IntPtr CreateCompatibleBitmap(IntPtr hdc, int nWidth, int nHeight);
    [DllImport("gdi32.dll")]
    static extern IntPtr CreateCompatibleDC(IntPtr hdc);
    [DllImport("gdi32.dll")]
    static extern IntPtr SelectObject(IntPtr hdc, IntPtr bmp);
    [DllImport("user32.dll")]
    private static extern IntPtr GetDesktopWindow();
    [DllImport("user32.dll")]
    private static extern IntPtr GetWindowDC(IntPtr ptr);
    [DllImport("user32.dll")]
    private static extern IntPtr GetDC(IntPtr ptr);
}
You Can Find the File for the GDI implementation Here.
The Full Codebase Can be Found Here.
Updated Answer
The problem of slow screen capture performance most likely is caused by BitBlt() doing a pixel conversion when the pixel formats of source and destination don't match. From the docs:
If the color formats of the source and destination device contexts do not match, the BitBlt function converts the source color format to match the destination format.
This is what caused slow performance in my code, especially in higher resolutions.
The default pixel format seems to be PixelFormat.Format32bppArgb, so that's what you should use for the buffer:
var screen = new Bitmap(bounds.Width, bounds.Height, PixelFormat.Format32bppArgb);
var gfx = Graphics.FromImage(screen);
gfx.CopyFromScreen(bounds.Location, new Point(0, 0), bounds.Size);
The next source for slow performance is Bitmap.GetPixel() which does boundary checks. Never use it when analyzing every pixel. Instead lock the bitmap data and get a pointer to it:
/// <summary>
/// Computes the average colour of <paramref name="image"/> by reading the raw
/// 32bpp ARGB pixel data directly (avoids Bitmap.GetPixel's per-call boundary
/// checks). <paramref name="sampleStep"/> &gt; 1 samples every Nth pixel of
/// every Nth row for speed.
/// </summary>
public unsafe Color GetAverageColor(Bitmap image, int sampleStep = 1) {
    var data = image.LockBits(
        // FIX: was "Image.Size" - an instance property accessed through the
        // type name, which does not compile; the argument's size is intended.
        new Rectangle(Point.Empty, image.Size),
        ImageLockMode.ReadOnly,
        PixelFormat.Format32bppArgb);
    var row = (int*)data.Scan0.ToPointer();
    var (sumR, sumG, sumB) = (0L, 0L, 0L);
    // Stride is in bytes; divide by sizeof(int) to advance in whole pixels.
    var stride = data.Stride / sizeof(int) * sampleStep;
    for (var y = 0; y < data.Height; y += sampleStep) {
        for (var x = 0; x < data.Width; x += sampleStep) {
            var argb = row[x];
            sumR += (argb & 0x00FF0000) >> 16;
            sumG += (argb & 0x0000FF00) >> 8;
            sumB += argb & 0x000000FF;
        }
        row += stride;
    }
    image.UnlockBits(data);
    // FIX: count exactly the samples the loops visited (ceiling division).
    // The old floor-based expression (Width / step * Height / step) under-
    // counted when a dimension was not a multiple of sampleStep, which
    // inflated the averages.
    var samplesPerRow = (data.Width + sampleStep - 1) / sampleStep;
    var sampleRows = (data.Height + sampleStep - 1) / sampleStep;
    var numSamples = (long)samplesPerRow * sampleRows;
    var avgR = sumR / numSamples;
    var avgG = sumG / numSamples;
    var avgB = sumB / numSamples;
    return Color.FromArgb((int)avgR, (int)avgG, (int)avgB);
}
This should get you well below 10 ms, depending on the screen size. In case it is still too slow you can increase the sampleStep parameter of GetAverageColor().
Original Answer
I recently did the same thing and came up with something that worked surprisingly good.
The trick is to create an additional bitmap that is 1x1 pixels in size and set a good interpolation mode on its graphics context (bilinear or bicubic, but not nearest neighbor).
Then you draw your captured bitmap into that 1x1 bitmap exploiting the interpolation and retrieve that pixel to get the average color.
I'm doing that at a rate of ~30 fps. When the screen shows a GPU rendering (e.g. watching YouTube full screen with enabled hardware acceleration in Chrome) there is no visible stuttering or anything. In fact, CPU utilization of the application is way below 10%. However, if I turn off Chrome's hardware acceleration then there is definitely some slight stuttering noticeable if you watch close enough.
Here are the relevant parts of the code:
using var screen = new Bitmap(width, height);
using var screenGfx = Graphics.FromImage(screen);
using var avg = new Bitmap(1, 1);
using var avgGfx = Graphics.FromImage(avg);
avgGfx.InterpolationMode = InterpolationMode.HighQualityBicubic;
while (true) {
screenGfx.CopyFromScreen(left, top, 0, 0, screen.Size);
avgGfx.DrawImage(screen, 0, 0, avg.Width, avg.Height);
var color = avg.GetPixel(0, 0);
var bright = (int)Math.Round(Math.Clamp(color.GetBrightness() * 100, 1, 100));
// set color and brightness on your device
// wait 1000/fps milliseconds
}
Note that this works for GPU renderings, because System.Drawing.Common uses GDI+ nowadays. However, it does not work when the content is DRM protected. So it won't work with Netflix for example :(
I published the code on GitHub. Even though I abandoned the project due to Netflix' DRM protection it might help someone else.
I'm attempting to load a variety of image formats to display with alpha blending in OpenGL for our company's software, but I seem to have no luck on this. I'm trying to load Bitmaps like this:
/// <summary>
/// Loads an image file and re-renders it into a fresh 24bpp RGB bitmap so
/// every supported format (BMP/JPEG/PNG/GIF) ends up in one known pixel
/// format for texturing. Note: drawing onto 24bpp discards any alpha channel
/// the source image had.
/// </summary>
/// <param name="filename">Path of the image file to load.</param>
/// <returns>A new 24bpp bitmap the caller owns and must dispose.</returns>
private Bitmap LoadBitmap(string filename)
{
    // FIX: 'using' guarantees the source image is released even if DrawImage
    // throws; the old code only disposed it on the success path.
    using (Bitmap original = new Bitmap(filename))
    {
        Bitmap toReturn = new Bitmap(original.Width, original.Height, PixelFormat.Format24bppRgb);
        using (Graphics gr = Graphics.FromImage(toReturn))
        {
            gr.DrawImage(original, new Rectangle(0, 0, toReturn.Width, toReturn.Height));
        }
        return toReturn;
    }
}
Loading it into a texture like this:
// Creates an OpenGL texture from the bitmap (disposing any previously held
// one first) and returns the one-element texture-id array, or null when the
// bitmap is null.
// NOTE(review): rows are uploaded as packed BGR, but OpenGL's default unpack
// alignment is 4 - for 24bpp bitmaps whose width is not a multiple of 4 the
// row padding will skew the texture unless glPixelStorei(GL_UNPACK_ALIGNMENT, 1)
// is issued first; confirm. Pre-2.0 GL also requires power-of-two texture
// dimensions - a likely cause of the white quads; verify the target GL version.
private uint[] GetTexture(Bitmap convertToTexture)
{
    // Setup return value
    uint[] toReturn = null;
    // Make sure the device and the bitmap exists
    if (convertToTexture != null)
    {
        // Dispose the imagery first
        DisposeTexture();
        mImageryTexture = new uint[1];
        // Lock the whole bitmap as 24bpp BGR for the upload below.
        Rectangle rect = new Rectangle(Point.Empty, convertToTexture.Size);
        BitmapData bitmapdata = convertToTexture.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
        // Generate and bind the texture id.
        glGenTextures(1, mImageryTexture);
        glBindTexture(GL_TEXTURE_2D, mImageryTexture[0]);
        glTexImage2D(GL_TEXTURE_2D, 0, (int)GL_RGB8, convertToTexture.Width, convertToTexture.Height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, bitmapdata.Scan0);
        // Linear Filtering
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        // Clamp
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
        // Release the bitmap
        convertToTexture.UnlockBits(bitmapdata);
        // Set the return value to this imagery
        toReturn = mImageryTexture;
    }
    return toReturn;
}
And using the texture like this:
// Renders one frame: clears the buffers, then draws the four test images
// (BMP, JPEG, PNG, GIF) left to right as textured quads, each outlined via
// RenderLines, advancing rect.X by the previous image's width. Returns true.
// NOTE(review): RenderQuad recreates the GL texture on every call, so all
// four images are re-uploaded each frame - consider caching the textures.
public bool DrawGLScene()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    // Test bitmap
    Rectangle rect = new Rectangle();
    rect.Size = testBitmap.Size;
    RenderQuad(testBitmap, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test JPEG
    rect.Size = testJpeg.Size;
    RenderQuad(testJpeg, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test PNG
    rect.Size = testPng.Size;
    RenderQuad(testPng, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test GIF
    rect.Size = testGif.Size;
    RenderQuad(testGif, rect);
    RenderLines(rect);
    return true;
}
// Draws the bitmap as a filled, textured quad covering rect. Texture t-coords
// are flipped (t=1 at the top vertices) - presumably to compensate for bitmap
// rows being stored top-down while GL texture space starts at the bottom;
// confirm against the uploaded data orientation.
// NOTE(review): GetTexture generates a brand-new texture on every call, which
// is expensive when invoked once per image per frame.
private void RenderQuad(Bitmap image, Rectangle rect)
{
    uint[] imageryTexture = GetTexture(image);
    // Color all the vertices white with transparency
    glEnable(GL_TEXTURE_2D);
    glColor4ub(255, 255, 255, 255);
    ///////////////////////////////////////////////////
    // Draw the image of the area.
    ///////////////////////////////////////////////////
    // Fill the path with the background transparent color.
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    glBindTexture(GL_TEXTURE_2D, imageryTexture[0]);
    // Building a quad of the image
    glBegin(GL_QUADS);
    // top left of texture
    glTexCoord2f(0, 1);
    glVertex2i(rect.Left, rect.Top);
    // top right of texture
    glTexCoord2f(1, 1);
    glVertex2i(rect.Right, rect.Top);
    // bottom right of texture
    glTexCoord2f(1, 0);
    glVertex2i(rect.Right, rect.Bottom);
    // bottom left of texture
    glTexCoord2f(0, 0);
    glVertex2i(rect.Left, rect.Bottom);
    glEnd();
}
// Outlines rect by drawing the same quad in wireframe (GL_LINE polygon mode)
// in a solid Tomato colour. Leaves polygon mode set to GL_LINE; RenderQuad
// switches it back to GL_FILL before each filled draw.
private void RenderLines(Rectangle rect)
{
    // Color all the vertices opaque Tomato.
    Color LineColor = Color.Tomato;
    glColor4ub(LineColor.R, LineColor.G, LineColor.B, 255);
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
    // Building a quad of the image
    glBegin(GL_QUADS);
    // top left of texture
    glVertex2i(rect.Left, rect.Top);
    // top right of texture
    glVertex2i(rect.Right, rect.Top);
    // bottom right of texture
    glVertex2i(rect.Right, rect.Bottom);
    // bottom left of texture
    glVertex2i(rect.Left, rect.Bottom);
    glEnd();
}
What am I doing wrong? I can load some bitmap files, but not others. PNGs, JPEGs, and GIFs don't work at all. All I get are white squares (from the vertex colors).
Here's the full code below. I'm using NeHe's lesson 2 code as basis for this code (since I can't post the company software code in its entirety):
public class OpenGLForm : Form
{
#region Member Variables
// Root folder containing the test images loaded in the constructor.
// FIX: the verbatim-string prefix in C# is '@'; the original '#' prefix is
// not valid syntax and would not compile.
private const string AbsolutePath = @"C:\<path-to-images>\";
/// <summary>
/// Required designer variable.
/// </summary>
private System.ComponentModel.Container components = null;
// Test images, one per file format, loaded once in the constructor.
private Bitmap testBitmap = null;
private Bitmap testJpeg = null;
private Bitmap testPng = null;
private Bitmap testGif = null;
// Current GL texture id (single-element array to match glGenTextures interop).
private uint[] mImageryTexture = null;
// Win32 window handle, device context and OpenGL rendering context.
private static uint _hwnd = 0;
private static uint _hDC = 0;
private static uint _hRC = 0;
// True while this application is the foreground app (maintained in WndProc).
private bool _appActive = true;
private bool _done = true;
/// <summary>Whether the outer loop in Main should stop recreating the form.</summary>
public bool Done
{
    get
    {
        return _done;
    }
    set
    {
        _done = value;
    }
}
#endregion
// Lots of OpenGL function and constant declaration here
#region Win32 Interop
// Constant values were found in the "WinUser.h" header file.
public const int WM_ACTIVATEAPP = 0x001C;
public const int WA_ACTIVE = 1;
public const int WA_CLICKACTIVE = 2;
public const int CDS_FULLSCREEN = 0x00000004; // Flag for ChangeDisplaySettings
public const int DISP_CHANGE_SUCCESSFUL = 0; // Return value for ChangeDisplaySettings
// Constant values were found in the "WinGDI.h" header file.
public const int CCHDEVICENAME = 32; // size of a device name string
public const int CCHFORMNAME = 32; // size of a form name string
public const int DM_BITSPERPEL = 0x40000;
public const int DM_PELSWIDTH = 0x80000;
public const int DM_PELSHEIGHT = 0x100000;
public const int BITSPIXEL = 12; // number of bits per pixel
public const uint PFD_DOUBLEBUFFER = 0x00000001; // PIXELFORMATDESCRIPTOR flag
public const uint PFD_DRAW_TO_WINDOW = 0x00000004; // PIXELFORMATDESCRIPTOR flag
public const uint PFD_SUPPORT_OPENGL = 0x00000020; // PIXELFORMATDESCRIPTOR flag
public const uint PFD_TYPE_RGBA = 0; // pixel type
public const uint PFD_MAIN_PLANE = 0; // layer type
// Managed mirror of the Win32 PIXELFORMATDESCRIPTOR (WinGDI.h) passed to
// ChoosePixelFormat/SetPixelFormat. Sequential layout: field order and sizes
// must match the native struct exactly - do not reorder.
[StructLayout(LayoutKind.Sequential)]
public struct PIXELFORMATDESCRIPTOR
{
    public ushort nSize;        // size of this struct in bytes
    public ushort nVersion;     // descriptor version (1)
    public uint dwFlags;        // PFD_* flag combination
    public byte iPixelType;     // PFD_TYPE_RGBA etc.
    public byte cColorBits;
    public byte cRedBits;
    public byte cRedShift;
    public byte cGreenBits;
    public byte cGreenShift;
    public byte cBlueBits;
    public byte cBlueShift;
    public byte cAlphaBits;
    public byte cAlphaShift;
    public byte cAccumBits;
    public byte cAccumRedBits;
    public byte cAccumGreenBits;
    public byte cAccumBlueBits;
    public byte cAccumAlphaBits;
    public byte cDepthBits;
    public byte cStencilBits;
    public byte cAuxBuffers;
    public byte iLayerType;
    public byte bReserved;
    public uint dwLayerMask;
    public uint dwVisibleMask;
    public uint dwDamageMask;
}
// by marking the structure with CharSet.Auto, the structure will get marshaled as Unicode characters
// on Unicode platforms, if not the name fields would always get marshaled as arrays of ANSI characters
// Managed mirror of the Win32 DEVMODE structure (display/printer settings)
// used with ChangeDisplaySettings. Field order must match the native layout.
[StructLayout(LayoutKind.Sequential, CharSet = CharSet.Auto)]
public class DEVMODE
{
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = CCHDEVICENAME)]
    public char[] dmDeviceName;
    public short dmSpecVersion;
    public short dmDriverVersion;
    public short dmSize;
    public short dmDriverExtra;
    public int dmFields;          // DM_* bitmask saying which members are valid
    public DEVMODE_UNION u;       // printer- vs display-specific union
    public short dmColor;
    public short dmDuplex;
    public short dmYResolution;
    public short dmTTOption;
    public short dmCollate;
    [MarshalAs(UnmanagedType.ByValArray, SizeConst = CCHFORMNAME)]
    public char[] dmFormName;
    public short dmLogPixels;
    public int dmBitsPerPel;      // colour depth for ChangeDisplaySettings
    public int dmPelsWidth;       // display width in pixels
    public int dmPelsHeight;      // display height in pixels
    public int dmDisplayFlagsOrdmNup; // union of dmDisplayFlags and dmNup
    public int dmDisplayFrequency;
    public int dmICMMethod;
    public int dmICMIntent;
    public int dmMediaType;
    public int dmDitherType;
    public int dmReserved1;
    public int dmReserved2;
    public int dmPanningWidth;
    public int dmPanningHeight;
}
// modeling a union in C#, each possible struct data type starts at FieldOffset 0
// Models the anonymous union inside native DEVMODE: each alternative layout
// starts at FieldOffset 0 (printer fields vs. display position/orientation).
[StructLayout(LayoutKind.Explicit)]
public struct DEVMODE_UNION
{
    // Printer interpretation of the union.
    [FieldOffset(0)]
    public short dmOrientation;
    [FieldOffset(2)]
    public short dmPaperSize;
    [FieldOffset(4)]
    public short dmPaperLength;
    [FieldOffset(6)]
    public short dmPaperWidth;
    [FieldOffset(8)]
    public short dmScale;
    [FieldOffset(10)]
    public short dmCopies;
    [FieldOffset(12)]
    public short dmDefaultSource;
    [FieldOffset(14)]
    public short dmPrintQuality;
    // Display interpretation: POINTL dmPosition overlaps the printer fields.
    [FieldOffset(0)]
    public int dmPosition_x;
    [FieldOffset(4)]
    public int dmPosition_y;
    [FieldOffset(0)]
    public int dmDisplayOrientation;
    [FieldOffset(0)]
    public int dmDisplayFixedOutput;
}
#endregion
#region OpenGLSetup
/// <summary>
/// Fills a PIXELFORMATDESCRIPTOR requesting a double-buffered RGBA OpenGL
/// format at the device's current colour depth and applies it to the given
/// device context. Shows an error box and returns false if no matching
/// format is found or it cannot be set.
/// </summary>
private bool SetupPixelFormat(ref uint hdc)
{
    PIXELFORMATDESCRIPTOR pfd = new PIXELFORMATDESCRIPTOR();
    ushort pfdSize = (ushort)Marshal.SizeOf(typeof(PIXELFORMATDESCRIPTOR)); // sizeof(PIXELFORMATDESCRIPTOR)
    pfd.nSize = pfdSize; // size of pfd
    pfd.nVersion = 1; // version number
    pfd.dwFlags = (PFD_SUPPORT_OPENGL | PFD_DRAW_TO_WINDOW | PFD_DOUBLEBUFFER); // flags
    pfd.iPixelType = (byte)PFD_TYPE_RGBA; // RGBA type
    pfd.cColorBits = (byte)GetDeviceCaps(hdc, BITSPIXEL); // color depth
    pfd.cRedBits = 0; // color bits ignored
    pfd.cRedShift = 0;
    pfd.cGreenBits = 0;
    pfd.cGreenShift = 0;
    pfd.cBlueBits = 0;
    pfd.cBlueShift = 0;
    pfd.cAlphaBits = 0; // no alpha buffer
    pfd.cAlphaShift = 0; // shift bit ignored
    pfd.cAccumBits = 0; // no accumulation buffer
    pfd.cAccumRedBits = 0; // accum bits ignored
    pfd.cAccumGreenBits = 0;
    pfd.cAccumBlueBits = 0;
    pfd.cAccumAlphaBits = 0;
    pfd.cDepthBits = 32; // 32-bit z-buffer
    pfd.cStencilBits = 0; // no stencil buffer
    pfd.cAuxBuffers = 0; // no auxiliary buffer
    pfd.iLayerType = (byte)PFD_MAIN_PLANE; // main layer
    pfd.bReserved = 0; // reserved
    pfd.dwLayerMask = 0; // layer masks ignored
    pfd.dwVisibleMask = 0;
    pfd.dwDamageMask = 0;
    int pixelformat = ChoosePixelFormat(hdc, ref pfd);
    if (pixelformat == 0) // Did Windows Find A Matching Pixel Format?
    {
        MessageBox.Show("Can't Find A Suitable PixelFormat.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return false;
    }
    if (SetPixelFormat(hdc, pixelformat, ref pfd) == 0) // Are We Able To Set The Pixel Format?
    {
        MessageBox.Show("Can't Set The PixelFormat.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return false;
    }
    return true;
}
/// <summary>
/// One-time OpenGL state setup: shading, clear colour/depth, and blending.
/// Always returns true.
/// </summary>
private bool InitGL() // All Setup For OpenGL Goes Here
{
    glShadeModel(GL_SMOOTH); // Enable Smooth Shading
    glClearColor(0.0f, 0.0f, 0.0f, 0.5f); // Black Background
    glClearDepth(1.0f); // Depth Buffer Setup
    // Add alpha blending support
    glDepthFunc(GL_LEQUAL);
    glDisable(GL_DEPTH_TEST);
    glEnable(GL_TEXTURE_2D);
    glEnable(GL_BLEND);
    // NOTE(review): GL_SRC_ALPHA/GL_ONE is *additive* blending; conventional
    // alpha transparency uses GL_SRC_ALPHA/GL_ONE_MINUS_SRC_ALPHA. Confirm
    // which effect is actually intended before changing it.
    glBlendFunc(GL_SRC_ALPHA, GL_ONE);
    return true; // Initialization Went OK
}
/// <summary>
/// Creates the window, obtains its device context, chooses/sets the pixel
/// format, creates and activates the OpenGL rendering context, applies the
/// initial viewport/projection, and runs one-time GL setup. Returns false
/// (after an error box) if any step fails.
/// </summary>
public bool SetupRenderingContext()
{
    if (!CreateGLWindow())
    {
        return false; // initialization failed, quit
    }
    _hwnd = (uint)((this.Handle).ToInt32());
    _hDC = GetDC(_hwnd);
    if (_hDC == 0)
    {
        MessageBox.Show("Can't Create A GL Device Context", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return false;
    }
    // not doing the following wglSwapBuffers() on the DC will result in a failure to subsequently create the RC
    wglSwapBuffers(_hDC);
    if (!SetupPixelFormat(ref _hDC))
    {
        return false;
    }
    // create the rendering context and make it current
    _hRC = wglCreateContext(_hDC);
    if (_hRC == 0) // Are We Able To Get A Rendering Context?
    {
        MessageBox.Show("Can't Create A GL Rendering Context.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return false;
    }
    if (!wglMakeCurrent(_hDC, _hRC)) // Try To Activate The Rendering Context
    {
        MessageBox.Show("Can't Activate The GL Rendering Context.", "Error", MessageBoxButtons.OK, MessageBoxIcon.Error);
        return false;
    }
    OpenGLForm_Resize(this, new EventArgs()); // Set up the perspective GL screen
    return InitGL(); // Initialize Our Newly Created GL Window
}
#endregion
#region FormSetup
/// <summary>
/// Prepares the form for OpenGL rendering: hooks the resize handler, applies
/// a normal resizable windowed style, and makes sure the cursor is visible.
/// Always returns true.
/// </summary>
private bool CreateGLWindow()
{
    Resize += new EventHandler(OpenGLForm_Resize);
    TopMost = false;
    WindowState = System.Windows.Forms.FormWindowState.Normal;
    FormBorderStyle = System.Windows.Forms.FormBorderStyle.Sizable;
    // ShowCursor maintains an internal display counter; the cursor is shown
    // only once that counter reaches zero or above, so keep incrementing.
    while (ShowCursor(true) < 0)
    {
    }
    return true;
}
#endregion
#region Constructor/Destructor
/// <summary>
/// Initializes the form and loads the four test images (BMP, JPEG, PNG, GIF)
/// from the hard-coded AbsolutePath folder.
/// </summary>
public OpenGLForm()
{
    //
    // Required for Windows Form Designer support
    //
    InitializeComponent();
    testBitmap = LoadBitmap(AbsolutePath + "bitmap.bmp");
    testJpeg = LoadBitmap(AbsolutePath + "jpeg.jpg");
    testPng = LoadBitmap(AbsolutePath + "png.png");
    testGif = LoadBitmap(AbsolutePath + "gif.gif");
}
/// <summary>
/// Clean up any resources being used.
/// </summary>
/// <summary>
/// Clean up any resources being used: designer components first, then the
/// OpenGL rendering context, then the window's device context.
/// </summary>
protected override void Dispose(bool disposing)
{
    if (disposing)
    {
        if (components != null)
        {
            components.Dispose();
        }
        if (_hRC != 0) // Do We Have A Rendering Context?
        {
            // Detach the RC from this thread before deleting it.
            if (!wglMakeCurrent(0, 0)) // Are We Able To Release The DC And RC Contexts?
            {
                MessageBox.Show("Release Of DC And RC Failed.", "Shutdown Error", MessageBoxButtons.OK, MessageBoxIcon.Information);
            }
            if (!wglDeleteContext(_hRC)) // Are We Able To Delete The RC?
            {
                MessageBox.Show("Release Rendering Context Failed.", "Shutdown Error", MessageBoxButtons.OK, MessageBoxIcon.Information);
            }
        }
        if (_hDC != 0 && ReleaseDC(_hwnd, _hDC) == 0) // Are We Able To Release The DC
        {
            MessageBox.Show("Release Device Context Failed.", "Shutdown Error", MessageBoxButtons.OK, MessageBoxIcon.Information);
        }
    }
    base.Dispose(disposing);
}
/// <summary>
/// Loads an image file and re-renders it into a fresh 24bpp RGB bitmap so
/// every supported format (BMP/JPEG/PNG/GIF) ends up in one known pixel
/// format for texturing. Note: drawing onto 24bpp discards any alpha channel
/// the source image had.
/// </summary>
/// <param name="filename">Path of the image file to load.</param>
/// <returns>A new 24bpp bitmap the caller owns and must dispose.</returns>
private Bitmap LoadBitmap(string filename)
{
    // FIX: 'using' guarantees the source image is released even if DrawImage
    // throws; the old code only disposed it on the success path.
    using (Bitmap original = new Bitmap(filename))
    {
        Bitmap toReturn = new Bitmap(original.Width, original.Height, PixelFormat.Format24bppRgb);
        using (Graphics gr = Graphics.FromImage(toReturn))
        {
            gr.DrawImage(original, new Rectangle(0, 0, toReturn.Width, toReturn.Height));
        }
        return toReturn;
    }
}
#endregion
#region Windows Form Designer generated code
/// <summary>
/// Required method for Designer support - do not modify
/// the contents of this method with the code editor.
/// </summary>
// Designer-generated initialization (window size, title, key preview).
private void InitializeComponent()
{
    //
    // OpenGLForm
    //
    this.AutoScaleBaseSize = new System.Drawing.Size(5, 13);
    this.ClientSize = new System.Drawing.Size(632, 453);
    this.KeyPreview = true;
    this.Name = "OpenGLForm";
    this.StartPosition = System.Windows.Forms.FormStartPosition.CenterScreen;
    this.Text = "\"NeHe\'s First Polygon Tutorial\"";
    this.KeyUp += new System.Windows.Forms.KeyEventHandler(this.OpenGLForm_KeyUp);
}
#endregion
#region Events
/// <summary>
/// Tracks application activation so OnPaint can skip rendering while this
/// app is in the background; all messages still flow to the base handler.
/// </summary>
protected override void WndProc(ref System.Windows.Forms.Message m)
{
    // WM_ACTIVATEAPP arrives when this application gains or loses focus;
    // WParam says which of the two happened.
    if (m.Msg == WM_ACTIVATEAPP)
    {
        int activation = (int)m.WParam;
        _appActive = activation == WA_ACTIVE || activation == WA_CLICKACTIVE;
        // Invalidate to get a fresh scene painted under the new state.
        Invalidate();
    }
    base.WndProc(ref m);
}
/*!
This will stop the display from flickering on Paint event
*/
// Intentionally empty: OnPaint repaints the entire client area with OpenGL
// every frame, so skipping the GDI background erase removes the flicker.
protected override void OnPaintBackground(PaintEventArgs e)
{
}
/// <summary>
/// Render loop driven by WM_PAINT: draws the GL scene, presents the back
/// buffer, then immediately invalidates to schedule the next frame. Skipped
/// entirely while the application is inactive (see WndProc).
/// </summary>
protected override void OnPaint(PaintEventArgs e)
{
    // make sure the app is active
    if (_appActive)
    {
        DrawGLScene();
        wglSwapBuffers(_hDC); // present the double-buffered frame
        // Re-invalidate so painting repeats continuously (paint -> invalidate -> paint).
        Invalidate();
    }
}
/// <summary>Closes the form when the user releases the Escape key.</summary>
private void OpenGLForm_KeyUp(object sender, System.Windows.Forms.KeyEventArgs e)
{
    if (e.KeyCode == Keys.Escape)
    {
        Close();
    }
}
/// <summary>
/// Resize handler: rebuilds the orthographic projection (top-left origin)
/// and the viewport to match the client area.
/// </summary>
private void OpenGLForm_Resize(object sender, EventArgs e) // Resize And Initialize The GL Window
{
    int width = ClientRectangle.Width;
    int height = ClientRectangle.Height;
    if (height == 0) // Prevent A Divide By Zero By
    {
        height = 1; // Making Height Equal One
    }
    // Switch to projection mode
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    // Ortho-graphic camera with top left as (0, 0) and bottom right as the
    // client size.
    // FIX: use the guarded width/height locals. The old code computed them
    // and then passed ClientRectangle.* directly, so the divide-by-zero
    // guard above was dead code.
    gluOrtho2D(0, width, height, 0);
    glViewport(0, 0, width, height);
    // Switch back to model view
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity(); // Reset The Modelview Matrix
}
#endregion
/// <summary>
/// The main entry point for the application.
/// </summary>
[STAThread]
static void Main()
{
    // Recreate the form and GL context in a loop - presumably to allow a
    // restart (e.g. display-mode change) when the form closes without Done
    // being set; confirm intent. Exits when setup fails or Done is true.
    while (true)
    {
        OpenGLForm form = new OpenGLForm(); // create the form
        if (!form.SetupRenderingContext()) // setup form and OpenGL
        {
            break; // initialization failed, quit
        }
        Application.Run(form);
        if (form.Done) // Was There A Quit Received?
        {
            form.DisposeTexture();
            break;
        }
    }
}
// Renders one frame: clears the buffers, then draws the four test images
// (BMP, JPEG, PNG, GIF) left to right as textured quads, each outlined via
// RenderLines, advancing rect.X by the previous image's width. Returns true.
// NOTE(review): RenderQuad recreates the GL texture on every call, so all
// four images are re-uploaded each frame - consider caching the textures.
public bool DrawGLScene()
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glLoadIdentity();
    // Test bitmap
    Rectangle rect = new Rectangle();
    rect.Size = testBitmap.Size;
    RenderQuad(testBitmap, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test JPEG
    rect.Size = testJpeg.Size;
    RenderQuad(testJpeg, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test PNG
    rect.Size = testPng.Size;
    RenderQuad(testPng, rect);
    RenderLines(rect);
    rect.X += rect.Width;
    // Test GIF
    rect.Size = testGif.Size;
    RenderQuad(testGif, rect);
    RenderLines(rect);
    return true;
}
// Draws the bitmap as a filled, textured quad covering rect. Texture t-coords
// are flipped (t=1 at the top vertices) - presumably to compensate for bitmap
// rows being stored top-down while GL texture space starts at the bottom;
// confirm against the uploaded data orientation.
// NOTE(review): GetTexture generates a brand-new texture on every call, which
// is expensive when invoked once per image per frame.
private void RenderQuad(Bitmap image, Rectangle rect)
{
    uint[] imageryTexture = GetTexture(image);
    // Color all the vertices white with transparency
    glEnable(GL_TEXTURE_2D);
    glColor4ub(255, 255, 255, 255);
    ///////////////////////////////////////////////////
    // Draw the image of the area.
    ///////////////////////////////////////////////////
    // Fill the path with the background transparent color.
    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
    glBindTexture(GL_TEXTURE_2D, imageryTexture[0]);
    // Building a quad of the image
    glBegin(GL_QUADS);
    // top left of texture
    glTexCoord2f(0, 1);
    glVertex2i(rect.Left, rect.Top);
    // top right of texture
    glTexCoord2f(1, 1);
    glVertex2i(rect.Right, rect.Top);
    // bottom right of texture
    glTexCoord2f(1, 0);
    glVertex2i(rect.Right, rect.Bottom);
    // bottom left of texture
    glTexCoord2f(0, 0);
    glVertex2i(rect.Left, rect.Bottom);
    glEnd();
}
// Draws the outline of the given rectangle as a wireframe quad.
// FIX: texturing must be disabled here — RenderQuad leaves GL_TEXTURE_2D
// enabled, and in the fixed-function pipeline an enabled texture modulates
// the vertex color (at the last glTexCoord), tinting the outline instead
// of drawing it in the requested solid color.
private void RenderLines(Rectangle rect)
{
    glDisable(GL_TEXTURE_2D);

    // Solid, fully opaque outline color.
    Color lineColor = Color.Tomato;
    glColor4ub(lineColor.R, lineColor.G, lineColor.B, 255);

    // Wireframe polygons so the quad renders as its four edges.
    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    glBegin(GL_QUADS);
    glVertex2i(rect.Left, rect.Top);     // top left
    glVertex2i(rect.Right, rect.Top);    // top right
    glVertex2i(rect.Right, rect.Bottom); // bottom right
    glVertex2i(rect.Left, rect.Bottom);  // bottom left
    glEnd();
}
// Creates an OpenGL texture from the given bitmap, stores its id in
// mImageryTexture, and returns that one-element array (null if the bitmap
// is null). Any previously held texture is deleted first, so only one
// texture exists at a time.
// NOTE(review): RenderQuad calls this every frame, re-uploading the image
// each time — consider caching one texture per image.
// NOTE(review): no mipmaps are generated and, per the author's note after
// this listing, the upload only renders correctly when the bitmap
// dimensions are powers of two (legacy GL without
// GL_ARB_texture_non_power_of_two).
private uint[] GetTexture(Bitmap convertToTexture)
{
// Setup return value
uint[] toReturn = null;
// Make sure the device and the bitmap exists
if (convertToTexture != null)
{
// Dispose the imagery first
DisposeTexture();
mImageryTexture = new uint[1];
// Setup the bitmap: lock the whole image read-only as 24bpp for upload.
Rectangle rect = new Rectangle(Point.Empty, convertToTexture.Size);
BitmapData bitmapdata = convertToTexture.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
// Bind the texture
glGenTextures(1, mImageryTexture);
glBindTexture(GL_TEXTURE_2D, mImageryTexture[0]);
// Upload the pixels; GDI+ 24bpp memory layout is blue-green-red,
// hence GL_BGR_EXT as the source format.
glTexImage2D(GL_TEXTURE_2D, 0, (int)GL_RGB8, convertToTexture.Width, convertToTexture.Height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, bitmapdata.Scan0);
// Linear Filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Clamp texture coordinates instead of repeating at the edges.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
// Release the bitmap
convertToTexture.UnlockBits(bitmapdata);
// Set the return value to this imagery
toReturn = mImageryTexture;
}
return toReturn;
}
// Disposes the currently held texture, if any, and clears the id array
// so the next GetTexture call starts fresh.
public void DisposeTexture()
{
    if (mImageryTexture == null)
        return;

    glDeleteTextures(1, mImageryTexture);
    mImageryTexture = null;
}
}
It turns out my code does work if the image dimensions are a power of two. It seems like I'll have to generate a larger image with the dimensions set to a power of two, and change the UV values to exclude the newly added padding.
I'm currently attempting to take snapshot of a specified portion of my application's window from a specified starting coordinate (which is where my problem comes in).
Rectangle bounds = new Rectangle((this.Width/2)-400,(this.Height/2)-200, 800,400);
using (Bitmap bitmap = new Bitmap(bounds.Width, bounds.Height, PixelFormat.Format32bppArgb))
{
using (Graphics graphics = Graphics.FromImage(bitmap))
{
IntPtr hdc = graphics.GetHdc();
PrintWindow(this.axS.Handle, hdc, 0);
graphics.ReleaseHdc(hdc);
graphics.Flush();
string file = "example.png";
bitmap.Save(file, ImageFormat.Png);
}
}
I'm attempting to make a dynamic-adaptive method to take a screenshot of the center of the window, even after being resized. I'm not sure how to apply x and y to the screenshot as a starting point for the screenshot. Dimensions will always remain 800,400 and always taking a screenshot of the center of the application regardless of window size.
In every attempt I have made, the bitmap captured the screenshot starting from 0 (+800), 0 (+400); it is that 0, 0 starting point that I need to change.
Is Bitmap capable of this? If not, what other method could I use?
You can use SetViewportOrgEx to set the origin on the HDC. I found that the title bar of the window was throwing off the calculation of the center point, so I took that into account as well.
int x = (this.Width / 2) - 400;
int y = ((this.Height + SystemInformation.CaptionHeight) / 2) - 200;
Rectangle bounds = new Rectangle(x, y, 800, 400);
using (Bitmap bitmap = new Bitmap(bounds.Width, bounds.Height, PixelFormat.Format32bppArgb))
{
using (Graphics graphics = Graphics.FromImage(bitmap))
{
IntPtr hdc = graphics.GetHdc();
POINT pt;
SetViewportOrgEx(hdc, -x, -y, out pt);
// rest as before
}
}
And the signatures for SetViewportOrgEx and POINT:
// Moves the viewport origin of the device context; used above to shift the
// PrintWindow output so the desired region lands at (0, 0) of the bitmap.
// lpPoint receives the previous origin.
[DllImport("gdi32.dll")]
static extern bool SetViewportOrgEx(IntPtr hdc, int X, int Y, out POINT lpPoint);
// Managed mirror of the Win32 POINT structure, with implicit conversions
// to and from System.Drawing.Point for convenience.
[StructLayout(LayoutKind.Sequential)]
public struct POINT
{
public int X;
public int Y;
public POINT(int x, int y)
{
this.X = x;
this.Y = y;
}
public static implicit operator System.Drawing.Point(POINT p)
{
return new System.Drawing.Point(p.X, p.Y);
}
public static implicit operator POINT(System.Drawing.Point p)
{
return new POINT(p.X, p.Y);
}
}
Instead of using PrintWindow try using Graphics.CopyFromScreen which allows you to specify both an upper-left corner as well as dimensions.
http://msdn.microsoft.com/en-us/library/6yfzc507.aspx
Performs a bit-block transfer of color data, corresponding to a rectangle of pixels, from the screen to the drawing surface of the Graphics.
CopyFromScreen works on screen coordinates so you'll have to calculate that for the call.
BACKGROUND
I am writing a screen capture application
My code is based derived from this project: http://www.codeproject.com/KB/cs/DesktopCaptureWithMouse.aspx?display=Print
Note that the code captures the mouse cursor also (which is desirable for me)
MY PROBLEM
Code works fine when the mouse cursor is the normal pointer or hand icon - the mouse is rendered correctly on the screenshot
However, when the mouse cursor is changed to the insertion point (the "I-beam" cursor) - for example, when typing in Notepad - the code doesn't work - the result is that I get a faint image of the cursor - a very translucent (gray) version of it instead of the black & white one you would expect.
MY QUESTION
How can I capture the mouse cursor image when the image is one of these "I-beam"-type images
NOTE: If you click on the original article someone offers a suggestion - it doesn't work
SOURCE
This is from the original article.
// Captures the current cursor as a Bitmap; x and y receive the
// hotspot-adjusted screen position of the cursor image. Returns null when
// the cursor is hidden or its icon info cannot be retrieved.
// NOTE(review): for monochrome cursors (e.g. the I-beam) the AND/XOR mask
// halves are never composited against a background here, which per the
// discussion below is why they come out as a faint gray image — see the
// corrected versions later in this thread.
// NOTE(review): the handle returned by CopyIcon is never released with
// DestroyIcon, so repeated calls leak icon handles — confirm and fix in a
// long-running capture loop.
static Bitmap CaptureCursor(ref int x, ref int y)
{
Bitmap bmp;
IntPtr hicon;
Win32Stuff.CURSORINFO ci = new Win32Stuff.CURSORINFO();
Win32Stuff.ICONINFO icInfo;
// cbSize must be initialized before GetCursorInfo will succeed.
ci.cbSize = Marshal.SizeOf(ci);
if (Win32Stuff.GetCursorInfo(out ci))
{
if (ci.flags == Win32Stuff.CURSOR_SHOWING)
{
hicon = Win32Stuff.CopyIcon(ci.hCursor);
if (Win32Stuff.GetIconInfo(hicon, out icInfo))
{
// Top-left corner of the cursor image = screen position - hotspot.
x = ci.ptScreenPos.x - ((int)icInfo.xHotspot);
y = ci.ptScreenPos.y - ((int)icInfo.yHotspot);
Icon ic = Icon.FromHandle(hicon);
bmp = ic.ToBitmap();
return bmp;
}
}
}
return null;
}
While I can't explain exactly why this happens, I think I can show how to get around it.
The ICONINFO struct contains two members, hbmMask and hbmColor, that contain the mask and color bitmaps, respectively, for the cursor (see the MSDN page for ICONINFO for the official documentation).
When you call GetIconInfo() for the default cursor, the ICONINFO struct contains both valid mask and color bitmaps, as shown below (Note: the red border has been added to clearly show the image boundaries):
Default Cursor Mask Bitmap
Default Cursor Color Bitmap
When Windows draws the default cursor, the mask bitmap is first applied with an AND raster operation, then the color bitmap is applied with an XOR raster operation. This results in an opaque cursor and a transparent background.
When you call GetIconInfo() for the I-Beam cursor, though, the ICONINFO struct only contains a valid mask bitmap, and no color bitmap, as shown below (Note: again, the red border has been added to clearly show the image boundaries):
I-Beam Cursor Mask Bitmap
According to the ICONINFO documentation, the I-Beam cursor is then a monochrome cursor. The top half of the mask bitmap is the AND mask, and the bottom half of the mask bitmap is the XOR bitmap. When Windows draws the I-Beam cursor, the top half of this bitmap is first drawn over the desktop with an AND raster operation. The bottom half of the bitmap is then drawn over top with an XOR raster operation. Onscreen, the cursor will appear as the inverse of the content behind it.
One of the comments for the original article that you linked mentions this. On the desktop, since the raster operations are applied over the desktop content, the cursor will appear correct. However, when the image is drawn over no background, as in your posted code, the raster operations that Windows performs result in a faded image.
That being said, this updated CaptureCursor() method will handle both color and monochrome cursors, supplying a plain black cursor image when the cursor is monochrome.
// Captures the current cursor, handling both color and monochrome cursors.
// x and y receive the hotspot-adjusted screen position. Monochrome cursors
// (e.g. the I-beam) are rendered as a plain black image over a transparent
// background instead of the faded result a bare Icon.ToBitmap() gives.
static Bitmap CaptureCursor(ref int x, ref int y)
{
Win32Stuff.CURSORINFO cursorInfo = new Win32Stuff.CURSORINFO();
cursorInfo.cbSize = Marshal.SizeOf(cursorInfo);
if (!Win32Stuff.GetCursorInfo(out cursorInfo))
return null;
if (cursorInfo.flags != Win32Stuff.CURSOR_SHOWING)
return null;
IntPtr hicon = Win32Stuff.CopyIcon(cursorInfo.hCursor);
if (hicon == IntPtr.Zero)
return null;
Win32Stuff.ICONINFO iconInfo;
if (!Win32Stuff.GetIconInfo(hicon, out iconInfo))
return null;
// Top-left corner of the cursor image = screen position - hotspot.
x = cursorInfo.ptScreenPos.x - ((int)iconInfo.xHotspot);
y = cursorInfo.ptScreenPos.y - ((int)iconInfo.yHotspot);
using (Bitmap maskBitmap = Bitmap.FromHbitmap(iconInfo.hbmMask))
{
// Is this a monochrome cursor? Its mask stacks the AND mask on top of
// the XOR mask, so the bitmap is twice as tall as it is wide.
if (maskBitmap.Height == maskBitmap.Width * 2)
{
Bitmap resultBitmap = new Bitmap(maskBitmap.Width, maskBitmap.Width);
Graphics desktopGraphics = Graphics.FromHwnd(Win32Stuff.GetDesktopWindow());
IntPtr desktopHdc = desktopGraphics.GetHdc();
IntPtr maskHdc = Win32Stuff.CreateCompatibleDC(desktopHdc);
// GetHbitmap() creates a copy; it comes back as newPtr from the
// second SelectObject call below and is deleted there.
IntPtr oldPtr = Win32Stuff.SelectObject(maskHdc, maskBitmap.GetHbitmap());
using (Graphics resultGraphics = Graphics.FromImage(resultBitmap))
{
IntPtr resultHdc = resultGraphics.GetHdc();
// These two operation will result in a black cursor over a white background.
// Later in the code, a call to MakeTransparent() will get rid of the white background.
// (SRCCOPY of the XOR half at row 32, then SRCINVERT of the AND half at row 0.)
// NOTE(review): the 32x32 size is hard-coded — confirm against
// maskBitmap.Width for non-standard cursor sizes.
Win32Stuff.BitBlt(resultHdc, 0, 0, 32, 32, maskHdc, 0, 32, Win32Stuff.TernaryRasterOperations.SRCCOPY);
Win32Stuff.BitBlt(resultHdc, 0, 0, 32, 32, maskHdc, 0, 0, Win32Stuff.TernaryRasterOperations.SRCINVERT);
resultGraphics.ReleaseHdc(resultHdc);
}
IntPtr newPtr = Win32Stuff.SelectObject(maskHdc, oldPtr);
Win32Stuff.DeleteObject(newPtr);
Win32Stuff.DeleteDC(maskHdc);
desktopGraphics.ReleaseHdc(desktopHdc);
// Remove the white background from the BitBlt calls,
// resulting in a black cursor over a transparent background.
resultBitmap.MakeTransparent(Color.White);
// NOTE(review): hicon is never destroyed and desktopGraphics is never
// disposed on either path — the "patched version" later in this
// thread addresses these leaks.
return resultBitmap;
}
}
Icon icon = Icon.FromHandle(hicon);
return icon.ToBitmap();
}
There are some issues with the code that may or may not be a problem.
The check for a monochrome cursor simply tests whether the height is twice the width. While this seems logical, the ICONINFO documentation does not mandate that only a monochrome cursor is defined by this.
There is probably a better way to render the cursor that the BitBlt() - BitBlt() - MakeTransparent() combination of method calls I used.
// Managed mirror of the Win32 CURSORINFO structure used by GetCursorInfo.
[StructLayout(LayoutKind.Sequential)]
struct CURSORINFO
{
public Int32 cbSize; // size of this structure; must be set before calling GetCursorInfo
public Int32 flags; // cursor state; equals CURSOR_SHOWING when the cursor is visible
public IntPtr hCursor; // handle to the current cursor
public POINTAPI ptScreenPos; // cursor position in screen coordinates
}
// Managed mirror of the Win32 POINT structure.
[StructLayout(LayoutKind.Sequential)]
struct POINTAPI
{
public int x;
public int y;
}
// Retrieves global cursor information (visibility, handle, screen position).
[DllImport("user32.dll")]
static extern bool GetCursorInfo(out CURSORINFO pci);
// Draws an icon or cursor at (X, Y) on the given device context, applying
// the cursor's own mask raster operations.
[DllImport("user32.dll")]
static extern bool DrawIcon(IntPtr hDC, int X, int Y, IntPtr hIcon);
// Value of CURSORINFO.flags indicating the cursor is shown on screen.
const Int32 CURSOR_SHOWING = 0x00000001;
// Captures the primary screen, optionally drawing the current mouse cursor
// on top of the captured image. Returns null when the capture fails.
public static Bitmap CaptureScreen(bool CaptureMouse)
{
    Bitmap result = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height, PixelFormat.Format24bppRgb);
    try
    {
        using (Graphics g = Graphics.FromImage(result))
        {
            // Grab the entire primary screen into the bitmap.
            g.CopyFromScreen(0, 0, 0, 0, Screen.PrimaryScreen.Bounds.Size, CopyPixelOperation.SourceCopy);
            if (CaptureMouse)
            {
                CURSORINFO pci;
                pci.cbSize = System.Runtime.InteropServices.Marshal.SizeOf(typeof(CURSORINFO));
                // Only draw the cursor when it is actually visible.
                if (GetCursorInfo(out pci) && pci.flags == CURSOR_SHOWING)
                {
                    // DrawIcon applies the cursor's own raster operations, so
                    // monochrome cursors (e.g. the I-beam) render correctly
                    // over the captured background.
                    // NOTE(review): the cursor hotspot is not subtracted here,
                    // so the drawn cursor may be slightly offset — confirm.
                    IntPtr hdc = g.GetHdc();
                    DrawIcon(hdc, pci.ptScreenPos.x, pci.ptScreenPos.y, pci.hCursor);
                    g.ReleaseHdc(hdc);
                }
            }
        }
    }
    catch
    {
        // FIX: dispose the partially drawn bitmap before signalling failure;
        // the original only dropped the reference, leaking the GDI bitmap.
        result.Dispose();
        result = null;
    }
    return result;
}
Here's a modified version of Dimitar's response (using DrawIconEx) that worked for me on multiple screens:
/// <summary>
/// Captures screenshots of the full virtual desktop, the primary screen, or
/// an arbitrary rectangle, optionally overlaying the mouse cursor with
/// DrawIconEx — which also renders monochrome cursors (e.g. the I-beam)
/// correctly on multiple-monitor setups.
/// </summary>
public class ScreenCapturePInvoke
{
    [StructLayout(LayoutKind.Sequential)]
    private struct CURSORINFO
    {
        public Int32 cbSize;    // must be set to the structure size before GetCursorInfo
        public Int32 flags;     // CURSOR_SHOWING when the cursor is visible
        public IntPtr hCursor;  // handle to the current cursor
        public POINTAPI ptScreenPos; // cursor position in screen coordinates
    }

    [StructLayout(LayoutKind.Sequential)]
    private struct POINTAPI
    {
        public int x;
        public int y;
    }

    [DllImport("user32.dll")]
    private static extern bool GetCursorInfo(out CURSORINFO pci);

    [DllImport("user32.dll", SetLastError = true)]
    static extern bool DrawIconEx(IntPtr hdc, int xLeft, int yTop, IntPtr hIcon, int cxWidth, int cyHeight, int istepIfAniCur, IntPtr hbrFlickerFreeDraw, int diFlags);

    private const Int32 CURSOR_SHOWING = 0x0001;
    private const Int32 DI_NORMAL = 0x0003; // draw using both the icon's image and mask

    /// <summary>
    /// Captures the bounding rectangle of all screens (the full virtual desktop).
    /// </summary>
    public static Bitmap CaptureFullScreen(bool captureMouse)
    {
        // Union of every monitor's bounds; monitors left of / above the
        // primary give negative coordinates, hence Min/Max over all edges.
        var allBounds = Screen.AllScreens.Select(s => s.Bounds).ToArray();
        Rectangle bounds = Rectangle.FromLTRB(allBounds.Min(b => b.Left), allBounds.Min(b => b.Top), allBounds.Max(b => b.Right), allBounds.Max(b => b.Bottom));
        var bitmap = CaptureScreen(bounds, captureMouse);
        return bitmap;
    }

    /// <summary>
    /// Captures only the primary screen.
    /// </summary>
    public static Bitmap CapturePrimaryScreen(bool captureMouse)
    {
        Rectangle bounds = Screen.PrimaryScreen.Bounds;
        var bitmap = CaptureScreen(bounds, captureMouse);
        return bitmap;
    }

    /// <summary>
    /// Captures the given screen rectangle; returns null on failure.
    /// </summary>
    public static Bitmap CaptureScreen(Rectangle bounds, bool captureMouse)
    {
        Bitmap result = new Bitmap(bounds.Width, bounds.Height);
        try
        {
            using (Graphics g = Graphics.FromImage(result))
            {
                g.CopyFromScreen(bounds.Location, Point.Empty, bounds.Size);
                if (captureMouse)
                {
                    CURSORINFO pci;
                    pci.cbSize = Marshal.SizeOf(typeof (CURSORINFO));
                    if (GetCursorInfo(out pci) && pci.flags == CURSOR_SHOWING)
                    {
                        // Translate the cursor's screen position into
                        // bitmap coordinates before drawing it.
                        var hdc = g.GetHdc();
                        DrawIconEx(hdc, pci.ptScreenPos.x - bounds.X, pci.ptScreenPos.y - bounds.Y, pci.hCursor, 0, 0, 0, IntPtr.Zero, DI_NORMAL);
                        g.ReleaseHdc();
                    }
                }
            }
        }
        catch
        {
            // FIX: dispose the partially drawn bitmap before signalling
            // failure; the original leaked it by only dropping the reference.
            result.Dispose();
            result = null;
        }
        return result;
    }
}
Based on the other answers I made a version without all the Windows API stuff (for the monochrome part) because the solutions did not work for all monochrome cursors. I create the cursor from the mask by combining the two mask parts.
My solution:
// Captures the current cursor as a 32bpp ARGB bitmap; position receives the
// hotspot-adjusted screen location. Monochrome cursors (mask twice as tall
// as wide) are rebuilt pixel by pixel from the AND/XOR halves of the mask so
// I-beam style cursors come out as a visible image instead of a faded ghost.
// FIX: the loop bounds and output size were hard-coded to 32x32; they now
// use the actual mask width, so larger cursors (e.g. accessibility sizes)
// are handled too. Behavior is identical for standard 32-pixel cursors.
Bitmap CaptureCursor(ref Point position)
{
    CURSORINFO cursorInfo = new CURSORINFO();
    cursorInfo.cbSize = Marshal.SizeOf(cursorInfo);
    if (!GetCursorInfo(out cursorInfo))
        return null;
    if (cursorInfo.flags != CURSOR_SHOWING)
        return null;

    IntPtr hicon = CopyIcon(cursorInfo.hCursor);
    if (hicon == IntPtr.Zero)
        return null;
    ICONINFO iconInfo;
    if (!GetIconInfo(hicon, out iconInfo))
        return null;

    // Top-left corner of the cursor image = screen position - hotspot.
    position.X = cursorInfo.ptScreenPos.x - iconInfo.xHotspot;
    position.Y = cursorInfo.ptScreenPos.y - iconInfo.yHotspot;

    // NOTE(review): hicon and the ICONINFO bitmaps are never released here;
    // a long-running capture loop should DestroyIcon/DeleteObject them.
    using (Bitmap maskBitmap = Bitmap.FromHbitmap(iconInfo.hbmMask))
    {
        // Monochrome cursor: the AND mask occupies the top half of the
        // bitmap and the XOR mask the bottom half.
        if (maskBitmap.Height == maskBitmap.Width * 2)
        {
            int size = maskBitmap.Width; // generalized from the hard-coded 32
            Bitmap cursor = new Bitmap(size, size, PixelFormat.Format32bppArgb);
            Color BLACK = Color.FromArgb(255, 0, 0, 0); //cannot compare Color.Black because of different names
            Color WHITE = Color.FromArgb(255, 255, 255, 255); //cannot compare Color.White because of different names
            for (int y = 0; y < size; y++)
            {
                for (int x = 0; x < size; x++)
                {
                    Color maskPixel = maskBitmap.GetPixel(x, y);           // AND mask
                    Color cursorPixel = maskBitmap.GetPixel(x, y + size);  // XOR mask
                    if (maskPixel == WHITE && cursorPixel == BLACK)
                    {
                        // AND keeps the screen, XOR leaves it: transparent.
                        cursor.SetPixel(x, y, Color.Transparent);
                    }
                    else if (maskPixel == BLACK)
                    {
                        // Opaque pixel: take the XOR color directly.
                        cursor.SetPixel(x, y, cursorPixel);
                    }
                    else
                    {
                        // Screen-inverting pixel: approximate with the opposite shade.
                        cursor.SetPixel(x, y, cursorPixel == BLACK ? WHITE : BLACK);
                    }
                }
            }
            return cursor;
        }
    }

    // Color cursor: a plain icon-to-bitmap conversion is sufficient.
    Icon icon = Icon.FromHandle(hicon);
    return icon.ToBitmap();
}
This is the patched version with all fixes for the bugs presented on this page:
/// <summary>
/// Captures the current mouse cursor as a bitmap, returning its
/// hotspot-adjusted position via <paramref name="point"/>. Handles color,
/// black, inverted, and monochrome (I-beam style) cursors. Returns null
/// when no cursor is visible or the capture fails.
/// FIX: the original destroyed <c>hicon</c> inside the <c>using</c> block
/// and then called <c>Icon.FromHandle</c> on the already-destroyed handle
/// for color cursors; the conversion now happens before the handles are
/// released.
/// </summary>
public static Bitmap CaptureImageCursor(ref Point point)
{
    try
    {
        var cursorInfo = new CursorInfo();
        cursorInfo.cbSize = Marshal.SizeOf(cursorInfo);

        if (!GetCursorInfo(out cursorInfo))
            return null;

        if (cursorInfo.flags != CursorShowing)
            return null;

        // Work on a private copy of the cursor handle; destroyed below.
        var hicon = CopyIcon(cursorInfo.hCursor);
        if (hicon == IntPtr.Zero)
            return null;

        Iconinfo iconInfo;
        if (!GetIconInfo(hicon, out iconInfo))
        {
            DestroyIcon(hicon);
            return null;
        }

        // Top-left corner of the cursor image = screen position - hotspot.
        point.X = cursorInfo.ptScreenPos.X - iconInfo.xHotspot;
        point.Y = cursorInfo.ptScreenPos.Y - iconInfo.yHotspot;

        using (var maskBitmap = Image.FromHbitmap(iconInfo.hbmMask))
        {
            // Monochrome cursor: the mask stacks the AND half on top of the
            // XOR half (twice as tall as wide) and has no color bitmap.
            if (maskBitmap.Height == maskBitmap.Width * 2 && iconInfo.hbmColor == IntPtr.Zero)
            {
                var final = new Bitmap(maskBitmap.Width, maskBitmap.Width);
                var hDesktop = GetDesktopWindow();
                var dcDesktop = GetWindowDC(hDesktop);

                using (var resultGraphics = Graphics.FromImage(final))
                {
                    var resultHdc = resultGraphics.GetHdc();

                    // Copy the desktop pixels behind the cursor, then let
                    // DrawIconEx apply the AND/XOR raster operations so
                    // inverted cursors look like they do on screen.
                    BitBlt(resultHdc, 0, 0, final.Width, final.Height, dcDesktop, (int)point.X + 3, (int)point.Y + 3, CopyPixelOperation.SourceCopy);
                    DrawIconEx(resultHdc, 0, 0, cursorInfo.hCursor, 0, 0, 0, IntPtr.Zero, 0x0003);

                    //TODO: I have to try removing the background of this cursor capture.
                    //Native.BitBlt(resultHdc, 0, 0, final.Width, final.Height, dcDesktop, (int)point.X + 3, (int)point.Y + 3, Native.CopyPixelOperation.SourceErase);

                    resultGraphics.ReleaseHdc(resultHdc);
                    ReleaseDC(hDesktop, dcDesktop);
                }

                DeleteObject(iconInfo.hbmMask);
                DeleteDC(dcDesktop);
                DestroyIcon(hicon);
                return final;
            }
        }

        // Color cursor: convert to a bitmap BEFORE releasing the handles.
        Bitmap result;
        using (var icon = Icon.FromHandle(hicon))
        {
            result = icon.ToBitmap();
        }

        // GetIconInfo hands ownership of both bitmaps to the caller.
        DeleteObject(iconInfo.hbmColor);
        DeleteObject(iconInfo.hbmMask);
        DestroyIcon(hicon);

        return result;
    }
    catch (Exception ex)
    {
        //You should catch exception with your method here.
        //LogWriter.Log(ex, "Impossible to get the cursor.");
    }
    return null;
}
This version works with:
I-Beam cursors.
Black cursors.
Normal cursors.
Inverted cursors.
See working, here: https://github.com/NickeManarin/ScreenToGif/blob/master/ScreenToGif/Util/Native.cs#L991
Your description of a translucent 'gray' version of the I-beam cursor makes me wonder if you're encountering an issue with image scaling or mispositioning of the cursor.
One of the people posting on that site provided a (broken) link to a report with peculiar behavior that I've tracked down to: http://www.efg2.com/Lab/Graphics/CursorOverlay.htm
The examples on that page are not in C#, but the author of the codeproject solution may have been doing something similar, and I know I've gotten my scaling wrong when using the graphics object on plenty of occasions myself:
In any ImageMouseDown event once an
image is loaded, the CusorBitmap is
drawn with transparency on top of the
bitmap using the Canvas.Draw method.
Note some coordinate adjustments
(rescaling) are needed in case the
bitmap is stretched to fit in the
TImage.