I have a program that captures a frame from webcam on a click. The capture works ok and I save it as a bitmap ok, but I have a weird problem that the bitmap is offset - the size is correct, but it cuts from the bottom a part away (like 25% of the picture) and the top is all black.
Any ideas what causes this?
The flow goes like:
public static string TempPicLocation = #"%USERPROFILE%\AppData\Local\temppic.bmp";
private void StartButton_Click(object sender, RoutedEventArgs e)
{
int capturedeviceindex = cboDevices.SelectedIndex;
FilterInfo cd = CaptureDevice[cboDevices.SelectedIndex];
string cdms = cd.MonikerString;
FinalFrame = new VideoCaptureDevice(cdms);
FinalFrame.NewFrame += FinalFrame_NewFrame;
FinalFrame.Start();
}
private void FinalFrame_NewFrame(object sender, NewFrameEventArgs eventArgs)
{
var imageSource = ImageSourceForBitmap(eventArgs.Frame);
imageSource.Freeze();
this.Dispatcher.Invoke(
new Action(
() =>
{
pboLive.Source = imageSource;
return;
}
)
);
}
//If you get 'dllimport unknown'-, then add 'using System.Runtime.InteropServices;'
[DllImport("gdi32.dll", EntryPoint = "DeleteObject")]
[return: MarshalAs(UnmanagedType.Bool)]
public static extern bool DeleteObject([In] IntPtr hObject);
public ImageSource ImageSourceForBitmap(Bitmap bmp)
{
var handle = bmp.GetHbitmap();
try
{
return Imaging.CreateBitmapSourceFromHBitmap(handle, IntPtr.Zero, Int32Rect.Empty, BitmapSizeOptions.FromEmptyOptions());
}
finally { DeleteObject(handle); }
}
private void CaptureButton_Click(object sender, RoutedEventArgs e)
{
ImageSource Captured = pboLive.Source;
pboSnap.Source = Captured.Clone();
capturedpictures.Add(pboLive.Source);
var filePath = Environment.ExpandEnvironmentVariables(TempPicLocation);
ImageHandlers.SaveToBmp(pboLive, filePath);
}
internal static void SaveToBmp(FrameworkElement visual, string fileName)
{
var encoder = new BmpBitmapEncoder(); //In System.Windows.Media.Imaging
SaveUsingEncoder(visual, fileName, encoder);
}
internal static void SaveUsingEncoder(FrameworkElement visual, string fileName, BitmapEncoder encoder)
{
//Here the commented part is the right size, but with 5k x 5k is used to check that the entire picture actually is there. And yes, it indeed is.
//RenderTargetBitmap bitmap = new RenderTargetBitmap((int)visual.ActualWidth, (int)visual.ActualHeight, 96, 96, PixelFormats.Pbgra32); //In System.Windows.Media.Imaging
RenderTargetBitmap bitmap = new RenderTargetBitmap(5000, 5000, 96, 96, PixelFormats.Pbgra32); //In System.Windows.Media.Imaging
bitmap.Render(visual);
BitmapFrame frame = BitmapFrame.Create(bitmap); //In System.Windows.Media.Imaging
encoder.Frames.Add(frame);
string filePath = fileName.Replace(Path.GetFileName(fileName), string.Empty);
System.IO.Directory.CreateDirectory(filePath);
using (var stream = File.Create(fileName))
{
encoder.Save(stream);
}
}
https://social.msdn.microsoft.com/Forums/vstudio/en-US/984da366-33d3-4fd3-b4bd-4782971785f8/questions-about-rendertargetbitmap?forum=wpf
public static BitmapSource CreateBitmapFromVisual(Visual target, Double dpiX, Double dpiY)
{
if (target == null)
{
return null;
}
Rect bounds = VisualTreeHelper.GetContentBounds(target);
RenderTargetBitmap rtb = new RenderTargetBitmap((Int32)(bounds.Width * dpiX / 96.0),
(Int32)(bounds.Height * dpiY / 96.0),
dpiX,
dpiY,
PixelFormats.Pbgra32);
DrawingVisual dv = new DrawingVisual();
using (DrawingContext dc = dv.RenderOpen())
{
VisualBrush vb = new VisualBrush(target);
dc.DrawRectangle(vb, null, new Rect(new Point(), bounds.Size));
}
rtb.Render(dv);
return rtb;
}
this piece of code is posted by some one from microsoft, and then you can do this:
using (FileStream outStream = new FileStream(#"C:\mycanvas.png", FileMode.Create))
{
PngBitmapEncoder enc = new PngBitmapEncoder();
enc.Frames.Add(BitmapFrame.Create(CreateBitmapFromVisual(myCanvas, 96, 96)));
enc.Save(outStream);
}
Related
I need to capture UI element in this case LiveCharts chart and save it to a PNG/JPEG
I got this code
private void Button_Click(object sender, RoutedEventArgs e) {
var filePath = "qwerty.png";
var res = CaptureScreen(charts, charts.ActualWidth, charts.ActualHeight);
using (var fileStream = new FileStream(filePath, FileMode.Create)) {
BitmapEncoder encoder = new PngBitmapEncoder();
encoder.Frames.Add(BitmapFrame.Create(res));
encoder.Save(fileStream);
}
}
private BitmapSource CaptureScreen(Visual target, double dpiX, double dpiY) {
if (target == null) {
return null;
}
Rect bounds = VisualTreeHelper.GetDescendantBounds(target);
RenderTargetBitmap rtb = new RenderTargetBitmap((int)(bounds.Width * dpiX / 96.0),
(int)(bounds.Height * dpiY / 96.0),
dpiX,
dpiY,
PixelFormats.Pbgra32);
DrawingVisual dv = new DrawingVisual();
using (DrawingContext ctx = dv.RenderOpen()) {
VisualBrush vb = new VisualBrush(target);
ctx.DrawRectangle(vb, null, new Rect(new System.Windows.Point(), bounds.Size));
}
rtb.Render(dv);
return rtb;
}
I can get a png no problem. Though it is the picture created is 30k x 9k pixels. The picture has a transparent background. what would be the best practice to capture a UI element and export as a picture with a white backgroun?
I am new in SharpDX and I want to simulate code to render a 24-bit bitmap image straight from memory and display to PictureBox. *This code is to be use in later project to quickly render images from memory.
I have no issue render using standard DrawImage() method. I opt for SharpDX because DrawImage is too slow.
But when I try render using SharpDX, the image become grey in color and corrupted (see image below)
The image I want to render is in 24-bit RGB bitmap.
Using DrawImage
Using SharpDX
What is wrong with my code?
Below is my code:
using System;
using System.Windows.Forms;
using SharpDX;
using SharpDX.Direct2D1;
using System.Diagnostics;
namespace bitmap_test
{
public partial class Form1 : Form
{
private System.Drawing.Bitmap image1 = null;
private System.Drawing.Imaging.BitmapData bmpdata1 = null;
//target of rendering
WindowRenderTarget target;
//factory for creating 2D elements
SharpDX.Direct2D1.Factory factory = new SharpDX.Direct2D1.Factory();
//this one is for creating DirectWrite Elements
SharpDX.DirectWrite.Factory factoryWrite = new SharpDX.DirectWrite.Factory();
private SharpDX.DXGI.Format bmp_format = SharpDX.DXGI.Format.B8G8R8A8_UNorm;
private AlphaMode bmp_alphamode = AlphaMode.Ignore;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
InitSharpDX();
//load 24-bit depth bitmap
LoadBitmapFromFile(#"D:\lena.bmp"); //https://software.intel.com/sites/default/files/forum/351974/lena.bmp
}
private void InitSharpDX()
{
//Init Direct Draw
//Set Rendering properties
RenderTargetProperties renderProp = new RenderTargetProperties()
{
DpiX = 0,
DpiY = 0,
MinLevel = FeatureLevel.Level_10,
PixelFormat = new PixelFormat(bmp_format, bmp_alphamode),
Type = RenderTargetType.Hardware,
Usage = RenderTargetUsage.None
};
//set hwnd target properties (permit to attach Direct2D to window)
HwndRenderTargetProperties winProp = new HwndRenderTargetProperties()
{
Hwnd = this.pictureBox1.Handle,
PixelSize = new Size2(this.pictureBox1.ClientSize.Width, this.pictureBox1.ClientSize.Height),
PresentOptions = PresentOptions.Immediately
};
//target creation
target = new WindowRenderTarget(factory, renderProp, winProp);
}
//load bmp file into program memory
private void LoadBitmapFromFile(string file)
{
image1 = (System.Drawing.Bitmap)System.Drawing.Image.FromFile(file, true);
var sourceArea = new System.Drawing.Rectangle(0, 0, image1.Width, image1.Height);
bmpdata1 = image1.LockBits(sourceArea, System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
image1.UnlockBits(bmpdata1);
}
private void drawBitmap(IntPtr pBuffer, int len, int width, int height)
{
try
{
var bitmapProperties = new BitmapProperties(new PixelFormat(bmp_format, bmp_alphamode));
var size = new Size2(width, height);
int stride = width * 3; //only want RGB, ignore alpha
var bmp = new SharpDX.Direct2D1.Bitmap(target, size, new DataPointer(pBuffer, len), stride, bitmapProperties);
//draw elements
Draw(ref bmp);
bmp.Dispose();
}
finally
{
}
}
private void Draw(ref SharpDX.Direct2D1.Bitmap bmp)
{
//begin rendering
target.BeginDraw();
//clear target
target.Clear(null);
//draw bitmap
target.DrawBitmap(bmp, 1.0f, BitmapInterpolationMode.Linear);
//end drawing
target.EndDraw();
}
//draw image using SharpDX
private void cmdRender_Click(object sender, EventArgs e)
{
if (bmpdata1 != null)
{
int len = bmpdata1.Width * bmpdata1.Height * 3;
var sw = Stopwatch.StartNew();
drawBitmap(bmpdata1.Scan0, len, bmpdata1.Width, bmpdata1.Height);
sw.Stop();
Console.WriteLine("SharpDX: {0}us", sw.ElapsedTicks / (TimeSpan.TicksPerMillisecond / 1000));
}
}
//draw image using DrawImage()
private void cmdDrawImage_Click(object sender, EventArgs e)
{
if (image1 != null)
{
var g = pictureBox1.CreateGraphics();
var sourceArea = new System.Drawing.Rectangle(0, 0, image1.Width, image1.Height);
var sw = Stopwatch.StartNew();
g.DrawImage(image1, sourceArea); //DrawImage is slow
sw.Stop();
Console.WriteLine("DrawImage: {0}us", sw.ElapsedTicks/(TimeSpan.TicksPerMillisecond / 1000));
}
}
}
}
The bmp_format B8G8R8A8_UNorm doesn't match the System.Drawing lock format Format24bppRgb... also use bmpdata1.Stride instead of calculating a potential invalid stride.
Your stride is usually the width of the bitmap multipled by the byte depth.
so 512 * 4 would be a 512 wide bitmap with a 32 bit palette. Eg RGBA
I use the following code for resizing a gif file.
public void Resize(int newWidth, int newHeight)
{
var dimension = new FrameDimension(_image.FrameDimensionsList[0]);
var frameCount = _image.GetFrameCount(dimension);
// load frames
var encoder = new GifBitmapEncoder();
for (var i = 0; i < frameCount; ++i)
{
_image.SelectActiveFrame(dimension, i);
encoder.Frames.Add(BitmapFrame.Create(
new Bitmap(_image, newWidth, newHeight).CreateBitmapSource()));
}
using (var ms = new MemoryStream())
{
encoder.Save(ms);
ms.Position = 0;
_image = new Bitmap(ms);
}
}
public static BitmapSource CreateBitmapSource(this Bitmap bitmap)
{
if (bitmap == null)
throw new ArgumentNullException("bitmap");
lock (bitmap)
{
IntPtr hBitmap = bitmap.GetHbitmap();
return System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
hBitmap,
IntPtr.Zero,
Int32Rect.Empty,
BitmapSizeOptions.FromEmptyOptions());
}
}
If I output it to a filestream, the gif is animated (altough the frames seem a little slower), but if i load it into an image, it doesn't matter wether from the memorystream or loading it from the "working" gif that came from the filestream, it only contains 1 frame.
What am I doing wrong?
I am using kinect!!I get a frame and then I convert it to bitmap in order to use Emgucv to convert frame in grayscale then convert bitmap to bitmpa source in order to show in window!I am usign C# visual studio WPF!But my program consume much CPU usage and in case the video is frozen for seconds!!I guess that is the conversion bitmpa source to bitmap and viceverse
byte[] colorData = null;
WriteableBitmap colorImageBitmap = null;
void myKinect_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null) return;
if (colorData == null)
colorData = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo(colorData);
if (colorImageBitmap == null)
{
this.colorImageBitmap = new WriteableBitmap(
colorFrame.Width,
colorFrame.Height,
96, // DpiX
96, // DpiY
PixelFormats.Bgr32,
null);
}
this.colorImageBitmap.WritePixels(
new Int32Rect(0, 0, colorFrame.Width, colorFrame.Height),
colorData, // video data
colorFrame.Width * colorFrame.BytesPerPixel, // stride,
0 // offset into the array - start at 0
);
Image<Gray, Byte> My_Image = new Image<Gray, byte>(BitmapFromSource(colorImageBitmap));
kinectVideo.Source = ToBitmapSource(My_Image);
}
}
private System.Drawing.Bitmap BitmapFromSource(BitmapSource bitmapsource)
{
System.Drawing.Bitmap bitmap;
using (MemoryStream outStream = new MemoryStream())
{
BitmapEncoder enc = new BmpBitmapEncoder();
enc.Frames.Add(BitmapFrame.Create(bitmapsource));
enc.Save(outStream);
bitmap = new System.Drawing.Bitmap(outStream);
}
return bitmap;
}
[DllImport("gdi32")]
private static extern int DeleteObject(IntPtr o);
public static BitmapSource ToBitmapSource(IImage image)
{
using (System.Drawing.Bitmap source = image.Bitmap)
{
IntPtr ptr = source.GetHbitmap(); //obtain the Hbitmap
BitmapSource bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
ptr,
IntPtr.Zero,
Int32Rect.Empty,
System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());
DeleteObject(ptr); //release the HBitmap
return bs;
}
}
}
Instead of using Emgucv to do the greyscaling...which means you have to create a GDI+ Bitmap (System.Drawing.Bitmap) pass it to Emgucv, then convert it back from the GDI+ Bitmap to a BitmapSource, you could use FormatConvertedBitmap to keep it in the WPF world.
void myKinect_ColorFrameReady(object sender, ColorImageFrameReadyEventArgs e)
{
using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
{
if (colorFrame == null) return;
if (colorData == null)
colorData = new byte[colorFrame.PixelDataLength];
colorFrame.CopyPixelDataTo(colorData);
if (colorImageBitmap == null)
{
this.colorImageBitmap = new WriteableBitmap(
colorFrame.Width,
colorFrame.Height,
96, // DpiX
96, // DpiY
PixelFormats.Bgr32,
null);
}
this.colorImageBitmap.WritePixels(
new Int32Rect(0, 0, colorFrame.Width, colorFrame.Height),
colorData, // video data
colorFrame.Width * colorFrame.BytesPerPixel, // stride,
0 // offset into the array - start at 0
);
kinectVideo.Source = new FormatConvertedBitmap(colorImageBitmap, PixelFormats.Gray32Float, null, 0);
}
}
http://www.shujaat.net/2010/08/wpf-image-format-conversion-including.html
Another option is to use a pixel shader that applies a greyscale effect on your "kinectVideo" element (which is presumably an Image element to display the frame).
http://bursjootech.blogspot.co.uk/2008/06/grayscale-effect-pixel-shader-effect-in.html
Say I have a grid named 'GridA'
everywhere i've searched suggests that i use
GridA.DrawToBitmap
But grids don't have that method..
And then i got crafty and wrapped it in a stackpanel and called 'stackpanel1'
panels don't have that method either.
So how should i go about saving my grid as an image in wpf ?
You can convert any Drawing Visual into a Bitmap. Here's some code I use to add icon overlays from WPF drawn controls, add it to a UserControl or refactor it.
For a full example see http://alski.net/post/2012/01/11/WPF-Icon-Overlays.aspx
protected void InitializeBitmapGeneration()
{
LayoutUpdated += (sender, e) => _UpdateImageSource();
}
public static readonly DependencyProperty ImageSourceProperty = DependencyProperty.Register(
"ImageSource",
typeof(ImageSource),
typeof(CountControl),
new PropertyMetadata(null));
/// <summary>
/// Gets or sets the ImageSource property. This dependency property
/// indicates ....
/// </summary>
public ImageSource ImageSource
{
get { return (ImageSource)GetValue(ImageSourceProperty); }
set { SetValue(ImageSourceProperty, value); }
}
private void _UpdateImageSource()
{
if (ActualWidth == 0 || ActualHeight == 0)
{
return;
}
ImageSource = GenerateBitmapSource(this, 16, 16);
}
public static BitmapSource GenerateBitmapSource(ImageSource img)
{
return GenerateBitmapSource(img, img.Width, img.Height);
}
public static BitmapSource GenerateBitmapSource(ImageSource img, double renderWidth, double renderHeight)
{
var dv = new DrawingVisual();
using (DrawingContext dc = dv.RenderOpen())
{
dc.DrawImage(img, new Rect(0, 0, renderWidth, renderHeight));
}
var bmp = new RenderTargetBitmap((int)renderWidth, (int)renderHeight, 96, 96, PixelFormats.Pbgra32);
bmp.Render(dv);
return bmp;
}
public static BitmapSource GenerateBitmapSource(Visual visual, double renderWidth, double renderHeight)
{
var bmp = new RenderTargetBitmap((int)renderWidth, (int)renderHeight, 96, 96, PixelFormats.Pbgra32);
var dv = new DrawingVisual();
using (DrawingContext dc = dv.RenderOpen())
{
dc.DrawRectangle(new VisualBrush(visual), null, new Rect(0, 0, renderWidth, renderHeight));
}
bmp.Render(dv);
return bmp;
}
}