I received some code from a friend for study purposes. It captures a Bitmap through BitBlt and then converts it to a Texture2D (SharpDX.Direct3D11.Texture2D). After using the Texture2D, I would like to convert it back to a Bitmap. What is the best way to do this? Having little experience in this area, I found myself stuck on this problem. Below is the conversion code.
public Texture2D TextureGenerator(Device device)
{
    // Capture the window contents into a GDI bitmap via BitBlt.
    var hdcSrc = NativeMethods.GetDCEx(_hWnd, IntPtr.Zero, DeviceContextValues.Window | DeviceContextValues.Cache | DeviceContextValues.LockWindowUpdate);
    var hdcDest = NativeMethods.CreateCompatibleDC(hdcSrc);
    NativeMethods.GetWindowRect(_hWnd, out var rect);
    var (width, height) = (rect.Right - rect.Left, rect.Bottom - rect.Top);
    var hBitmap = NativeMethods.CreateCompatibleBitmap(hdcSrc, width, height);
    var hOld = NativeMethods.SelectObject(hdcDest, hBitmap);
    NativeMethods.BitBlt(hdcDest, 0, 0, width, height, hdcSrc, 0, 0, TernaryRasterOperations.SRCCOPY);
    NativeMethods.SelectObject(hdcDest, hOld);
    NativeMethods.DeleteDC(hdcDest);
    NativeMethods.ReleaseDC(_hWnd, hdcSrc);
    using var img = Image.FromHbitmap(hBitmap);
    NativeMethods.DeleteObject(hBitmap);
    // Normalize to 32bpp ARGB so the memory layout matches B8G8R8A8_UNorm.
    using Bitmap bitmap = img.Clone(Rectangle.FromLTRB(0, 0, width, height), PixelFormat.Format32bppArgb);
    // Lock with the clone's own pixel format, not the source image's.
    var bits = bitmap.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadOnly, bitmap.PixelFormat);
    // RowPitch is the stride in bytes (which may include padding, so don't
    // assume width * 4); SlicePitch is the size of the whole image in bytes.
    DataBox data = new DataBox { DataPointer = bits.Scan0, RowPitch = bits.Stride, SlicePitch = bits.Stride * bits.Height };
    Texture2DDescription texture2dDescription = new Texture2DDescription
    {
        ArraySize = 1,
        BindFlags = BindFlags.ShaderResource | BindFlags.RenderTarget,
        CpuAccessFlags = CpuAccessFlags.None,
        Format = Format.B8G8R8A8_UNorm,
        Height = height,
        MipLevels = 1,
        SampleDescription = new SampleDescription(1, 0),
        Usage = ResourceUsage.Default,
        Width = width
    };
    Texture2D texture2d = new Texture2D(device, texture2dDescription, new[] { data });
    bitmap.UnlockBits(bits);
    return texture2d;
}
I tried looking at other topics on Stack Overflow, but I didn't find this type of conversion.
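From what I've pieced together so far, the reverse direction seems to need a staging copy, since a Default-usage texture can't be mapped by the CPU. Here's a sketch of the approach I'm experimenting with (untested, and the method name is my own):
public Bitmap TextureToBitmap(Device device, Texture2D texture)
{
    // Copy the GPU texture into a CPU-readable staging texture, map it,
    // and copy each scanline into a 32bpp ARGB Bitmap.
    var desc = texture.Description;
    var stagingDesc = new Texture2DDescription
    {
        ArraySize = 1,
        BindFlags = BindFlags.None,
        CpuAccessFlags = CpuAccessFlags.Read,
        Format = desc.Format, // expects B8G8R8A8_UNorm as produced above
        Height = desc.Height,
        MipLevels = 1,
        OptionFlags = ResourceOptionFlags.None,
        SampleDescription = new SampleDescription(1, 0),
        Usage = ResourceUsage.Staging,
        Width = desc.Width
    };
    using (var staging = new Texture2D(device, stagingDesc))
    {
        device.ImmediateContext.CopyResource(texture, staging);
        var box = device.ImmediateContext.MapSubresource(staging, 0, MapMode.Read, MapFlags.None);
        var bitmap = new Bitmap(desc.Width, desc.Height, PixelFormat.Format32bppArgb);
        var bits = bitmap.LockBits(new Rectangle(0, 0, desc.Width, desc.Height),
            ImageLockMode.WriteOnly, bitmap.PixelFormat);
        // Row pitches on the two sides may differ, so copy one scanline at a time.
        for (var y = 0; y < desc.Height; y++)
            Utilities.CopyMemory(bits.Scan0 + y * bits.Stride,
                box.DataPointer + y * box.RowPitch, desc.Width * 4);
        bitmap.UnlockBits(bits);
        device.ImmediateContext.UnmapSubresource(staging, 0);
        return bitmap;
    }
}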
Related
So I've heard that GDI supports hardware acceleration. I have this code here:
var x = 1;
if (solidBrush == IntPtr.Zero)
{
    solidBrush = CreateSolidBrush(ColorTranslator.ToWin32(Color.FromArgb(120, 120, 120)));
    hDC = CreateGraphics().GetHdc();
}
index += x;
int w = x;
int h = Height;
// create memory device context
var memdc = CreateCompatibleDC(hDC);
// create bitmap
var hbitmap = CreateCompatibleBitmap(hDC, index, h);
// select bitmap into memory device context
var holdbmp = SelectObject(memdc, hbitmap);
RECT rect = new RECT(new Rectangle(0, 0, w, h));
FillRect(memdc, ref rect, solidBrush);
// blend at 50% opacity (SourceConstantAlpha = 128, AC_SRC_OVER = 0)
AlphaBlend(hDC, index - x, 0, w, h, memdc, 0, 0, w, h, new BLENDFUNCTION(0, 0, 128, 0));
SelectObject(memdc, holdbmp);
DeleteObject(hbitmap);
DeleteDC(memdc);
which uses GDI to draw an animation (a box), but I see no GPU usage. Is it that GDI just doesn't support HW acceleration?
Thanks.
I need to render an image (with depth) which I get from an external source. I can construct two textures and pass them into a shader with no problem (I can verify that the values sampled in the pixel shader are correct).
Here's what my HLSL looks like:
// image texture
Texture2D m_TextureColor : register(t0);

// depth texture with values [0..1]
Texture2D<float> m_TextureDepth : register(t1);

// sampler to forbid linear filtering since we're dealing with pixels
SamplerState m_TextureSampler { Filter = MIN_MAG_MIP_POINT; };

struct VS_IN
{
    float4 position : POSITION;
    float2 texcoord : TEXCOORD;
};

struct VS_OUT
{
    float4 position : SV_POSITION;
    float2 texcoord : TEXCOORD0;
};

struct PS_OUT
{
    float4 color : COLOR0;
    float depth : DEPTH0;
};

VS_OUT VS(VS_IN input)
{
    VS_OUT output = (VS_OUT)0;
    output.position = input.position;
    output.texcoord = input.texcoord;
    return output;
}

PS_OUT PS(VS_OUT input) : SV_Target
{
    PS_OUT output = (PS_OUT)0;
    output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
    // I want to modify depth of the pixel,
    // but it looks like it has no effect on depth no matter what I set here
    output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
    return output;
}
I construct the vertex buffer from these (with PrimitiveTopology.TriangleStrip), where the first argument (a Vector4) is the position and the second (a Vector2) is the texture coordinate:
new[]
{
    new Vertex(new Vector4(-1, -1, 0.5f, 1), new Vector2(0, 1)),
    new Vertex(new Vector4(-1, 1, 0.5f, 1), new Vector2(0, 0)),
    new Vertex(new Vector4(1, -1, 0.5f, 1), new Vector2(1, 1)),
    new Vertex(new Vector4(1, 1, 0.5f, 1), new Vector2(1, 0)),
}
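For completeness, the buffer setup I'm using looks roughly like this (the Vertex struct layout and variable names are my own; only the array above is taken verbatim):
// Assumed Vertex struct: sequential layout, float4 position + float2 texcoord (24 bytes).
[StructLayout(LayoutKind.Sequential)]
public struct Vertex
{
    public Vector4 Position;
    public Vector2 TexCoord;
    public Vertex(Vector4 position, Vector2 texcoord) { Position = position; TexCoord = texcoord; }
}

// Upload the vertices and bind them with a triangle-strip topology.
var vertexBuffer = SharpDX.Direct3D11.Buffer.Create(Device, BindFlags.VertexBuffer, vertices);
DeviceContext.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertexBuffer, Utilities.SizeOf<Vertex>(), 0));
DeviceContext.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleStrip;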
Everything works just fine: I'm seeing my image, and I can sample depth from the depth texture and construct something visual from it (that's how I can verify that the depth values I'm sampling are correct). However, I can't figure out how to modify the pixel's depth so that it participates correctly when the depth test happens. At the moment everything depends on the z value I set as my vertex position.
This is how I'm setting up DirectX11 (I'm using SharpDX and C#):
var swapChainDescription = new SwapChainDescription
{
    BufferCount = 1,
    ModeDescription = new ModeDescription(bufferSize.Width, bufferSize.Height, new Rational(60, 1), Format.R8G8B8A8_UNorm),
    IsWindowed = true,
    OutputHandle = HostHandle,
    SampleDescription = new SampleDescription(1, 0),
    SwapEffect = SwapEffect.Discard,
    Usage = Usage.RenderTargetOutput,
};
var swapChainFlags = DeviceCreationFlags.None | DeviceCreationFlags.BgraSupport;
SharpDX.Direct3D11.Device.CreateWithSwapChain(DriverType.Hardware, swapChainFlags, swapChainDescription, out var device, out var swapchain);
Setting up the back buffer and the depth/stencil buffer:
// color buffer
using (var textureColor = SwapChain.GetBackBuffer<Texture2D>(0))
{
    TextureColorResourceView = new RenderTargetView(Device, textureColor);
}

// depth buffer
using (var textureDepth = new Texture2D(Device, new Texture2DDescription
{
    Format = Format.D32_Float,
    ArraySize = 1,
    MipLevels = 1,
    Width = BufferSize.Width,
    Height = BufferSize.Height,
    SampleDescription = new SampleDescription(1, 0),
    Usage = ResourceUsage.Default,
    BindFlags = BindFlags.DepthStencil,
    CpuAccessFlags = CpuAccessFlags.None,
    OptionFlags = ResourceOptionFlags.None
}))
{
    TextureDepthResourceView = new DepthStencilView(Device, textureDepth);
}
DeviceContext.OutputMerger.SetTargets(TextureDepthResourceView, TextureColorResourceView);
Preparing the depth/stencil state:
var description = DepthStencilStateDescription.Default();
description.DepthComparison = Comparison.LessEqual;
description.IsDepthEnabled = true;
description.DepthWriteMask = DepthWriteMask.All;
DepthState = new DepthStencilState(Device, description);
And using it:
DeviceContext.OutputMerger.SetDepthStencilState(DepthState);
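Not shown above: both targets also get cleared at the start of each frame, along these lines (the clear values here are my sketch, not copied from the project; depth must go back to the far plane, 1.0, for LessEqual to work):
DeviceContext.ClearRenderTargetView(TextureColorResourceView, new Color4(0, 0, 0, 1));
// reset every depth sample to the far plane before the frame's depth tests
DeviceContext.ClearDepthStencilView(TextureDepthResourceView, DepthStencilClearFlags.Depth, 1.0f, 0);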
This is how I construct the color/depth textures I'm sending to the shader:
public static (ShaderResourceView resource, Texture2D texture) CreateTextureDynamic(this Device device, System.Drawing.Size size, Format format)
{
    var textureDesc = new Texture2DDescription
    {
        MipLevels = 1,
        Format = format,
        Width = size.Width,
        Height = size.Height,
        ArraySize = 1,
        BindFlags = BindFlags.ShaderResource,
        Usage = ResourceUsage.Dynamic,
        SampleDescription = new SampleDescription(1, 0),
        CpuAccessFlags = CpuAccessFlags.Write,
    };
    var texture = new Texture2D(device, textureDesc);
    return (new ShaderResourceView(device, texture), texture);
}
Also, since I need to update them frequently:
public static void UpdateResource(this Texture2D texture, int[] buffer, System.Drawing.Size size)
{
    var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
    // copy row by row because RowPitch may be larger than Width * 4
    Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
    dataStream.Dispose();
    texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}

public static void UpdateResource(this Texture2D texture, float[] buffer, System.Drawing.Size size)
{
    var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
    Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
    dataStream.Dispose();
    texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}
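To tie those together, my usage is roughly the following (the formats and variable names are my own guesses; the color format in particular depends on the byte order of the int[] pixels, and R32_Float is what I believe matches the shader's Texture2D<float>):
// create once: color at register t0, depth at register t1
var (colorView, colorTexture) = Device.CreateTextureDynamic(BufferSize, Format.B8G8R8A8_UNorm);
var (depthView, depthTexture) = Device.CreateTextureDynamic(BufferSize, Format.R32_Float);

// every frame: upload new data and bind both views to the pixel shader
colorTexture.UpdateResource(colorPixels, BufferSize);  // int[] pixel data
depthTexture.UpdateResource(depthValues, BufferSize);  // float[] depth in [0..1]
DeviceContext.PixelShader.SetShaderResource(0, colorView);
DeviceContext.PixelShader.SetShaderResource(1, depthView);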
I also googled a lot about this and found similar posts, like this one: https://www.gamedev.net/forums/topic/573961-how-to-set-depth-value-in-pixel-shader/ but I couldn't manage to solve it on my side.
Thanks in advance!
To write to the depth buffer, you need to target the SV_Depth system-value semantic. So your pixel shader output struct would look more like the following:
struct PS_OUT
{
    float4 color : SV_Target;
    float depth : SV_Depth;
};
And the shader would not specify SV_Target as in your example (the SV_ outputs are defined within the struct). So it would look like:
PS_OUT PS(VS_OUT input)
{
    PS_OUT output = (PS_OUT)0;
    output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
    // Now that output.depth is defined with SV_Depth, and you have depth-write enabled,
    // this should write to the depth buffer.
    output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
    return output;
}
Note that you may incur some performance penalty for explicitly writing depth (specifically on AMD hardware), since it forces a bypass of the early-depth hardware optimization. All subsequent draw calls using that depth buffer will have early-Z optimizations disabled, so it's generally a good idea to perform the depth-write operation as late as possible.
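As an aside (not something this question strictly needs): on shader model 5 hardware, the conservative depth output semantics can preserve part of early-Z, but only if you can guarantee which direction your written depth moves relative to the rasterized value. A sketch:
struct PS_OUT
{
    float4 color : SV_Target;
    // promises the written value is >= the interpolated depth,
    // which lets the hardware keep coarse early-Z rejection
    float depth : SV_DepthGreaterEqual;
};
If the depth sampled from the texture can move in either direction, this is not valid and plain SV_Depth is the correct choice.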
The following code behaves very oddly: it adds some extra spacing at the bottom of the resulting image, and I cannot see why. This is the code that I am working with:
public static Image ReDraw(this Image main, int w, int h,
    CompositingQuality quality = CompositingQuality.Default, //linear?
    SmoothingMode smoothing_mode = SmoothingMode.None,
    InterpolationMode ip_mode = InterpolationMode.NearestNeighbor)
{
    //size
    double dbl = (double)main.Width / (double)main.Height;
    //preserve size ratio
    if ((int)((double)h * dbl) <= w)
        w = (int)((double)h * dbl);
    else
        h = (int)((double)w / dbl);
    //draw
    Image newImage = new System.Drawing.Bitmap(w, h);
    Graphics thumbGraph = Graphics.FromImage(newImage);
    thumbGraph.CompositingQuality = quality;
    thumbGraph.SmoothingMode = smoothing_mode;
    thumbGraph.InterpolationMode = ip_mode;
    thumbGraph.Clear(Color.Transparent);
    thumbGraph.DrawImage(main, 0, 0, w, h);
    thumbGraph.DrawImage(main,
        new System.Drawing.Rectangle(0, 0, w, h),
        new System.Drawing.Rectangle(0, 0, main.Width, main.Height),
        System.Drawing.GraphicsUnit.Pixel);
    return newImage;
}
Set the pixel offset mode:
thumbGraph.PixelOffsetMode = System.Drawing.Drawing2D.PixelOffsetMode.HighQuality;
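In the ReDraw method above, that goes right after the Graphics is created and before the DrawImage calls (only the relevant lines shown):
Graphics thumbGraph = Graphics.FromImage(newImage);
// HighQuality shifts sampling by half a pixel during scaling, which removes
// the blank edge rows/columns that the default offset mode produces
thumbGraph.PixelOffsetMode = System.Drawing.Drawing2D.PixelOffsetMode.HighQuality;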
I want to resize an image coming through a file uploader. For that, I was referred to the following:
How to set Bitmap.Width and Bitmap.height
(Javed Akram's Answer)
I made the following code:
Dim imgSmall As Bitmap = New Bitmap(FUpload.PostedFile.InputStream, False)
imgSmall.Size = New Size(250, 300)
But it gives me the error:
Size is read-only property.
How can I resize the image in this case?
It is read-only; you'll have to create a new bitmap from it. Try this:
internal static Image ScaleByPercent(Image image, Size size, float percent)
{
    int sourceWidth = image.Width,
        sourceHeight = image.Height;
    int destWidth = (int)(sourceWidth * percent),
        destHeight = (int)(sourceHeight * percent);
    if (destWidth <= 0)
    {
        destWidth = 1;
    }
    if (destHeight <= 0)
    {
        destHeight = 1;
    }
    var resizedImage = new Bitmap(destWidth, destHeight, PixelFormat.Format24bppRgb);
    resizedImage.SetResolution(image.HorizontalResolution, image.VerticalResolution);
    // get handle to new bitmap
    using (var graphics = Graphics.FromImage(resizedImage))
    {
        graphics.InterpolationMode = System.Drawing.Drawing2D.InterpolationMode.HighQualityBicubic;
        // create a rect covering the destination area
        var destRect = new Rectangle(0, 0, destWidth, destHeight);
        using (var brush = new SolidBrush(Color.White))
        {
            graphics.FillRectangle(brush, destRect);
        }
        // draw the source image to the destination rect
        graphics.DrawImage(image,
            destRect,
            new Rectangle(0, 0, sourceWidth, sourceHeight),
            GraphicsUnit.Pixel);
    }
    return resizedImage;
}
This is from a site I have in production; if you want, I can send you the code that works out how to keep the correct aspect ratio when resizing (i.e. what gets passed in the 'size' parameter, which the snippet above doesn't use on its own).
Hope that helps
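For the 250x300 target from the question, one way to derive the percent (my own sketch, not the production code mentioned above) would be:
// Hypothetical glue: pick the largest uniform scale that fits inside 250x300,
// then delegate to ScaleByPercent. FUpload is the question's uploader control.
var source = Image.FromStream(FUpload.PostedFile.InputStream);
float percent = Math.Min(250f / source.Width, 300f / source.Height);
var resized = ScaleByPercent(source, new Size(250, 300), percent);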
I am trying to print my form using GDI, but when I print it the quality is not that good (I don't know whether the image is getting aliased?). The form size is 700x700. Also, there is one parameter I didn't understand, the raster op code. Here is the code I am using:
private void printDocument1_PrintPage(object sender, PrintPageEventArgs e)
{
    Graphics g1 = this.CreateGraphics();
    System.Drawing.Image MyImage = new Bitmap(this.ClientRectangle.Width, this.ClientRectangle.Height, g1);
    Graphics g2 = Graphics.FromImage(MyImage);
    IntPtr dc1 = g1.GetHdc();
    IntPtr dc2 = g2.GetHdc();
    // 13369376 == 0x00CC0020 == SRCCOPY: the raster op that copies the source rectangle as-is
    BitBlt(dc2, 0, 0, this.ClientRectangle.Width, this.ClientRectangle.Height, dc1, 0, 0, 13369376);
    g1.ReleaseHdc(dc1);
    g2.ReleaseHdc(dc2);
    Bitmap bmp = new Bitmap(MyImage);
    int x = e.MarginBounds.X;
    int y = e.MarginBounds.Y;
    int width = bmp.Width;
    int height = bmp.Height;
    // compare the aspect ratios as doubles; integer division would truncate to 0
    if (((double)width / e.MarginBounds.Width) > ((double)height / e.MarginBounds.Height))
    {
        width = e.MarginBounds.Width;
        height = bmp.Height * e.MarginBounds.Width / bmp.Width;
    }
    else
    {
        height = e.MarginBounds.Height;
        width = bmp.Width * e.MarginBounds.Height / bmp.Height;
    }
    System.Drawing.Rectangle destRect = new System.Drawing.Rectangle(x, y, width, height);
    e.Graphics.DrawImage(bmp, destRect, 0, 0, bmp.Width, bmp.Height, System.Drawing.GraphicsUnit.Pixel);
}
Maybe you have a problem with the original image. Give me a link to an image. Check the image size.
Try inserting the line
g2.CompositingQuality = System.Drawing.Drawing2D.CompositingQuality.HighQuality;
Good luck!
It is normal that the result is scaled and aliased: the source has too few pixels compared to the resolution of a modern printer.
Consider using WPF, which uses vector-based rendering, so there is no loss or distortion when scaling.
Cheers
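As an aside, if you stay with WinForms, the BitBlt round-trip can be replaced with Control.DrawToBitmap, which captures the same pixels without raw GDI handles. A sketch (it won't add resolution, so the 700x700 limit on print quality still applies):
private void printDocument1_PrintPage(object sender, PrintPageEventArgs e)
{
    using (var bmp = new Bitmap(ClientRectangle.Width, ClientRectangle.Height))
    {
        // renders the form (including child controls) directly into the bitmap
        this.DrawToBitmap(bmp, ClientRectangle);
        // then scale into e.MarginBounds exactly as in the question's code
        e.Graphics.DrawImage(bmp, e.MarginBounds.X, e.MarginBounds.Y);
    }
}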