We apply a D2D device-context effect (GaussianBlurEffect) to a 4000×3000, 32-bit JPG. SharpDX takes 270 ms, while the C++ DirectX version takes 6 ms, so it seems that hardware acceleration is not being used. The SharpDX code is below.
var desc = new SwapChainDescription()
{
BufferCount = 1,
ModeDescription = new ModeDescription(Width, Height, new Rational(60, 1), Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = this.Handle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput
};
// Create Device and SwapChain
SharpDX.Direct3D11.Device.CreateWithSwapChain(SharpDX.Direct3D.DriverType.Hardware, DeviceCreationFlags.BgraSupport,
new[] { SharpDX.Direct3D.FeatureLevel.Level_10_0 }, desc, out _device, out _swapChain);
// Ignore all windows events
SharpDX.DXGI.Factory factory = _swapChain.GetParent<SharpDX.DXGI.Factory>();
factory.MakeWindowAssociation(this.Handle, WindowAssociationFlags.IgnoreAll); // change this to switch rendering
Factory2D = new SharpDX.Direct2D1.Factory();
_backBuffer = Texture2D.FromSwapChain<Texture2D>(_swapChain, 0);
_backBufferView = new RenderTargetView(_device, _backBuffer);
_d3DDeviceContext = _device.ImmediateContext;
_d3DDeviceContext.OutputMerger.SetRenderTargets(_backBufferView);
_d3DDeviceContext.ClearRenderTargetView(_backBufferView, ColorToRaw4(SharpDX.Color.Gray));
using (var surface = BackBuffer.QueryInterface<Surface>()) // the back buffer (Texture2D) as a DXGI surface
using (var dxgiDevice = _device.QueryInterface<dxgi.Device>()) // the D3D11 device as a DXGI device
{
// Set up the RenderTarget2D factory here; the drawing surface is Form1 (configured above)
RenderTarget2D = new RenderTarget(Factory2D, surface, new RenderTargetProperties(new SharpDX.Direct2D1.PixelFormat(Format.Unknown, AlphaMode.Premultiplied)));
RenderTarget2D2 = new RenderTarget(Factory2D, surface, new RenderTargetProperties(new SharpDX.Direct2D1.PixelFormat(Format.Unknown, AlphaMode.Premultiplied)));
d2dDevice = new d2.Device(dxgiDevice); // initialize the D2D device
d2dContext = new d2.DeviceContext(d2dDevice, d2.DeviceContextOptions.EnableMultithreadedOptimizations);// initialize the DeviceContext - it will be the D2D render target and will allow all rendering operations
// d2dContext = new d2.DeviceContext(surface);
var d2PixelFormat = new d2.PixelFormat(dxgi.Format.R8G8B8A8_UNorm, d2.AlphaMode.Premultiplied);
var d2dBitmapProps = new d2.BitmapProperties1(d2PixelFormat, 96, 96, d2.BitmapOptions.Target | d2.BitmapOptions.CannotDraw);
d2dContext.Target = new d2.Bitmap1(d2dContext, surface, d2dBitmapProps);
sen();
}
Thanks.
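For context, here is a minimal sketch of how the GaussianBlurEffect would typically be applied on the D2D device context. The blur code was not part of the post; sourceBitmap stands in for a d2.Bitmap1 already decoded from the JPG (e.g. via WIC), and the parameter values are illustrative.
// Sketch only: apply GaussianBlurEffect on the device context and draw the result into its target.
using (var blur = new d2.Effects.GaussianBlur(d2dContext))
{
    blur.SetInput(0, sourceBitmap, true); // sourceBitmap: assumed pre-loaded d2.Bitmap1
    blur.StandardDeviation = 5.0f;        // illustrative blur amount
    d2dContext.BeginDraw();
    d2dContext.DrawImage(blur.Output);    // draw the blurred image into d2dContext.Target
    d2dContext.EndDraw();
}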
I am having difficulties exporting a LiveCharts PieChart to a .png file.
So far I have tried drawing the control to a bitmap with DrawToBitmap, but it just outputs a black image. I have discarded other alternatives such as screenshots because the chart is not meant to be displayed in a custom form for visualization; its main purpose is simply exporting the statistics as a graphic.
This is my main code:
LiveCharts.WinForms.PieChart chart = initializePieChart2DFolder(true);
Bitmap bmp = new Bitmap(chart.Width, chart.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
chart.DrawToBitmap(bmp, new System.Drawing.Rectangle(0, 0, bmp.Width, bmp.Height));
bmp.Save("graphFolder.png", System.Drawing.Imaging.ImageFormat.Png);
This is the method I use to create the Pie chart:
private LiveCharts.WinForms.PieChart initializePieChart2DFolder()
{
LiveCharts.WinForms.PieChart chart = new LiveCharts.WinForms.PieChart();
chart.Anchor = System.Windows.Forms.AnchorStyles.None;
chart.Location = new System.Drawing.Point(17, 56);
chart.Name = "pieChart2DFolder";
chart.Size = new System.Drawing.Size(364, 250);
chart.TabIndex = 0;
chart.BackColorTransparent = false;
chart.BackColor = Color.White;
chart.ForeColor = Color.Black;
SeriesCollection chartData = new SeriesCollection();
foreach(var annot in numAnnotsPerLabel)
{
System.Windows.Media.Color newColor = System.Windows.Media.Color.FromArgb(color[annot.Key].A, color[annot.Key].R, color[annot.Key].G, color[annot.Key].B);
chartData.Add( new PieSeries { Title = annot.Key,
Values = new ChartValues<int> { annot.Value },
DataLabels = true,
Stroke = System.Windows.Media.Brushes.DimGray,
Foreground = System.Windows.Media.Brushes.Black,
FontSize = 9,
Fill = new
System.Windows.Media.SolidColorBrush(newColor)});
}
chart.Series = chartData;
DefaultLegend customLegend = new DefaultLegend();
customLegend.BulletSize = 15;
customLegend.Foreground = System.Windows.Media.Brushes.Black;
customLegend.Orientation = System.Windows.Controls.Orientation.Vertical;
customLegend.FontSize = 10;
chart.DefaultLegend = customLegend;
chart.LegendLocation = LegendLocation.Right;
var tooltip = chart.DataTooltip as DefaultTooltip;
tooltip.SelectionMode = LiveCharts.TooltipSelectionMode.OnlySender;
return chart;
}
Thank you very much in advance!
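One approach sometimes used for WPF-hosted WinForms controls like this, instead of DrawToBitmap, is to render the hosted WPF element with RenderTargetBitmap. The sketch below assumes the LiveCharts WinForms chart derives from ElementHost and exposes the hosted WPF visual through Child; verify those assumptions against your LiveCharts version.
// Hedged sketch, not from the original post: render the hosted WPF element directly.
var wpfChart = chart.Child as System.Windows.FrameworkElement; // assumes ElementHost.Child
wpfChart.Measure(new System.Windows.Size(chart.Width, chart.Height));
wpfChart.Arrange(new System.Windows.Rect(0, 0, chart.Width, chart.Height));
wpfChart.UpdateLayout();
var rtb = new System.Windows.Media.Imaging.RenderTargetBitmap(
    chart.Width, chart.Height, 96, 96, System.Windows.Media.PixelFormats.Pbgra32);
rtb.Render(wpfChart);
var pngEncoder = new System.Windows.Media.Imaging.PngBitmapEncoder();
pngEncoder.Frames.Add(System.Windows.Media.Imaging.BitmapFrame.Create(rtb));
using (var fs = System.IO.File.Create("graphFolder.png"))
{
    pngEncoder.Save(fs);
}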
I'm trying to create an off-screen bitmap to draw on, and then draw it with Direct2D1.RenderTarget.DrawBitmap. So I create a Texture2D and get the Bitmap from it, but I receive the error
[D2DERR_UNSUPPORTED_PIXEL_FORMAT/UnsupportedPixelFormat]
on the last line of the code. Please help me understand what I have done wrong here.
m_texture = new Texture2D(
context.Device,
new Texture2DDescription() {
ArraySize = 1,
BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
CpuAccessFlags = CpuAccessFlags.None,
Format = Format.B8G8R8A8_UNorm,
Height = bitmapSize.Height,
Width = bitmapSize.Width,
MipLevels = 1,
OptionFlags = ResourceOptionFlags.None,
SampleDescription = new SampleDescription() {
Count = 1,
Quality = 0
},
Usage = ResourceUsage.Default
}
);
m_surface = m_texture.QueryInterface<Surface>();
using (SharpDX.Direct2D1.Factory factory = new SharpDX.Direct2D1.Factory()) {
m_renderTarget = new RenderTarget(
factory,
m_surface,
new RenderTargetProperties() {
DpiX = 0.0f, // default dpi
DpiY = 0.0f, // default dpi
MinLevel = SharpDX.Direct2D1.FeatureLevel.Level_DEFAULT,
Type = RenderTargetType.Hardware,
Usage = RenderTargetUsage.None,
PixelFormat = new SharpDX.Direct2D1.PixelFormat(
Format.Unknown,
AlphaMode.Premultiplied
)
}
);
}
m_bitmap = new SharpDX.Direct2D1.Bitmap(m_renderTarget, m_surface);
public static SharpDX.Direct2D1.Bitmap GetBitmapFromSRV(SharpDX.Direct3D11.ShaderResourceView srv, RenderTarget renderTarget)
{
using (var texture = srv.ResourceAs<Texture2D>())
using (var surface = texture.QueryInterface<Surface>())
{
var bitmap = new SharpDX.Direct2D1.Bitmap(renderTarget, surface, new SharpDX.Direct2D1.BitmapProperties(new SharpDX.Direct2D1.PixelFormat(
Format.R8G8B8A8_UNorm,
SharpDX.Direct2D1.AlphaMode.Premultiplied)));
return bitmap;
}
}
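Applied back to the original code, the same approach would look roughly like this (a sketch only, with the BitmapProperties spelled out to match the texture's B8G8R8A8_UNorm format instead of being left unspecified):
// Sketch: pass explicit BitmapProperties that match the underlying texture format.
m_bitmap = new SharpDX.Direct2D1.Bitmap(
    m_renderTarget,
    m_surface,
    new SharpDX.Direct2D1.BitmapProperties(
        new SharpDX.Direct2D1.PixelFormat(Format.B8G8R8A8_UNorm, SharpDX.Direct2D1.AlphaMode.Premultiplied)));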
I'm trying to save a user-generated Texture2D to disk, but it looks like the standard ways of accomplishing this aren't available when targeting DirectX11.1 / Win8.1 / Metro. ToStream/FromStream are absent, and the DX11 methods for writing a texture to disk are absent as well.
Any ideas or suggestions?
Here's the general solution I ended up with. Getting a DataStream through mapping a texture resource is what got me on the right track.
The general flow is: create a staging texture with CpuAccessFlags.Read, CopyResource from the source texture into the new texture, then read the data from the new texture into a WIC Bitmap.
public static class Texture2DExtensions
{
public static void Save(this Texture2D texture, IRandomAccessStream stream, DeviceManager deviceManager)
{
var textureCopy = new Texture2D(deviceManager.DeviceDirect3D, new Texture2DDescription
{
Width = (int)texture.Description.Width,
Height = (int)texture.Description.Height,
MipLevels = 1,
ArraySize = 1,
Format = texture.Description.Format,
Usage = ResourceUsage.Staging,
SampleDescription = new SampleDescription(1, 0),
BindFlags = BindFlags.None,
CpuAccessFlags = CpuAccessFlags.Read,
OptionFlags = ResourceOptionFlags.None
});
deviceManager.ContextDirect3D.CopyResource(texture, textureCopy);
DataStream dataStream;
var dataBox = deviceManager.ContextDirect3D.MapSubresource(
textureCopy,
0,
0,
MapMode.Read,
SharpDX.Direct3D11.MapFlags.None,
out dataStream);
var dataRectangle = new DataRectangle
{
DataPointer = dataStream.DataPointer,
Pitch = dataBox.RowPitch
};
var bitmap = new Bitmap(
deviceManager.WICFactory,
textureCopy.Description.Width,
textureCopy.Description.Height,
PixelFormat.Format32bppBGRA,
dataRectangle);
using (var s = stream.AsStream())
{
s.Position = 0;
using (var bitmapEncoder = new PngBitmapEncoder(deviceManager.WICFactory, s))
{
using (var bitmapFrameEncode = new BitmapFrameEncode(bitmapEncoder))
{
bitmapFrameEncode.Initialize();
bitmapFrameEncode.SetSize(bitmap.Size.Width, bitmap.Size.Height);
var pixelFormat = PixelFormat.FormatDontCare;
bitmapFrameEncode.SetPixelFormat(ref pixelFormat);
bitmapFrameEncode.WriteSource(bitmap);
bitmapFrameEncode.Commit();
bitmapEncoder.Commit();
}
}
}
deviceManager.ContextDirect3D.UnmapSubresource(textureCopy, 0);
textureCopy.Dispose();
bitmap.Dispose();
}
}
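A usage sketch, assuming an async context and hypothetical names: myTexture is an existing Texture2D, deviceManager a DeviceManager, and outputFile a Windows.Storage.StorageFile for the destination PNG.
// Usage sketch: write the texture out as a PNG through a WinRT stream.
using (var stream = await outputFile.OpenAsync(Windows.Storage.FileAccessMode.ReadWrite))
{
    myTexture.Save(stream, deviceManager);
}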
I'm trying to generate an image for a tile, consisting of a Grid with a background color and a PNG with a transparent background.
var TestTile = new Grid()
{
Background = colTemp,
HorizontalAlignment = HorizontalAlignment.Stretch,
VerticalAlignment = VerticalAlignment.Stretch,
Margin = new Thickness( 0, 12, 0, 0 ),
};
TestTile.Arrange(new Rect(0, 0, 366, 366));
var ico = new Image() {
Source = new BitmapImage(new Uri("Images/mCloudSunT.png", UriKind.Relative)),
};
TestTile.Children.Add(ico);
...
bitmap.Render(TestTile, new TranslateTransform());
...
I get the image with the background color but without the PNG. There is no error and the URI is correct (tested).
New code:
var colTemp = new SolidColorBrush(Color.FromArgb(255, 174, 190, 206));
var TestTile = new Grid()
{
Background = colTemp,
Height = 336,
Width = 336,
};
var ico = new Image() {
Source = new BitmapImage(new Uri("Images/mCloudSunT.png", UriKind.Relative)),
};
Grid.SetColumn(ico, 0);
Grid.SetRow(ico, 0);
TestTile.Children.Add(ico);
TestTile.Measure(new Size(336, 336));
TestTile.Arrange(new Rect(0, 0, 366, 366));
TestTile.UpdateLayout();
Here is the code for generating the image:
using (IsolatedStorageFile store = IsolatedStorageFile.GetUserStoreForApplication())
{
if (!store.DirectoryExists("shared/shellcontent"))
{
store.CreateDirectory("shared/shellcontent");
}
var bitmap = new WriteableBitmap(336, 336);
bitmap.Render(TestTile, new TranslateTransform());
var stream = store.CreateFile("/shared/shellcontent/test.jpg");
bitmap.Invalidate();
bitmap.SaveJpeg(stream, 366, 336, 0, 100);
stream.Close();
}
And here is the code for using it as the tile background:
ShellTile PinnedTile = ShellTile.ActiveTiles.First();
FlipTileData UpdatedTileData = new FlipTileData
{
BackgroundImage = new Uri("isostore:/shared/shellcontent/test.jpg", UriKind.RelativeOrAbsolute),
};
PinnedTile.Update(UpdatedTileData);
Call Measure and then Arrange after adding the icon.
TestTile.Measure( new Size( 366,366 ) );
TestTile.Arrange( new Rect( 0, 0, 366, 366 ) );
TestTile.UpdateLayout();
...
WriteableBitmap...
I see no reason for calling SetColumn, SetRow, Measure, Arrange, or UpdateLayout. Also, after calling Render(), call Invalidate(). Try this:
var colTemp = new SolidColorBrush(Color.FromArgb(255, 174, 190, 206));
var TestTile = new Grid()
{
Background = colTemp,
Height = 336,
Width = 336,
};
var ico = new Image() {
Source = new BitmapImage(new Uri("Images/mCloudSunT.png", UriKind.Relative)),
};
TestTile.Children.Add(ico);
....
bitmap.Render(TestTile, new TranslateTransform());
bitmap.Invalidate();
I've been having a whale of a time diagnosing a leak in a service which renders multiple images to a canvas and then pushes out a bitmap or PNG at the end (and does this multiple times). On start-up the service rockets up to 600 MB+ and then keeps increasing until it has taken almost everything it can get hold of. It never decreases once started.
I've instrumented and run the service using the VS2012 performance monitor and seen that there are large numbers of byte arrays lying around after processing has completed. I've tried forcing a GC to see if it would wipe them out, to no avail. Looking into it with WinDbg, I can see that the byte arrays are being held onto by a long chain of items (mostly WPF objects) which is keeping the images in RAM.
I've looked over MSDN for all the objects being used and can't find anything that points to a problem with the way the code is running.
I've put together some sample code that closely follows the service (some bits reduced for brevity). This is called off a thread (set to STA). The only other difference is that the service code uses MemoryStreams to load images sourced from a DB rather than the URIs I am using. The streams are disposed of:
public static void TestThread()
{
const int rounds = 50;
const int innerRounds = 50;
var randomiser = new Random(DateTime.Now.Millisecond);
// Simulating some live values
const float scaling = 2.67F;
const int pageWidth = 363;
const int pageHeight = 516;
const int dpi = 96;
// To simulate the live system using multiple images
// This is an list of images of different sizes etc
var imageList = new List<ImageData>
{
new ImageData{Uri = new Uri(@"..."), Height = 2592},
new ImageData{Uri = new Uri(@"..."), Height = 1339},
new ImageData{Uri = new Uri(@"..."), Height = 386},
new ImageData{Uri = new Uri(@"..."), Height = 968},
new ImageData{Uri = new Uri(@"..."), Height = 1952},
new ImageData{Uri = new Uri(@"..."), Height = 1024},
};
var proc = Process.GetCurrentProcess();
for (var i = 0; i < rounds; ++i)
{
var canvas = new Canvas();
canvas.BeginInit();
canvas.SnapsToDevicePixels = false;
canvas.UseLayoutRounding = false;
canvas.Width = pageWidth;
canvas.Height = pageHeight;
canvas.Background = Brushes.White;
for (var j = 0; j < innerRounds; ++j)
{
var img = new Image {Stretch = Stretch.Fill};
var bitmapImage = new BitmapImage();
bitmapImage.BeginInit();
bitmapImage.CacheOption = BitmapCacheOption.OnLoad;
var imageNo = randomiser.Next(0, imageList.Count - 1);
bitmapImage.UriSource = imageList[imageNo].Uri;
int imageHeight = imageList[imageNo].Height;
bitmapImage.DecodePixelHeight = (int) (imageHeight * scaling * 1.8);
bitmapImage.EndInit();
if (bitmapImage.CanFreeze)
{
bitmapImage.Freeze();
}
var opacityMask = new ImageBrush();
var opactityBitmap = new BitmapImage();
opactityBitmap.BeginInit();
opactityBitmap.CacheOption = BitmapCacheOption.OnLoad;
imageNo = randomiser.Next(0, imageList.Count - 1);
opactityBitmap.UriSource = imageList[imageNo].Uri;
int opacityImageHeight = imageList[imageNo].Height;
opactityBitmap.DecodePixelHeight = (int)(opacityImageHeight * scaling * 1.8);
opactityBitmap.EndInit();
if (opactityBitmap.CanFreeze)
{
opactityBitmap.Freeze();
}
opacityMask.ImageSource = opactityBitmap;
img.OpacityMask = opacityMask;
img.Source = bitmapImage;
img.Width = pageWidth * scaling;
img.Height = pageHeight * scaling;
Canvas.SetLeft(img, 0);
Canvas.SetTop(img, 0);
canvas.Children.Add(img);
img.Opacity = 50F;
}
canvas.LayoutTransform = null;
var size = new Size(Math.Max(canvas.Width, 5), Math.Max(canvas.Height, 5));
canvas.Measure(size);
canvas.Arrange(new Rect(size));
canvas.EndInit();
var renderTargetBitmap = new RenderTargetBitmap(pageWidth, pageHeight, dpi, dpi, PixelFormats.Default); //xxx
renderTargetBitmap.Render(canvas);
if (renderTargetBitmap.CanFreeze)
{
renderTargetBitmap.Freeze();
}
System.Drawing.Image imageData;
using (var ms = new MemoryStream())
{
var image = new PngBitmapEncoder();
image.Frames.Add(BitmapFrame.Create(renderTargetBitmap));
image.Save(ms);
imageData = System.Drawing.Image.FromStream(ms);
}
var encoder = Encoder.Quality;
var encoderParams = new EncoderParameters(1);
encoderParams.Param[0] = new EncoderParameter(encoder, 100L);
ImageCodecInfo[] codecs = ImageCodecInfo.GetImageDecoders();
var codecInfo = codecs.FirstOrDefault(x => x.FormatID == ImageFormat.Png.Guid);
Byte[] bitmapArray;
using (var ms = new MemoryStream())
{
imageData.Save(ms, codecInfo, encoderParams);
bitmapArray = ms.GetBuffer();
}
var filepath = string.Format(@"C:\temp\{0}.png", i);
imageData.Save(filepath, ImageFormat.Png);
var gcMemory = GC.GetTotalMemory(false) / 1024;
Console.WriteLine("Proc mem = {0}KB, GC = {1}KB", (proc.PrivateMemorySize64 / 1024), gcMemory);
}
Console.WriteLine("Exiting");
}
I'm hoping it's something obvious I'm missing here. Thanks for all your help!