I want to develop a noise reduction project. How do I find pixel coordinates in an image?
You can try this:
Bitmap bmp = new Bitmap("filename");
//to get the pixel color
Color c= bmp.GetPixel(50,50);
//to set the color of the pixel
bmp.SetPixel(50, 50, Color.Green);
The code below will give you all the pixel coordinates in an image:
namespace WindowsFormsApplication4
{
using System.Collections.Generic;
using System.Drawing;
using System.Windows.Forms;
public partial class Form1 : Form
{
private const string FILE_NAME = @"C:\Temp\Capture.png";
private const double BW_THRESHOLD = 0.5;
private readonly Color colorBlack =
Color.FromArgb(255, 0, 0, 0);
private readonly Color colorWhite =
Color.FromArgb(255, 255, 255, 255);
private readonly Bitmap originalImage;
private readonly Bitmap convertedImage;
private readonly List<Vertex> vertices = new List<Vertex>();
public Form1()
{
InitializeComponent();
pictureBox1.ImageLocation = FILE_NAME;
this.originalImage = new Bitmap(FILE_NAME);
this.convertedImage = this.Img2BW(this.originalImage, BW_THRESHOLD);
foreach (Vertex vert in this.vertices)
{
listBox1.Items.Add(vert.ToString());
}
}
private Bitmap Img2BW(Bitmap imgSrc, double threshold)
{
int width = imgSrc.Width;
int height = imgSrc.Height;
Color pixel;
Bitmap imgOut = new Bitmap(imgSrc);
for (int row = 0; row < height; row++)
{
for (int col = 0; col < width; col++)
{
pixel = imgSrc.GetPixel(col, row);
if (pixel.GetBrightness() < threshold)
{
this.vertices.Add(new Vertex(col, row));
imgOut.SetPixel(col, row, this.colorBlack);
}
else
{
imgOut.SetPixel(col, row, this.colorWhite);
}
}
}
return imgOut;
}
}
public class Vertex
{
public Vertex(int i, int j)
{
this.X = i;
this.Y = j;
}
public int X { get; set; }
public int Y { get; set; }
public override string ToString()
{
return string.Format("({0}/{1})", this.X, this.Y);
}
}
}
A pixel is merely a colour located at a position in a Cartesian coordinate system. To easily read and write these colours in an image, you can use the Bitmap class with its GetPixel and SetPixel methods.
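Since the stated goal is noise reduction, here is a minimal sketch of a 3x3 median-style filter built on those same GetPixel/SetPixel calls. The class and method names are placeholders, border pixels are left untouched for brevity, and for anything but small images you would normally switch to LockBits because per-pixel access is slow.
using System.Collections.Generic;
using System.Drawing;
public static class NoiseReduction
{
    // 3x3 median-style filter: each interior pixel is replaced by the neighbour
    // whose brightness is the median of its 3x3 neighbourhood.
    public static Bitmap MedianFilter(Bitmap source)
    {
        Bitmap result = new Bitmap(source); // start from a copy so border pixels stay as-is
        for (int y = 1; y < source.Height - 1; y++)
        {
            for (int x = 1; x < source.Width - 1; x++)
            {
                List<Color> neighbours = new List<Color>(9);
                for (int dy = -1; dy <= 1; dy++)
                    for (int dx = -1; dx <= 1; dx++)
                        neighbours.Add(source.GetPixel(x + dx, y + dy));
                // Sort the 9 samples by brightness and keep the middle one.
                neighbours.Sort((a, b) => a.GetBrightness().CompareTo(b.GetBrightness()));
                result.SetPixel(x, y, neighbours[4]);
            }
        }
        return result;
    }
}
Calling it could look like Bitmap cleaned = NoiseReduction.MedianFilter(new Bitmap(@"C:\Temp\Capture.png"));, reusing the same file as the listing above.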
I am in the process of developing an app that will be later integrated with another app I developed some time ago.
Basically, this app will generate an image file of X and Y dimensions with a grid printed on it, for which the user also specifies the interval.
I've built this form so far, but I am having difficulty deciding on the best way to generate the actual image with the proper dimensions and grid spacing.
I don't want to save the image that is displayed on the form because it is only a representation and could very well be extremely dissimilar from the final product.
So I guess my question is: what do you think is the best way to generate a black and white image when I click "save"?
Also, I have no need to see the image being saved; I just want to generate and save it behind the scenes.
Here is the "draw" button click event
private void btnDraw_Click(object sender, EventArgs e)
{
this.Width = 560;
using (g = pb.CreateGraphics())
{
g.Clear(Color.White);
PaintCanvass canvass = new PaintCanvass();
canvass.Width = Convert.ToDouble(tbWidth.Text);
canvass.Height = Convert.ToDouble(tbHeight.Text);
canvass.Resolution = Convert.ToInt32(cbResolution.Text.Substring(0,3));
canvass.UOM = cbUOM.Text;
canvass.Interval = Convert.ToInt32(tbInterval.Text);
Pen pencil = new Pen(Color.Black, 2);
int hpfact = Convert.ToInt32((double)pb.Height / (double)canvass.horizontalLinesQuantity);
int hp = hpfact;
for (int i = 0; i < canvass.horizontalLinesQuantity-1; i++)
{
g.DrawLine(pencil, new Point(0, hp), new Point(pb.Width, hp));
hp = hp + hpfact;
}
int vpfact = Convert.ToInt32((double)pb.Width / (double)canvass.verticalLinesQuantity);
int vp = vpfact;
for (int i = 0; i < canvass.verticalLinesQuantity-1; i++)
{
g.DrawLine(pencil, new Point(vp, 0), new Point(vp, pb.Height));
vp = vp + vpfact;
}
canvass = null;
}
}
And here is my PaintCanvass class, which for now just ends up being a container for properties:
class PaintCanvass
{
public double Width { get; set; }
public double Height { get; set; }
public string UOM { get; set; }
public int Resolution { get; set; }
public int Interval { get; set; } = 50;
public int hdots
{
get
{
if (this.UOM == "mm")
{
return Convert.ToInt32(Width * 0.03937008F * Resolution);
}
else
{
return Convert.ToInt32(Width * Resolution);
}
}
}
public int vdots
{
get
{
// Set the quantity of lines
if (this.UOM == "mm")
{
return Convert.ToInt32(Height * 0.03937008F * Resolution);
}
else
{
return Convert.ToInt32(Height * Resolution);
}
}
}
public int horizontalLinesQuantity
{
get
{
return vdots / this.Interval;
}
}
public int verticalLinesQuantity
{
get
{
return hdots / this.Interval;
}
}
}
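As a quick sanity check on the unit conversion above (the numbers are just an example): a canvas 100 mm wide at 300 DPI gives hdots = Convert.ToInt32(100 * 0.03937008 * 300) ≈ 1181, so with the default Interval of 50 that canvas would get verticalLinesQuantity = 1181 / 50 = 23 vertical lines.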
Edit: as suggested, I went with the bitmap method.
private void btnSave_Click(object sender, EventArgs e)
{
SetupCanvass();
using (Bitmap bmp = new Bitmap(canvass.hdots, canvass.vdots))
{
using (Graphics g = Graphics.FromImage(bmp))
{
using (SolidBrush brush = new SolidBrush(Color.FromArgb(255, 255, 255)))
{
g.FillRectangle(brush, 0, 0, canvass.hdots, canvass.vdots);
}
int hp = canvass.Interval;
for (int i = 0; i < canvass.horizontalLinesQuantity - 1; i++)
{
g.DrawLine(pencil, new Point(0, hp), new Point(canvass.hdots, hp));
hp = hp + canvass.Interval;
}
int vp = canvass.Interval;
for (int i = 0; i < canvass.verticalLinesQuantity - 1; i++)
{
g.DrawLine(pencil, new Point(vp, 0), new Point(vp, canvass.vdots));
vp = vp + canvass.Interval;
}
}
bmp.Save(Path.Combine(Path.GetTempPath(), "labelGrid.png")); //Save image somewhere on disk
}
}
If you want to save an image without displaying a preview, just do something like this:
using (Bitmap bmp = new Bitmap(500, 500))
{
using (Graphics g = Graphics.FromImage(bmp))
{
//Draw your stuff directly onto the bitmap here
}
bmp.Save("C:\\image.bmp"); //Save image somwhere on disk
}
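One extra detail that may matter here, since the form also captures a print resolution: Bitmap.SetResolution stores the DPI metadata in the saved file, and passing an explicit ImageFormat avoids relying on the default encoder. A rough sketch, where the pixel size and output path are placeholders:
using (Bitmap bmp = new Bitmap(1200, 1800))
{
    bmp.SetResolution(300f, 300f); // record the intended print density (DPI) in the file's metadata
    using (Graphics g = Graphics.FromImage(bmp))
    {
        g.Clear(Color.White);
        // ...draw the grid lines here, as in the snippets above...
    }
    bmp.Save(@"C:\Temp\labelGrid.png", System.Drawing.Imaging.ImageFormat.Png); // PNG keeps black-and-white lines crisp
}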
I'm drawing rectangles in a panel starting from the left side.
When I reach the right side of the panel I'd like to shift left the rectangles previously drawn to have the space to draw another one, and so on.
Which is the simplest way to do it?
I'm drawing using System.Drawing.Graphics.
I'm using Winforms. The code is:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Runtime.InteropServices;
namespace FFViewer
{
public partial class Form1 : Form
{
[DllImport("shlwapi.dll")]
public static extern int ColorHLSToRGB(int H, int L, int S);
int x=0;
int y=300;
int h = 0;
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
panel_Paint();
x = x + 40;
h = h + 40;
if (h >= 240)
h = 0;
}
private void panel_Paint()
{
int val;
Color color;
val = ColorHLSToRGB(h, 128, 240);
Graphics g = panel1.CreateGraphics();
color = ColorTranslator.FromWin32(val);
SolidBrush sb = new SolidBrush( color );
g.FillRectangle(sb, x, y, 40, 100);
}
}
}
So, when I have drawn the last rectangle that fits on the right side, I'd like to shift all the rectangles left to make space for another one on the right.
P.S. I don't have enough reputation to post images :(
Here's the "old school" way of doing it. This is basically what was done when a continuous graph of a real-time value needed to be displayed AND you didn't want to store the any of the values anywhere. This has severe limitations as it copies from the screen and the drawing will be erased the when window repaints itself. This first example is simply here to demonstrate the process, and is an extension of the way you created the initial blocks:
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
[DllImport("shlwapi.dll")]
public static extern int ColorHLSToRGB(int H, int L, int S);
int x = 0;
int width = 40;
int y = 300;
int height = 100;
int h = 0;
private void button1_Click(object sender, EventArgs e)
{
if (x + width > panel1.ClientSize.Width) // if drawing the next block would exceed the panel width...
{
// capture what's currently on the screen
Bitmap bmp = new Bitmap(x, height);
using (Graphics g = Graphics.FromImage(bmp))
{
g.CopyFromScreen(panel1.PointToScreen(new Point(0, y)), new Point(0, 0), bmp.Size);
}
// draw it shifted to the left
using (Graphics g = panel1.CreateGraphics())
{
g.DrawImage(bmp, new Point(-width, y));
}
// move x back so the new rectangle will draw where the last one was previously
x = x - width;
}
// draw the new block and increment values
panel_Paint();
x = x + width;
h = h + width;
if (h >= 240)
{
h = 0;
}
}
private void panel_Paint()
{
int val;
Color color;
val = ColorHLSToRGB(h, 128, 240);
color = ColorTranslator.FromWin32(val);
using (Graphics g = panel1.CreateGraphics())
{
using (SolidBrush sb = new SolidBrush(color))
{
g.FillRectangle(sb, x, y, width, height);
}
}
}
}
This could be fixed by creating a Bitmap of the correct size and drawing to that instead. Then you shift everything and draw the new block on the right side. Finally, you'd draw that Bitmap in the Paint() event. So this is doing the same thing as above except we aren't copying from the screen, and the panel will properly redraw itself when requested:
public partial class Form1 : Form
{
[DllImport("shlwapi.dll")]
public static extern int ColorHLSToRGB(int H, int L, int S);
int x = 0;
int width = 40;
int y = 300;
int height = 100;
int h = 0;
Bitmap bmp;
public Form1()
{
InitializeComponent();
this.Load += Form1_Load;
panel1.Paint += Panel1_Paint;
}
private void Form1_Load(object sender, EventArgs e)
{
int numBlocks = (int)(panel1.Width / width);
bmp = new Bitmap(numBlocks * width, height);
using (Graphics g = Graphics.FromImage(bmp))
{
g.Clear(panel1.BackColor);
}
}
private void Panel1_Paint(object sender, PaintEventArgs e)
{
if (bmp != null)
{
e.Graphics.DrawImage(bmp, new Point(0, y));
}
}
private void button1_Click(object sender, EventArgs e)
{
using (Graphics g = Graphics.FromImage(bmp))
{
if (x + width > bmp.Width) // if drawing the next block would exceed the bmp width...
{
g.DrawImage(bmp, new Point(-width, 0)); // draw ourself shifted to the left
x = x - width;
}
// draw the new block
int val;
Color color;
val = ColorHLSToRGB(h, 128, 240);
color = ColorTranslator.FromWin32(val);
using (SolidBrush sb = new SolidBrush(color))
{
g.FillRectangle(sb, x, 0, width, height);
}
}
x = x + width;
h = h + width;
if (h >= 240)
{
h = 0;
}
panel1.Invalidate(); // force panel1 to redraw itself
}
}
You should not use panel1.CreateGraphics(), but instead handle the Paint event of the panel; otherwise the rectangles might disappear, for instance after a popup appears in front of your form:
panel1.Paint += new PaintEventHandler(panel1_paint);
You'll need to paint all (visible) rectangles in the paint handler; you could keep a List<Rectangle> in your form to store the rectangles you have added:
private List<Rectangle> rectangles = new List<Rectangle>();
...
private void button1_Click(object sender, EventArgs e)
{
rectangles.Add(new Rectangle(x, y, width, height));
panel1.Invalidate(); // cause the paint event to be called
// todo increment x/y
}
Then, in the panel1_Paint handler you can simply draw the rectangles, after having called Graphics.TranslateTransform() to shift the whole drawing area:
private void panel1_Paint(object sender, PaintEventArgs e)
{
e.Graphics.TranslateTransform(-x, 0);
foreach (Rectangle rectangle in rectangles)
{
e.Graphics.FillRectangle(Brushes.Blue, rectangle); // paint each stored rectangle (the brush colour is arbitrary here)
}
e.Graphics.ResetTransform();
}
I've got two images; one of them I'll create using Graphics (a simple circle/ellipse).
Now I want to remove a part of the circle using another image. It should support removing alpha values too.
I hope the link works; if not, please say so in the comments and I'll fix it.
Thanks for any advice
EDIT:
Image 2 does not really have any border; it is just there to show the frame size...
The following will do:
using System;
using System.Windows;
using System.Windows.Media;
using System.Windows.Media.Imaging;
namespace WpfApplication4
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
public MainWindow()
{
InitializeComponent();
Loaded += MainWindow_Loaded;
}
private void MainWindow_Loaded(object sender, RoutedEventArgs e)
{
var image1 = new BitmapImage(new Uri("1.png", UriKind.Relative));
var image2 = new BitmapImage(new Uri("2.png", UriKind.Relative));
var bitmap1 = BitmapFactory.ConvertToPbgra32Format(image1);
var bitmap2 = BitmapFactory.ConvertToPbgra32Format(image2);
var width = 256;
var height = 256;
var bitmap3 = BitmapFactory.New(width, height);
var transparent = Color.FromArgb(0, 0, 0, 0);
for (var y = 0; y < height; y++)
{
for (var x = 0; x < width; x++)
{
var color1 = bitmap1.GetPixel(x, y);
var color2 = bitmap2.GetPixel(x, y);
Color color3;
if (color1.Equals(transparent))
{
color3 = transparent;
}
else
{
if (color2.Equals(transparent))
{
color3 = color1;
}
else
{
color3 = transparent;
}
}
bitmap3.SetPixel(x, y, color3);
}
}
Image1.Source = bitmap3;
}
}
}
I've used https://www.nuget.org/packages/WriteableBitmapEx/ to make things simpler. Be careful about having 32-bit PNGs, and about what counts as a transparent color, because in WPF it is in fact transparent white.
If WinForms is what you're using, you should be able to translate that easily with the System.Drawing.Bitmap class: https://msdn.microsoft.com/en-us/library/system.drawing.bitmap%28v=vs.110%29.aspx.
EDIT: you could have used an opacity mask, but since pic. 2 does not have its outer area dark, it wouldn't have worked.
Finally, I wrote the code by myself. This is it:
public static Bitmap RemovePart(Bitmap source, Bitmap toRemove)
{
Color c1, c2, c3;
c3 = Color.FromArgb(0, 0, 0, 0);
for (int x = 0; x < source.Width; x++)
{
for (int y = 0; y < source.Height; y++)
{
c1 = source.GetPixel(x, y);
c2 = toRemove.GetPixel(x, y);
if (c2 != c3)
{
source.SetPixel(x, y, Color.FromArgb(c2.A, c1));
}
}
}
return source;
}
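A possible way to call it (the file names are placeholders, both bitmaps are assumed to have the same size, and GetPixel/SetPixel will be slow on large images):
Bitmap circle = new Bitmap("circle.png");  // the drawn ellipse
Bitmap mask = new Bitmap("mask.png");      // where this image is not fully transparent, its alpha is copied onto the circle
Bitmap result = RemovePart(circle, mask);  // note: RemovePart edits and returns the first bitmap
result.Save("result.png", System.Drawing.Imaging.ImageFormat.Png);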
I am developing a Windows Forms application in C#. I have to add face recognition functionality. For that purpose I am using the OpenNI library. The hardware for video capture is an Xtion PRO LIVE.
I have successfully installed it and I was able to run the sample code. This code continuously records video and nothing else. I have modified it in such a way that, after pressing a Capture button, it saves the current picture to the hard drive (that part works fine).
Now what I want to accomplish is to detect a face by its facial landmarks, so that I can verify a person's image against saved images in a database. How can I do this with OpenNI?
Here is my code:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using OpenNI;
using System.Threading;
using System.Drawing.Imaging;
namespace CameraApp
{
public partial class MainWindow : Form
{
public MainWindow()
{
InitializeComponent();
this.context = Context.CreateFromXmlFile(SAMPLE_XML_FILE, out scriptNode);
this.depth = context.FindExistingNode(NodeType.Depth) as DepthGenerator;
if (this.depth == null)
{
throw new Exception("Viewer must have a depth node!");
}
this.histogram = new int[this.depth.DeviceMaxDepth];
MapOutputMode mapMode = this.depth.MapOutputMode;
this.bitmap = new Bitmap((int)mapMode.XRes, (int)mapMode.YRes, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
this.shouldRun = true;
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
protected override void OnPaint(PaintEventArgs e)
{
base.OnPaint(e);
lock (this)
{
e.Graphics.DrawImage(this.bitmap,
this.panelView.Location.X,
this.panelView.Location.Y,
this.panelView.Size.Width,
this.panelView.Size.Height);
}
}
protected override void OnPaintBackground(PaintEventArgs pevent)
{
//Don't allow the background to paint
}
protected override void OnClosing(CancelEventArgs e)
{
this.shouldRun = false;
this.readerThread.Join();
base.OnClosing(e);
}
protected override void OnKeyPress(KeyPressEventArgs e)
{
if (e.KeyChar == 27)
{
Close();
}
base.OnKeyPress(e);
}
private unsafe void CalcHist(DepthMetaData depthMD)
{
// reset
for (int i = 0; i < this.histogram.Length; ++i)
this.histogram[i] = 0;
ushort* pDepth = (ushort*)depthMD.DepthMapPtr.ToPointer();
int points = 0;
for (int y = 0; y < depthMD.YRes; ++y)
{
for (int x = 0; x < depthMD.XRes; ++x, ++pDepth)
{
ushort depthVal = *pDepth;
if (depthVal != 0)
{
this.histogram[depthVal]++;
points++;
}
}
}
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] += this.histogram[i - 1];
}
if (points > 0)
{
for (int i = 1; i < this.histogram.Length; i++)
{
this.histogram[i] = (int)(256 * (1.0f - (this.histogram[i] / (float)points)));
}
}
}
private unsafe void ReaderThread()
{
DepthMetaData depthMD = new DepthMetaData();
while (this.shouldRun)
{
try
{
this.context.WaitOneUpdateAll(this.depth);
}
catch (Exception)
{
}
this.depth.GetMetaData(depthMD);
CalcHist(depthMD);
lock (this)
{
Rectangle rect = new Rectangle(0, 0, this.bitmap.Width, this.bitmap.Height);
BitmapData data = this.bitmap.LockBits(rect, ImageLockMode.WriteOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
//ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
//// set pixels
//for (int y = 0; y < depthMD.YRes; ++y)
//{
// byte* pDest = (byte*)data.Scan0.ToPointer() + y * data.Stride;
// for (int x = 0; x < depthMD.XRes; ++x, ++pDepth, pDest += 3)
// {
// byte pixel = (byte)this.histogram[*pDepth];
// pDest[0] = 0;
// pDest[1] = pixel;
// pDest[2] = pixel;
// }
//}
// This will point to the depth image.
ushort* pDepth = (ushort*)this.depth.DepthMapPtr.ToPointer();
// This will point to the RGB image.
RGB24Pixel* pRGB =
(RGB24Pixel*)this.depth.DepthMapPtr.ToPointer();
// Go over the depth and RGB image and set the bitmaps
// we're copying to based on our depth & RGB values.
for (int y = 0; y < depthMD.YRes; ++y)
{
// Assuming that the size of each data frame is
// 640x480.
// Scan line by line (480 lines), each line
// consists of 640 pointers.
byte* pDest_Depth =
(byte*)data.Scan0.ToPointer() + y *
data.Stride;
byte* pDest_Rgb = (byte*)data.Scan0.ToPointer()
+ y * data.Stride;
for (int x = 0; x < depthMD.XRes; ++x,
++pDepth, pDest_Depth += 3,
++pRGB, pDest_Rgb += 3)
{
// Change the color of the bitmap
// based on depth value.
byte pixel = (byte)this.histogram[*pDepth];
pDest_Depth[0] = 0;
pDest_Depth[1] = pixel;
pDest_Depth[2] = pixel;
// Get the RGB values to generate
// a whole RGB image.
byte red = pRGB->Red;
byte green = pRGB->Green;
byte blue = pRGB->Blue;
// Get depth information.
ushort depthVal = *pDepth;
}
}
this.bitmap.UnlockBits(data);
}
this.Invalidate();
}
}
private readonly string SAMPLE_XML_FILE = @"C:/Program Files/Microsoft Visual Studio 10.0/Microsoft Visual Studio 2010 Ultimate - ENU/OpenNI/Data/SamplesConfig.xml";
private Context context;
private ScriptNode scriptNode;
private DepthGenerator depth;
private Thread readerThread;
private bool shouldRun;
private Bitmap bitmap;
private int[] histogram;
private void button1_Click(object sender, EventArgs e)
{
this.readerThread.Abort();
this.bitmap.Save("D:\\Screenshot.jpeg", System.Drawing.Imaging.ImageFormat.Jpeg);
this.readerThread = new Thread(ReaderThread);
this.readerThread.Start();
}
}
}
Any kind of help will be appreciated: any tutorial, link, anything!
For a couple of days now I've tried to figure out why my nine-slice code does not work as expected. As far as I can see, there seems to be an issue with the Graphics.DrawImage method, which handles my nine-slice images incorrectly. So my problem is how to compensate for the incorrect scaling that is performed when running my code on the Compact Framework. I might add that this code works perfectly when running in the full framework environment. The problem only occurs when scaling the image up, not the other way around. Here is the snippet:
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
/// <summary>
/// Initializes a new instance of the NineSliceBitmapSnippet class.
/// </summary>
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image to small for sliceing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}
So, my question is: does anybody know how I could compensate for the incorrect scaling?
/Dan
After some more trial and error I've finally found a solution to my problem. The scaling problems have always been in the top-center, right-center, bottom-center and left-center slices, since they're always stretched in only one direction according to the logic of nine-slice scaling. If I apply a temporary square stretch to those slices before applying the correct stretch, the final bitmap will be correct. Once again, the problem is only visible in the .NET Compact Framework on a Windows CE device (Smart Device). Here's a snippet with code adjusting for the bug in CF. My only concern now is that the slices that get square-stretched will take much more memory due to the correction code. On the other hand, this step only takes a short time, so I might get away with it. ;)
public class NineSliceBitmapSnippet
{
private Bitmap m_OriginalBitmap;
public int CornerLength { get; set; }
public NineSliceBitmapSnippet(Bitmap bitmap)
{
CornerLength = 5;
m_OriginalBitmap = bitmap;
}
public Bitmap Scale(Size size)
{
if (m_OriginalBitmap != null)
{
return ScaleSingleBitmap(size);
}
return null;
}
public Bitmap ScaleSingleBitmap(Size size)
{
Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
int[] horizontalTargetSlices = Slice(size.Width);
int[] verticalTargetSlices = Slice(size.Height);
int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);
using (Graphics graphics = Graphics.FromImage(scaledBitmap))
{
using (Brush brush = new SolidBrush(Color.Fuchsia))
{
graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
}
int horizontalTargetOffset = 0;
int verticalTargetOffset = 0;
int horizontalSourceOffset = 0;
int verticalSourceOffset = 0;
for (int x = 0; x < horizontalTargetSlices.Length; x++)
{
verticalTargetOffset = 0;
verticalSourceOffset = 0;
for (int y = 0; y < verticalTargetSlices.Length; y++)
{
Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
bool isWidthAffectedByVerticalStretch = (y == 1 && (x == 0 || x == 2) && destination.Height > source.Height);
bool isHeightAffectedByHorizontalStretch = (x == 1 && (y == 0 || y == 2) && destination.Width > source.Width);
if (isHeightAffectedByHorizontalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Horizontal);
}
else if (isWidthAffectedByVerticalStretch)
{
BypassDrawImageError(graphics, destination, source, Orientation.Vertical);
}
else
{
graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
}
verticalTargetOffset += verticalTargetSlices[y];
verticalSourceOffset += verticalSourceSlices[y];
}
horizontalTargetOffset += horizontalTargetSlices[x];
horizontalSourceOffset += horizontalSourceSlices[x];
}
}
return scaledBitmap;
}
private void BypassDrawImageError(Graphics graphics, Rectangle destination, Rectangle source, Orientation orientationAdjustment)
{
Size adjustedSize = Size.Empty;
switch (orientationAdjustment)
{
case Orientation.Horizontal:
adjustedSize = new Size(destination.Width, destination.Width);
break;
case Orientation.Vertical:
adjustedSize = new Size(destination.Height, destination.Height);
break;
default:
break;
}
using (Bitmap quadScaledBitmap = new Bitmap(adjustedSize.Width, adjustedSize.Height))
{
using (Graphics tempGraphics = Graphics.FromImage(quadScaledBitmap))
{
tempGraphics.Clear(Color.Fuchsia);
tempGraphics.DrawImage(m_OriginalBitmap, new Rectangle(0, 0, adjustedSize.Width, adjustedSize.Height), source, GraphicsUnit.Pixel);
}
graphics.DrawImage(quadScaledBitmap, destination, new Rectangle(0, 0, quadScaledBitmap.Width, quadScaledBitmap.Height), GraphicsUnit.Pixel);
}
}
public int[] Slice(int length)
{
int cornerLength = CornerLength;
if (length <= (cornerLength * 2))
throw new Exception("Image to small for sliceing up");
int[] slices = new int[3];
slices[0] = cornerLength;
slices[1] = length - (2 * cornerLength);
slices[2] = cornerLength;
return slices;
}
}