I'm looking at doing a project in C# that looks at an image file (not sure of the extension yet), notes its RGB values, and if it's too dark moves it to another folder for me to look at later.
So here it is in block form:
Load multiple images from a directory > check the RGB values of every file > if too dark, move to a different folder; if not, ignore (leave in the original folder).
I know the basics, like getting files from a directory, but checking the RGB values of a whole picture and then moving or ignoring it has me stumped.
I have this code:
private void button1_Click(object sender, EventArgs e)
{
    CompareImages(Environment.GetFolderPath(Environment.SpecialFolder.MyPictures),
        Path.Combine(Environment.GetFolderPath(Environment.SpecialFolder.DesktopDirectory), "checked"),
        127.0, new string[] { "*.jpg", "*.png" });
}

private void CompareImages(string sourceFolder, string disposedImgFolder, double threshold, string[] extensions)
{
    if (Directory.Exists(sourceFolder))
    {
        DirectoryInfo dir = new DirectoryInfo(sourceFolder);
        List<FileInfo> pictures = new List<FileInfo>();
        foreach (string ext in extensions)
        {
            FileInfo[] fi = dir.GetFiles(ext);
            pictures.AddRange(fi);
        }

        Directory.CreateDirectory(disposedImgFolder);
        int j = 0;
        if (pictures.Count > 0)
        {
            for (int i = 0; i < pictures.Count; i++)
            {
                Image img = null;
                Bitmap bmp = null;
                try
                {
                    img = Image.FromFile(pictures[i].FullName);
                    bmp = new Bitmap(img);
                    img.Dispose();

                    double avg = GetAveragePixelValue(bmp);
                    bmp.Dispose();

                    if (avg < threshold)
                    {
                        string dest = Path.Combine(disposedImgFolder, pictures[i].Name);
                        if (File.Exists(dest) == false)
                        {
                            pictures[i].MoveTo(dest);
                            j++;
                        }
                    }
                }
                catch
                {
                    if (img != null)
                        img.Dispose();
                    if (bmp != null)
                        bmp.Dispose();
                }
            }
            MessageBox.Show("Done, " + j.ToString() + " files moved.");
        }
    }
}
private unsafe double GetAveragePixelValue(Bitmap bmp)
{
    BitmapData bmData = null;
    try
    {
        bmData = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
        int stride = bmData.Stride;
        IntPtr scan0 = bmData.Scan0;
        int w = bmData.Width;
        int h = bmData.Height;
        double sum = 0;
        long pixels = (long)bmp.Width * bmp.Height;
        byte* p = (byte*)scan0.ToPointer();
        for (int y = 0; y < h; y++)
        {
            // move to the start of row y
            p = (byte*)scan0.ToPointer();
            p += y * stride;
            for (int x = 0; x < w; x++)
            {
                // p[0] = B, p[1] = G, p[2] = R, p[3] = A
                double i = ((double)p[0] + p[1] + p[2]) / 3.0;
                sum += i;
                p += 4;
            }
        }
        bmp.UnlockBits(bmData);
        double result = sum / (double)pixels;
        return result;
    }
    catch
    {
        try
        {
            bmp.UnlockBits(bmData);
        }
        catch
        {
        }
    }
    return -1;
}
How do I define the threshold?
If you want to read every pixel of an image, it must be converted to a bitmap. Then you can use the GetPixel method. But this process is very slow and takes a lot of CPU. If you go this route, you really should run some tests first.
using (var m = new MemoryStream())
{
    using (var img = Image.FromFile(args[0]))
    {
        img.Save(m, ImageFormat.Bmp);
    }
    m.Position = 0;
    using (var bitmap = (Bitmap)Bitmap.FromStream(m))
    {
        for (var x = 0; x < bitmap.Width; x++)
        {
            for (var y = 0; y < bitmap.Height; y++)
            {
                var color = bitmap.GetPixel(x, y);
                // TODO: Do whatever you want
            }
        }
    }
}
I think you need to read up a bit on RGB. Every pixel has a red, green and blue value associated with it, and I guess you are looking for a way to measure how bright the "average" pixel is? If so, you need to loop over all pixels and calculate the brightness of each one as you go. The "brightness" of a pixel can be calculated in several ways: you could simply do (R + G + B) / 3, or you could try to mimic the fact that the human eye isn't equally sensitive to R, G and B.
Then you will have to decide how to aggregate your per-pixel "brightness" values. Mean, median, something else? It depends on what you want to do.
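For example, here is a minimal sketch of that idea (assuming a System.Drawing Bitmap, and using the common Rec. 601 luma weights 0.299/0.587/0.114 as one way to model the eye's different sensitivity to R, G and B); it uses GetPixel, so it is slow but easy to read:
static double GetMeanBrightness(Bitmap bmp)
{
    double sum = 0;
    for (int y = 0; y < bmp.Height; y++)
    {
        for (int x = 0; x < bmp.Width; x++)
        {
            Color c = bmp.GetPixel(x, y);
            // Weighted "perceived" brightness of this pixel, 0 (black) to 255 (white).
            sum += 0.299 * c.R + 0.587 * c.G + 0.114 * c.B;
        }
    }
    // Mean brightness over the whole image.
    return sum / ((double)bmp.Width * bmp.Height);
}
You could then treat, say, anything with a mean below 127 (mid-gray) as "too dark" - the exact threshold is your call and worth tuning on a few sample images.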
Update: After reading more of your comments I'm still not really sure what you mean by "bright" or "dark". It also seems that you have your terminology a bit confused: you keep talking about an RGB value for an entire image, but RGB values in an image refer to individual pixels.
I believe this page could help you do what you want:
http://www.nbdtech.com/Blog/archive/2008/04/27/Calculating-the-Perceived-Brightness-of-a-Color.aspx
Also, some complementary reading to understand RGB:
http://en.wikipedia.org/wiki/Luma_(video)
http://en.wikipedia.org/wiki/RGB_color_space
Related
I've written a quick WinForms app that takes some entered text, generates images based on all the system fonts, and then consolidates those images into a single image to give examples of the fonts. For ease, I've separated the two functions, one to generate the images and another to consolidate them, so I can remove fonts I don't want in the single image. It was all working yesterday, but now when it comes to saving the consolidated image (complete.Save("consolidated.png");) it gives the useless generic GDI+ error. I've checked paths and access; all are fine and correct. Nothing is locking the image, so I'm totally at a loss as to what is causing this. Any ideas? Code below.
StringBuilder sb = new StringBuilder();
List<string> files = FileSystemUtilities.ListFiles("fonts");
int height = 0;
int width = 0;
Bitmap test = new Bitmap(1000, 1000);
Graphics gTest = Graphics.FromImage(test);
Font font = new Font("Arial", 128);
int numWidth = 0;
int count = 1;
foreach (var file in files)
{
    sb.AppendFormat("{0}\r", FileSystemUtilities.GetFileName(file).Replace(".png", string.Empty));
    Bitmap bitmap = new Bitmap(file);
    height = height + bitmap.Height + 10;
    if (width < bitmap.Width)
    {
        width = bitmap.Width;
    }
    SizeF numSize = gTest.MeasureString(Convert.ToString(count), font);
    if (numWidth < numSize.Width)
    {
        numWidth = Convert.ToInt32(numSize.Width + 1);
    }
    bitmap.Dispose();
    count++;
}
test.Dispose();
gTest.Dispose();
numWidth = numWidth + 10;
count = 1;
Bitmap complete = new Bitmap(width + numWidth, height);
Graphics g = Graphics.FromImage(complete);
g.FillRectangle(Brushes.White, 0, 0, complete.Width, complete.Height);
int y = 0;
foreach (var file in files)
{
    Bitmap bitmap = new Bitmap(file);
    g.DrawString(Convert.ToString(count) + ".", font, Brushes.Black, 0, y);
    g.DrawImage(bitmap, numWidth, y);
    y = y + bitmap.Height + 10;
    bitmap.Dispose();
    count++;
}
string filename = "consolidated.png";
if (File.Exists(filename))
{
    File.Delete(filename);
}
g.Dispose();
complete.Save("consolidated.png");
complete.Dispose();
TextFileUtilities.WriteTextFile("consolidated.txt", sb.ToString());
Turns out it was due to excessive height... I couldn't see it. Thank you to all who commented and helped me out.
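In case anyone else hits this: a simple size check before saving makes the failure readable instead of a generic GDI+ error. The limit below is only an illustrative guess (it is not a documented GDI+ constant), so tune it to whatever your encoder actually tolerates:
// Hypothetical guard before complete.Save("consolidated.png"):
const int maxDimension = 20000; // illustrative limit, not a documented GDI+ value
if (complete.Width > maxDimension || complete.Height > maxDimension)
{
    throw new InvalidOperationException(
        string.Format("Consolidated image is {0}x{1}; too large to encode reliably.",
            complete.Width, complete.Height));
}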
I have some raw sensor data in a single-dimension byte array. The data is actually in IEEE single-precision floating-point format. I know the X and Y axis lengths, and I want to create a Windows bitmap (greyscale - there is only one colour plane containing luminance data) from my data.
Here's what I'm trying so far:
var bitmap = new Bitmap(xAxis, yAxis, PixelFormat.Format16bppGrayScale);
var pixelReader = GetPixelReader(hdu.MandatoryKeywords.BitsPerPixel);
using (var stream = new MemoryStream(hdu.RawData, writable: false))
{
    using (var reader = new BinaryReader(stream, Encoding.ASCII))
    {
        for (var y = 0; y < yAxis; y++)
        {
            for (var x = 0; x < xAxis; x++)
            {
                var pixel = pixelReader(reader);
                var argb = Color.FromArgb(pixel, pixel, pixel);
                bitmap.SetPixel(x, y, argb);
            }
        }
    }
}
return bitmap;
pixelReader is a delegate, defined as:
private static int ReadIeeeSinglePrecision(BinaryReader reader)
{
    return (int)reader.ReadSingle();
}
When I run this code, I get an ArgumentException on the line where I try to set the pixel value. I stepped through it in the debugger and x = 0, y = 0 and pixel = 0. It doesn't say which argument is invalid or why (thanks, Microsoft).
So clearly I'm doing something wrong, and I suspect there is probably a more efficient way of going about this anyway. I would appreciate any suggestions. For reasons I can't quite put my finger on, I am finding this code very challenging to write.
OK, here is what worked in the end, based on code taken from Mark Dawson's answer to this question: https://social.msdn.microsoft.com/Forums/vstudio/en-US/10252c05-c4b6-49dc-b2a3-4c1396e2c3ab/writing-a-16bit-grayscale-image?forum=csharpgeneral
private static Bitmap CreateBitmapFromBytes(byte[] pixelValues, int width, int height)
{
    // Create an image that will hold the image data
    Bitmap pic = new Bitmap(width, height, PixelFormat.Format16bppGrayScale);

    // Get a reference to the image's pixel data
    Rectangle dimension = new Rectangle(0, 0, pic.Width, pic.Height);
    BitmapData picData = pic.LockBits(dimension, ImageLockMode.ReadWrite, pic.PixelFormat);
    IntPtr pixelStartAddress = picData.Scan0;

    // Copy the pixel data into the bitmap structure
    System.Runtime.InteropServices.Marshal.Copy(pixelValues, 0, pixelStartAddress, pixelValues.Length);

    pic.UnlockBits(picData);
    return pic;
}
So then I modified my own code to convert the IEEE float data into 16-bit integers and to create the bitmap directly from that, like so:
var pixelReader = GetPixelReader(hdu.MandatoryKeywords.BitsPerPixel);
var imageBytes = new byte[xAxis * yAxis * sizeof(Int16)];
using (var outStream = new MemoryStream(imageBytes, writable: true))
using (var writer = new BinaryWriter(outStream))
using (var inStream = new MemoryStream(hdu.RawData, writable: false))
using (var reader = new BinaryReader(inStream, Encoding.ASCII))
{
    for (var y = 0; y < yAxis; y++)
    {
        for (var x = 0; x < xAxis; x++)
        {
            // pixelReader now returns a 16-bit value, so exactly two bytes are written per pixel
            writer.Write(pixelReader(reader));
        }
    }
}
var bitmap = CreateBitmapFromBytes(imageBytes, xAxis, yAxis);
return bitmap;
This appears to also address the efficiency problem highlighted in the comments.
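For reference, one possible shape for the modified reader delegate (hypothetical - the original GetPixelReader isn't shown here), assuming the raw floats already fit the 16-bit range and only need clamping and narrowing:
private static short ReadIeeeSinglePrecisionAs16Bit(BinaryReader reader)
{
    var value = reader.ReadSingle();
    // Clamp to the signed 16-bit range before narrowing; adjust the scaling
    // to match the sensor's actual dynamic range.
    if (value > short.MaxValue) value = short.MaxValue;
    if (value < short.MinValue) value = short.MinValue;
    return (short)value;
}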
I have a cropped version of an image that should appear somewhere on my screen.
Image island6 = Image.FromFile("C:\\Users\\6Island.png");
Now the next goal is to take an image of the screen.
Bitmap CaptureScreen()
{
    var image = new Bitmap(Screen.PrimaryScreen.Bounds.Width, Screen.PrimaryScreen.Bounds.Height, PixelFormat.Format32bppArgb);
    using (var gfx = Graphics.FromImage(image))
    {
        gfx.CopyFromScreen(Screen.PrimaryScreen.Bounds.X, Screen.PrimaryScreen.Bounds.Y, 0, 0, Screen.PrimaryScreen.Bounds.Size, CopyPixelOperation.SourceCopy);
    }
    return image;
}
Image island6 = Image.FromFile("C:\\Users\\6Island.png");
Image currentView = CaptureScreen();
I then want to see if I can find the 6Island image inside the new screenshot, and the colors may vary a tiny bit. Is there any way to do that?
This is a quick, dirty and very slow sample, but it works. The code takes a "crop" of your big bitmap at every offset and compares it with your small bitmap. If they are equal the percentage will be 100; if unequal, lower than that. I would say that if it is higher than 98%, you have found it.
private static void CompareBigAndSmallBitmaps(string fileName1, string fileName2)
{
    var bmpBig = (Bitmap)Image.FromFile(fileName1);
    var bmpSmall = (Bitmap)Image.FromFile(fileName2);
    for (var offX = 0; offX < bmpBig.Width - bmpSmall.Width; offX++)
    {
        for (var offY = 0; offY < bmpBig.Height - bmpSmall.Height; offY++)
        {
            var percentage = CompareSmallBitmaps(bmpBig, bmpSmall, offX, offY);
            if (percentage > 98.0) // define percentage of equality
            {
                // Aha... found something here... exit here if you want
            }
        }
    }
}

private static double CompareSmallBitmaps(Bitmap bmpBig, Bitmap bmpSmall, int offX, int offY)
{
    var equals = 0;
    for (var x = 0; x < bmpSmall.Width; x++)
    {
        for (var y = 0; y < bmpSmall.Height; y++)
        {
            var color1 = bmpBig.GetPixel(x + offX, y + offY).ToArgb();
            var color2 = bmpSmall.GetPixel(x, y).ToArgb();
            if (color1 == color2)
            {
                equals++;
            }
        }
    }
    return (Convert.ToDouble(equals) / Convert.ToDouble(bmpSmall.Width * bmpSmall.Height)) * 100.0;
}
I copied the AForge sample from here:
http://www.aforgenet.com/framework/features/template_matching.html
And hoped it would work with two Bitmaps as sources, as in the following code:
Bitmap findTemplate(Bitmap sourceImage, Bitmap template)
{
    // create template matching algorithm's instance
    // (set similarity threshold to 40%, 1.0f = 100%)
    ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching(0.4f);

    // find all matchings with specified above similarity
    TemplateMatch[] matchings = tm.ProcessImage(sourceImage, template);
    // ^ this call fails with "Unsupported pixel format of the source or template image."

    // highlight found matchings
    BitmapData data = sourceImage.LockBits(
        new Rectangle(0, 0, sourceImage.Width, sourceImage.Height),
        ImageLockMode.ReadWrite, sourceImage.PixelFormat);
    foreach (TemplateMatch m in matchings)
    {
        AForge.Imaging.Drawing.Rectangle(data, m.Rectangle, System.Drawing.Color.White);
        // do something else with matching
    }
    sourceImage.UnlockBits(data);
    return sourceImage;
}
But when calling TemplateMatch[] matchings = tm.ProcessImage(...) it throws the error mentioned above.
The template is generated this way:
Bitmap templatebitmap = (Bitmap)AForge.Imaging.Image.FromFile("template.jpg");
The source is generated with the Kinect webcam, where the PlanarImage is converted to a Bitmap (method copied from somewhere, but it was working up to now):
Bitmap PImageToBitmap(PlanarImage PImage)
{
    Bitmap bmap = new Bitmap(
        PImage.Width,
        PImage.Height,
        System.Drawing.Imaging.PixelFormat.Format32bppRgb);
    BitmapData bmapdata = bmap.LockBits(
        new Rectangle(0, 0, PImage.Width, PImage.Height),
        ImageLockMode.WriteOnly,
        bmap.PixelFormat);
    IntPtr ptr = bmapdata.Scan0;
    Marshal.Copy(PImage.Bits,
        0,
        ptr,
        PImage.Width * PImage.BytesPerPixel * PImage.Height);
    bmap.UnlockBits(bmapdata);
    return bmap;
}
So, is anybody able to help me work out where my mistake might be?
Or maybe someone knows a better way to match a template with a Kinect?
The overall job is to detect a known object with the Kinect - in my case a rubber duck.
Thank you in advance.
Here's the solution using AForge. It is slow - it takes around 5 seconds - but it works.
As usual, you need to download and install the AForge framework, add the AForge using directives, and then copy and paste the following to make it work:
System.Drawing.Bitmap sourceImage = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\1.jpg");
System.Drawing.Bitmap template = (Bitmap)Bitmap.FromFile(@"C:\SavedBMPs\2.jpg");

// create template matching algorithm's instance
// (set similarity threshold to 92.1%)
ExhaustiveTemplateMatching tm = new ExhaustiveTemplateMatching(0.921f);

// find all matchings with specified above similarity
TemplateMatch[] matchings = tm.ProcessImage(sourceImage, template);

// highlight found matchings
BitmapData data = sourceImage.LockBits(
    new Rectangle(0, 0, sourceImage.Width, sourceImage.Height),
    ImageLockMode.ReadWrite, sourceImage.PixelFormat);
foreach (TemplateMatch m in matchings)
{
    Drawing.Rectangle(data, m.Rectangle, Color.White);
    MessageBox.Show(m.Rectangle.Location.ToString());
    // do something else with matching
}
sourceImage.UnlockBits(data);
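As for the original "Unsupported pixel format" error: as far as I recall, ExhaustiveTemplateMatching only accepts 8 bpp grayscale or 24 bpp colour images, so a 32 bpp bitmap such as the Format32bppRgb one coming from the Kinect will be rejected. A possible fix (a sketch, assuming AForge's image-cloning helper) is to convert both bitmaps before matching:
// Convert both images to 24 bpp so ExhaustiveTemplateMatching accepts them.
Bitmap source24 = AForge.Imaging.Image.Clone(sourceImage, PixelFormat.Format24bppRgb);
Bitmap template24 = AForge.Imaging.Image.Clone(template, PixelFormat.Format24bppRgb);
TemplateMatch[] matchings = tm.ProcessImage(source24, template24);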
So, I just implemented it myself. It is very slow, though - so if anyone has an idea for improving it, feel free to criticize my code:
public class Position
{
    public int bestRow { get; set; }
    public int bestCol { get; set; }
    public double bestSAD { get; set; }

    public Position(int row, int col, double sad)
    {
        bestRow = row;
        bestCol = col;
        bestSAD = sad;
    }
}

Position element_position = new Position(0, 0, double.PositiveInfinity);

Position ownSearch(Bitmap search, Bitmap template)
{
    Position position = new Position(0, 0, double.PositiveInfinity);
    double minSAD = double.PositiveInfinity;

    // loop through the search image
    for (int x = 0; x <= search.PhysicalDimension.Width - template.PhysicalDimension.Width; x++)
    {
        for (int y = 0; y <= search.PhysicalDimension.Height - template.PhysicalDimension.Height; y++)
        {
            position_label2.Content = "Running: X=" + x + " Y=" + y;
            double SAD = 0.0;

            // loop through the template image
            for (int i = 0; i < template.PhysicalDimension.Width; i++)
            {
                for (int j = 0; j < template.PhysicalDimension.Height; j++)
                {
                    int r = Math.Abs(search.GetPixel(x + i, y + j).R - template.GetPixel(i, j).R);
                    int g = Math.Abs(search.GetPixel(x + i, y + j).G - template.GetPixel(i, j).G);
                    int b = Math.Abs(search.GetPixel(x + i, y + j).B - template.GetPixel(i, j).B);
                    int a = template.GetPixel(i, j).A;
                    SAD = SAD + ((r + g + b) * a / 255);
                }
            }

            // save the best found position
            if (minSAD > SAD)
            {
                minSAD = SAD;
                // give me VALUE_MAX
                position.bestRow = x;
                position.bestCol = y;
                position.bestSAD = SAD;
            }
        }
    }
    return position;
}
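Most of the time here goes to the repeated GetPixel calls (and to updating position_label2 on every offset). One common speedup - sketched here under the assumption that both bitmaps are, or are first cloned to, 32 bpp ARGB - is to copy the pixel data into plain byte arrays once with LockBits and index into those inside the SAD loops:
// Copy a 32 bpp ARGB bitmap into a flat BGRA byte array once,
// so the inner loops become plain array indexing instead of GetPixel calls.
static byte[] ToByteArray(Bitmap bmp, out int stride)
{
    var rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
    BitmapData data = bmp.LockBits(rect, ImageLockMode.ReadOnly, PixelFormat.Format32bppArgb);
    stride = data.Stride;
    var bytes = new byte[stride * bmp.Height];
    System.Runtime.InteropServices.Marshal.Copy(data.Scan0, bytes, 0, bytes.Length);
    bmp.UnlockBits(data);
    return bytes;
}
// Pixel (x, y) then lives at offset y * stride + x * 4, in B, G, R, A order,
// e.g. int b = bytes[y * stride + x * 4];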
For a couple of days now I've tried to figure out why my nine-slice code does not work as expected. As far as I can see, there seems to be an issue with the Graphics.DrawImage method, which handles my nine-slice images incorrectly. So my problem is how to compensate for the incorrect scaling that is performed when running my code on the Compact Framework. I might add that this code works perfectly when running on the full framework. The problem only occurs when scaling the image to a larger image, not the other way around. Here is the snippet:
public class NineSliceBitmapSnippet
{
    private Bitmap m_OriginalBitmap;

    public int CornerLength { get; set; }

    /// <summary>
    /// Initializes a new instance of the NineSliceBitmapSnippet class.
    /// </summary>
    public NineSliceBitmapSnippet(Bitmap bitmap)
    {
        CornerLength = 5;
        m_OriginalBitmap = bitmap;
    }

    public Bitmap ScaleSingleBitmap(Size size)
    {
        Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
        int[] horizontalTargetSlices = Slice(size.Width);
        int[] verticalTargetSlices = Slice(size.Height);
        int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
        int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);

        using (Graphics graphics = Graphics.FromImage(scaledBitmap))
        {
            using (Brush brush = new SolidBrush(Color.Fuchsia))
            {
                graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
            }

            int horizontalTargetOffset = 0;
            int verticalTargetOffset = 0;
            int horizontalSourceOffset = 0;
            int verticalSourceOffset = 0;

            for (int x = 0; x < horizontalTargetSlices.Length; x++)
            {
                verticalTargetOffset = 0;
                verticalSourceOffset = 0;
                for (int y = 0; y < verticalTargetSlices.Length; y++)
                {
                    Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
                    Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
                    graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
                    verticalTargetOffset += verticalTargetSlices[y];
                    verticalSourceOffset += verticalSourceSlices[y];
                }
                horizontalTargetOffset += horizontalTargetSlices[x];
                horizontalSourceOffset += horizontalSourceSlices[x];
            }
        }
        return scaledBitmap;
    }

    public int[] Slice(int length)
    {
        int cornerLength = CornerLength;
        if (length <= (cornerLength * 2))
            throw new Exception("Image too small for slicing up");

        int[] slices = new int[3];
        slices[0] = cornerLength;
        slices[1] = length - (2 * cornerLength);
        slices[2] = cornerLength;
        return slices;
    }
}
So, my question is: does anybody know how I could compensate for the incorrect scaling?
/Dan
After some more trial and error I've finally found a solution to my problem. The scaling problems have always been in the top-center, right-center, bottom-center and left-center slices, since they're always stretched in only one direction according to the logic of nine-slice scaling. If I apply a temporary square stretch to those slices before applying the correct stretch, the final bitmap turns out correct. Once again, the problem is only visible in the .NET Compact Framework on a Windows CE device (Smart Device). Here's a snippet with code adjusting for the bug in CF. My only concern now is that the slices that get square-stretched will take much more memory due to the correction code. On the other hand, this step only lasts a short period of time, so I might get away with it. ;)
public class NineSliceBitmapSnippet
{
    private Bitmap m_OriginalBitmap;

    public int CornerLength { get; set; }

    public NineSliceBitmapSnippet(Bitmap bitmap)
    {
        CornerLength = 5;
        m_OriginalBitmap = bitmap;
    }

    public Bitmap Scale(Size size)
    {
        if (m_OriginalBitmap != null)
        {
            return ScaleSingleBitmap(size);
        }
        return null;
    }

    public Bitmap ScaleSingleBitmap(Size size)
    {
        Bitmap scaledBitmap = new Bitmap(size.Width, size.Height);
        int[] horizontalTargetSlices = Slice(size.Width);
        int[] verticalTargetSlices = Slice(size.Height);
        int[] horizontalSourceSlices = Slice(m_OriginalBitmap.Width);
        int[] verticalSourceSlices = Slice(m_OriginalBitmap.Height);

        using (Graphics graphics = Graphics.FromImage(scaledBitmap))
        {
            using (Brush brush = new SolidBrush(Color.Fuchsia))
            {
                graphics.FillRectangle(brush, new Rectangle(0, 0, size.Width, size.Height));
            }

            int horizontalTargetOffset = 0;
            int verticalTargetOffset = 0;
            int horizontalSourceOffset = 0;
            int verticalSourceOffset = 0;

            for (int x = 0; x < horizontalTargetSlices.Length; x++)
            {
                verticalTargetOffset = 0;
                verticalSourceOffset = 0;
                for (int y = 0; y < verticalTargetSlices.Length; y++)
                {
                    Rectangle destination = new Rectangle(horizontalTargetOffset, verticalTargetOffset, horizontalTargetSlices[x], verticalTargetSlices[y]);
                    Rectangle source = new Rectangle(horizontalSourceOffset, verticalSourceOffset, horizontalSourceSlices[x], verticalSourceSlices[y]);
                    bool isWidthAffectedByVerticalStretch = (y == 1 && (x == 0 || x == 2) && destination.Height > source.Height);
                    bool isHeightAffectedByHorizontalStretch = (x == 1 && (y == 0 || y == 2) && destination.Width > source.Width);

                    if (isHeightAffectedByHorizontalStretch)
                    {
                        BypassDrawImageError(graphics, destination, source, Orientation.Horizontal);
                    }
                    else if (isWidthAffectedByVerticalStretch)
                    {
                        BypassDrawImageError(graphics, destination, source, Orientation.Vertical);
                    }
                    else
                    {
                        graphics.DrawImage(m_OriginalBitmap, destination, source, GraphicsUnit.Pixel);
                    }

                    verticalTargetOffset += verticalTargetSlices[y];
                    verticalSourceOffset += verticalSourceSlices[y];
                }
                horizontalTargetOffset += horizontalTargetSlices[x];
                horizontalSourceOffset += horizontalSourceSlices[x];
            }
        }
        return scaledBitmap;
    }

    private void BypassDrawImageError(Graphics graphics, Rectangle destination, Rectangle source, Orientation orientationAdjustment)
    {
        Size adjustedSize = Size.Empty;
        switch (orientationAdjustment)
        {
            case Orientation.Horizontal:
                adjustedSize = new Size(destination.Width, destination.Width);
                break;
            case Orientation.Vertical:
                adjustedSize = new Size(destination.Height, destination.Height);
                break;
            default:
                break;
        }

        using (Bitmap quadScaledBitmap = new Bitmap(adjustedSize.Width, adjustedSize.Height))
        {
            using (Graphics tempGraphics = Graphics.FromImage(quadScaledBitmap))
            {
                tempGraphics.Clear(Color.Fuchsia);
                tempGraphics.DrawImage(m_OriginalBitmap, new Rectangle(0, 0, adjustedSize.Width, adjustedSize.Height), source, GraphicsUnit.Pixel);
            }
            graphics.DrawImage(quadScaledBitmap, destination, new Rectangle(0, 0, quadScaledBitmap.Width, quadScaledBitmap.Height), GraphicsUnit.Pixel);
        }
    }

    public int[] Slice(int length)
    {
        int cornerLength = CornerLength;
        if (length <= (cornerLength * 2))
            throw new Exception("Image too small for slicing up");

        int[] slices = new int[3];
        slices[0] = cornerLength;
        slices[1] = length - (2 * cornerLength);
        slices[2] = cornerLength;
        return slices;
    }
}
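For reference, a minimal usage sketch of the class above (the file path and target size are made up for illustration):
// Hypothetical usage: load a nine-slice source image and stretch it to 200x60.
using (var original = new Bitmap(@"\Program Files\MyApp\button_background.png"))
{
    var snippet = new NineSliceBitmapSnippet(original);
    Bitmap stretched = snippet.Scale(new Size(200, 60));
    // ... draw 'stretched' onto a control and dispose it when you are done.
}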