I want to save a picture using the device's native camera. Currently I cannot get the image to save to a file. I have a RawImage whose texture is the native device camera feed. I am taking the bytes from that RawImage, encoding them to PNG, and then writing the PNG to a file on my computer.
public WebCamTexture webCamTexture;
public RawImage myImage;
public void Start () { // Unity only calls this automatically if it is named "Start" with a capital S
webCamTexture = new WebCamTexture ();
myImage.texture = webCamTexture;
myImage.transform.localScale = new Vector3 (1,-1,1);
webCamTexture.Play ();
int width = (int)myImage.GetComponent<RectTransform>().rect.width; // Rect is not a Component: read the size from the RectTransform (myImage is already referenced, no GameObject.Find needed)
int height = (int)myImage.GetComponent<RectTransform>().rect.height;
Texture2D tex = new Texture2D (width, height, TextureFormat.RGB24, false);
tex.ReadPixels (new Rect (0, 0, width, height), 0, 0);
tex.Apply ();
byte[] bytes = tex.EncodeToPNG ();
System.IO.File.WriteAllBytes(Application.dataPath + "/"+"imgcap.png",bytes);
Object.Destroy (tex);
}
A working approach reads the pixels straight from the WebCamTexture with GetPixels32 instead of ReadPixels, which copies from the screen and here runs before the camera has even delivered a frame:
using System;
using System.IO;
using System.Runtime.InteropServices;
using UnityEngine;
public class WebCamScreenShot : MonoBehaviour
{
public static int fileCounter = 0;
WebCamTexture webCamTexture;
public string path = "C:/temp/UnityScreenShots";
public string fileNamePrefix = "image_";
void Start()
{
var devices = WebCamTexture.devices;
if (devices.Length < 1) throw new System.Exception("No webcams were found");
var device = devices[0];
webCamTexture = new WebCamTexture(device.name);
webCamTexture.requestedFPS = 30;
webCamTexture.requestedWidth = 320;
webCamTexture.requestedHeight = 240;
webCamTexture.Play();
if (webCamTexture.width < 1 || webCamTexture.height < 1) throw new System.Exception("Invalid resolution"); // note: some platforms report a small placeholder size until the first frame arrives
}
public void SaveToPNG()
{
string zeros =
(fileCounter < 10 ? "0000" :
(fileCounter < 100 ? "000" :
(fileCounter < 1000 ? "00" :
(fileCounter < 10000 ? "0" : "")))); // pad to five digits; the original checked the bounds largest-first, so every counter below 10000 got "0000"
string image_path = path + $"/{ fileNamePrefix + zeros + fileCounter }.png";
byte[] data = ScreenshotWebcam( webCamTexture );
File.WriteAllBytes(image_path, data);
fileCounter ++ ;
}
static byte[] ScreenshotWebcam(WebCamTexture wct)
{
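// GetPixels32 -> raw bytes -> LoadRawTextureData copies the whole frame in one call;
// colorTex.SetPixels32(wct.GetPixels32()) would be a simpler equivalent.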
Texture2D colorTex = new Texture2D(wct.width, wct.height, TextureFormat.RGBA32, false);
byte[] colorByteData = Color32ArrayToByteArray(wct.GetPixels32());
colorTex.LoadRawTextureData(colorByteData);
colorTex.Apply();
return colorTex.EncodeToPNG();
}
static byte[] Color32ArrayToByteArray(Color32[] colors)
{
// https://stackoverflow.com/a/21575147/2496170
if (colors == null || colors.Length == 0) return null;
int lengthOfColor32 = Marshal.SizeOf(typeof(Color32));
int length = lengthOfColor32 * colors.Length;
byte[] bytes = new byte[length];
GCHandle handle = default(GCHandle);
try
{
handle = GCHandle.Alloc(colors, GCHandleType.Pinned);
IntPtr ptr = handle.AddrOfPinnedObject();
Marshal.Copy(ptr, bytes, 0, length);
}
finally
{
if (handle != default(GCHandle)) handle.Free();
}
return bytes;
}
}
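A minimal usage sketch, assuming the component above is attached to a scene object (the driver class and the Space key binding are illustrative, not part of the original):
using UnityEngine;
public class WebCamScreenShotDriver : MonoBehaviour // hypothetical helper
{
    public WebCamScreenShot shooter; // assign in the Inspector
    void Update()
    {
        if (Input.GetKeyDown(KeyCode.Space)) // capture one frame per key press
            shooter.SaveToPNG();
    }
}
Note that SaveToPNG assumes the target directory already exists; calling Directory.CreateDirectory(path) first avoids a DirectoryNotFoundException.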
I'm using Xamarin.Forms to build an image classifier, following the documentation on the Xamarin blog, but I get an error when passing the image to the ByteBuffer. When the TensorFlow Lite interpreter runs, it fails with: "Cannot convert between a TensorFlowLite buffer with 150528 bytes and a ByteBuffer with 602112 bytes."
I hope someone can help me with this error; I'm a little frustrated. Thank you in advance. Below is my code.
public class TensorflowClassifier : IClassify
{
//FloatSize is a constant with the value of 4 because a float value is 4 bytes
const int FloatSize = 4;
//PixelSize is a constant with the value of 3 because a pixel has three color channels: Red Green and Blue
const int PixelSize = 3;
public List<ImageClassificationModel> Classify(byte[] image)
{
var mappedByteBuffer = GetModelAsMappedByteBuffer();
var interpreter = new Xamarin.TensorFlow.Lite.Interpreter(mappedByteBuffer);
//To resize the image, we first need to get its required width and height
var tensor = interpreter.GetInputTensor(0);
var shape = tensor.Shape();
var width = shape[1];
var height = shape[2];
var byteBuffer = GetPhotoAsByteBuffer(image, width, height);
//use StreamReader to import the labels from labels.txt
var streamReader = new StreamReader(Application.Context.Assets.Open("labels.txt"));
//Transform labels.txt into List<string>
var labels = streamReader.ReadToEnd().Split('\n').Select(s => s.Trim()).Where(s => !string.IsNullOrEmpty(s)).ToList();
//Convert our two-dimensional array into a Java.Lang.Object, the required input for Xamarin.TensorFlow.Lite.Interpreter
var outputLocations = new float[1][] { new float[labels.Count] };
var outputs = Java.Lang.Object.FromArray(outputLocations);
interpreter.Run(byteBuffer, outputs);
var classificationResult = outputs.ToArray<float[]>();
//Map the classificationResult to the labels and sort the result to find which label has the highest probability
var classificationModelList = new List<ImageClassificationModel>();
for (var i = 0; i < labels.Count; i++)
{
var label = labels[i]; classificationModelList.Add(new ImageClassificationModel(label, classificationResult[0][i]));
}
return classificationModelList;
}
//Convert model.tflite to Java.Nio.MappedByteBuffer, the required type for Xamarin.TensorFlow.Lite.Interpreter
private MappedByteBuffer GetModelAsMappedByteBuffer()
{
var assetDescriptor = Application.Context.Assets.OpenFd("model.tflite");
var inputStream = new FileInputStream(assetDescriptor.FileDescriptor);
var mappedByteBuffer = inputStream.Channel.Map(FileChannel.MapMode.ReadOnly, assetDescriptor.StartOffset, assetDescriptor.DeclaredLength);
return mappedByteBuffer;
}
//Resize the image for the TensorFlow interpreter
private ByteBuffer GetPhotoAsByteBuffer(byte[] image, int width, int height)
{
var bitmap = BitmapFactory.DecodeByteArray(image, 0, image.Length);
var resizedBitmap = Bitmap.CreateScaledBitmap(bitmap, width, height, true);
var modelInputSize = FloatSize * height * width * PixelSize;
var byteBuffer = ByteBuffer.AllocateDirect(modelInputSize);
byteBuffer.Order(ByteOrder.NativeOrder());
var pixels = new int[width * height];
resizedBitmap.GetPixels(pixels, 0, resizedBitmap.Width, 0, 0, resizedBitmap.Width, resizedBitmap.Height);
var pixel = 0;
//Loop through each pixel to build the Java.Nio.ByteBuffer
//(careful: '/' binds tighter than '&' in C#, so "0xFF / 4" masks with 63, and the
//width-then-height loop order only matches GetPixels' row-major layout when width == height)
for (var i = 0; i < width; i++)
{
for (var j = 0; j < height; j++)
{
var pixelVal = pixels[pixel++];
byteBuffer.PutFloat(pixelVal >> 16 & 0xFF / 4);
byteBuffer.PutFloat(pixelVal >> 8 & 0xFF / 4);
byteBuffer.PutFloat(pixelVal & 0xFF / 4);
}
}
bitmap.Recycle();
return byteBuffer;
}
}
In my project I pass the byte array to the classifier:
private async void SaveImagePad(object sender, EventArgs e)
{
Stream image = await PadView.GetImageStreamAsync(SignatureImageFormat.Jpeg);
//byte[] bytes = ReadFully(image);
byte[] bytes = new byte[image.Length];
image.Read(bytes, 0, bytes.Length); // note: Stream.Read may read fewer bytes than requested
List<ImageClassificationModel> classifyImage = DependencyService.Get<IClassify>().Classify(bytes);
}
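As an aside, 602112 is exactly 4 × 150528, which suggests the model's input tensor is 8-bit (1 × 224 × 224 × 3 = 150528 bytes) while GetPhotoAsByteBuffer writes 4-byte floats. A hedged sketch of a byte-per-channel buffer for such a quantized model (whether the model is actually quantized is an assumption to verify):
private ByteBuffer GetPhotoAsByteBufferQuantized(byte[] image, int width, int height)
{
    var bitmap = BitmapFactory.DecodeByteArray(image, 0, image.Length);
    var resizedBitmap = Bitmap.CreateScaledBitmap(bitmap, width, height, true);
    // one byte per channel instead of one float: width * height * PixelSize bytes total
    var byteBuffer = ByteBuffer.AllocateDirect(width * height * PixelSize);
    byteBuffer.Order(ByteOrder.NativeOrder());
    var pixels = new int[width * height];
    resizedBitmap.GetPixels(pixels, 0, resizedBitmap.Width, 0, 0, resizedBitmap.Width, resizedBitmap.Height);
    foreach (var pixelVal in pixels)
    {
        byteBuffer.Put((sbyte)((pixelVal >> 16) & 0xFF)); // R
        byteBuffer.Put((sbyte)((pixelVal >> 8) & 0xFF));  // G
        byteBuffer.Put((sbyte)(pixelVal & 0xFF));         // B
    }
    bitmap.Recycle();
    return byteBuffer;
}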
So the situation is like this: I use SharpDX to create a swap chain linked to the XAML SwapChainPanel element. I render something into it and then try to grab the rendered data into a byte[] to place in an image buffer. For that I use this ReadBuffer function:
private D3D11.Texture2D OffscreenstagingTexture;
private D3D11.Texture2D GetTexture(D3D11.Device _Device,
D3D11.DeviceContext _Context,
D3D11.Texture2DDescription _Desc)
{
if (OffscreenstagingTexture == null ||
OffscreenstagingTexture.Description.Width != _Desc.Width ||
OffscreenstagingTexture.Description.Height != _Desc.Height)
{
RemoveAndDispose(ref OffscreenstagingTexture);
D3D11.Texture2DDescription _2DDesc = new D3D11.Texture2DDescription
{
Format = DXGI.Format.B8G8R8A8_UNorm,
Height = _Desc.Height,
Width = _Desc.Width,
SampleDescription = new DXGI.SampleDescription(1, 0),
ArraySize = _Desc.ArraySize,
BindFlags = D3D11.BindFlags.None,
CpuAccessFlags = D3D11.CpuAccessFlags.Read | D3D11.CpuAccessFlags.Write,
Usage = D3D11.ResourceUsage.Staging,
MipLevels = _Desc.MipLevels
};
OffscreenstagingTexture = ToDispose(new D3D11.Texture2D(_Device, _2DDesc));
}
return OffscreenstagingTexture;
}
public byte[] ReadBuffer(D3D11.Texture2D _2DTexture)
{
byte[] data = null;
D3D11.Texture2DDescription _Desc = _2DTexture.Description;
D3D11.Device _Device = _2DTexture.Device;
D3D11.DeviceContext _Context = _Device.ImmediateContext;
D3D11.Texture2D _2DMappedTexture = GetTexture(_Device, _Context, _Desc);
try
{
_Context.CopyResource(_2DTexture, _2DMappedTexture);
int size = _Desc.Width * _Desc.Height * 4;
DataBox box = _Context.MapSubresource(_2DMappedTexture, 0, 0, D3D11.MapMode.Read, D3D11.MapFlags.None, out DataStream stream);
data = ToByteArray(stream);
stream.Dispose();
}
catch (Exception)
{
}
return data;
}
private byte[] ToByteArray(Stream inputStream)
{
using (MemoryStream ms = new MemoryStream())
{
inputStream.CopyTo(ms);
return ms.ToArray();
}
}
Now, on most PCs this works fine, but on some PCs the SwapChainPanel returns a bigger byte[] than expected: for a 100 × 100 SwapChainPanel the expected array size is 100 * 100 * 4, yet a larger array comes back, and that size mismatch crashes the app. I reproduced the same crash on my Dell Vostro 3558. (The cause: MapSubresource returns rows padded to DataBox.RowPitch, which on some GPUs is larger than Width * 4, and ToByteArray copies the whole padded buffer.)
So I found the fix: I had to update the ReadBuffer function:
public byte[] ReadBuffer(D3D11.Texture2D _2DTexture)
{
D3D11.Texture2DDescription _Desc = _2DTexture.Description;
D3D11.Device _Device = _2DTexture.Device;
D3D11.DeviceContext _Context = _Device.ImmediateContext;
D3D11.Texture2D _2DMappedTexture = GetTexture(_Device, _Context, _Desc);
_Context.CopyResource(_2DTexture, _2DMappedTexture);
DataBox box = _Context.MapSubresource(_2DMappedTexture, 0, 0, D3D11.MapMode.Read, D3D11.MapFlags.None, out DataStream stream);
var dataRectangle = new DataRectangle
{
DataPointer = stream.DataPointer,
Pitch = box.RowPitch
};
var stride = _Desc.Width * 4;
int size = _Desc.Width * _Desc.Height * 4;
using (var bitmap = new Bitmap(manager.WICFactory,
_2DMappedTexture.Description.Width,
_2DMappedTexture.Description.Height,
PixelFormat.Format32bppBGRA,
dataRectangle))
{
var data = new byte[size];
bitmap.CopyPixels(data, stride);
_Context.UnmapSubresource(_2DMappedTexture, 0);
return data;
}
}
and it works like a charm.
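For reference, the same padding can be handled without WIC by copying one tight row at a time. A minimal sketch against the mapped DataBox (assuming the B8G8R8A8 staging texture above, i.e. 4 bytes per pixel):
int tightStride = _Desc.Width * 4;                  // bytes per row without padding
var data = new byte[tightStride * _Desc.Height];
for (int y = 0; y < _Desc.Height; y++)
{
    // skip box.RowPitch bytes per source row, keep only Width * 4 of them
    System.Runtime.InteropServices.Marshal.Copy(
        box.DataPointer + y * box.RowPitch, data, y * tightStride, tightStride);
}
_Context.UnmapSubresource(_2DMappedTexture, 0);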
In my Unity game each frame is rendered into a texture and then assembled into a video using FFmpeg. My question is whether I am doing this right, because avcodec_send_frame throws an exception every time.
I am pretty sure I am doing something wrong, or doing it in the wrong order, or simply missing something.
Here is the code for capturing the texture:
void Update() {
//StartCoroutine(CaptureFrame());
if (rt == null)
{
rect = new Rect(0, 0, captureWidth, captureHeight);
rt = new RenderTexture(captureWidth, captureHeight, 24);
frame = new Texture2D(captureWidth, captureHeight, TextureFormat.RGB24, false);
}
Camera camera = this.GetComponent<Camera>(); // NOTE: added because there was no reference to camera in original script; must add this script to Camera
camera.targetTexture = rt;
camera.Render();
RenderTexture.active = rt;
frame.ReadPixels(rect, 0, 0);
frame.Apply();
camera.targetTexture = null;
RenderTexture.active = null;
byte[] fileData = null;
fileData = frame.GetRawTextureData();
encoding(fileData, fileData.Length);
}
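As an aside, ReadPixels is only valid after rendering has finished; the commented-out coroutine hints at the usual Unity pattern (a sketch, requiring using System.Collections):
IEnumerator CaptureFrame()
{
    yield return new WaitForEndOfFrame(); // wait until the frame is fully rendered
    frame.ReadPixels(rect, 0, 0);
    frame.Apply();
    byte[] data = frame.GetRawTextureData();
    encoding(data, data.Length);
}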
And here is the code for encoding and sending the byte data:
private unsafe void encoding(byte[] bytes, int size)
{
Debug.Log("Encoding...");
AVCodec* codec;
codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
int ret, got_output = 0;
AVCodecContext* codecContext = null;
codecContext = ffmpeg.avcodec_alloc_context3(codec);
codecContext->bit_rate = 400000;
codecContext->width = captureWidth;
codecContext->height = captureHeight;
//codecContext->time_base.den = 25;
//codecContext->time_base.num = 1;
AVRational timeBase = new AVRational();
timeBase.num = 1;
timeBase.den = 25;
codecContext->time_base = timeBase;
//AVStream* videoAVStream = null;
//videoAVStream->time_base = timeBase;
AVRational frameRate = new AVRational();
frameRate.num = 25;
frameRate.den = 1;
codecContext->framerate = frameRate;
codecContext->gop_size = 10;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
AVFrame* inputFrame;
inputFrame = ffmpeg.av_frame_alloc();
inputFrame->format = (int)codecContext->pix_fmt;
inputFrame->width = captureWidth;
inputFrame->height = captureHeight;
inputFrame->linesize[0] = inputFrame->width;
AVPixelFormat dst_pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P, src_pix_fmt = AVPixelFormat.AV_PIX_FMT_RGBA;
int src_w = 1920, src_h = 1080, dst_w = 1920, dst_h = 1080;
SwsContext* sws_ctx;
GCHandle pinned = GCHandle.Alloc(bytes, GCHandleType.Pinned);
IntPtr address = pinned.AddrOfPinnedObject();
sbyte** inputData = (sbyte**)address;
sws_ctx = ffmpeg.sws_getContext(src_w, src_h, src_pix_fmt,
dst_w, dst_h, dst_pix_fmt,
0, null, null, null);
fixed (int* lineSize = new int[1])
{
lineSize[0] = 4 * captureHeight;
// Convert RGBA to YUV420P
ffmpeg.sws_scale(sws_ctx, inputData, lineSize, 0, codecContext->width, inputFrame->extended_data, inputFrame->linesize);
}
inputFrame->pts = counter++;
if (ffmpeg.avcodec_send_frame(codecContext, inputFrame) < 0)
throw new ApplicationException("Error sending a frame for encoding!");
AVPacket pkt;
pkt = new AVPacket();
//pkt.data = inData;
AVPacket* packet = &pkt;
ffmpeg.av_init_packet(packet);
Debug.Log("pkt.size " + pkt.size);
pinned.Free();
AVDictionary* options = null;
ffmpeg.av_dict_set(&options, "pkt_size", "1300", 0);
ffmpeg.av_dict_set(&options, "buffer_size", "65535", 0);
AVIOContext* server = null;
ffmpeg.avio_open2(&server, "udp://192.168.0.1:1111", ffmpeg.AVIO_FLAG_WRITE, null, &options);
Debug.Log("encoded");
ret = ffmpeg.avcodec_encode_video2(codecContext, &pkt, inputFrame, &got_output);
ffmpeg.avio_write(server, pkt.data, pkt.size);
ffmpeg.av_free_packet(&pkt);
pkt.data = null;
pkt.size = 0;
}
And every time I start the game
if (ffmpeg.avcodec_send_frame(codecContext, inputFrame) < 0)
throw new ApplicationException("Error sending a frame for encoding!");
throws the exception.
Any help in fixing the issue would be greatly appreciated :)
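For what it's worth, the code never calls avcodec_open2 on the context and never allocates the frame's data planes, and avcodec_send_frame fails on an unopened context. A hedged sketch of the usual setup order with the FFmpeg.AutoGen-style bindings used above (run once, not every frame; captureWidth/captureHeight are the question's fields):
AVCodec* codec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
AVCodecContext* ctx = ffmpeg.avcodec_alloc_context3(codec);
ctx->width = captureWidth;
ctx->height = captureHeight;
ctx->time_base = new AVRational { num = 1, den = 25 };
ctx->framerate = new AVRational { num = 25, den = 1 };
ctx->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
// 1) open the encoder -- missing in the original, the likely reason send_frame fails
if (ffmpeg.avcodec_open2(ctx, codec, null) < 0)
    throw new ApplicationException("Could not open codec");
AVFrame* inputFrame = ffmpeg.av_frame_alloc();
inputFrame->format = (int)ctx->pix_fmt;
inputFrame->width = ctx->width;
inputFrame->height = ctx->height;
// 2) allocate data[]/linesize[] so sws_scale has somewhere to write -- also missing
if (ffmpeg.av_frame_get_buffer(inputFrame, 32) < 0)
    throw new ApplicationException("Could not allocate frame buffers");
Two more things worth double-checking: the RGBA source stride is 4 * captureWidth (not 4 * captureHeight), and the fifth argument to sws_scale is the source height in rows.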
TWAIN scans images to memory, and that consumes too much memory. So I scan images in batches and compress each image in memory, after which the source images should be released. How can I do that?
protected void OnTwainTransferReady()
{
if (TwainTransferReady == null)
return; // not likely..
List<ImageSource> imageSources = new List<ImageSource>();
ArrayList pics = tw.TransferPictures();
tw.CloseSrc();
EndingScan();
picnumber++;
for (int i = 0; i < pics.Count; i++) {
IntPtr imgHandle = (IntPtr)pics[i];
/*if (i == 0) { // first image only
imageSources.Add(DibToBitmap.FormHDib(imgHandle));
//Refresh(image1); // force a redraw
}*/
ImageSource src = DibToBitmap.FormHDib(imgHandle);
ImageSource temp = ImageHelper.compressImage(src);
src = null;
imageSources.Add(temp);
Win32.GlobalFree(imgHandle);
}
// For some reason the main window does not refresh properly - resizing the window solves the problem.
// Happens only with the Canon LIDE scanner.
// Suspected: some messages were eaten by Twain.
TwainTransferReady(this, imageSources);
}
Generate the BitmapSource:
public static BitmapSource FormHDib(IntPtr dibHandle)
{
BitmapSource bs = null;
IntPtr bmpPtr = IntPtr.Zero;
bool flip = true; // vertically flip the image
try {
bmpPtr = Win32.GlobalLock(dibHandle);
Win32.BITMAPINFOHEADER bmi = new Win32.BITMAPINFOHEADER();
Marshal.PtrToStructure(bmpPtr, bmi);
if (bmi.biSizeImage == 0)
bmi.biSizeImage = (uint)(((((bmi.biWidth * bmi.biBitCount) + 31) & ~31) >> 3) * bmi.biHeight);
int palettSize = 0;
if (bmi.biClrUsed != 0)
throw new NotSupportedException("DibToBitmap: DIB with pallet is not supported");
// pointer to the beginning of the bitmap bits
IntPtr pixptr = (IntPtr)((long)bmpPtr + bmi.biSize + palettSize); // long math so this also works in a 64-bit process
// Define parameters used to create the BitmapSource.
PixelFormat pf = PixelFormats.Default;
switch (bmi.biBitCount) {
case 32:
pf = PixelFormats.Bgr32;
break;
case 24:
pf = PixelFormats.Bgr24;
break;
case 8:
pf = PixelFormats.Gray8;
break;
case 1:
pf = PixelFormats.BlackWhite;
break;
default: // not supported
throw new NotSupportedException("DibToBitmap: Can't determine picture format (biBitCount=" + bmi.biBitCount + ")");
// break;
}
int width = bmi.biWidth;
int height = bmi.biHeight;
int stride = (int)(bmi.biSizeImage / height);
byte[] imageBytes = new byte[stride * height];
//Debug: Initialize the image with random data.
//Random value = new Random();
//value.NextBytes(rawImage);
if (flip) {
for (int i = 0, j = 0, k = (height - 1) * stride; i < height; i++, j += stride, k -= stride)
Marshal.Copy((IntPtr)((long)pixptr + j), imageBytes, k, stride);
} else {
Marshal.Copy(pixptr, imageBytes, 0, imageBytes.Length);
}
int xDpi = (int)Math.Round(bmi.biXPelsPerMeter * 2.54 / 100); // pels per meter to dots per inch
int yDpi = (int)Math.Round(bmi.biYPelsPerMeter * 2.54 / 100);
// Create a BitmapSource.
bs = BitmapSource.Create(width, height, xDpi, yDpi, pf, null, imageBytes, stride);
// do not unlock/free pixptr here: it is only an offset into the locked DIB;
// the handle is unlocked in the finally block and freed by the caller
} catch (Exception ex) {
string msg = ex.Message;
}
finally {
// cleanup
if (bmpPtr != IntPtr.Zero) { // locked successfully
Win32.GlobalUnlock(dibHandle);
}
}
// (the caller owns the DIB handle and frees it; nothing left to release here)
return bs;
}
Compress Image in Memory
internal static System.Windows.Media.ImageSource compressImage(System.Windows.Media.ImageSource ims)
{
using (MemoryStream stream = new MemoryStream())
{
using (MemoryStream outS = new MemoryStream())
{
BitmapSource bs = ims as BitmapSource;
JpegBitmapEncoder encoder = new JpegBitmapEncoder();
BitmapFrame bf = BitmapFrame.Create(bs);
//encoder.Frames.Add(BitmapFrame.Create(image1.Source));
encoder.Frames.Add(bf);
encoder.Save(stream);
stream.Flush();
try
{
// Read first frame of gif image
using (MagickImage image = new MagickImage(stream))
{
image.Quality = 75;
image.Resize(new Percentage(65)); // Percentage(0.65) would mean 0.65 %, almost certainly not what was intended
image.Density = new Density(200, DensityUnit.PixelsPerInch);
image.Write(outS);
}
stream.Close();
stream.Dispose();
BitmapImage bitmap = new BitmapImage();
bitmap.BeginInit();
bitmap.CacheOption = BitmapCacheOption.OnLoad;
outS.Position = 0;
bitmap.StreamSource = outS;
//
bitmap.EndInit();
//bitmap.Freeze();
outS.Flush();
outS.Close();
outS.Dispose();
ims = null;
return bitmap;
}
catch (Exception e)
{
return null;
}
}
}
}
I am in need of a very simple c# image resizer. By simple, I mean simple. This is just a program that loops through a single directory and changes all the pictures in that directory to the same resolution. Here's what I have so far.
private void Form1_Load(object sender, EventArgs e)
{
string[] files = null;
int count = 0;
files = System.IO.Directory.GetFiles(@"C:\Users\..\..\ChristmasPicsResized");
foreach (string file in files)
{
System.Drawing.Bitmap bmp = (System.Drawing.Bitmap)System.Drawing.Image.FromFile(file); // FromFile returns Image, so cast (the original "Bipmap" was a typo)
bmp = ResizeBitmap(bmp, 807, 605); // keep the result; the original discarded the resized bitmap
bmp.Save(file);
count++;
lblCount.Text = count.ToString();
}
}
public Bitmap ResizeBitmap(Bitmap b, int nWidth, int nHeight)
{
Bitmap result = new Bitmap(nWidth, nHeight);
using (Graphics g = Graphics.FromImage((Image)result))
g.DrawImage(b, 0, 0, nWidth, nHeight);
return result;
}
The problem I ran into is that the picture cannot be saved while it is open. I am unsure how to make this into a file stream. What should be a very simple app doesn't seem so simple to me. Any help please?
Try saving to a temp file, then delete the original file and rename the temp file to the original file name.
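A minimal sketch of that approach, assuming System.Drawing plus the ResizeBitmap method from the question (the .tmp suffix is illustrative):
foreach (string file in files)
{
    string tmp = file + ".tmp";
    using (var bmp = new Bitmap(file))                // this locks the original file
    using (var resized = ResizeBitmap(bmp, 807, 605))
    {
        resized.Save(tmp);                            // write the result elsewhere first
    }                                                 // both bitmaps, and the lock, released here
    File.Delete(file);
    File.Move(tmp, file);                             // the temp file takes the original name
}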
Have a look at C# Image to Byte Array and Byte Array to Image Converter Class
public byte[] imageToByteArray(System.Drawing.Image imageIn)
{
MemoryStream ms = new MemoryStream();
imageIn.Save(ms,System.Drawing.Imaging.ImageFormat.Gif);
return ms.ToArray();
}
and
public Image byteArrayToImage(byte[] byteArrayIn)
{
MemoryStream ms = new MemoryStream(byteArrayIn);
Image returnImage = Image.FromStream(ms);
return returnImage;
}
This way you can close the image after you have read it in, and can then save it over the existing one.
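For example, combined with the asker's loop (a hedged sketch reusing ResizeBitmap from the question):
foreach (string file in files)
{
    byte[] data = File.ReadAllBytes(file);            // the file handle is released immediately
    using (var img = byteArrayToImage(data))
    using (var bmp = new System.Drawing.Bitmap(img))
    using (var resized = ResizeBitmap(bmp, 807, 605))
    {
        resized.Save(file);                           // safe: nothing holds the file open
    }
}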
You could also render the resized images into a different folder to preserve the original, high-resolution images. Maybe you'll need them one day (I made that mistake once...).
I created this nuget package which does resizing for you:
http://nuget.org/packages/Simple.ImageResizer
Source and howto here:
https://github.com/terjetyl/Simple.ImageResizer
My C# Image Extension class:
namespace MycImageExtension
{
public static class Helper
{
public static Image Clip(this Image imgPhoto, int width, int height)
{
return Clip(imgPhoto, width, height, false);
}
public static Image ToImage(this byte[] ba)
{
var ms = new MemoryStream(ba);
return Image.FromStream(ms);
}
public static byte[] ToArray(this Image imgPhoto)
{
var ms = new MemoryStream();
imgPhoto.Save(ms, ImageFormat.Jpeg);
return ms.ToArray();
}
static ImageCodecInfo GetImageCodec(string mimetype)
{
foreach (ImageCodecInfo ici in ImageCodecInfo.GetImageEncoders())
{
if (ici.MimeType == mimetype) return ici;
}
return null;
}
public static Image Clip(this Image imgPhoto, int width, int height, bool stretch)
{
if (!stretch && (imgPhoto.Width <= width && imgPhoto.Height <= height))
return imgPhoto;
// detect if portrait
if (imgPhoto.Height > imgPhoto.Width)
{
// swap
int a = width;
width = height;
height = a;
}
var d = new Dimension(imgPhoto.Width, imgPhoto.Height);
double scale = d.NewSizeScaleFactor(new Dimension(width, height), stretch);
var newD = scale * d;
if (stretch)
{
if (!(newD.Width == width || newD.Height == height))
throw new Exception("Stretching algo has some error");
}
var bmPhoto = new Bitmap(imgPhoto, new Size(newD.Width, newD.Height));
return bmPhoto;
}
// using for crystal report
public static Image PadImage(this Image imgPhoto, int width, int height)
{
// detect if portrait
if (imgPhoto.Height > imgPhoto.Width)
{
// swap
int a = width;
width = height;
height = a;
}
var d = new Dimension(imgPhoto.Width, imgPhoto.Height);
double scale = d.NewSizeScaleFactor(new Dimension(width, height), true);
Dimension newSize = scale * d;
PadAt padAt;
int padNeeded;
newSize.GetPadNeeded(new Dimension(width, height), out padAt, out padNeeded);
int padLeft = 0, padTop = 0;
if (padAt == PadAt.Width)
padLeft = padNeeded;
else if (padAt == PadAt.Height)
padTop = padNeeded;
var bmPhoto = new Bitmap(width, height, PixelFormat.Format24bppRgb);
var grPhoto = Graphics.FromImage(bmPhoto);
grPhoto.Clear(Color.White);
grPhoto.InterpolationMode = InterpolationMode.HighQualityBicubic;
grPhoto.DrawImage(imgPhoto,
new Rectangle(padLeft, padTop, newSize.Width, newSize.Height),
new Rectangle(0, 0, imgPhoto.Width, imgPhoto.Height),
GraphicsUnit.Pixel);
grPhoto.Dispose();
return bmPhoto;
}
}
public enum PadAt { None = 0, Width = 1, Height }
public struct Dimension
{
public int Width { set; get; }
public int Height { set; get; }
public Dimension(int width, int height)
: this()
{
this.Width = width;
this.Height = height;
}
public override string ToString()
{
return string.Format("Width: {0} Height: {1}", Width, Height);
}
public static Dimension operator *(Dimension src, double scale)
{
return new Dimension((int)Math.Ceiling((scale * src.Width)), (int)Math.Ceiling((scale * src.Height)));
}
public static Dimension operator *(double scale, Dimension src)
{
return new Dimension((int)Math.Ceiling((scale * src.Width)), (int)Math.Ceiling((scale * src.Height)));
}
public double NewSizeScaleFactor(Dimension newSize, bool stretch)
{
if (!stretch
&&
(this.Width <= newSize.Width && this.Height <= newSize.Height))
return 1;
double widthScaleFactor = (double)newSize.Width / this.Width;
double heightScaleFactor = (double)newSize.Height / this.Height;
// return the lowest scale factor
if (widthScaleFactor < heightScaleFactor)
return widthScaleFactor;
else if (heightScaleFactor < widthScaleFactor)
return heightScaleFactor;
else
return widthScaleFactor; // can even use heightscalefactor
}
public Dimension Clip(Dimension newSize, bool stretch)
{
if (!stretch
&&
(this.Width <= newSize.Width && this.Height <= newSize.Height))
return new Dimension(this.Width, this.Height);
double smallestScaleFactor = NewSizeScaleFactor(newSize, stretch);
return new Dimension((int)(this.Width * smallestScaleFactor), (int)(this.Height * smallestScaleFactor));
}
// so crystal report images would have exact dimension
public void GetPadNeeded(Dimension newSize, out PadAt padAt, out int padNeeded)
{
if (this.Width == newSize.Width && this.Height == newSize.Height)
{
padAt = PadAt.None;
padNeeded = 0;
return;
}
if (this.Width > newSize.Width || this.Height > newSize.Height)
throw new Exception("Source cannot be bigger than the new size");
if (this.Width != newSize.Width && this.Height != newSize.Height)
throw new Exception("At least one side should be equal");
if (newSize.Width != this.Width)
{
padAt = PadAt.Width;
padNeeded = (newSize.Width - this.Width) / 2;
}
else if (newSize.Height != this.Height)
{
padAt = PadAt.Height;
padNeeded = (newSize.Height - this.Height) / 2;
}
else
{
throw new Exception("Some anomaly occured, contact the programmer");
}
}
// test the logic
static void X()
{
var ls = new Dimension[]
{
new Dimension(400, 400), // as is
new Dimension(640, 480), // as is
new Dimension(600, 480), // as is
new Dimension(800, 600), // as is
new Dimension(800, 400), // as is
new Dimension(1280, 960), // as is
new Dimension(1280, 961), // as is
new Dimension(1281, 960), // as is
new Dimension(1280, 900), // as is
new Dimension(1000, 960), // as is
new Dimension(1000, 970), // as is
new Dimension(1000, 900), // as is
new Dimension(1380, 960), // clip
new Dimension(1280, 1000), // clip
new Dimension(1380, 1300), // clip
new Dimension(1600, 1200), // clip
new Dimension(1600, 1000), // clip
new Dimension(1800, 1200), // clip
new Dimension(1800, 1000), // clip
new Dimension(1400, 1200), // clip
new Dimension(1400, 1000), // clip
new Dimension(960, 1280), // clip
new Dimension(960, 1300), // clip
new Dimension(970, 1280) // clip
};
foreach (var l in ls)
{
// saving to database...
double scale = l.NewSizeScaleFactor(new Dimension(1280, 960), false);
Dimension newSize = scale * l;
// ...saving to database
// display to crystal report...
double scaleA = l.NewSizeScaleFactor(new Dimension(800, 600), true);
Dimension newSizeA = scaleA * l;
PadAt padAt;
int padNeeded;
newSizeA.GetPadNeeded(new Dimension(800, 600), out padAt, out padNeeded);
// ...display to crystal report
Console.WriteLine("Pad {0} {1}", padAt, padNeeded);
Console.WriteLine();
}
Console.ReadLine();
}
}
}
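A short usage sketch of the extension methods above (the file paths are illustrative):
using MycImageExtension;
// shrink anything larger than 1280x960; smaller images pass through untouched
byte[] original = File.ReadAllBytes(@"C:\photos\input.jpg");  // hypothetical path
Image clipped = original.ToImage().Clip(1280, 960);
File.WriteAllBytes(@"C:\photos\output.jpg", clipped.ToArray());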