Image stitching with OpenCvSharp - C#

I want to do image stitching with OpenCvSharp, but something goes wrong: I do not get the right output of the 2 images stitched together.
I was following the tutorial in Python (link) and converting it into C# code.
Here are the 2 example photos:
The output, where the images should be stitched, comes out wrong; it looks the same as image 2.
I guess something goes wrong in, or somewhere after, this line:
Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
Here is the full code.
public partial class Form1 : Form
{
    public Form1()
    {
        InitializeComponent();
    }

    private void Form1_Load(object sender, EventArgs e)
    {
        bool debugImages = true;
        string locationFolder = "";
        OpenFileDialog dlg = new OpenFileDialog();
        dlg.CheckFileExists = true;
        dlg.Multiselect = true;
        if (dlg.ShowDialog() == System.Windows.Forms.DialogResult.OK)
        {
            locationFolder = Path.GetDirectoryName(dlg.FileNames[0]) + "\\output\\";
            List<Mat> imagesMat = new List<Mat>();
            for (var i = 0; i < dlg.FileNames.Length; i++)
            {
                using (Bitmap fromFile = new Bitmap(dlg.FileNames[i]))
                {
                    Mat source = BitmapConverter.ToMat(fromFile);
                    imagesMat.Add(source);
                }
            }

            if (imagesMat.Count != 2)
                throw new Exception("Select only 2 images!!!");

            int imageCounter = 0;
            Mat trainImg = imagesMat[0];
            Mat queryImg = imagesMat[1];
            Mat trainImg_gray = new Mat();
            Mat queryImg_gray = new Mat();
            Cv2.CvtColor(trainImg, trainImg_gray, ColorConversionCodes.BGRA2GRAY);
            Cv2.CvtColor(queryImg, queryImg_gray, ColorConversionCodes.BGRA2GRAY);

            // detecting keypoints
            // FastFeatureDetector, StarDetector, SIFT, SURF, ORB, BRISK, MSER, GFTTDetector, DenseFeatureDetector, SimpleBlobDetector
            string method = "SURF";
            string feature_matching = "bf"; //bf, knn
            var descriptor = SURF.Create(500, 4, 2, true);
            Mat descriptors1 = new Mat();
            Mat descriptors2 = new Mat();
            KeyPoint[] kpsA;
            KeyPoint[] kpsB;
            descriptor.DetectAndCompute(trainImg_gray, null, out kpsA, descriptors1);
            descriptor.DetectAndCompute(queryImg_gray, null, out kpsB, descriptors2);

            // Match descriptor vectors
            //var flannMatcher = new FlannBasedMatcher();
            DMatch[] matches;
            if (feature_matching == "bf")
                matches = matchKeyPointsBF(descriptors1, descriptors2, method);
            else
                matches = matchKeyPointsKNN(descriptors1, descriptors2, 0.75, method);

            var bfView = new Mat();
            Cv2.DrawMatches(trainImg, kpsA, queryImg, kpsB, matches, bfView, null, flags: DrawMatchesFlags.NotDrawSinglePoints);
            if (debugImages)
            {
                using (Bitmap resultBitmap = BitmapConverter.ToBitmap(bfView))
                    resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //1
            }

            Mat H = getHomography(kpsA, kpsB, descriptors1, descriptors2, matches, 4);
            if (H == null)
                throw new Exception("No Homography!!!");
            //for (var i = 0; i < H.Cols; i++)
            //{
            //    for (var j = 0; j < H.Rows; j++)
            //        Console.Write(H.At<float>(i, j) + " ");
            //    Console.WriteLine("");
            //}

            double width = trainImg.Size().Width + queryImg.Size().Width;
            double height = trainImg.Size().Height + queryImg.Size().Height;
            Mat result = new Mat();
            Cv2.WarpPerspective(trainImg, result, H, new OpenCvSharp.Size(width, height));
            if (debugImages)
            {
                using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
                    resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //2
            }

            result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(queryImg.Size().Width, queryImg.Size().Height))] = queryImg;
            if (debugImages)
            {
                using (Bitmap resultBitmap = BitmapConverter.ToBitmap(result))
                    resultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //3
            }

            //# transform the panorama image to grayscale and threshold it
            Mat gray = result.Clone();
            Cv2.CvtColor(result, gray, ColorConversionCodes.BGR2GRAY);
            Mat thresh = new Mat();
            double thresh2 = Cv2.Threshold(gray, thresh, 0, 255, ThresholdTypes.Binary);

            //# Finds contours from the binary image
            OpenCvSharp.Point[][] cnts;
            HierarchyIndex[] hierarchy;
            Cv2.FindContours(thresh, out cnts, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
            OpenCvSharp.Point[] cnts2 = new OpenCvSharp.Point[cnts[0].Length];
            for (var k = 0; k < cnts[0].Length; k++)
                cnts2[k] = cnts[0][k];
            //InputArray ptsA = InputArray.Create(cnts2);
            //var c = Cv2.ContourArea(ptsA, true);
            OpenCvSharp.Rect xywh = Cv2.BoundingRect(cnts2);
            result = result[new Rect(new OpenCvSharp.Point(xywh.X, xywh.Y), new OpenCvSharp.Size(xywh.Width, xywh.Height))];
            //result = result[new Rect(new OpenCvSharp.Point(0, 0), new OpenCvSharp.Size(256, 256))];

            Bitmap endResultBitmap = BitmapConverter.ToBitmap(result);
            endResultBitmap.Save(locationFolder + (imageCounter++).ToString().PadLeft(3, '0') + ".png", ImageFormat.Png); //4
            Environment.Exit(-1);
        }
    }

    private BFMatcher createMatcher(string method, bool crossCheck)
    {
        //"Create and return a Matcher Object"
        if (method == "SURF" || method == "SIFT")
            return new BFMatcher(NormTypes.L2, crossCheck);
        else //if (method == "ORB" || method == "BRISK")
            return new BFMatcher(NormTypes.Hamming, crossCheck);
    }

    private DMatch[] matchKeyPointsBF(Mat featuresA, Mat featuresB, string method)
    {
        BFMatcher bf = createMatcher(method, crossCheck: true);
        //# Match descriptors.
        DMatch[] bfMatches = bf.Match(featuresA, featuresB);
        //# Sort the features in order of distance.
        //# The points with small distance (more similarity) are ordered first in the vector
        DMatch[] rawMatches = bfMatches.OrderBy(a => a.Distance).ToArray();
        if (rawMatches.Length > 100)
            Array.Resize(ref rawMatches, 100);
        return rawMatches;
    }

    private DMatch[] matchKeyPointsKNN(Mat featuresA, Mat featuresB, double ratio, string method)
    {
        BFMatcher bf = createMatcher(method, crossCheck: false);
        //# compute the raw matches and initialize the list of actual matches
        DMatch[][] rawMatches = bf.KnnMatch(featuresA, featuresB, 2);
        List<DMatch> rawMatches2 = new List<DMatch>();
        //# loop over the raw matches
        DMatch prevmatchN = rawMatches[0][0];
        rawMatches2.Add(prevmatchN);
        for (int m = 0; m < rawMatches.Length; m++)
        {
            for (int n = 0; n < rawMatches[m].Length; n++)
            {
                //# ensure the distance is within a certain ratio of each
                //# other (i.e. Lowe's ratio test)
                DMatch matchN = rawMatches[m][n];
                if (n == 0)
                    prevmatchN = matchN;
                if (prevmatchN.Distance < matchN.Distance * (ratio))
                    rawMatches2.Add(matchN);
                if (rawMatches2.Count >= 100)
                    break;
            }
        }
        return rawMatches2.ToArray();
    }

    private Mat getHomography(KeyPoint[] kpsA, KeyPoint[] kpsB, Mat featuresA, Mat featuresB, DMatch[] matches, int reprojThresh)
    {
        //# convert the keypoints to numpy arrays
        Point2f[] PtA = new Point2f[matches.Length];
        Point2f[] PtB = new Point2f[matches.Length];
        for (int i = 0; i < matches.Length; i++)
        {
            KeyPoint kpsAI = kpsA[matches[i].QueryIdx];
            KeyPoint kpsBI = kpsB[matches[i].TrainIdx];
            PtA[i] = new Point2f(kpsAI.Pt.X, kpsAI.Pt.Y);
            PtB[i] = new Point2f(kpsBI.Pt.X, kpsBI.Pt.Y);
        }
        InputArray ptsA = InputArray.Create(PtA);
        InputArray ptsB = InputArray.Create(PtB);
        if (matches.Length > 4)
        {
            // You get the homography matrix using:
            Mat H = Cv2.FindHomography(ptsA, ptsB, HomographyMethods.Ransac, reprojThresh);
            // and then to get any point on the target picture from the original picture:
            //Mat targetPoint = new Mat();
            //Cv2.PerspectiveTransform(ptsA, targetPoint, H);
            return H;
        }
        else
            return null;
    }
}
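Two sanity checks that may help narrow this down (sketches, not guaranteed fixes). First, the KNN path above differs from Lowe's ratio test, which keeps the best match m of a pair only when m.Distance < ratio * n.Distance for the second-best match n; as written, the loop can add second-best matches instead. Second, projecting the corners of trainImg through H shows where WarpPerspective will actually place the image on the result canvas; if they land in the (0, 0) region that is later overwritten with queryImg, the output will look like image 2, exactly as described:

// Sketch 1: Lowe's ratio test in its usual form.
private DMatch[] matchKeyPointsKNNLowe(Mat featuresA, Mat featuresB, double ratio, string method)
{
    BFMatcher bf = createMatcher(method, crossCheck: false);
    DMatch[][] rawMatches = bf.KnnMatch(featuresA, featuresB, 2);
    // Keep the best match only when it is clearly better than the runner-up.
    return rawMatches
        .Where(pair => pair.Length == 2 && pair[0].Distance < ratio * pair[1].Distance)
        .Select(pair => pair[0])
        .ToArray();
}

// Sketch 2: print where H sends the corners of trainImg before warping.
private void printWarpedCorners(Mat trainImg, Mat H)
{
    Point2f[] corners =
    {
        new Point2f(0, 0),
        new Point2f(trainImg.Width, 0),
        new Point2f(trainImg.Width, trainImg.Height),
        new Point2f(0, trainImg.Height),
    };
    Point2f[] warped = Cv2.PerspectiveTransform(corners, H);
    foreach (Point2f p in warped)
        Console.WriteLine("corner -> (" + p.X + ", " + p.Y + ")");
}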

Related

CvInvoke.Undistort is making the image more distorted, not less

This is my first time working with OpenCV. The code below uses the Emgu package, but I've also tried it using OpenCvSharp and got exactly the same results.
I am calibrating the camera using 6 chessboard photos, then using the calibration to Undistort an image. The test image is a photo of some squared paper. The camera really isn't very distorted: I can't tell it is distorted by eye, only when I draw a straight line over the lines in a paint program.
However, the resulting image comes out far more distorted than the original, adding a lot of barrel distortion.
When I run the commented-out lines, DrawChessboardCorners shows that it is identifying the points perfectly.
What am I doing wrong here?
static void Main(string[] args)
{
Size patternSize = new Size(9, 6);
List<VectorOfPointF> ListOfCornerPoints = new List<VectorOfPointF>();
DirectoryInfo dir = new DirectoryInfo(@"Z:\Simon\Dropbox\Apps\OpenCVPlay\Image Processing\Chessboard Pattern Photos With Pixel");
Size calibrationImageSize = new Size(0, 0);
foreach (var file in dir.GetFiles())
{
VectorOfPointF corners = new VectorOfPointF();
var calibrationImage = new Image<Rgb, byte>(file.FullName);
bool find = CvInvoke.FindChessboardCorners(calibrationImage, patternSize, corners, CalibCbType.AdaptiveThresh | CalibCbType.FilterQuads);
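// NOTE: 'find' is never checked; if FindChessboardCorners fails on an image,
// the empty 'corners' vector is still added to ListOfCornerPoints below and
// will poison the calibration (e.g. guard with: if (!find) continue;).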
//CvInvoke.DrawChessboardCorners(calibrationImage, patternSize, corners, find);
//calibrationImage.Save(@"Z:\Simon\Dropbox\Apps\OpenCVPlay\Image Processing\Chessboard Pattern Photos With Pixel\test\" + file.Name);
ListOfCornerPoints.Add(corners);
calibrationImageSize = calibrationImage.Size;
}
PointF[][] arrayOfCornerPoints = ListOfCornerPoints.Select(a => a.ToArray()).ToArray();
var modelPoints = CreateModelPoints(ListOfCornerPoints.Count, patternSize.Width, patternSize.Height);
var arrayOfModelPoints = modelPoints.Select(a => a.ToArray()).ToArray();
Matrix<double> cameraDistortionCoeffs = new Matrix<double>(5, 1);
Matrix<double> cameraMatrix = new Matrix<double>(3, 3);
Mat[] rotationVectors;
Mat[] translationVectors;
CvInvoke.CalibrateCamera(
arrayOfModelPoints,
arrayOfCornerPoints,
calibrationImageSize,
cameraMatrix,
cameraDistortionCoeffs,
CalibType.Default,
new MCvTermCriteria(),
out rotationVectors,
out translationVectors);
var imageToProcess = new Image<Rgb, byte>(@"Z:\Simon\Dropbox\Apps\OpenCVPlay\Image Processing\Test data\squarePaper.jpg");
Rectangle Rect2 = new Rectangle();
var newCameraMatrix = CvInvoke.GetOptimalNewCameraMatrix(cameraMatrix, cameraDistortionCoeffs, calibrationImageSize, 1, imageToProcess.Size, ref Rect2, true);
Image<Rgb, byte> processedImage = imageToProcess.Clone();
CvInvoke.Undistort(imageToProcess, processedImage, cameraMatrix, cameraDistortionCoeffs);
processedImage.Save(@"Z:\Simon\Dropbox\Apps\OpenCVPlay\Image Processing\Test data\squarePaperFixed.jpg");
}
static List<List<MCvPoint3D32f>> CreateModelPoints(int length, int chessboardCols, int chessboardRows)
{
var modelPoints = new List<List<MCvPoint3D32f>>();
for (var k = 0; k < length; k++)
{
var chessboard = new List<MCvPoint3D32f>();
for (var y = 0; y < chessboardRows; y++)
{
for (var x = 0; x < chessboardCols; x++)
{
chessboard.Add(new MCvPoint3D32f(x, y, 0));
}
}
modelPoints.Add(chessboard);
}
return modelPoints;
}
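One diagnostic worth adding (a sketch, not part of the original post): CvInvoke.CalibrateCamera returns the RMS reprojection error in pixels, so printing it shows whether the calibration itself is bad before Undistort is ever called. Values roughly below 1 px are normal for a good calibration; anything much larger means the corner or model-point data is wrong, and the solver will invent distortion coefficients to compensate:

// CalibrateCamera returns the RMS reprojection error in pixels.
double rms = CvInvoke.CalibrateCamera(
    arrayOfModelPoints,
    arrayOfCornerPoints,
    calibrationImageSize,
    cameraMatrix,
    cameraDistortionCoeffs,
    CalibType.Default,
    new MCvTermCriteria(30, 1e-6),   // explicit criteria instead of the empty default
    out rotationVectors,
    out translationVectors);
Console.WriteLine("RMS reprojection error: " + rms + " px");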

Calculate actual velocity using Lucas-Kanade optical flow and EmguCV

How can I calculate the actual speed of all moving objects in a video, using the Lucas-Kanade algorithm to compute the optical flow?
I need to do this on this video. The camera is fixed in one place (Fig. 1).
I find the key points and track them using the Lucas-Kanade algorithm (Fig. 2).
How can I use this algorithm to compute the actual speed of each car?
Thank you for your answers!
My code:
public class OpticalFlowLK : BaseFilter
{
private Mat prevFrame;
private Mat nextFrame;
private bool prevFrameEmpty = true;
private GFTTDetector gFTTDetector;
private Stopwatch sWatch;
private double time = 0.04;
public OpticalFlowLK()
{
TAG = "[Optical Flow Lucas Kanade]";
gFTTDetector = new GFTTDetector(500);
sWatch = new Stopwatch();
}
protected override Mat ProcessFrame(ref Mat frame)
{
Mat rez = new Mat();
frame.CopyTo(rez);
nextFrame = new Mat();
Mat gray = new Mat();
var tmpImg = gray.ToImage<Gray, Byte>();
CvInvoke.CvtColor(frame, nextFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
if (!prevFrameEmpty)
{
VectorOfKeyPoint prevFeatures = new VectorOfKeyPoint(gFTTDetector.Detect(prevFrame));
//Features2DToolbox.DrawKeypoints(rez, prevFeatures, rez, new Bgr(0, 0, 255));
PointF[] prevPts = new PointF[prevFeatures.Size];
for (int i = 0; i < prevFeatures.Size; i++)
{
prevPts[i] = prevFeatures[i].Point;
}
PointF[] nextPts;
byte[] status;
float[] errors;
sWatch.Start();
CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, prevPts, new Size(20, 20), 1, new MCvTermCriteria(20, 0.03), out nextPts, out status, out errors);
sWatch.Stop();
sWatch.Reset();
prevFrame = nextFrame.Clone();
for (int i = 0; i < status.Length; i++)
{
Point prevPt = new Point((int)prevPts[i].X, (int)prevPts[i].Y);
Point nextPt = new Point((int)nextPts[i].X, (int)nextPts[i].Y);
double length = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
if (length > 3)
{
CvInvoke.Circle(rez, nextPt, 1, new MCvScalar(0, 255, 0), 2);
}
}
sWatch.Stop();
prevFrameEmpty = false;
}
else if (prevFrameEmpty)
{
prevFrame = nextFrame.Clone();
prevFrameEmpty = false;
}
return rez;
}
protected override bool InitFilter(ref Mat frame)
{
throw new NotImplementedException();
}
}
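As for the actual speed: Lucas-Kanade only yields displacement in pixels per frame. Converting that to a real-world velocity needs the video frame rate and a pixels-to-metres scale for the road plane, obtained from camera calibration or a known reference length in the scene. A minimal sketch, where metersPerPixel and fps are assumed values you must supply, and where each car's tracked points would still need to be grouped and averaged:

// Sketch: pixel displacement per frame -> km/h, under the assumption of a
// roughly constant ground-plane scale (metersPerPixel) and frame rate (fps).
static double EstimateSpeedKmh(PointF prev, PointF next, double metersPerPixel, double fps)
{
    double dx = next.X - prev.X;
    double dy = next.Y - prev.Y;
    double pixelsPerFrame = Math.Sqrt(dx * dx + dy * dy);
    double metersPerSecond = pixelsPerFrame * metersPerPixel * fps;
    return metersPerSecond * 3.6; // m/s -> km/h
}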

How to detect rectangles in an image with Emgu CV?

I am trying to detect rectangles with Emgu CV in C#. I was playing around with the following code that I got off the internet. I am new to this, so I hope someone can help me out:
public class ShapeDectection
{
public Image<Bgr, Byte> img;
public PictureBox Picture;
public PictureBox Result;
public double dCannyThres;
private UMat uimage;
private UMat cannyEdges;
private List<Triangle2DF> triangleList;
private List<RotatedRect> boxList;
private Image<Bgr, Byte> triangleRectImage;
public ShapeDectection(PictureBox pic, string filepath, PictureBox results)
{
Picture = pic;
Result = results;
img = new Image<Bgr, Byte>(filepath);
triangleList = new List<Triangle2DF>();
boxList = new List<RotatedRect>();
uimage = new UMat();
cannyEdges = new UMat();
dCannyThres = 180.0;
fnFindTriangleRect();
MessageBox.Show("done");
}
private void fnFindTriangleRect()
{
CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);
UMat pyrDown = new UMat();
CvInvoke.PyrDown(uimage, pyrDown);
CvInvoke.PyrUp(pyrDown, uimage);
triangleRectImage = img.CopyBlank();
using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
int count = contours.Size;
MessageBox.Show("count = " + count);
for (int i = 0; i < count; i++)
{
using (VectorOfPoint contour = contours[i])
using (VectorOfPoint approxContour = new VectorOfPoint())
{
CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contour with area > 250
{
MessageBox.Show("approxContour.Size = " + approxContour.Size);
if (approxContour.Size == 3) //The contour has 3 vertices, is a triangle
{
Point[] pts = approxContour.ToArray();
triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
}
else if (approxContour.Size == 4) // The contour has 4 vertices
{
#region Determine if all the angles in the contours are within [80,100] degree
bool isRectangle = true;
Point[] pts = approxContour.ToArray();
LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
for (int j = 0; j < edges.Length; j++)
{
double dAngle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
MessageBox.Show("" + dAngle);
if (dAngle < 80 || dAngle > 100)
{
isRectangle = false;
break;
}
}
#endregion
if (isRectangle) boxList.Add(CvInvoke.MinAreaRect(approxContour));
}
}
}
}
}
foreach (Triangle2DF triangle in triangleList)
{
triangleRectImage.Draw(triangle, new Bgr(Color.DarkBlue), 2);
}
foreach (RotatedRect box in boxList)
triangleRectImage.Draw(box, new Bgr(Color.Red), 2);
Result.Image = triangleRectImage.ToBitmap();
}
}
I am trying to detect the shapes in the following picture,
however this is the result:
As you can see, no shape was detected by the script, as the number of contours was zero. How can I modify the script so that I can detect these shapes?
I can't seem to find the "Canny edge detection" step
UMat cannyEdges = new UMat();
CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);
in your code. Here is the link to the Emgu CV wiki page: http://www.emgu.com/wiki/index.php/Shape_(Triangle,_Rectangle,_Circle,_Line)_Detection_in_CSharp
Check there for the missing parts.
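Concretely, the missing step belongs in fnFindTriangleRect between the pyramid filtering and FindContours, something like this (a sketch; the 180/120 thresholds follow the wiki sample and may need tuning):

CvInvoke.PyrDown(uimage, pyrDown);
CvInvoke.PyrUp(pyrDown, uimage);

// Fill cannyEdges before FindContours reads it; otherwise it stays empty
// and zero contours are found, which matches the behaviour described above.
double cannyThreshold = 180.0;
double cannyThresholdLinking = 120.0;
CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);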

Face Recognition Using EMGU CV 3.1.0

The DetectAndRecognizeFaces function detects a face and passes the detected image to the Recognise method, which returns the name of the recognized face based on the label.
The LoadTrainingSet function fetches data from a SQL database, and the LoadTrainedData method trains the EigenFaceRecognizer.
The problem is that the Predict function never returns -1 for unknown faces; it always returns a match even if the detected face is not present in the database.
The Code:
private void DetectAndRecognizeFaces()
{
Image<Gray, byte> grayframe = ImageFrame.Convert<Gray, byte>();
//Assign user-defined Values to parameter variables:
minNeighbors = int.Parse(comboBoxMinNeigh.Text); // the 3rd parameter
windowsSize = int.Parse(textBoxWinSize.Text); // the 5th parameter
scaleIncreaseRate = Double.Parse(comboBoxScIncRte.Text); //the 2nd parameter
//detect faces from the gray-scale image and store into an array of type 'var',i.e 'MCvAvgComp[]'
var faces = haar.DetectMultiScale(grayframe, scaleIncreaseRate, minNeighbors, Size.Empty); //the actual face detection happens here
MessageBox.Show("Total Faces Detected: " + faces.Length.ToString());
Bitmap BmpInput = grayframe.ToBitmap();
Bitmap ExtractedFace; //empty
Graphics grp;
//MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);
faceRecognizer.Load(recognizeFilePath);
foreach (var face in faces)
{
t = t + 1;
result = ImageFrame.Copy(face).Convert<Gray, byte>().Resize(100, 100, Inter.Cubic);
//set the size of the empty box(ExtractedFace) which will later contain the detected face
ExtractedFace = new Bitmap(face.Width, face.Height);
//assign the empty box to graphics for painting
grp = Graphics.FromImage(ExtractedFace);
//graphics fills the empty box with exact pixels of the face to be extracted from input image
grp.DrawImage(BmpInput, 0, 0, face, GraphicsUnit.Pixel);
string name = Recognise(result);
if (name == "Unknown")
{
ImageFrame.Draw(face, new Bgr(Color.Red), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 0, 255), 1, LineType.EightConnected, bottomLeftOrigin);
}
else
{
ImageFrame.Draw(face, new Bgr(Color.Green), 3);
MessageBox.Show("Face Name is: " + name.ToString());
ImageFrame.Draw(name, new Point(face.X - 2, face.Y - 2), FontFace.HersheyComplex, 0.5,
new Bgr(0, 255, 0), 1, LineType.EightConnected, bottomLeftOrigin);
}
CamImageBox.Image = ImageFrame;
}
}
public string Recognise(Image<Gray, byte> Input_image, int Eigen_Thresh = -1)
{
if (_IsTrained)
{
faceRecognizer.Load(recognizeFilePath);
FaceRecognizer.PredictionResult ER = faceRecognizer.Predict(Input_image);
if (ER.Label == -1)
{
Eigen_Label = "Unknown";
Eigen_Distance = 0;
return Eigen_Label;
}
else
{
Eigen_Label = Names_List[ER.Label];
Eigen_Distance = (float)ER.Distance;
if (Eigen_Thresh > -1) Eigen_threshold = Eigen_Thresh;
//Only use the post threshold rule if we are using an Eigen Recognizer
//since Fisher and LBHP threshold set during the constructor will work correctly
switch (Recognizer_Type)
{
case ("EMGU.CV.EigenFaceRecognizer"):
if (Eigen_Distance > Eigen_threshold) return Eigen_Label;
else return "Unknown";
case ("EMGU.CV.LBPHFaceRecognizer"):
case ("EMGU.CV.FisherFaceRecognizer"):
default:
return Eigen_Label; //the threshold set in training controls unknowns
}
}
}
else return "";
}
private void LoadTrainingSet()
{
Bitmap bmpImage;
for (int i = 0; i < totalRows; i++)
{
byte[] fetchedBytes = (byte[])dataTable.Rows[i]["FaceImage"];
MemoryStream stream = new MemoryStream(fetchedBytes);
//stream.Write(fetchedBytes, 0, fetchedBytes.Length);
bmpImage = new Bitmap(stream);
trainingImages.Add(new Emgu.CV.Image<Gray, Byte>(bmpImage).Resize(100, 100, Inter.Cubic));
//string faceName = (string)dataTable.Rows[i]["FaceName"];
int faceName = (int)dataTable.Rows[i]["FaceID"];
NameLabels.Add(faceName);
NameLable = (string)dataTable.Rows[i]["FaceName"];
Names_List.Add(NameLable);
//ContTrain = NameLabels[i];
}
LoadTrainedData();
}
public void LoadTrainedData()
{
if (trainingImages.ToArray().Length != 0)
{
var faceImages = new Image<Gray, byte>[trainingImages.Count()];
var facesIDs = new int[NameLabels.Count()];
//var facesNames = new string[Names_List.Count()];
//int[] faceLabels = new int[NameLabels.Count()];
//MCvTermCriteria termCrit = new MCvTermCriteria(ContTrain, 0.001);
for (int i = 0; i < trainingImages.ToArray().Length; i++)
{
faceImages[i] = trainingImages[i];
facesIDs[i] = NameLabels[i];
}
try
{
faceRecognizer.Train(faceImages, facesIDs);
faceRecognizer.Save(recognizeFilePath);
_IsTrained = true;
}
catch (Exception error)
{
MessageBox.Show(error.ToString());
}
}
}
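A note on why Predict never returns -1: the OpenCV face recognizers only report label -1 when the best match's distance exceeds the threshold fixed at construction time, and with no threshold set the nearest label is always returned. A sketch (the component count and threshold below are made-up starting values that would need tuning against logged Eigen_Distance values):

// Construct the recognizer with a finite distance threshold so Predict()
// can return -1 for unknown faces. Both values are hypothetical.
int numComponents = 80;     // number of eigenfaces kept
double threshold = 3500;    // distances above this => label -1 ("unknown")
var faceRecognizer = new EigenFaceRecognizer(numComponents, threshold);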

How to insert a picture into Excel using a MemoryStream?

Below are two methods: one uses a MemoryStream and the other uses a real file on disk.
public void InsertImage(long x, long y, long? width, long? height, string sImagePath, WorksheetPart wsp)
{
try
{
DrawingsPart dp;
ImagePart imgp;
WorksheetDrawing wsd;
ImagePartType ipt;
switch (sImagePath.Substring(sImagePath.LastIndexOf('.') + 1).ToLower())
{
case "png":
ipt = ImagePartType.Png;
break;
case "jpg":
case "jpeg":
ipt = ImagePartType.Jpeg;
break;
case "gif":
ipt = ImagePartType.Gif;
break;
default:
return;
}
if (wsp.DrawingsPart == null)
{
//----- no drawing part exists, add a new one
dp = wsp.AddNewPart<DrawingsPart>();
imgp = dp.AddImagePart(ipt, wsp.GetIdOfPart(dp));
wsd = new WorksheetDrawing();
}
else
{
//----- use existing drawing part
dp = wsp.DrawingsPart;
imgp = dp.AddImagePart(ipt);
dp.CreateRelationshipToPart(imgp);
wsd = dp.WorksheetDrawing;
}
using (FileStream fs = new FileStream(sImagePath, FileMode.Open))
{
imgp.FeedData(fs);
}
int imageNumber = dp.ImageParts.Count<ImagePart>();
if (imageNumber == 1)
{
Drawing drawing = new Drawing();
drawing.Id = dp.GetIdOfPart(imgp);
wsp.Worksheet.Append(drawing);
}
NonVisualDrawingProperties nvdp = new NonVisualDrawingProperties();
nvdp.Id = new UInt32Value((uint)(1024 + imageNumber));
nvdp.Name = "Picture " + imageNumber.ToString();
nvdp.Description = "";
DocumentFormat.OpenXml.Drawing.PictureLocks picLocks = new DocumentFormat.OpenXml.Drawing.PictureLocks();
picLocks.NoChangeAspect = true;
picLocks.NoChangeArrowheads = true;
NonVisualPictureDrawingProperties nvpdp = new NonVisualPictureDrawingProperties();
nvpdp.PictureLocks = picLocks;
NonVisualPictureProperties nvpp = new NonVisualPictureProperties();
nvpp.NonVisualDrawingProperties = nvdp;
nvpp.NonVisualPictureDrawingProperties = nvpdp;
DocumentFormat.OpenXml.Drawing.Stretch stretch = new DocumentFormat.OpenXml.Drawing.Stretch();
stretch.FillRectangle = new DocumentFormat.OpenXml.Drawing.FillRectangle();
BlipFill blipFill = new BlipFill();
DocumentFormat.OpenXml.Drawing.Blip blip = new DocumentFormat.OpenXml.Drawing.Blip();
blip.Embed = dp.GetIdOfPart(imgp);
blip.CompressionState = DocumentFormat.OpenXml.Drawing.BlipCompressionValues.Print;
blipFill.Blip = blip;
blipFill.SourceRectangle = new DocumentFormat.OpenXml.Drawing.SourceRectangle();
blipFill.Append(stretch);
DocumentFormat.OpenXml.Drawing.Transform2D t2d = new DocumentFormat.OpenXml.Drawing.Transform2D();
DocumentFormat.OpenXml.Drawing.Offset offset = new DocumentFormat.OpenXml.Drawing.Offset();
offset.X = 0;
offset.Y = 0;
t2d.Offset = offset;
Bitmap bm = new Bitmap(sImagePath);
DocumentFormat.OpenXml.Drawing.Extents extents = new DocumentFormat.OpenXml.Drawing.Extents();
if (width == null)
extents.Cx = (long)bm.Width * (long)((float)914400 / bm.HorizontalResolution);
else
extents.Cx = width.Value * (long)((float)914400 / bm.HorizontalResolution);
if (height == null)
extents.Cy = (long)bm.Height * (long)((float)914400 / bm.VerticalResolution);
else
extents.Cy = height.Value * (long)((float)914400 / bm.VerticalResolution);
bm.Dispose();
t2d.Extents = extents;
ShapeProperties sp = new ShapeProperties();
sp.BlackWhiteMode = DocumentFormat.OpenXml.Drawing.BlackWhiteModeValues.Auto;
sp.Transform2D = t2d;
DocumentFormat.OpenXml.Drawing.PresetGeometry prstGeom = new DocumentFormat.OpenXml.Drawing.PresetGeometry();
prstGeom.Preset = DocumentFormat.OpenXml.Drawing.ShapeTypeValues.Rectangle;
prstGeom.AdjustValueList = new DocumentFormat.OpenXml.Drawing.AdjustValueList();
sp.Append(prstGeom);
sp.Append(new DocumentFormat.OpenXml.Drawing.NoFill());
DocumentFormat.OpenXml.Drawing.Spreadsheet.Picture picture = new DocumentFormat.OpenXml.Drawing.Spreadsheet.Picture();
picture.NonVisualPictureProperties = nvpp;
picture.BlipFill = blipFill;
picture.ShapeProperties = sp;
Position pos = new Position();
pos.X = x * 914400 / 72;
pos.Y = y * 914400 / 72;
Extent ext = new Extent();
ext.Cx = extents.Cx;
ext.Cy = extents.Cy;
AbsoluteAnchor anchor = new AbsoluteAnchor();
anchor.Position = pos;
anchor.Extent = ext;
anchor.Append(picture);
anchor.Append(new ClientData());
wsd.Append(anchor);
wsd.Save(dp);
}
catch (Exception ex)
{
throw; // rethrow without losing the stack trace, or do something more interesting if you want
}
}
//use MemoryStream
public void InsertImage(long x, long y, long? width, long? height, MemoryStream ms, WorksheetPart wsp)
{
try
{
DrawingsPart dp;
ImagePart imgp;
WorksheetDrawing wsd;
ImagePartType ipt = ImagePartType.Jpeg;
if (wsp.DrawingsPart == null)
{
//----- no drawing part exists, add a new one
dp = wsp.AddNewPart<DrawingsPart>();
imgp = dp.AddImagePart(ipt, wsp.GetIdOfPart(dp));
wsd = new WorksheetDrawing();
}
else
{
//----- use existing drawing part
dp = wsp.DrawingsPart;
imgp = dp.AddImagePart(ipt);
dp.CreateRelationshipToPart(imgp);
wsd = dp.WorksheetDrawing;
}
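// NOTE: 'new Bitmap(ms)' below reads the stream to its end, so the later
// FeedData(ms) call receives no bytes unless ms.Position is reset to 0 first.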
Bitmap bitmap = new Bitmap(ms);
imgp.FeedData(ms);
int imageNumber = dp.ImageParts.Count<ImagePart>();
if (imageNumber == 1)
{
Drawing drawing = new Drawing();
drawing.Id = dp.GetIdOfPart(imgp);
wsp.Worksheet.Append(drawing);
}
NonVisualDrawingProperties nvdp = new NonVisualDrawingProperties();
nvdp.Id = new UInt32Value((uint)(1024 + imageNumber));
nvdp.Name = "Picture " + imageNumber.ToString();
nvdp.Description = "";
DocumentFormat.OpenXml.Drawing.PictureLocks picLocks = new DocumentFormat.OpenXml.Drawing.PictureLocks();
picLocks.NoChangeAspect = true;
picLocks.NoChangeArrowheads = true;
NonVisualPictureDrawingProperties nvpdp = new NonVisualPictureDrawingProperties();
nvpdp.PictureLocks = picLocks;
NonVisualPictureProperties nvpp = new NonVisualPictureProperties();
nvpp.NonVisualDrawingProperties = nvdp;
nvpp.NonVisualPictureDrawingProperties = nvpdp;
DocumentFormat.OpenXml.Drawing.Stretch stretch = new DocumentFormat.OpenXml.Drawing.Stretch();
stretch.FillRectangle = new DocumentFormat.OpenXml.Drawing.FillRectangle();
BlipFill blipFill = new BlipFill();
DocumentFormat.OpenXml.Drawing.Blip blip = new DocumentFormat.OpenXml.Drawing.Blip();
blip.Embed = dp.GetIdOfPart(imgp);
blip.CompressionState = DocumentFormat.OpenXml.Drawing.BlipCompressionValues.Print;
blipFill.Blip = blip;
blipFill.SourceRectangle = new DocumentFormat.OpenXml.Drawing.SourceRectangle();
blipFill.Append(stretch);
DocumentFormat.OpenXml.Drawing.Transform2D t2d = new DocumentFormat.OpenXml.Drawing.Transform2D();
DocumentFormat.OpenXml.Drawing.Offset offset = new DocumentFormat.OpenXml.Drawing.Offset();
offset.X = 0;
offset.Y = 0;
t2d.Offset = offset;
DocumentFormat.OpenXml.Drawing.Extents extents = new DocumentFormat.OpenXml.Drawing.Extents();
//Bitmap bitmap = new Bitmap(ms);
if (width == null)
extents.Cx = (long)bitmap.Width * (long)((float)914400 / bitmap.HorizontalResolution);
else
extents.Cx = width.Value * (long)((float)914400 / bitmap.HorizontalResolution);
if (height == null)
extents.Cy = (long)bitmap.Height * (long)((float)914400 / bitmap.VerticalResolution);
else
extents.Cy = height.Value * (long)((float)914400 / bitmap.VerticalResolution);
bitmap.Dispose();
t2d.Extents = extents;
ShapeProperties sp = new ShapeProperties();
sp.BlackWhiteMode = DocumentFormat.OpenXml.Drawing.BlackWhiteModeValues.Auto;
sp.Transform2D = t2d;
DocumentFormat.OpenXml.Drawing.PresetGeometry prstGeom = new DocumentFormat.OpenXml.Drawing.PresetGeometry();
prstGeom.Preset = DocumentFormat.OpenXml.Drawing.ShapeTypeValues.Rectangle;
prstGeom.AdjustValueList = new DocumentFormat.OpenXml.Drawing.AdjustValueList();
sp.Append(prstGeom);
sp.Append(new DocumentFormat.OpenXml.Drawing.NoFill());
DocumentFormat.OpenXml.Drawing.Spreadsheet.Picture picture = new DocumentFormat.OpenXml.Drawing.Spreadsheet.Picture();
picture.NonVisualPictureProperties = nvpp;
picture.BlipFill = blipFill;
picture.ShapeProperties = sp;
Position pos = new Position();
pos.X = x * 914400 / 72;
pos.Y = y * 914400 / 72;
Extent ext = new Extent();
ext.Cx = extents.Cx;
ext.Cy = extents.Cy;
AbsoluteAnchor anchor = new AbsoluteAnchor();
anchor.Position = pos;
anchor.Extent = ext;
anchor.Append(picture);
anchor.Append(new ClientData());
wsd.Append(anchor);
wsd.Save(dp);
}
catch (Exception ex)
{
throw; // rethrow without losing the stack trace, or do something more interesting if you want
}
}
If I invoke the first method (using a real file on disk), everything is OK: I can insert my picture into the Excel file. But if I read the file into a MemoryStream and invoke the second method, I see the picture rectangle with an error message instead of the image.
So my question is: how can I insert a picture into Excel via a MemoryStream? I don't want to create too many files on disk.
I believe you need to create the bitmap image data from the stream first.
There's already a solution for that here on Stack Overflow: Byte Array to Bitmap Image.
I copy-pasted the code from that solution:
int w = 100;
int h = 200;
int ch = 3; //number of channels (i.e. assuming 24-bit RGB in this case)
byte[] imageData = new byte[w * h * ch]; //your image data here
Bitmap bitmap = new Bitmap(w, h, PixelFormat.Format24bppRgb);
BitmapData bmData = bitmap.LockBits(new System.Drawing.Rectangle(0, 0, bitmap.Width, bitmap.Height), ImageLockMode.ReadWrite, bitmap.PixelFormat);
IntPtr pNative = bmData.Scan0;
Marshal.Copy(imageData, 0, pNative, w * h * ch);
bitmap.UnlockBits(bmData);
OK, I think I have found a solution.
I changed the MemoryStream parameter to a string, and convert it to a MemoryStream like below:
MemoryStream ms = new System.IO.MemoryStream(System.Convert.FromBase64String(str));
Now it works.
I learned this from the Open XML SDK 2.5 Productivity Tool.
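Alternatively, a sketch that keeps the MemoryStream parameter: the rectangle-with-error symptom is consistent with the stream being consumed by new Bitmap(ms) before FeedData(ms) runs, so measuring the image first and rewinding the stream should also work:

// Measure the image first, then rewind the stream before feeding the part.
long cx, cy;
using (Bitmap bitmap = new Bitmap(ms))
{
    cx = (long)(bitmap.Width * (914400f / bitmap.HorizontalResolution));
    cy = (long)(bitmap.Height * (914400f / bitmap.VerticalResolution));
}
ms.Position = 0;   // new Bitmap(ms) left the position at the end of the stream
imgp.FeedData(ms); // the ImagePart now receives the complete picture data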
