I have a JPEG image, and I save its bitmap data to a byte array.
The JPEG has a width of 100 and a height of 100.
I want to extract the region Rectangle(10, 10, 20, 20).
Obviously, I can iterate through this byte array, but I am unsure how to relate the x,y pixel coordinates to positions in the array. I know that I have to use the stride and the pixel size, which is 4 as it is RGBA.
I have this code, which came from the question "cropping an area from BitmapData with C#":
Bitmap bmp = new Bitmap(_file);
Rectangle rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
BitmapData rawOriginal = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), ImageLockMode.ReadWrite, PixelFormat.Format32bppRgb);

int origByteCount = rawOriginal.Stride * rawOriginal.Height;
byte[] origBytes = new byte[origByteCount];
System.Runtime.InteropServices.Marshal.Copy(rawOriginal.Scan0, origBytes, 0, origByteCount);

int BPP = 4;
int width = 20;
int height = 20;
int startX = 10;
int startY = 10;
byte[] croppedBytes = new byte[width * height * BPP];

for (int i = 0; i < height; i++)
{
    for (int j = 0; j < width * BPP; j += BPP)
    {
        int origIndex = (startX * rawOriginal.Stride) + (i * rawOriginal.Stride) + (startY * BPP) + (j);
        int croppedIndex = (i * width * BPP) + (j);

        // copy data: once for each channel
        for (int k = 0; k < BPP; k++)
        {
            croppedBytes[croppedIndex + k] = origBytes[origIndex + k];
        }
    }
}
But I found that this line is incorrect:
int origIndex = (startX * rawOriginal.Stride) + (i * rawOriginal.Stride) + (startY * BPP) + (j);
Does anyone know what value I should use here, please?
Thanks.
When you're working with bitmap data, there are two important things to keep in mind:
the BPP (bytes per pixel), which here is 4
the stride (the number of bytes in one line of the image), which here would be 4 * width
So if you want to get the offset of a pixel, just use this formula:
int offset = x * BPP + y * stride;
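For example, in the 100x100 image from the question (BPP = 4, so stride = 4 * 100 = 400), the pixel at (10, 10) starts at byte offset 10 * 4 + 10 * 400 = 4040.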
If you want to extract only a part of your bitmap, you could just do this:
int i = 0;
for (int y = startY; y < startY + height; y++)
{
    for (int k = startX * bpp + y * stride; k < (startX + width) * bpp + y * stride; k++)
    {
        croppedBytes[i] = origBytes[k];
        i++;
    }
}
Stride is the number of bytes per line (the y direction); you shouldn't multiply x by Stride at any point:
int y = startY + i;
int x = startX;
int origIndex = y * rawOriginal.Stride + x * BPP;
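Putting the two answers together, the inner index calculation from the question can be rewritten like this (a sketch, assuming croppedBytes is allocated as width * height * BPP bytes):
for (int i = 0; i < height; i++)
{
    for (int j = 0; j < width; j++)
    {
        // rows advance by the stride, columns by bytes per pixel
        int origIndex = (startY + i) * rawOriginal.Stride + (startX + j) * BPP;
        int croppedIndex = (i * width + j) * BPP;
        for (int k = 0; k < BPP; k++)
        {
            croppedBytes[croppedIndex + k] = origBytes[origIndex + k];
        }
    }
}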
Related
I'm having an issue converting an image from a YUV420p byte[] to an RGB byte[] and then to a Bitmap.
This is the method I'm using to convert from YUV to RGB:
double[,] YUV2RGB_CONVERT_MATRIX = new double[3, 3]
{
    { 1, 0, 1.4022 },
    { 1, -0.3456, -0.7145 },
    { 1, 1.771, 0 }
};
unsafe Bitmap ConvertYUV2RGB(byte[] YUVFrame, int width, int height)
{
    int uIndex = width * height;
    int vIndex = uIndex + ((width * height) >> 2);
    int gIndex = width * height;
    int bIndex = gIndex * 2;
    byte[] rgbFrame = new byte[uIndex * 3];
    // the picture is pic1; int r, g, b are obtained from the binary RGB color data
    Bitmap bitmap = new Bitmap(width, height);
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            // R component
            int temp = (int)(YUVFrame[y * width + x] + (YUVFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[0, 2]);
            rgbFrame[y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            // G component
            temp = (int)(YUVFrame[y * width + x] + (YUVFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 1] + (YUVFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 2]);
            rgbFrame[gIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            // B component
            temp = (int)(YUVFrame[y * width + x] + (YUVFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[2, 1]);
            rgbFrame[bIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            System.Drawing.Color c = System.Drawing.Color.FromArgb(rgbFrame[y * width + x], rgbFrame[gIndex + y * width + x], rgbFrame[bIndex + y * width + x]);
            bitmap.SetPixel(x, y, c);
        }
    }
    return bitmap;
}
That function works 100%, but it is very slow, for obvious reasons:
System.Drawing.Color c = System.Drawing.Color.FromArgb(rgbFrame[y * width + x], rgbFrame[gIndex + y * width + x], rgbFrame[bIndex + y * width + x]);
bitmap.SetPixel(x, y, c);
Here's the generated image:
So, to avoid calling bitmap.SetPixel(x, y, c) inside the loop, I changed the code to:
unsafe Bitmap ConvertYUV2RGB(byte[] YUVFrame, int width, int height)
{
    int uIndex = width * height;
    int vIndex = uIndex + ((width * height) >> 2);
    int gIndex = width * height;
    int bIndex = gIndex * 2;
    byte[] RGBFrame = new byte[uIndex * 3];
    // the picture is pic1; int r, g, b are obtained from the binary RGB color data
    //Bitmap bitmap = new Bitmap(width, height);
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            // R component
            int temp = (int)(YUVFrame[y * width + x] + (YUVFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[0, 2]);
            RGBFrame[y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            // G component
            temp = (int)(YUVFrame[y * width + x] + (YUVFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 1] + (YUVFrame[vIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[1, 2]);
            RGBFrame[gIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            // B component
            temp = (int)(YUVFrame[y * width + x] + (YUVFrame[uIndex + (y / 2) * (width / 2) + x / 2] - 128) * YUV2RGB_CONVERT_MATRIX[2, 1]);
            RGBFrame[bIndex + y * width + x] = (byte)(temp < 0 ? 0 : (temp > 255 ? 255 : temp));
            // Commented out to avoid calling functions inside the for loop
            // System.Drawing.Color c = System.Drawing.Color.FromArgb(RGBFrame[y * width + x], RGBFrame[gIndex + y * width + x], RGBFrame[bIndex + y * width + x]);
            // bitmap.SetPixel(x, y, c);
        }
    }
    return CreateBitmap(RGBFrame, width, height);
}
private Bitmap CreateBitmap(byte[] RGBFrame, int width, int height)
{
    PixelFormat pxFormat = PixelFormat.Format24bppRgb;
    Bitmap bmp = new Bitmap(width, height, pxFormat);
    BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, pxFormat);
    IntPtr pNative = bmpData.Scan0;
    Marshal.Copy(RGBFrame, 0, pNative, RGBFrame.Length);
    bmp.UnlockBits(bmpData);
    return bmp;
}
But I'm unable to create the image correctly. This is the result:
What is going on here?
I finally resolved my issue (the failing version wrote R, G and B into three separate planes, while Format24bppRgb expects the channels interleaved pixel by pixel) with this function:
unsafe Bitmap ConvertYUV2RGB(byte[] YUVFrame, int width, int height)
{
    int numOfPixel = width * height;
    int positionOfV = numOfPixel;
    int positionOfU = numOfPixel / 4 + numOfPixel;
    byte[] rgb = new byte[numOfPixel * 3];
    int R = 0;
    int G = 1;
    int B = 2;
    for (int i = 0; i < height; i++)
    {
        int startY = i * width;
        int step = (i / 2) * (width / 2);
        int startU = positionOfU + step;
        int startV = positionOfV + step;
        for (int j = 0; j < width; j++)
        {
            int Y = startY + j;
            int U = startU + j / 2;
            int V = startV + j / 2;
            int index = Y * 3;
            double r = ((YUVFrame[Y] & 0xff) + 1.4075 * ((YUVFrame[V] & 0xff) - 128));
            double g = ((YUVFrame[Y] & 0xff) - 0.3455 * ((YUVFrame[U] & 0xff) - 128) - 0.7169 * ((YUVFrame[V] & 0xff) - 128));
            double b = ((YUVFrame[Y] & 0xff) + 1.779 * ((YUVFrame[U] & 0xff) - 128));
            r = (r < 0 ? 0 : r > 255 ? 255 : r);
            g = (g < 0 ? 0 : g > 255 ? 255 : g);
            b = (b < 0 ? 0 : b > 255 ? 255 : b);
            rgb[index + R] = (byte)r;
            rgb[index + G] = (byte)g;
            rgb[index + B] = (byte)b;
        }
    }
    return CreateBitmap(rgb, width, height);
}
And to create a Bitmap from RGB[]:
Bitmap CreateBitmap(byte[] RGBFrame, int width, int height)
{
    PixelFormat pxFormat = PixelFormat.Format24bppRgb;
    Bitmap bmp = new Bitmap(width, height, pxFormat);
    BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, pxFormat);
    IntPtr pNative = bmpData.Scan0;
    Marshal.Copy(RGBFrame, 0, pNative, RGBFrame.Length);
    bmp.UnlockBits(bmpData);
    return bmp;
}
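One caveat: the single Marshal.Copy above assumes bmpData.Stride is exactly width * 3, which only holds when width * 3 is a multiple of 4. For other widths, copying row by row is safer; a minimal sketch (the name CreateBitmapStrideSafe is mine):
Bitmap CreateBitmapStrideSafe(byte[] RGBFrame, int width, int height)
{
    PixelFormat pxFormat = PixelFormat.Format24bppRgb;
    Bitmap bmp = new Bitmap(width, height, pxFormat);
    BitmapData bmpData = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.WriteOnly, pxFormat);
    int rowBytes = width * 3;
    for (int y = 0; y < height; y++)
    {
        // copy one scan line at a time so any stride padding is skipped
        IntPtr rowPtr = IntPtr.Add(bmpData.Scan0, y * bmpData.Stride);
        Marshal.Copy(RGBFrame, y * rowBytes, rowPtr, rowBytes);
    }
    bmp.UnlockBits(bmpData);
    return bmp;
}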
Or a BitmapSource for WPF:
BitmapSource FromArray(byte[] data, int w, int h, int ch)
{
    System.Windows.Media.PixelFormat format = PixelFormats.Default;
    if (ch == 1)
        format = PixelFormats.Gray8; // grayscale image, 0-255
    if (ch == 3)
        format = PixelFormats.Bgr24; // RGB
    if (ch == 4)
        format = PixelFormats.Bgr32; // RGB + one padding byte (Bgr32 ignores alpha; use Bgra32 if you need it)
    WriteableBitmap wbm = new WriteableBitmap(w, h, 96, 96, format, null);
    wbm.WritePixels(new Int32Rect(0, 0, w, h), data, ch * w, 0);
    return wbm;
}
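For example, to show the interleaved buffer from the function above in a WPF Image control (myImage is a hypothetical Image element from the XAML):
// Bgr24 expects bytes in B, G, R order, so the R and B constants in
// ConvertYUV2RGB may need swapping if the colors come out inverted
myImage.Source = FromArray(rgb, width, height, 3);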
I am using the mvfox-igc camera SDK to get images from the camera. My problem is that I have a byte array holding a mono8 image and I want to convert that pixel data into a 24-bit image; my programming language is C#.
Can anybody help me, please? My code looks like this, but it is not doing its work properly:
processedBitmap = new Bitmap(data.request.imageWidth.read(), data.request.imageHeight.read(), PixelFormat.Format24bppRgb);
unsafe
{
    BitmapData bitmapData = processedBitmap.LockBits(new Rectangle(0, 0, processedBitmap.Width, processedBitmap.Height), ImageLockMode.ReadWrite, processedBitmap.PixelFormat);
    int bytesPerPixel = System.Drawing.Bitmap.GetPixelFormatSize(processedBitmap.PixelFormat) / 8;
    int heightInPixels = bitmapData.Height;
    int widthInBytes = bitmapData.Width * bytesPerPixel;
    byte* PtrFirstPixel = (byte*)bitmapData.Scan0;
    byte* firstby = (byte*)data.request.imageData.read();
    int indexx = 0;
    Parallel.For(0, heightInPixels, y =>
    {
        byte* currentLine = PtrFirstPixel + (y * bitmapData.Stride);
        for (int x = 0; x < widthInBytes; x = x + bytesPerPixel)
        {
            currentLine[x] = firstby[indexx];     // ((byte*)data.request.imageData.read())[0]; // (byte)oldBlue;
            currentLine[x + 1] = firstby[indexx]; // ((byte*)data.request.imageData.read())[0]; // (byte)oldGreen;
            currentLine[x + 2] = firstby[indexx]; // ((byte*)data.request.imageData.read())[0]; // (byte)oldRed;
            indexx = indexx + 1;
        }
    });
    processedBitmap.UnlockBits(bitmapData);
}
processedBitmap.Save("bmp.jpg");
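A likely culprit here is the shared indexx counter: Parallel.For runs the rows concurrently, so all threads increment the same variable and each row reads from unpredictable source offsets. A sketch of the loop with a per-row index instead (this assumes the mono8 buffer is tightly packed, width bytes per row):
Parallel.For(0, heightInPixels, y =>
{
    byte* currentLine = PtrFirstPixel + (y * bitmapData.Stride);
    byte* srcLine = firstby + (y * bitmapData.Width); // one byte per mono8 pixel
    int srcX = 0;
    for (int x = 0; x < widthInBytes; x += bytesPerPixel)
    {
        byte gray = srcLine[srcX++];
        currentLine[x] = gray;     // blue
        currentLine[x + 1] = gray; // green
        currentLine[x + 2] = gray; // red
    }
});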
Using the Sobel edge detector code below, I find that the output bitmap has a diagonal line of zero values superimposed over the detected edges if the input bitmap has a width not divisible by 4. The red square marked in the output bitmap at coordinates (80, 80) is broken up and incorrectly placed in this case. Why is this, and how can I make the code work with any bitmap width?
private Bitmap SobelEdgeDetect2(Bitmap original, byte Threshold = 128)
{
    // https://stackoverflow.com/questions/16747257/edge-detection-with-lockbits-c-sharp
    int width = original.Width;
    int height = original.Height;
    int BitsPerPixel = Image.GetPixelFormatSize(original.PixelFormat);
    int OneColorBits = BitsPerPixel / 8;
    BitmapData bmpData = original.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, original.PixelFormat);
    int position;
    int[,] gx = new int[,] { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
    int[,] gy = new int[,] { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
    Bitmap dstBmp = new Bitmap(width, height, original.PixelFormat);
    BitmapData dstData = dstBmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, dstBmp.PixelFormat);
    int byteCount = dstData.Stride * dstBmp.Height;
    byte[] input = new byte[byteCount];
    byte[] processed = new byte[byteCount];
    IntPtr ptr = bmpData.Scan0;
    IntPtr dst = dstData.Scan0;
    Marshal.Copy(ptr, input, 0, input.Length);
    Marshal.Copy(dst, processed, 0, input.Length);
    int BlackPoints = 0;
    int WhitePoints = 0;
    for (int i = 1; i < height - 1; i++) // y
    {
        for (int j = 1; j < width - 1; j++) // x
        {
            int NewX = 0, NewY = 0;
            for (int ii = 0; ii < 3; ii++)
            {
                for (int jj = 0; jj < 3; jj++)
                {
                    int I = i + ii - 1;
                    int J = j + jj - 1;
                    byte Current = input[(I * (width) + J) * OneColorBits];
                    NewX += gx[ii, jj] * Current;
                    NewY += gy[ii, jj] * Current;
                }
            }
            position = (i * (width) + j) * OneColorBits;
            if (NewX * NewX + NewY * NewY > Threshold * Threshold)
            {
                processed[position] = 255;
                processed[position + 1] = 255;
                processed[position + 2] = 255;
                WhitePoints++;
            }
            else
            {
                processed[position] = 0;
                processed[position + 1] = 0;
                processed[position + 2] = 0;
                BlackPoints++;
            }
            if (j >= 78 && j <= 82 && i >= 78 && i <= 82)
            {
                processed[position] = 0;
                processed[position + 1] = 0;
                processed[position + 2] = 255;
            }
        }
    }
    Marshal.Copy(processed, 0, dst, input.Length);
    dstBmp.UnlockBits(dstData);
    original.UnlockBits(bmpData); // unlock the source bitmap as well
    return dstBmp;
}
For a 201 pixel wide bitmap, dstData.Stride was 604. For a 200 pixel wide bitmap, dstData.Stride was 612, which explains why the width had to be divisible by 4 for my code.
Replacing
position = (i * (width) + j) * OneColorBits;
by
position = i * dstData.Stride + j * OneColorBits;
and
byte Current = input[(I * (width) + J) * OneColorBits];
by
byte Current = input[I * dstData.Stride + J * OneColorBits];
fixed the problem.
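The padding comes from GDI+ rounding every scan line up to the next multiple of 4 bytes, so the stride can also be predicted without calling LockBits:
int stride = ((width * bytesPerPixel + 3) / 4) * 4; // e.g. 201 px * 3 bytes = 603, rounded up to 604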
I'm making a convolution filter for my project, and I managed to make it work for any size of matrix, but as the matrix gets bigger I noticed that not all pixels are changed.
Here are the pictures showing the problem:
First one is the original
Filter: Blur 9x9
Filter: EdgeDetection 9x9:
As you can see, there is a little stripe that is never changed, and as the matrix gets bigger the stripe also gets bigger (with a 3x3 it wasn't visible).
My convolution matrix class:
public class ConvMatrix
{
    public int Factor = 1;
    public int Height, Width;
    public int Offset = 0;
    public int[,] Arr;
    // later I assign functions to set these variables
    ...
}
The filter function:
Bitmap Conv3x3(Bitmap b, ConvMatrix m)
{
    if (0 == m.Factor)
        return b;
    Bitmap bSrc = (Bitmap)b.Clone();
    BitmapData bmData = b.LockBits(new Rectangle(0, 0, b.Width, b.Height),
        ImageLockMode.ReadWrite,
        PixelFormat.Format24bppRgb);
    BitmapData bmSrc = bSrc.LockBits(new Rectangle(0, 0, bSrc.Width, bSrc.Height),
        ImageLockMode.ReadWrite,
        PixelFormat.Format24bppRgb);
    int stride = bmData.Stride;
    System.IntPtr Scan0 = bmData.Scan0;
    System.IntPtr SrcScan0 = bmSrc.Scan0;
    unsafe
    {
        byte* p = (byte*)(void*)Scan0;
        byte* pSrc = (byte*)(void*)SrcScan0;
        int nOffset = stride - b.Width * m.Width;
        int nWidth = b.Width - (m.Size - 1);
        int nHeight = b.Height - (m.Size - 2);
        int nPixel = 0;
        for (int y = 0; y < nHeight; y++)
        {
            for (int x = 0; x < nWidth; x++)
            {
                for (int r = 0; r < m.Height; r++)
                {
                    nPixel = 0;
                    for (int i = 0; i < m.Width; i++)
                        for (int j = 0; j < m.Height; j++)
                        {
                            nPixel += (pSrc[(m.Width * (i + 1)) - 1 - r + stride * j] * m.Arr[j, i]);
                        }
                    nPixel /= m.Factor;
                    nPixel += m.Offset;
                    if (nPixel < 0) nPixel = 0;
                    if (nPixel > 255) nPixel = 255;
                    p[(m.Width * (m.Height / 2 + 1)) - 1 - r + stride * (m.Height / 2)] = (byte)nPixel;
                }
                p += m.Width;
                pSrc += m.Width;
            }
            p += nOffset;
            pSrc += nOffset;
        }
    }
    b.UnlockBits(bmData);
    bSrc.UnlockBits(bmSrc);
    return b;
}
Please help
The problem is that your code explicitly stops short of the edges. The calculation of the limits for your outer loops (nWidth and nHeight) shouldn't involve the size of the matrix; they should be equal to the size of your bitmap.
When you do this, if you imagine what happens when you lay the center point of your matrix over each pixel (because you need to read from all sides of the pixel), the matrix will partially be outside the image near the edges.
There are a few approaches for what to do near the edges, but a reasonable one is to clamp the coordinates to the edges. I.e. when you would end up reading a pixel from outside the bitmap, just take the nearest pixel on the edge (side or corner).
I also don't understand why you need five loops; you seem to be looping through the height of the matrix twice. That doesn't look right. All in all, the general structure should be something like this:
for (int y = 0; y < bitmap.Height; y++) {
    for (int x = 0; x < bitmap.Width; x++) {
        int sum = 0;
        // inclusive bounds so the full kernel is covered (assumes odd-sized matrices)
        for (int matrixY = -matrix.Height/2; matrixY <= matrix.Height/2; matrixY++)
            for (int matrixX = -matrix.Width/2; matrixX <= matrix.Width/2; matrixX++) {
                // these coordinates will be outside the bitmap near all edges
                int sourceX = x + matrixX;
                int sourceY = y + matrixY;
                if (sourceX < 0)
                    sourceX = 0;
                if (sourceX >= bitmap.Width)
                    sourceX = bitmap.Width - 1;
                if (sourceY < 0)
                    sourceY = 0;
                if (sourceY >= bitmap.Height)
                    sourceY = bitmap.Height - 1;
                sum += source[sourceX, sourceY];
            }
        // factor and clamp sum
        destination[x, y] = sum;
    }
}
You might need an extra loop to handle each color channel, which needs to be processed separately. I couldn't immediately see where in your code you might be doing that among all the cryptic variables.
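For a 24bpp buffer the channel handling can be folded into the indexing. A minimal sketch of the inner read, assuming source is the raw byte buffer, stride its row length in bytes, and c the channel index (0, 1 or 2) with one convolution pass per channel:
// read channel c of the clamped neighbour pixel in a 24bpp buffer
sum += source[sourceY * stride + sourceX * 3 + c];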
I am doing image processing, and I have a 3D array with the RGB values of an image that I am trying to convert into YCbCr (I made a copy of the RGB array and called it arrayycbcr):
public static void rgb2ycbcr(System.Drawing.Bitmap bmp, ref byte[, ,] arrayrgb, ref byte[, ,] arrayycbcr)
{
    byte Y;
    byte Cb;
    byte Cr;
    for (int i = 1; i < (bmp.Height + 1); i++) //don't worry about bmp.height/width+2 its for my project
    {
        for (int j = 1; j < (bmp.Width + 1); j++)
        {
            byte R = arrayrgb[i, j, 0];
            byte G = arrayrgb[i, j, 1];
            byte B = arrayrgb[i, j, 2];
            Y = (byte)((0.257 * R) + (0.504 * G) + (0.098 * B) + 16);
            Cb = (byte)(-(0.148 * R) - (0.291 * G) + (0.439 * B) + 128);
            Cr = (byte)((0.439 * R) - (0.368 * G) - (0.071 * B) + 128);
            arrayycbcr[i, j, 0] = Y;
            arrayycbcr[i, j, 1] = Cb;
            arrayycbcr[i, j, 2] = Cr;
        }
    }
}
The problem is that I am not getting the same values for YCbCr as I would get in MATLAB when I use rgb2ycbcr. Is there something missing in my code?
Faster and more accurate code. Output values are between 0 and 255 (the JPEG formula):
width = bmp.Width;
height = bmp.Height;
yData = new byte[width, height]; // luma (Y)
bData = new byte[width, height]; // Cb
rData = new byte[width, height]; // Cr
unsafe
{
    BitmapData bitmapData = bmp.LockBits(new Rectangle(0, 0, width, height), ImageLockMode.ReadWrite, bmp.PixelFormat);
    int heightInPixels = bitmapData.Height;
    int widthInBytes = width * 3;
    byte* ptrFirstPixel = (byte*)bitmapData.Scan0;
    // Convert to YCbCr (JPEG full-range formulas)
    for (int y = 0; y < heightInPixels; y++)
    {
        byte* currentLine = ptrFirstPixel + (y * bitmapData.Stride);
        for (int x = 0; x < width; x++)
        {
            int xPor3 = x * 3;
            float blue = currentLine[xPor3++];
            float green = currentLine[xPor3++];
            float red = currentLine[xPor3];
            yData[x, y] = (byte)((0.299 * red) + (0.587 * green) + (0.114 * blue));
            bData[x, y] = (byte)(128 - (0.168736 * red) - (0.331264 * green) + (0.5 * blue));
            rData[x, y] = (byte)(128 + (0.5 * red) - (0.418688 * green) - (0.081312 * blue));
        }
    }
    bmp.UnlockBits(bitmapData);
}
Convert R/G/B to uint before assigning:
uint R = ConvertToUint(arrayrgb[i, j, 0]);
uint G = ConvertToUint(arrayrgb[i, j, 1]);
uint B = ConvertToUint(arrayrgb[i, j, 2]);
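A further difference worth checking: casting a double to byte in C# truncates toward zero, while MATLAB's rgb2ycbcr rounds when producing uint8 output, so results can differ by one. A hedged one-line fix for the Y computation (Cb and Cr would be handled the same way):
Y = (byte)Math.Round((0.257 * R) + (0.504 * G) + (0.098 * B) + 16);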