I am trying to merge two textures into one in Unity.
The first texture is from a WebCamTexture.
The second is from a sprite, using: gameObject.GetComponent<SpriteRenderer>().sprite.texture as Texture2D
I'm having problems writing the function. This is what I have so far:
public static Texture2D CombineTextures(GameObject obj, Texture2D background, Texture2D TodrawLogo)
{
    Vector3 v = obj.transform.position; // obj is the TodrawLogo GameObject
    int width = TodrawLogo.width;
    int height = TodrawLogo.height;
    // Note: this loop only ever writes a single row of pixels, at y = (int)v.y.
    for (int x = (int)v.x; x < width; x++)
    {
        background.SetPixel(x, (int)v.y, TodrawLogo.GetPixel(x, (int)v.y));
    }
    background.Apply();
    return background;
}
This is what I am trying to do (the example images are omitted here): the WebcamTexture is shown on a 3D plane and the logo is a single sprite; the result texture should be the webcam image with the logo drawn on top.
But sadly my function doesn't work. Does anyone know how to fix this?
I know that I should find the exact coordinates of the to-draw image and set the pixels there, but I can't figure out how.
Much appreciated.
EDIT:
I tried to use nexx's code:
public static Texture2D CombineTexture(Texture2D background, Texture2D TodrawLogo)
{
    int width = TodrawLogo.width;
    int height = TodrawLogo.height;
    int backWidth = background.width;
    int backHeight = background.height;
    // bottom right corner
    int startX = backWidth - width;
    int startY = backHeight - height;
    // loop through texture
    int y = 0;
    while (y < backHeight)
    {
        int x = 0;
        while (x < backWidth)
        {
            // set normal pixels
            background.SetPixel(x, y, background.GetPixel(x, y));
            // if we are at bottom right apply logo
            // TODO also check alpha, if there is no alpha apply it!
            if (x >= startX && y < backHeight - startY)
                background.SetPixel(x, y, TodrawLogo.GetPixel(x - startX, y - startY));
            ++x;
        }
        ++y;
    }
    background.Apply();
    return background;
}
But this is the resulting image I get (omitted).
I am stuck at this. Can someone please tell me what I am doing wrong?
Here's a working example. I tested it!
public Texture2D AddWatermark(Texture2D background, Texture2D watermark)
{
    // Draws the watermark across the top of the background
    // (assumes the watermark is as wide as the background).
    int startX = 0;
    int startY = background.height - watermark.height;
    for (int x = startX; x < background.width; x++)
    {
        for (int y = startY; y < background.height; y++)
        {
            Color bgColor = background.GetPixel(x, y);
            Color wmColor = watermark.GetPixel(x - startX, y - startY);
            // Blend by the watermark's alpha.
            Color finalColor = Color.Lerp(bgColor, wmColor, wmColor.a);
            background.SetPixel(x, y, finalColor);
        }
    }
    background.Apply();
    return background;
}
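Since SetPixel/GetPixel only work on a Texture2D, the webcam frame has to be copied out of the WebCamTexture first. A minimal sketch, assuming webcam is a playing WebCamTexture and logoTexture is a readable Texture2D (both names are placeholders):

// Copy the current webcam frame into a writable Texture2D.
Texture2D frame = new Texture2D(webcam.width, webcam.height);
frame.SetPixels(webcam.GetPixels());
frame.Apply();
// Then blend the logo on top with the method above.
Texture2D result = AddWatermark(frame, logoTexture);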
This code works perfectly with two images that are not the same size
public static Texture2D AddWatermark(Texture2D background, Texture2D watermark, int startX, int startY)
{
    Texture2D newTex = new Texture2D(background.width, background.height, background.format, false);
    for (int x = 0; x < background.width; x++)
    {
        for (int y = 0; y < background.height; y++)
        {
            // Inside the watermark rectangle? (bounds are offset by the start position)
            if (x >= startX && y >= startY && x < startX + watermark.width && y < startY + watermark.height)
            {
                Color bgColor = background.GetPixel(x, y);
                Color wmColor = watermark.GetPixel(x - startX, y - startY);
                // Blend by the watermark's alpha.
                Color finalColor = Color.Lerp(bgColor, wmColor, wmColor.a);
                newTex.SetPixel(x, y, finalColor);
            }
            else
            {
                newTex.SetPixel(x, y, background.GetPixel(x, y));
            }
        }
    }
    newTex.Apply();
    return newTex;
}
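Hypothetical usage, assuming webcamTex2D and logoTexture are readable Texture2D instances (names are placeholders). This draws the logo with its lower-left corner 10 pixels in from the texture's lower-left corner:

Texture2D combined = AddWatermark(webcamTex2D, logoTexture, 10, 10);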
I made some changes to your CombineTextures method:
public static Texture2D CombineTexture(GameObject obj, Texture2D background, Texture2D TodrawLogo)
{
    int width = TodrawLogo.width;
    int height = TodrawLogo.height;
    int backWidth = background.width;
    int backHeight = background.height;
    // bottom right corner
    int startX = backWidth - width;
    int startY = backHeight - height;
    // loop through texture
    int y = 0;
    while (y < backHeight)
    {
        int x = 0;
        while (x < backWidth)
        {
            // set normal pixels
            background.SetPixel(x, y, background.GetPixel(x, y));
            // if we are at bottom right apply logo
            // TODO also check alpha, if there is no alpha apply it!
            if (x >= startX && y < backHeight - startY)
                background.SetPixel(x, y, TodrawLogo.GetPixel(x - startX, y - startY));
            ++x;
        }
        ++y;
    }
    background.Apply();
    return background;
}
You can change the values inside the while loop to place your texture where you want.
I don't know exactly how you would like to do it, but you can use Unity's OnGUI method to draw the logo where needed.
http://docs.unity3d.com/ScriptReference/MonoBehaviour.OnGUI.html
It's as simple as something like this:
var aTexture : Texture;

function OnGUI() {
    if (!aTexture) {
        Debug.LogError("Assign a Texture in the inspector.");
        return;
    }
    GUI.DrawTexture(Rect(10, 10, 60, 60), aTexture, ScaleMode.ScaleToFit, true, 10.0f);
}
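Since the rest of this thread is C#, here is the same snippet as an untested C# sketch:

using UnityEngine;

public class LogoOverlay : MonoBehaviour
{
    public Texture aTexture;

    void OnGUI()
    {
        if (!aTexture)
        {
            Debug.LogError("Assign a Texture in the inspector.");
            return;
        }
        // Rect is (x, y, width, height); the last argument is the aspect ratio.
        GUI.DrawTexture(new Rect(10, 10, 60, 60), aTexture, ScaleMode.ScaleToFit, true, 10.0f);
    }
}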
I hope this was helpful.
Try this (it's a modification of nexx's code). It combines two textures into a new texture. The new texture is guaranteed to be writable (otherwise SetPixel/GetPixel won't work).
Also, it assumes that the watermark texture is smaller than the background texture.
Please note: I've not tested this.
public static Texture2D AddWatermark(Texture2D background, Texture2D watermark)
{
    // Create a new writable texture.
    Texture2D result = new Texture2D(background.width, background.height);
    // Draw watermark at bottom right corner.
    int startX = background.width - watermark.width;
    int startY = background.height - watermark.height;
    for (int x = 0; x < background.width; x++)
    {
        for (int y = 0; y < background.height; y++)
        {
            Color bgColor = background.GetPixel(x, y);
            Color wmColor = new Color(0, 0, 0, 0);
            // Change this test if no longer drawing at the bottom right corner.
            if (x >= startX && y >= startY)
            {
                // Sample the watermark in its own local coordinates.
                wmColor = watermark.GetPixel(x - startX, y - startY);
            }
            // Alpha-blend background and watermark color
            // (treats the watermark color as premultiplied by its alpha).
            Color blended = bgColor * (1.0f - wmColor.a) + wmColor;
            blended.a = 1.0f;
            result.SetPixel(x, y, blended);
        }
    }
    result.Apply();
    return result;
}
A slightly more optimized solution, reading and writing only the area required for the watermark:
public static Texture2D AddWatermark(Texture2D background, Texture2D watermark, int startPositionX, int startPositionY)
{
    // Only read and rewrite the area covered by the watermark.
    for (int x = startPositionX; x < background.width; x++)
    {
        for (int y = startPositionY; y < background.height; y++)
        {
            if (x - startPositionX < watermark.width && y - startPositionY < watermark.height)
            {
                var bgColor = background.GetPixel(x, y);
                var wmColor = watermark.GetPixel(x - startPositionX, y - startPositionY);
                var finalColor = Color.Lerp(bgColor, wmColor, wmColor.a);
                background.SetPixel(x, y, finalColor);
            }
        }
    }
    background.Apply();
    return background;
}
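If per-pixel SetPixel/GetPixel is still too slow (for example when blending every webcam frame), the same blend can be done with Unity's block pixel APIs. A sketch under the assumption that the watermark rectangle lies fully inside the background:

// Read the affected background block and the whole watermark in one call each.
Color[] bg = background.GetPixels(startPositionX, startPositionY, watermark.width, watermark.height);
Color[] wm = watermark.GetPixels();
for (int i = 0; i < wm.Length; i++)
{
    bg[i] = Color.Lerp(bg[i], wm[i], wm[i].a); // same alpha blend as above
}
// Write the blended block back and upload to the GPU.
background.SetPixels(startPositionX, startPositionY, watermark.width, watermark.height, bg);
background.Apply();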
Related
I could use just a little help. I am loading a PNG into a Texture2D and have managed to flip it over the y axis using the following script I found. I now need to flip it over the x axis. I know a small modification should do it, but I have not managed to get the results I want.
Texture2D FlipTexture(Texture2D original)
{
    Texture2D flipped = new Texture2D(original.width, original.height);
    int xN = original.width;
    int yN = original.height;
    for (int i = 0; i < xN; i++)
    {
        for (int j = 0; j < yN; j++)
        {
            flipped.SetPixel(xN - i - 1, j, original.GetPixel(i, j));
        }
    }
    flipped.Apply();
    return flipped;
}
say "pix" is a png,
Texture2D photo;
Color[] pix = photo.GetPixels(startAcross,0, 256,256);
// (256 is just an example size)
this ENTIRELY ROTATES a png 180 degrees
System.Array.Reverse(pix, 0, pix.Length);
this mirrors a PNG just around the upright axis
for(int row=0;row<256;++row)
System.Array.Reverse(pix, row*256, 256);
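Combining the two gives an upside-down flip, since a 180-degree rotation followed by a horizontal mirror cancels out everything except the vertical flip. A sketch under the same 256x256 assumption:

System.Array.Reverse(pix, 0, pix.Length);      // rotate 180 degrees
for (int row = 0; row < 256; ++row)
    System.Array.Reverse(pix, row * 256, 256); // mirror back horizontally
// 'pix' now holds the original image flipped upside down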
Texture2D FlipTexture(Texture2D original, bool upSideDown = true)
{
    Texture2D flipped = new Texture2D(original.width, original.height);
    int xN = original.width;
    int yN = original.height;
    for (int x = 0; x < xN; x++)
    {
        for (int y = 0; y < yN; y++)
        {
            if (upSideDown)
            {
                // flip vertically
                flipped.SetPixel(x, yN - y - 1, original.GetPixel(x, y));
            }
            else
            {
                // flip horizontally
                flipped.SetPixel(xN - x - 1, y, original.GetPixel(x, y));
            }
        }
    }
    flipped.Apply();
    return flipped;
}
To call it:
FlipTexture(camTexture, true); //Upside down
FlipTexture(camTexture, false); //Sideways
This flips the texture upside down:
int width = texture.width;
int height = texture.height;
Texture2D snap = new Texture2D(width, height);
Color[] pixels = texture.GetPixels();
Color[] pixelsFlipped = new Color[pixels.Length];
for (int i = 0; i < height; i++)
{
    // copy row i into row (height - i - 1)
    Array.Copy(pixels, i * width, pixelsFlipped, (height - i - 1) * width, width);
}
snap.SetPixels(pixelsFlipped);
snap.Apply();
I am developing a screen sharing app and I would like to make it as efficient as possible, so I am trying to send only the differences between screenshots.
So, suppose we have this image for example (omitted): it's a 32bpp RGBA image with transparent parts around the content.
I would like to store each of the blocks here as a rectangle in a List and get their bounds. It may sound very complex, but actually it just requires a little logic.
This is my code so far:
private unsafe List<Rectangle> CodeImage(Bitmap bmp)
{
    List<Rectangle> rec = new List<Rectangle>();
    Bitmap bmpRes = new Bitmap(bmp.Width, bmp.Height);
    BitmapData bmData = bmp.LockBits(new Rectangle(0, 0, bmp.Width, bmp.Height), System.Drawing.Imaging.ImageLockMode.ReadOnly, bmp.PixelFormat);
    IntPtr scan0 = bmData.Scan0;
    int stride = bmData.Stride;
    int nWidth = bmp.Width;
    int nHeight = bmp.Height;
    int minX = int.MaxValue;
    int minY = int.MaxValue;
    int maxX = 0;
    bool found = false;
    for (int y = 0; y < bmp.Height; y++)
    {
        byte* p = (byte*)scan0.ToPointer();
        p += y * stride;
        for (int x = 0; x < bmp.Width; x++)
        {
            if (p[3] != 0) // check if pixel is not transparent
            {
                found = true;
                if (x < minX)
                    minX = x;
                if (x > maxX)
                    maxX = x;
                if (y < minY)
                    minY = y;
            }
            else
            {
                if (found)
                {
                    int height = getBlockHeight(stride, scan0, maxX, minY);
                    found = false;
                    Rectangle temp = new Rectangle(minX, minY, maxX - minX, height);
                    rec.Add(temp);
                    y += minY;
                    break;
                }
            }
            p += 4; // advance the pointer 4 bytes (one 32bpp pixel)
        }
    }
    return rec;
}
As you can see, I am trying to scan the image using the height and width, and when I find a pixel I send it to the getBlockHeight function to get its height:
public unsafe int getBlockHeight(int stride, IntPtr scan, int x, int y1)
{
    int height = 0;
    for (int y = y1; y < 1080; y++)
    {
        byte* p = (byte*)scan.ToPointer();
        p += (y * stride) + (x * 4);
        if (p[3] != 0) // check if pixel is not transparent
        {
            height++;
        }
    }
    return height;
}
But I am just not getting the result... I think there is something wrong with the logic here. Can anyone open my eyes? I know it requires a bit of time and thinking, but I would very much appreciate anyone who could help a little.
In your current algorithm, after successfully matching a rectangle, you increase y with its height and break out of the inner loop. This means you can only detect data for one rectangle per horizontal line.
If I were you I'd think about the following things, before jumping back into the code:
Save the complete image as a PNG file, and look at its size. Is further processing really required?
Are these rectangles accurate? Will there be scenarios in which you would be constantly sending the contents of the entire screen anyway?
If you're developing for Windows, you might be able to hook into the procedure that invalidates areas on the screen, in which case you wouldn't have to determine these rectangles yourself. I don't know about other OSes.
Also I personally wouldn't try to solve the rectangle-detection algorithm in a "nesty" for-loop, but go with something like this:
public void FindRectangles(Bitmap bitmap, Rectangle searchArea, List<Rectangle> results)
{
    // Find the first non-transparent pixel within the search area.
    // Ensure that it is the pixel with the lowest y-value possible.
    Point p;
    if (!TryFindFirstNonTransparent(bitmap, searchArea, out p))
    {
        // No non-transparent pixels in this area.
        return;
    }
    // Find its rectangle within the area.
    Rectangle r = GetRectangle(bitmap, p, searchArea);
    results.Add(r);
    // No need to search above the rectangle we just found.
    Rectangle left = new Rectangle(searchArea.Left, r.Top, r.Left - searchArea.Left, searchArea.Bottom - r.Top);
    Rectangle right = new Rectangle(r.Right, r.Top, searchArea.Right - r.Right, searchArea.Bottom - r.Top);
    Rectangle bottom = new Rectangle(r.Left, r.Bottom, r.Width, searchArea.Bottom - r.Bottom);
    FindRectangles(bitmap, left, results);
    FindRectangles(bitmap, right, results);
    FindRectangles(bitmap, bottom, results);
}
public Rectangle GetRectangle(Bitmap bitmap, Point p, Rectangle searchArea)
{
    int right = searchArea.Right;
    for (int x = p.X; x < searchArea.Right; x++)
    {
        if (IsTransparent(x, p.Y))
        {
            right = x - 1;
            break;
        }
    }
    int bottom = searchArea.Bottom;
    for (int y = p.Y; y < searchArea.Bottom; y++)
    {
        if (IsTransparent(p.X, y))
        {
            bottom = y - 1;
            break;
        }
    }
    return new Rectangle(p.X, p.Y, right - p.X, bottom - p.Y);
}
This approach, when fully implemented, should give you a list of rectangles (although it will occasionally split a rectangle in two).
(Of course instead of providing the bitmap, you'd pass the pointer to the pixel data with some metadata instead)
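TryFindFirstNonTransparent and IsTransparent are not defined in the answer (GetRectangle even calls IsTransparent without passing the bitmap, so there it is presumably a field). A minimal sketch of what they might look like, using GetPixel for clarity rather than a faster LockBits version:

private static bool IsTransparent(Bitmap bitmap, int x, int y)
{
    // A pixel counts as transparent when its alpha channel is zero.
    return bitmap.GetPixel(x, y).A == 0;
}

private static bool TryFindFirstNonTransparent(Bitmap bitmap, Rectangle area, out Point p)
{
    // Scan row by row so the first hit has the lowest possible y-value.
    for (int y = area.Top; y < area.Bottom; y++)
    {
        for (int x = area.Left; x < area.Right; x++)
        {
            if (!IsTransparent(bitmap, x, y))
            {
                p = new Point(x, y);
                return true;
            }
        }
    }
    p = Point.Empty;
    return false;
}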
I am trying to create a program that accepts an image, goes through each pixel, normalizes it, and re-creates a NEW image that looks the same as the original but has normalized pixels instead.
public void parseJpeg(String jpegPath)
{
    var normalizedRed = 0.0;
    var normalizedGreen = 0.0;
    var normalizedBlue = 0.0;
    Bitmap normalizedImage = null;
    var image = new Bitmap(jpegPath);
    normalizedImage = new Bitmap(image.Width, image.Height);
    for (int x = 0; x < image.Width; ++x)
    {
        for (int y = 0; y < image.Height; ++y)
        {
            Color color = image.GetPixel(x, y);
            double exponent = 2;
            double redDouble = Convert.ToDouble(color.R);
            double blueDouble = Convert.ToDouble(color.B);
            double greenDouble = Convert.ToDouble(color.G);
            double redResult = Math.Pow(redDouble, exponent);
            double blueResult = Math.Pow(blueDouble, exponent);
            double greenResult = Math.Pow(greenDouble, exponent);
            double totalResult = redResult + blueResult + greenResult;
            normalizedRed = Convert.ToDouble(color.R) / Math.Sqrt(totalResult);
            normalizedGreen = Convert.ToDouble(color.G) / Math.Sqrt(totalResult);
            normalizedBlue = Convert.ToDouble(color.B) / Math.Sqrt(totalResult);
            Color newCol = Color.FromArgb(Convert.ToInt32(normalizedRed), Convert.ToInt32(normalizedGreen), Convert.ToInt32(normalizedBlue));
            normalizedImage.SetPixel(x, y, newCol);
        }
    }
    normalizedImage.Save("C:\\Users\\username\\Desktop\\test1.jpeg");
    resultsViewBox.AppendText("Process completed.\n");
}
Using the above code produces all black pixels, and I do not understand why. When it normalizes, it sets RGB = 1. After normalization, how do I set pixels with the NEW normalized value?
When I run the code below, I get a black and blue image in my preview, but when I open the file it's blank. This is better than what I was getting before, which was ALL black pixels. It only works on one image though, so I am not sure how much of a step forward it is.
public void parseJpeg(String jpegPath)
{
    Bitmap normalizedImage = null;
    var image = new Bitmap(jpegPath);
    normalizedImage = new Bitmap(image.Width, image.Height);
    for (int x = 0; x < image.Width; ++x)
    {
        for (int y = 0; y < image.Height; ++y)
        {
            Color color = image.GetPixel(x, y);
            float norm = (float)System.Math.Sqrt(color.R * color.R + color.B * color.B + color.G * color.G);
            Color newCol = Color.FromArgb(Convert.ToInt32(norm));
            normalizedImage.SetPixel(x, y, newCol);
        }
    }
    normalizedImage.Save("C:\\Users\\username\\Desktop\\test1.jpeg");
    resultsViewBox.AppendText("Process completed.\n");
}
I found the code for what I was trying to do:
http://www.lukehorvat.com/blog/normalizing-image-brightness-in-csharp/
public void parseJpeg(String jpegPath)
{
    // normalizedImage, minBrightness and maxBrightness are fields
    // declared elsewhere (see the linked post).
    var image = new Bitmap(jpegPath);
    normalizedImage = new Bitmap(image.Width, image.Height);
    for (int x = 0; x < image.Width; ++x)
    {
        for (int y = 0; y < image.Height; ++y)
        {
            float pixelBrightness = image.GetPixel(x, y).GetBrightness();
            minBrightness = Math.Min(minBrightness, pixelBrightness);
            maxBrightness = Math.Max(maxBrightness, pixelBrightness);
        }
    }
    for (int x = 0; x < image.Width; x++)
    {
        for (int y = 0; y < image.Height; y++)
        {
            Color pixelColor = image.GetPixel(x, y);
            float normalizedPixelBrightness = (pixelColor.GetBrightness() - minBrightness) / (maxBrightness - minBrightness);
            Color normalizedPixelColor = ColorConverter.ColorFromAhsb(pixelColor.A, pixelColor.GetHue(), pixelColor.GetSaturation(), normalizedPixelBrightness);
            normalizedImage.SetPixel(x, y, normalizedPixelColor);
        }
    }
    normalizedImage.Save("C:\\Users\\username\\Desktop\\test1.jpeg");
    resultsViewBox.AppendText("Process completed.\n");
}
You are creating a new Bitmap and saving over the file for every pixel in your image. Move the
normalizedImage = new Bitmap(image.Width, image.Height);
line to before your loops, and the
normalizedImage.Save("C:\\Users\\username\\Desktop\\test1.jpeg");
line to after your loops.
Your normalization algorithm does not appear to be correct. Let's say your original color was red (255,0,0). Then your totalResult will be 65025, and your normalizedRed will be 255/sqrt(65025), which is 1, giving you a new normalized color of (1,0,0), which is essentially black.
Just as a note, your code will run a bit faster if you declare the doubles once outside the loop and then assign them within the loop, rather than defining and discarding each of the 8 doubles on every iteration.
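If the goal is a visible image rather than unit-length color vectors, one option (my suggestion, not from the original post) is to scale the normalized components back into the 0-255 range, reusing the variables from the question's loop:

// Sketch: chromaticity normalization scaled back to a displayable range.
double len = Math.Sqrt(totalResult);
if (len == 0) len = 1; // avoid dividing by zero on black pixels
Color newCol = Color.FromArgb(
    Convert.ToInt32(255 * redDouble / len),
    Convert.ToInt32(255 * greenDouble / len),
    Convert.ToInt32(255 * blueDouble / len));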
Instead of manipulating the colors directly, you should use the brightness or luminosity factor to achieve normalization. Here is a link to an already-answered question that can help you; you can convert each RGB pixel to HSL and manipulate the L factor:
How do I normalize an image?
The code that you shared is actually a trimmed-down version of HSL manipulation.
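For reference, System.Drawing.Color already exposes the HSL components directly; converting back to RGB needs a helper such as the ColorFromAhsb used in the question's last snippet:

Color c = bmp.GetPixel(x, y);
float hue = c.GetHue();               // 0-360 degrees
float saturation = c.GetSaturation(); // 0-1
float lightness = c.GetBrightness();  // 0-1, the "L" in HSL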
I'm currently using the following to apply a texture to a polygon formed by a TriangleList:
public static VertexPositionColorTexture[] TextureMapping(VertexPositionColorTexture[] vertices, float xScale, float yScale)
{
    bool initialized = false;
    float x, y;
    float lX = 0, hX = 0, lY = 0, hY = 0;
    for (int i = 0; i < vertices.Length; i++)
    {
        x = vertices[i].Position.X;
        y = vertices[i].Position.Y;
        if (!initialized)
        {
            // Seed the min/max bounds with the first vertex.
            lX = x;
            hX = x;
            lY = y;
            hY = y;
            initialized = true;
        }
        else
        {
            if (x > hX)
            {
                hX = x;
            }
            else if (x < lX)
            {
                lX = x;
            }
            if (y > hY)
            {
                hY = y;
            }
            else if (y < lY)
            {
                lY = y;
            }
        }
    }
    float width = (Math.Abs(lX) + Math.Abs(hX)) / xScale;
    float height = (Math.Abs(lY) + Math.Abs(hY)) / yScale;
    for (int i = 0; i < vertices.Length; i++)
    {
        vertices[i].TextureCoordinate.X = vertices[i].Position.X / width;
        vertices[i].TextureCoordinate.Y = vertices[i].Position.Y / height;
    }
    return vertices;
}
This currently works fine for a polygon whose points all have Z = 0 (example: (0,0,0) (0,10,0) (10,10,0) (10,0,0)), but it doesn't work for any that are rotated or not flat along the Z axis (example: (0,0,0) (0,0,10) (0,10,10) (0,10,0)). The only solution I have come up with is to take the plane that the polygon lies on (it will always be flat) and somehow rotate or translate the vertices in the above method to flatten it onto the XY plane, which would allow the correct height and width to be determined. Can anyone point me in the right direction, or suggest something else?
Solved this myself by re-writing the method to rotate the polygon onto the Z plane.
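The answer above doesn't include code; what follows is only my illustration of the idea, assuming XNA's Vector3 and a planar polygon whose first three vertices are not collinear:

// Build an orthonormal basis (u, v) inside the polygon's plane.
Vector3 origin = vertices[0].Position;
Vector3 u = Vector3.Normalize(vertices[1].Position - origin);
Vector3 normal = Vector3.Normalize(Vector3.Cross(u, vertices[2].Position - origin));
Vector3 v = Vector3.Cross(normal, u);

// Project each vertex into 2D plane coordinates, then run the existing
// min/max and scaling logic on (px, py) instead of Position.X/Position.Y.
for (int i = 0; i < vertices.Length; i++)
{
    float px = Vector3.Dot(vertices[i].Position - origin, u);
    float py = Vector3.Dot(vertices[i].Position - origin, v);
    // ... accumulate lX/hX/lY/hY from px and py ...
}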
In this image (omitted), a black graph is drawn on a white background. I want to get the pixel length between two peak waves in the graph and the average amplitude (peak height) of the peak waves.
I'm stuck on the logic needed to implement this. Can anyone help me implement it? I'm using C#.
public void black(Bitmap bmp)
{
    Color col;
    for (int i = 0; i < bmp.Height; i++)
    {
        for (int j = 0; j < bmp.Width; j++)
        {
            col = bmp.GetPixel(j, i);
            if (col.R == 0) // check whether this is a black pixel
            {
                y = i; // assign the black pixel's x, y positions to fields
                x = j;
            }
        }
    }
}
My supervisor told me I have to use a 2D array to store the increments and decrements of the line (the start-point and end-point pixel values of each increment and decrement) to get these values, but I don't have sufficient coding skill to apply that logic to this code.
Bitmap img = new Bitmap(pictureBox1.Image);
int width = img.Width;
int height = img.Height;
for (int y = 0; y < height; y++)
{
    for (int x = 0; x < width; x++)
    {
        Color pixelColor = img.GetPixel(x, y);
        if (pixelColor.R == 0 && pixelColor.G == 0 && pixelColor.B == 0)
        {
            //listBox1.Items.Add(String.Format("x:{0} y:{1}", x, y));
            textBox1.Text = (String.Format("x:{0} y:{1}", x, y));
        }
    }
}
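A possible starting point for the peak logic (entirely a sketch, not a tested solution): reduce the graph to a 1D profile by finding the topmost black pixel in each column, then treat columns that are higher than both neighbours as peaks. The distance between consecutive peaks gives the wavelength in pixels, and the baseline minus the peak's y-value gives its amplitude.

// Sketch: topmost black pixel per column (y grows downward).
int[] profile = new int[width];
for (int x = 0; x < width; x++)
{
    profile[x] = height - 1; // default to the bottom if the column is empty
    for (int y = 0; y < height; y++)
    {
        Color c = img.GetPixel(x, y);
        if (c.R == 0 && c.G == 0 && c.B == 0)
        {
            profile[x] = y;
            break;
        }
    }
}
// A peak is a column higher (smaller y) than both of its neighbours.
List<int> peaks = new List<int>();
for (int x = 1; x < width - 1; x++)
{
    if (profile[x] < profile[x - 1] && profile[x] <= profile[x + 1])
        peaks.Add(x);
}
// e.g. the pixel length between the first two peaks:
// int peakDistance = peaks[1] - peaks[0];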