I have modified my rendering engine to use multisampled textures, but now depth testing is being ignored.
Here's how I'm creating the multisampled FBO,
public MSAA_FBO(int WindowWidth, int WindowHeight)
{
this.width = WindowWidth;
this.height = WindowHeight;
GL.GenFramebuffers(1, out ID);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, ID);
// Colour texture
GL.GenTextures(1, out textureColorBufferMultiSampled);
GL.BindTexture(TextureTarget.Texture2DMultisample, textureColorBufferMultiSampled);
GL.TexImage2DMultisample(TextureTargetMultisample.Texture2DMultisample, 4, PixelInternalFormat.Rgb8, WindowWidth, WindowHeight, true);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, TextureTarget.Texture2DMultisample, textureColorBufferMultiSampled, 0);
// Depth render buffer
GL.GenRenderbuffers(1, out RBO);
GL.BindRenderbuffer(RenderbufferTarget.RenderbufferExt, RBO);
GL.RenderbufferStorageMultisample(RenderbufferTarget.Renderbuffer, 4, RenderbufferStorage.DepthComponent, WindowWidth, WindowHeight);
GL.BindRenderbuffer(RenderbufferTarget.Renderbuffer, 0);
var status = GL.CheckFramebufferStatus(FramebufferTarget.Framebuffer);
Console.WriteLine("MSAA: " + status);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
}
performing the resolve,
public void resolveToFBO(FBO outputFBO)
{
GL.BindFramebuffer(FramebufferTarget.DrawFramebuffer, outputFBO.ID);
GL.BindFramebuffer(FramebufferTarget.ReadFramebuffer, this.ID);
GL.BlitFramebuffer(0, 0, this.width, this.height, 0, 0, outputFBO.width, outputFBO.height, ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit, BlitFramebufferFilter.Nearest);
}
and rendering the image,
public void MSAAPass(Shader shader)
{
GL.UseProgram(shader.ID);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, MSAAbuffer.ID);
GL.Viewport(0, 0, Width, Height);
GL.Enable(EnableCap.Multisample);
GL.ClearColor(System.Drawing.Color.Black);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.Enable(EnableCap.DepthTest);
GL.Disable(EnableCap.Blend);
// Uniforms
Matrix4 viewMatrix = player.GetViewMatrix();
GL.UniformMatrix4(shader.getUniformID("viewMatrix"), false, ref viewMatrix);
// Draw all geometry
DrawScene(shader);
GL.BindFramebuffer(FramebufferTarget.Framebuffer, 0);
MSAAbuffer.resolveToFBO(testBuffer);
}
Your multisampled FBO does not have a depth buffer attached, so the depth test cannot work. Although you did create a multisampled renderbuffer with the GL_DEPTH_COMPONENT format, you never attached it as the GL_DEPTH_ATTACHMENT of your FBO. You need to add a glFramebufferRenderbuffer() call in your MSAA_FBO() constructor.
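In OpenTK that comes down to one extra call while the FBO is still bound, placed right after GL.RenderbufferStorageMultisample (a minimal sketch using the field names from the constructor above):
// Attach the multisampled depth renderbuffer to the currently bound FBO
GL.FramebufferRenderbuffer(FramebufferTarget.Framebuffer, FramebufferAttachment.DepthAttachment, RenderbufferTarget.Renderbuffer, RBO);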
I am drawing multiple images over an earth map, using perspective correct texturing (link) on each image.
I want to store the rendered image to a file (the source file is 1280x760; the rendered images are around 160x90 and in most cases rotated). Currently I am doing this with GL.ReadPixels:
int width = 1920;
int height = 1080;
using (Bitmap bitmap = new Bitmap(width, height))
{
System.Drawing.Imaging.BitmapData bits = bitmap.LockBits(new Rectangle(0, 0, width, height), System.Drawing.Imaging.ImageLockMode.WriteOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
GL.ReadPixels(0, 0, width, height, OpenTK.Graphics.OpenGL.PixelFormat.Bgra, PixelType.UnsignedByte, bits.Scan0);
bitmap.UnlockBits(bits);
bitmap.RotateFlip(RotateFlipType.Rotate180FlipX);
bitmap.Save("output.png", System.Drawing.Imaging.ImageFormat.Png);
}
The problem occurs when I move the map: the rendered images are no longer visible on the screen, and in that case GL.ReadPixels appears to return empty pixels.
How can I get the rendered image even if it is not currently rendered on screen?
Extra question: if I use a framebuffer the result is the same, but with the framebuffer I never see the image on the screen. It looks like the framebuffer is never drawn to the screen, yet GL.ReadPixels can still read the image out of it.
Which lines of code are needed to also draw the framebuffer on the screen?
Any idea?
I am adding some code that draws into the framebuffer, but the result is an empty image.
int FBOHandle = 0;
int ColorTexture = 0;
int DepthTexture = 0;
public bool canRender = false;
public void onRender()
{
int zoom = (int)MainForm.mainMap.Zoom;
VideoMapOverlayBitmap pob = null;
lock (videoMapOverlayBitmapsSync)
videoMapOverlayBitmaps.TryGetValue(zoom, out pob);
if (pob == null)
return;
if (canRender)
{
canRender = false;
int fboWidth = 1920;
int fboHeight = 1080;
// Create Color Tex for framebuffer
GL.GenTextures(1, out ColorTexture);
GL.BindTexture(TextureTarget.Texture2D, ColorTexture);
GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba8, fboWidth, fboHeight, 0, OpenTK.Graphics.OpenGL.PixelFormat.Rgba, PixelType.UnsignedByte, IntPtr.Zero);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapS, (int)TextureWrapMode.ClampToBorder);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapT, (int)TextureWrapMode.ClampToBorder);
// GL.Ext.GenerateMipmap( GenerateMipmapTarget.Texture2D );
// Create Depth Tex for framebuffer
GL.GenTextures(1, out DepthTexture);
GL.BindTexture(TextureTarget.Texture2D, DepthTexture);
GL.TexImage2D(TextureTarget.Texture2D, 0, (PixelInternalFormat)All.DepthComponent32, fboWidth, fboHeight, 0, OpenTK.Graphics.OpenGL.PixelFormat.DepthComponent, PixelType.UnsignedInt, IntPtr.Zero);
// things go horribly wrong if DepthComponent's Bitcount does not match the main Framebuffer's Depth
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMinFilter, (int)TextureMinFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureMagFilter, (int)TextureMagFilter.Linear);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapS, (int)TextureWrapMode.ClampToBorder);
GL.TexParameter(TextureTarget.Texture2D, TextureParameterName.TextureWrapT, (int)TextureWrapMode.ClampToBorder);
// GL.Ext.GenerateMipmap( GenerateMipmapTarget.Texture2D );
// Create a FBO and attach the textures
GL.Ext.GenFramebuffers(1, out FBOHandle);
GL.Ext.BindFramebuffer(FramebufferTarget.FramebufferExt, FBOHandle);
GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment0Ext, TextureTarget.Texture2D, ColorTexture, 0);
GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.DepthAttachmentExt, TextureTarget.Texture2D, DepthTexture, 0);
//check for errors on framebuffer
FramebufferErrorCode errorCode = GL.Ext.CheckFramebufferStatus(FramebufferTarget.Framebuffer);
if (errorCode != FramebufferErrorCode.FramebufferComplete)
{
if (errorCode == FramebufferErrorCode.FramebufferUnsupported)
Console.WriteLine("FramebufferUnsupported");
OnUnload();
return;
}
GL.ClearColor(0, 0, 0, 0);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
/*
//this corrupts my main screen
GL.ClearColor(Color.White);
GL.MatrixMode(MatrixMode.Projection);
GL.LoadIdentity();
GL.Ortho(0, fboWidth, fboHeight, 0, -1, 1); // Up-left corner pixel has coordinate (0, 0)
GL.Viewport(0, 0, fboWidth, fboHeight); // Use all of the glControl painting area
*/
// Render all images
PureProjection proj = MainForm.mainMap.MapProvider.Projection;
List<VideoLogEntry> log = Log;
//go over all images in the loop
foreach (var videoEntry in log)
{
if (videoEntry == null)
continue;
if (videoEntry.projectedRectangleEmpty())
continue;
PointLatLng[] rect = videoEntry.getProjectedRectangle();
if (videoEntry.bmp != null)
videoEntry.createTexture();
GL.Enable(EnableCap.Texture2D);
GL.BindTexture(TextureTarget.Texture2D, videoEntry.texture);
//GL.Ext.FramebufferTexture2D(FramebufferTarget.FramebufferExt, FramebufferAttachment.ColorAttachment0Ext, TextureTarget.Texture2D, videoEntry.texture, 0);
//GL.DrawBuffers(1, new int[] { videoEntry.texture }); //compiler error
//Do the magick for "Perspective correct texturing"
// center point
GPoint localTargetPosition = MainForm.instance.gMapControl.FromLatLngToLocalWithOffset(videoEntry.target);
videoEntry.UpdatePolygonLocalPosition(videoEntry.projectedRectangle);
// determines distances to center for all vertexes
double dUL = Common.distance(new double[] { videoEntry.LocalPoints[0].X, videoEntry.LocalPoints[0].Y }, new double[] { localTargetPosition.X, localTargetPosition.Y });
double dUR = Common.distance(new double[] { videoEntry.LocalPoints[1].X, videoEntry.LocalPoints[1].Y }, new double[] { localTargetPosition.X, localTargetPosition.Y });
double dLR = Common.distance(new double[] { videoEntry.LocalPoints[2].X, videoEntry.LocalPoints[2].Y }, new double[] { localTargetPosition.X, localTargetPosition.Y });
double dLL = Common.distance(new double[] { videoEntry.LocalPoints[3].X, videoEntry.LocalPoints[3].Y }, new double[] { localTargetPosition.X, localTargetPosition.Y });
var texCoords = new[]
{
new Vector4(0, 0, 1, 1),
new Vector4(1, 0, 1, 1),
new Vector4(1, 1, 1, 1),
new Vector4(0, 1, 1, 1)
};
texCoords[0] *= (float)((dUL + dLR) / dLR);
texCoords[1] *= (float)((dUR + dLL) / dLL);
texCoords[2] *= (float)((dLR + dUL) / dUL);
texCoords[3] *= (float)((dLL + dUR) / dUR);
GL.Begin(PrimitiveType.Quads);
{
GL.TexCoord4(texCoords[0]); GL.Vertex4(videoEntry.LocalPoints[0].X, videoEntry.LocalPoints[0].Y, 1, 1); //UL LocalPoints[0] gimbalUL
GL.TexCoord4(texCoords[1]); GL.Vertex4(videoEntry.LocalPoints[1].X, videoEntry.LocalPoints[1].Y, 1, 1); //UR LocalPoints[1] gimbalUR
GL.TexCoord4(texCoords[2]); GL.Vertex4(videoEntry.LocalPoints[2].X, videoEntry.LocalPoints[2].Y, 1, 1); //LR LocalPoints[2] gimbalLR
GL.TexCoord4(texCoords[3]); GL.Vertex4(videoEntry.LocalPoints[3].X, videoEntry.LocalPoints[3].Y, 1, 1); //LL LocalPoints[3] gimbalLL
}
GL.End();
GL.Disable(EnableCap.Texture2D);
}
// Grab your screenshot
// draw FBO in to file
lock (pob.masterBitmapSync)
{
using (Bitmap mybitmap = new Bitmap(fboWidth, fboHeight))
{
//fill the bitmap so we can see what ReadPixels draws
using (Graphics gfx = Graphics.FromImage(mybitmap))
using (SolidBrush brush = new SolidBrush(Color.FromArgb(0, 0, 255)))
{
gfx.FillRectangle(brush, 0, 0, mybitmap.Width, mybitmap.Height);
}
GPoint p = new GPoint(0, 0);
int outputWidth = mybitmap.Width;
int outputHeight = mybitmap.Height;
System.Drawing.Imaging.BitmapData bits = mybitmap.LockBits(new Rectangle(0, 0, mybitmap.Width, mybitmap.Height), System.Drawing.Imaging.ImageLockMode.WriteOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
GL.ReadPixels((int)p.X, (int)p.Y, outputWidth, outputHeight, OpenTK.Graphics.OpenGL.PixelFormat.Bgra, PixelType.UnsignedByte, bits.Scan0);
mybitmap.UnlockBits(bits);
//mybitmap.RotateFlip(RotateFlipType.Rotate180FlipX);
mybitmap.Save(@"c:\Downloads\aaa\ReadPixels_" + DateTime.Now.ToString("HHmmss_fff") + ".png", System.Drawing.Imaging.ImageFormat.Png);
pob.masterBitmap.Dispose();
pob.masterBitmap = null;
pob.masterBitmap = (Bitmap)mybitmap.Clone();
}
}
// Unload and dispose the frame buffer
GL.Ext.BindFramebuffer(FramebufferTarget.FramebufferExt, 0);
// Clean up what we allocated before exiting
if (ColorTexture != 0)
GL.DeleteTextures(1, ref ColorTexture);
ColorTexture = 0;
if (DepthTexture != 0)
GL.DeleteTextures(1, ref DepthTexture);
DepthTexture = 0;
if (FBOHandle != 0)
GL.Ext.DeleteFramebuffers(1, ref FBOHandle);
FBOHandle = 0;
}
}
How can I get the rendered image even if it is not currently rendered on screen?
Create a new framebuffer, bind it, set up the viewport, set up an ortho projection matrix, render your desired piece of the map into it, call GL.ReadPixels, save the screenshot, then unload and dispose of the framebuffer.
Here's some sample code:
// Create framebuffer
int fboId = GL.GenFramebuffer();
GL.BindFramebuffer(FramebufferTarget.Framebuffer, fboId);
// Set up a framebuffer attachment here
int width = ...
int height = ...
int textureId = GL.GenTexture();
GL.BindTexture(TextureTarget.Texture2D, textureId);
GL.TexImage2D(TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba8, width, height, 0, PixelFormat.Rgba, PixelType.Float, IntPtr.Zero);
GL.FramebufferTexture2D(FramebufferTarget.Framebuffer, FramebufferAttachment.ColorAttachment0, TextureTarget.Texture2D, textureId, 0);
GL.DrawBuffers(1, new[] { DrawBuffersEnum.ColorAttachment0 });
GL.Viewport(0, 0, width, height);
// Set up ortho mode; you probably also want to disable depth testing and any active blend modes
....
// Render your map now
....
// Grab your screenshot
....
// Unload and dispose the frame buffer
...
// Reset everything (viewport, ortho mode, etc.) if you don't want your normal map to flicker for a frame
....
I don't quite understand your other question.
I'm trying to implement a renderer which uses only one VBO and one EBO to store all object vertices and indices.
For that, I found some tutorials and finally came up with a rather good result.
The problem is that it only works properly with ONE single object. As soon as I want to add another one, the rendering shows weird behaviour.
Can you help me with this?
You can find the full code here: https://github.com/BanditBloodwyn/TerritorySimulator.
The important classes are:
Rendering.Core.Rendering.Renderer.cs
Rendering.Core.Classes.Shapes.GLShape.cs
Rendering.Core.RenderGUI.RenderGUI.cs
This is the initializing method in the Renderer:
public void Initialize(GLShape[] shapeArray)
{
Shapes = shapeArray;
GL.Enable(EnableCap.DepthTest);
GL.ClearColor(0.0f, 0.0f, 0.10f, 1.0f);
InitializeBuffers(Shapes);
InitializeVertexArrayObject(Shapes);
SetupShader();
BindBuffers();
}
The submethods look like this.
private void InitializeBuffers(GLShape[] shapeArray)
{
int vertexBufferSize = shapeArray.Sum(shape => shape.VertexBufferSize);
int indexBufferSize = shapeArray.Sum(shape => shape.IndexBufferSize);
// Vertex buffer
vertexBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, vertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, vertexBufferSize, (IntPtr)0, BufferUsageHint.StaticDraw);
IntPtr offset = (IntPtr)0;
foreach (GLShape shape in shapeArray)
{
GL.BufferSubData(BufferTarget.ArrayBuffer, offset, shape.VertexBufferSize, shape.Vertices);
offset += shape.VertexBufferSize;
}
// Element buffer
elementBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ElementArrayBuffer, elementBufferObject);
GL.BufferData(BufferTarget.ElementArrayBuffer, indexBufferSize, (IntPtr)0, BufferUsageHint.StaticDraw);
offset = (IntPtr)0;
foreach (GLShape shape in shapeArray)
{
GL.BufferSubData(BufferTarget.ElementArrayBuffer, offset, shape.IndexBufferSize, shape.Indices);
offset += shape.IndexBufferSize;
}
}
private void InitializeVertexArrayObject(GLShape[] shapeArray)
{
foreach (GLShape shape in shapeArray)
{
shape.VertexArrayObject = GL.GenVertexArray();
GL.BindVertexArray(shape.VertexArrayObject);
}
}
private void SetupShader()
{
// shader
string vertexPath = Path.Combine(Environment.CurrentDirectory, @"GLSL\", "Vertex.vert");
string fragmentPath = Path.Combine(Environment.CurrentDirectory, @"GLSL\", "Fragment.frag");
shader = new Shader(vertexPath, fragmentPath);
shader.Use();
int vertexLocation = shader.GetAttribLocation("aPosition");
GL.EnableVertexAttribArray(vertexLocation);
GL.VertexAttribPointer(
vertexLocation,
3,
VertexAttribPointerType.Float,
false,
5 * sizeof(float),
0);
int texCoordLocation = shader.GetAttribLocation("aTexCoord");
GL.EnableVertexAttribArray(texCoordLocation);
GL.VertexAttribPointer(
texCoordLocation,
2,
VertexAttribPointerType.Float,
false,
5 * sizeof(float),
3 * sizeof(float));
shader.SetInt("texture0", 0);
shader.SetInt("texture1", 1);
}
private void BindBuffers()
{
GL.BindBuffer(BufferTarget.ArrayBuffer, vertexBufferObject);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, elementBufferObject);
}
The render function itself looks like this.
public void Render()
{
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
if (Shapes == null || Shapes.Length == 0)
return;
IntPtr offset = (IntPtr)0;
foreach (GLShape shape in Shapes)
{
foreach (var texture in shape.Textures)
{
if (LayerConfiguration.ShowEarthTexture)
texture.Key.Use(texture.Value);
else
texture.Key.MakeTransparent(texture.Value);
}
ApplyModelTransforms(shape, out Matrix4 model);
shader.SetMatrix4("model", model);
shader.SetMatrix4("view", Camera.GetViewMatrix());
GL.DrawElements(PrimitiveType.Triangles, shape.Indices.Length, DrawElementsType.UnsignedInt, offset);
offset += shape.IndexBufferSize;
}
shader.SetMatrix4("projection", Camera.GetProjectionMatrix());
shader.Use();
}
As soon as I want to add another one, the rendering shows weird behavior
Of course, because the indices for the 2nd and following objects are wrong. You need to add the total number of vertices of the previous meshes to the indices: to the indices of the 1st mesh you add 0, to the indices of the 2nd mesh you add the number of vertices of the 1st mesh, to the indices of the 3rd mesh you add the combined vertex count of the 1st and 2nd meshes, and so on. For example, if the 1st mesh has 4 vertices and the 2nd mesh's indices are 0, 1, 2, 0, 2, 3, then the indices written to the shared EBO for the 2nd mesh must be 4, 5, 6, 4, 6, 7.
offset = (IntPtr)0;
uint firstVertexIndex = 0;
foreach (GLShape shape in shapeArray)
{
var indexArray = shape.Indices.Select(index => index + firstVertexIndex).ToArray();
GL.BufferSubData(
BufferTarget.ElementArrayBuffer, offset, shape.IndexBufferSize, indexArray);
offset += shape.IndexBufferSize;
firstVertexIndex += (uint)(shape.VertexBufferSize / (5 * sizeof(float)));
}
Complete method InitializeBuffers:
private void InitializeBuffers(GLShape[] shapeArray)
{
int vertexBufferSize = shapeArray.Sum(shape => shape.VertexBufferSize);
int indexBufferSize = shapeArray.Sum(shape => shape.IndexBufferSize);
// Vertex buffer
vertexBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, vertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, vertexBufferSize, (IntPtr)0, BufferUsageHint.StaticDraw);
IntPtr offset = (IntPtr)0;
foreach (GLShape shape in shapeArray)
{
GL.BufferSubData(BufferTarget.ArrayBuffer, offset, shape.VertexBufferSize, shape.Vertices);
offset += shape.VertexBufferSize;
}
// Element buffer
elementBufferObject = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ElementArrayBuffer, elementBufferObject);
GL.BufferData(BufferTarget.ElementArrayBuffer, indexBufferSize, (IntPtr)0, BufferUsageHint.StaticDraw);
offset = (IntPtr)0;
uint firstVertexIndex = 0;
foreach (GLShape shape in shapeArray)
{
var indexArray = shape.Indices.Select(index => index + firstVertexIndex).ToArray();
GL.BufferSubData(BufferTarget.ElementArrayBuffer, offset, shape.IndexBufferSize, indexArray);
offset += shape.IndexBufferSize;
firstVertexIndex += (uint)(shape.VertexBufferSize / (5 * sizeof(float)));
}
}
I want to draw a bitmap using D3D9. I got code here and it works: it draws both the box and the image. But when I draw only the box I get around 60 fps, and when I uncomment the code that draws the bitmap the fps drops to between 5 and 30 and it is very laggy. What's wrong with my code?
private void D3D9Render()
{
do
{
Drawing.Device.Clear(D3D9.ClearFlags.Target, Color.FromArgb(0, 0, 0, 0), 1.0f, 0);
Drawing.Device.BeginScene();
Drawing.DrawText("Fps : " + Fps.CalculateFrameRate().ToString(), Drawing.Width - 72, 220, Color.White);
Drawing.DrawBox(v.X, v.Y, 90, 7);
/*
Drawing.DrawTexture(new Bitmap("test.jpg"));
Drawing.Sprite.Begin(D3D9.SpriteFlags.None);
Drawing.Sprite.Draw(Drawing.Texture, Drawing.TextureSize,
new Vector3(0, 0, 0),
new Vector3(v.X-65, v.Y-55, v.Z), Color.White);
Drawing.Sprite.End();
*/
Drawing.Device.EndScene();
Drawing.Device.Present();
} while (true);
}
public static void DrawTexture(Bitmap image)
{
Texture = new Texture(Device, image, Usage.None, Pool.Managed);
using (Surface surface = Texture.GetSurfaceLevel(0))
{
SurfaceDescription surfaceDescription = surface.Description;
TextureSize = new Rectangle(0, 0,
surfaceDescription.Width,
surfaceDescription.Height);
}
}
Have you tried allocating your bitmap outside of your render loop? Depending on the size of the bitmap, it could really slow things down. You're allocating and re-allocating space for that bitmap each iteration of the loop.
Move its allocation outside of the loop and then render it (that one allocation) in your render loop.
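As a rough sketch (simply rearranging the Drawing helpers and test.jpg from the question), the texture is created once before the loop and only the sprite draw stays inside it:
private void D3D9Render()
{
    // Allocate the texture once, outside the render loop
    Drawing.DrawTexture(new Bitmap("test.jpg"));
    do
    {
        Drawing.Device.Clear(D3D9.ClearFlags.Target, Color.FromArgb(0, 0, 0, 0), 1.0f, 0);
        Drawing.Device.BeginScene();
        Drawing.DrawBox(v.X, v.Y, 90, 7);
        // Re-use the texture that was allocated above
        Drawing.Sprite.Begin(D3D9.SpriteFlags.None);
        Drawing.Sprite.Draw(Drawing.Texture, Drawing.TextureSize,
            new Vector3(0, 0, 0),
            new Vector3(v.X - 65, v.Y - 55, v.Z), Color.White);
        Drawing.Sprite.End();
        Drawing.Device.EndScene();
        Drawing.Device.Present();
    } while (true);
}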
We are trying to make an app using Xamarin which will have a small animated face in a GLKView on a particular screen. We have looked for solutions for rendering sprites, and the best solution we came up with stems from this solution here. We are having trouble even drawing a simple image in the GLKView, and the error in the output does not really make sense. We are converting this from iOS to Xamarin C#, so there are differences between certain calls, but we have tried to keep most pieces intact.
Here are the parts of the code this is related to:
public class Sprite : NSObject
{
public void Render()
{
Effect.Texture2d0.GLName = TextureInfo.Name;
Effect.Texture2d0.Enabled = true;
Effect.PrepareToDraw();
GL.EnableVertexAttribArray((int)GLKVertexAttrib.Position);
GL.EnableVertexAttribArray((int)GLKVertexAttrib.TexCoord0);
IntPtr ptr = Marshal.AllocHGlobal(Marshal.SizeOf(Quad));
Marshal.StructureToPtr(Quad, ptr, false);
int offset = (int)ptr;
GL.VertexAttribPointer((uint)GLKVertexAttrib.Position, 2, VertexAttribPointerType.Float, false, Marshal.SizeOf(typeof(TexturedVertex)), offset + (int)Marshal.OffsetOf(typeof(TexturedVertex), "geomertryVertex"));
GL.VertexAttribPointer((uint)GLKVertexAttrib.Position, 2, VertexAttribPointerType.Float, false, Marshal.SizeOf(typeof(TexturedVertex)), offset + (int)Marshal.OffsetOf(typeof(TexturedVertex), "textureVertex"));
GL.DrawArrays(BeginMode.TriangleStrip, 0, 4);
Marshal.FreeHGlobal(ptr);
}
}
Sprite.Render() is called in this GLKViewController here:
public class AnimationViewController : GLKViewController
{
GLKView animationView;
EAGLContext context;
Sprite player;
GLKBaseEffect effect;
public override void ViewDidLoad()
{
base.ViewDidLoad();
context = new EAGLContext(EAGLRenderingAPI.OpenGLES2);
if (context == null)
Console.WriteLine("Failed to create ES context...");
animationView = new GLKView(new RectangleF(UIScreen.MainScreen.Bounds.Width * 0.05f,
UIScreen.MainScreen.Bounds.Height * 0.05f,
UIScreen.MainScreen.Bounds.Width * 0.9f,
UIScreen.MainScreen.Bounds.Height * 0.75f), context);
EAGLContext.SetCurrentContext(context);
animationView.DrawInRect += new EventHandler<GLKViewDrawEventArgs>(animationView_DrawInRect);
View.AddSubview(animationView);
effect = new GLKBaseEffect();
Matrix4 projectionMatrix = Matrix4.CreateOrthographicOffCenter(0, animationView.Frame.Width, 0, animationView.Frame.Height, -1024, 1024);
effect.Transform.ProjectionMatrix = projectionMatrix;
player = new Sprite(#"Player.png", effect);
}
void animationView_DrawInRect(object sender, GLKViewDrawEventArgs e)
{
GL.ClearColor(0.98f, 0.98f, 0.98f, 1.0f);
//GL.Clear((uint)(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit));
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.BlendFunc(BlendingFactorSrc.SrcAlpha, BlendingFactorDest.OneMinusSrcAlpha);
GL.Enable(EnableCap.Blend);
player.Render();
}
}
Links to whole code files:
Sprite Class and related Structs
AnimationViewController Class
Looks like the problem is just a typo in the second call to VertexAttribPointer. The second GLKVertexAttrib.Position should instead be GLKVertexAttrib.TexCoord0:
GL.VertexAttribPointer((uint)GLKVertexAttrib.Position, 2, VertexAttribPointerType.Float, false, Marshal.SizeOf(typeof(TexturedVertex)), offset + (int)Marshal.OffsetOf(typeof(TexturedVertex), "geomertryVertex"));
GL.VertexAttribPointer((uint)GLKVertexAttrib.TexCoord0, 2, VertexAttribPointerType.Float, false, Marshal.SizeOf(typeof(TexturedVertex)), offset + (int)Marshal.OffsetOf(typeof(TexturedVertex), "textureVertex"));
First of all, I'd like to suggest that it is not a duplicate of THIS question. At least that's my opinion :)
What I want to achieve is a series of frames to "fade" animation.
I choose two PNG files (let's say they are the same size), for example:
Picture 1
Picture 2
I want to "simulate" merging them like layers in graphic editor. I put Pic1 on the top with opacity 255, Pic2 below with opacity 0, so at first I see only Pic1. Then I change their opacity, like this:
Pic1-200, Pic2-150
Pic1-150, Pic2-200
Pic1-100, Pic2-230
Is there any simple way for it?
In a winforms app this can be done pretty easily. Create a user control with a few properties:
public Image FromImage { get; set; }
public Image ToImage { get; set; }
private float opacity = 1;
Now override OnPaint
protected override void OnPaint(PaintEventArgs e)
{
if (FromImage != null && ToImage != null)
{
ColorMatrix matrix1 = new ColorMatrix();
matrix1.Matrix33 = opacity;
ImageAttributes attributes1 = new ImageAttributes();
attributes1.SetColorMatrix(matrix1, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
ColorMatrix matrix2 = new ColorMatrix();
matrix2.Matrix33 = 1 - opacity;
ImageAttributes attributes2 = new ImageAttributes();
attributes2.SetColorMatrix(matrix2, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
e.Graphics.DrawImage(FromImage, new Rectangle(0, 0, this.Width, this.Height), 0, 0, this.Width,
this.Height, GraphicsUnit.Pixel, attributes1);
e.Graphics.DrawImage(ToImage, new Rectangle(0, 0, this.Width, this.Height), 0, 0, this.Width,
this.Height, GraphicsUnit.Pixel, attributes2);
}
base.OnPaint(e);
}
Now drop a timer onto the control, set it to enabled with an interval of something like 100 ms, and handle the Tick event:
private void timer_Tick(object sender, EventArgs e)
{
if (opacity <= 0) // use <=, floating-point decrements may never hit exactly 0
{
this.timer.Stop();
return;
}
this.opacity -= 0.01f;
this.Invalidate();
}
et voila. However, there's one thing to be aware of: this makes for quite a flickery transition, which can be alleviated somewhat with this line in the control's constructor:
this.SetStyle(ControlStyles.OptimizedDoubleBuffer | ControlStyles.AllPaintingInWmPaint,true);
Update based on Edit: You could turn this into a utility that takes 2 images and, using much the same code, outputs each step to a new image. Something like:
public class ImageUtility
{
private Image image1;
private Image image2;
public ImageUtility(Image image1, Image image2)
{
this.image1 = image1;
this.image2 = image2;
}
public void SaveTransitions(int numSteps, string outDir)
{
var opacityChange = 1.0f/(float) numSteps;
for(float opacity = 1,i=0;opacity>0;opacity-=opacityChange,i++)
{
using (var image = new Bitmap(image1.Width, image1.Height))
{
Graphics g = Graphics.FromImage(image);
ColorMatrix matrix1 = new ColorMatrix();
matrix1.Matrix33 = opacity;
ImageAttributes attributes1 = new ImageAttributes();
attributes1.SetColorMatrix(matrix1, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
ColorMatrix matrix2 = new ColorMatrix();
matrix2.Matrix33 = 1 - opacity;
ImageAttributes attributes2 = new ImageAttributes();
attributes2.SetColorMatrix(matrix2, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
g.DrawImage(image1, new Rectangle(0, 0, image1.Width, image1.Height), 0, 0, image1.Width,
image1.Height, GraphicsUnit.Pixel, attributes1);
g.DrawImage(image2, new Rectangle(0, 0, image2.Width, image2.Height), 0, 0, image2.Width,
image2.Height, GraphicsUnit.Pixel, attributes2);
image.Save(Path.Combine(outDir,"Image" + i + ".png"),ImageFormat.Png);
}
}
}
}
Usage:
ImageUtility util = new ImageUtility(Image.FromFile(@"C:\path\pic1.png"), Image.FromFile(@"C:\path\pic2.png"));
util.SaveTransitions(100, @"C:\path\output"); // saves 100 images
In WinForms you can use Graphics.DrawImage with the overload that takes an ImageAttributes parameter. That class can specify manipulations of the colour (and alpha) values.
The example on the ImageAttributes page is nearly what you want. Just draw the original and the transformed one in the same place, and change the colour matrix so that it only alters the alpha level.
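A minimal sketch of that idea (the DrawBlended helper and its blend parameter are illustrative, not from the original code; types come from System.Drawing and System.Drawing.Imaging): draw both images into the same rectangle, each with an alpha-only colour matrix.
// Cross-fade pic1 over pic2: blend = 1 shows only pic1, blend = 0 shows only pic2.
void DrawBlended(Graphics g, Image pic1, Image pic2, float blend, Rectangle dest)
{
    var fadeOut = new ImageAttributes();
    fadeOut.SetColorMatrix(new ColorMatrix { Matrix33 = blend }, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
    var fadeIn = new ImageAttributes();
    fadeIn.SetColorMatrix(new ColorMatrix { Matrix33 = 1 - blend }, ColorMatrixFlag.Default, ColorAdjustType.Bitmap);
    // Only the alpha component (Matrix33) differs from the identity matrix,
    // so the colours are untouched and the two images simply cross-fade.
    g.DrawImage(pic1, dest, 0, 0, pic1.Width, pic1.Height, GraphicsUnit.Pixel, fadeOut);
    g.DrawImage(pic2, dest, 0, 0, pic2.Width, pic2.Height, GraphicsUnit.Pixel, fadeIn);
}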