OpenGL Models overriding previous ones - c#

I have been trying to figure out this bug in my code for quite a while, and now I'm seeking help from stackoverflow! Anyway, I have created a class for the creation of a model in openGL. It handles the VBO, VAO, and EBO for a given model, but when I create two and display them, the second always overrides the previous one/ones I have no idea why, I showed it to a friend of mine, and together we figured out nothing. This is the model class:
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL4;
using System;
namespace YISOPENGLEVIL.Components.Objects
{
struct Vertex
{
Vector3 Position;
Vector3 Color;
public Vertex(Vector3 position, Vector3 color)
{
Position = position;
Color = color;
}
}
class Model
{
public int VBO, VAO, EBO;
public Vertex[] Data;
public int[] Indices;
public Model() { }
public Model(Vertex[] data, int[] indices)
{
Data = data;
Indices = indices;
VAO = GL.GenVertexArray();
VBO = GL.GenBuffer();
EBO = GL.GenBuffer();
GL.BindVertexArray(VAO);
GL.BindBuffer(BufferTarget.ArrayBuffer, VBO);
GL.BufferData(BufferTarget.ArrayBuffer, Data.Length * 24, Data, BufferUsageHint.StaticDraw);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 24, 0);
GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, false, 24, 12);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, EBO);
GL.BufferData(BufferTarget.ElementArrayBuffer, Indices.Length * 4, Indices, BufferUsageHint.StaticDraw);
GL.EnableVertexAttribArray(0);
GL.EnableVertexAttribArray(1);
}
protected static Vector3 toVec3(Color4 c)
{
return new Vector3(c.R, c.G, c.B);
}
public void Render()
{
//GL.DrawArrays(PrimitiveType.Triangles, 0, Data.Length);
GL.DrawElements(BeginMode.Triangles, Indices.Length, DrawElementsType.UnsignedInt, 0);
}
}
}
and my Element class, which gets passed a Model then is used to display it,
using OpenTK;
using System.Collections.Generic;
using OpenTK.Graphics.OpenGL4;
namespace YISOPENGLEVIL.Components.Objects
{
class Element
{
private int transform;
private int s;
public Vector3 Position;
public Vector3 Rotation;
private Matrix4 View;
private Model Model;
public Element(Model m, int Shader)
{
s = Shader;
Model = m;
Position = new Vector3(0, 0, 0);
Rotation = new Vector3(0, 0, 0);
}
public void Render()
{
var t = Matrix4.CreateTranslation(Position.X, Position.Y, Position.Z);
var rx = Matrix4.CreateRotationX(Rotation.X);
var ry = Matrix4.CreateRotationY(Rotation.Y);
var rz = Matrix4.CreateRotationZ(Rotation.Z);
View = t * rx * ry * rz;
transform = GL.GetUniformLocation(s, "transform");
GL.UniformMatrix4(transform, false, ref View);
Model.Render();
}
public static void RenderAll()
{
}
}
}
and this is the implementation:
m1 = new Cube(1.5f, Color4.Aqua);
m2 = new Cube(1f, Color4.Red);
e1 = new Element(m1, s.Program);
e2 = new Element(m2, s.Program);
e1.Position = new Vector3(0, 0, 0);
e2.Position = new Vector3(0, 0, 1);
and:
e1.Render();
e2.Render();
The cube class extends Model, and passes data into the Model constructor by private static methods that create the vertex arrays. The really weird thing is that if I put print statements it shows that both elements are being defined fine and have the correct data, the last always overrides the previous (there are two objects in the correct positions, but both displaying the red smaller cube). I don't get it... Any Ideas?
Screenshot of the program

Related

3D camera rotates the object, but not the camera in space. Implementation problem

I'm learning how to implement the 3D spaces. Now i want implement 3D camera for moving around my designed 3D space.
The problem is that camera rotate the 3D objects. But not the camera in space as need to be. I can not understand what i'm doing wrong.
Code of the Camera class:
public class Camera3D
{
public Vector3D Pos { get; set; }
public Vector3D Target { get; set; }
public Vector3D Up { get; set; }
public double CameraSpeed { get; set; }
public Camera3D(Vector3D pos, Vector3D target, Vector3D up)
{
Pos = pos;
Target = target;
Up = up;
Pos.Normalize();
Target.Normalize();
Up.Normalize();
}
public Camera3D()
{
Pos = new Vector3D(0, 0, 3);
Target = new Vector3D(0, 0, 0);
Up = new Vector3D(0, 1, 0);
}
private Vector3D GetCameraDirection(Vector3D pos, Vector3D target)
{
Vector3D dir = pos - target;
dir.Normalize();
return dir;
}
private Vector3D GetRight(Vector3D pos, Vector3D target, Vector3D up)
{
Vector3D right = Vector3D.CrossProduct(Up, GetCameraDirection(pos, up));
right.Normalize();
return right;
}
private Vector3D GetCameraUp(Vector3D pos, Vector3D target, Vector3D up)
{
Vector3D vup = Vector3D.CrossProduct(GetCameraDirection(pos, target), GetRight(pos, target, up));
return vup;
}
public Matrix3D LookAt(Vector3D pos, Vector3D target, Vector3D up)
{
// translation matrix for camera
Matrix3D translation = new Matrix3D
(
1, 0, 0, -Pos.X,
0, 1, 0, -Pos.Y,
0, 0, 1, -Pos.Z,
0, 0, 0, 1
);
Vector3D vRight = GetRight(pos, target, up);
Vector3D vUp = GetCameraUp(pos, target, up);
Vector3D vDir = GetCameraDirection(pos, target);
Matrix3D m = new Matrix3D
(
vRight.X, vRight.Y, vRight.Z, 0,
vUp.X, vUp.Y, vUp.Z, 0,
vDir.X, vDir.Y, vDir.Z, 0,
0, 0, 0, 1
);
return m * translation;
}
}
Some useful fields and constructor of main window
// just temporary field for test rotating Camera
double a = 0;
Vector3D Translate = new Vector3D(1, 1, 1);
Vector3D Rotation = new Vector3D(0, 0, 0);
Vector3D Scaling = new Vector3D(1, 1, 1);
CTransform3D cTransform; // class for transform operations
Camera3D c = new Camera3D();
public MainWindow()
{
InitializeComponent();
cTransform = new CTransform3D(Rotation, Translate, Scaling);
cTransform.Camera = c; // camera for transformations
// load the mesh
o.Mesh.LoadFromObjectFile("D:\\de\\test\\ship.obj");
}
The main method that draw objects(here is all transformations, projection, and camera transformations too):
public void Draw()
{
try
{
// util classes
MatrixUtil mu = new MatrixUtil();
cv.Children.Clear(); // clear canvas
foreach (var triangle in o.Mesh.Triangles)
{
Triangle tProjected = new Triangle(), tTransformed = new Triangle(), tViewed = new Triangle();
// just init for right compile
tProjected.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
tTransformed.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
tViewed.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
// project and transform matrices
Matrix3D ProjectionMatrix = cTransform.ProjectionMatrix(5, Width, Height, 0.1, 1000); // just projection matrix
Matrix3D TransformMatrix = RotationMatrices.RotatationYMatrix(Rotation.Y); // rotate y
Matrix3D CameraMatrix = cTransform.GetCameraTransform(); // camera matrix that contains only LookAt
c.Pos = new Vector3D(Math.Sin(a), 0, Math.Cos(a)); // change position of camera
// transform points
tTransformed.Points[0] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[0]);
tTransformed.Points[1] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[1]);
tTransformed.Points[2] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[2]);
// multiple camera matrix to transformed points
tViewed.Points[0] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[0]);
tViewed.Points[1] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[1]);
tViewed.Points[2] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[2]);
// project the object
tProjected.Points[0] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[0]);
tProjected.Points[1] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[1]);
tProjected.Points[2] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[2]);
// just draw the result
new CTriangle(
triangle.Points[0].X + o.Position.X, triangle.Points[0].Y + o.Position.Y ,
triangle.Points[1].X + o.Position.X, triangle.Points[1].Y + o.Position.Y ,
triangle.Points[2].X + o.Position.X, triangle.Points[2].Y + o.Position.Y ,
cv, Brushes.Gray, Brushes.Black);
}
a+= .1;
Rotation.X += 0.05f;
Rotation.Y += 0.05f;
Rotation.Z += 0.05f;
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString());
}
}
Projection matrix method from CTransform class:
public Matrix3D ProjectionMatrix(double fov, double width, double height, double near, double far)
{
int fAspectRatio = (int)(width / height);
double fFovRad = 1.0f / Math.Tan(fov * 0.5f / 180.0f * 3.14159f);
return new Matrix3D
(
fAspectRatio * fFovRad, 0, 0, 0,
0, fFovRad, 0, 0,
0, 0, far / (far - near), 1,
0, 0, (-far * near) / (far - near), 1
);
}
GetCameraTransform() Method from CTransform class:
public Matrix3D GetCameraTransform()
{
Matrix3D cameraLookAt = Camera.LookAt(camera.Pos, camera.Target, camera.Up);
return cameraLookAt;
}
Fields of camera from CTransform class:
private Camera3D camera;
public Camera3D Camera { get { return camera; } set { camera = value; } }
Multiple Matrix to Vector method from MatrixUtil class for transformations about points of triangle:
public Vector3D Matrix_MulVector(Matrix3D m, Vector3D v)
{
Vector3D res = new Vector3D();
// additional W component of vector usually will 1. So just add offsets
res.X = v.X * m.M11 + v.Y * m.M21 + v.Z * m.M31 + m.OffsetX;
res.Y = v.X * m.M12 + v.Y * m.M22 + v.Z * m.M32 + m.OffsetY;
res.Z = v.X * m.M13 + v.Y * m.M23 + v.Z * m.M33 + m.OffsetZ;
return res;
}
I know its hard to understand all that mess. But i need help in understanding how the 3D camera works in 3D graphic. And how to implement it in my designed 3D space.
Tried change the order of multiplication matrix and triangles in all pipeline. Got more troubles.
Change the translation camera matrix. Result: nothing.
Change the LookAt method. Result: nothing.
Checked and tried to change some other matrices about transform the object. Result: nothing.

Cannot get colors to render in GLSL Xamarin OpenGLView contol

Can anyone please help with the following?
Using Visual Studio 2019 Xamarin Forms project I am trying to display a rotating red square in the Android emulator. I have it working but my square is black:
Here's the code of my MainPage.xaml.cs (am using ES20 as ES10 does not have the OpenGL functions I need):
using OpenTK.Graphics.ES20;
using System;
using Xamarin.Forms;
namespace OGLMobile
{
public partial class MainPage : ContentPage
{
private float red = 1.0f;
private float green = 0.0f;
private float blue = 0.0f;
private const float halfWidth = 0.2f;
private float[] m_vertex_buffer_data = { 0.5f - halfWidth, 0.5f - halfWidth, 0.0f,
0.5f + halfWidth, 0.5f - halfWidth, 0.0f,
0.5f + halfWidth, 0.5f + halfWidth, 0.0f,
0.5f - halfWidth, 0.5f + halfWidth, 0.0f };
private string[] m_szVertexShader = null;
private string[] m_szFragmentShader = null;
private int m_nProgram = -1;
private int m_nVertexShaderHandle = -1;
private int m_nFragmentShaderHandle = -1;
private uint[] indices = { 0, 1, 2, 0, 2, 3 };
private int m_nVertexBuffer = -1;
bool m_bOGLParametersSet = false;
private void RotateSquare(float radians, float xRotationCentre, float yRotationCentre)
{
int[] nBaseIndices = { 0, 3, 6, 9 };
for (int nVertex = 0; nVertex <= 3 ; nVertex++)
{
int nIndex1 = nBaseIndices[nVertex];
int nIndex2 = nIndex1 + 1;
float offsetX = m_vertex_buffer_data[nIndex1] - xRotationCentre;
float offsetY = m_vertex_buffer_data[nIndex2] - yRotationCentre;
double xRotated = offsetX * Math.Cos(radians) - offsetY * Math.Sin(radians);
double yRotated = offsetX * Math.Sin(radians) + offsetY * Math.Cos(radians);
m_vertex_buffer_data[nIndex1] = (float)xRotated + xRotationCentre;
m_vertex_buffer_data[nIndex2] = (float)yRotated + yRotationCentre;
}
GL.BindBuffer(BufferTarget.ArrayBuffer, m_nVertexBuffer);
GL.BufferSubData(BufferTarget.ArrayBuffer, IntPtr.Zero, new IntPtr(m_vertex_buffer_data.Length * sizeof(float)), m_vertex_buffer_data);
}
public MainPage()
{
Title = "OpenGL";
var view = new OpenGLView { HasRenderLoop = true };
var toggle = new Switch { IsToggled = true };
m_szVertexShader = new string[1];
m_szFragmentShader = new string[1];
view.HeightRequest = 300;
view.WidthRequest = 300;
GL.Viewport(0, 0, 300, 300);
view.OnDisplay = r =>
{
if(!m_bOGLParametersSet) // Do this only on first rendering
{
CreateShader();
m_bOGLParametersSet = true;
GL.UseProgram(m_nProgram);
GL.GenBuffers(1, out m_nVertexBuffer);
GL.BindBuffer(BufferTarget.ArrayBuffer, m_nVertexBuffer);
GL.BufferData(BufferTarget.ArrayBuffer, new IntPtr(m_vertex_buffer_data.Length * sizeof(float)), m_vertex_buffer_data, BufferUsage.StaticDraw);
}
GL.ClearColor(0.0f, 0.0f, 1.0f, 1.0f);
GL.Clear((ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit));
RotateSquare(0.0174533f, 0.5f, 0.5f);
GL.EnableVertexAttribArray(0);
GL.BindBuffer(BufferTarget.ArrayBuffer, m_nVertexBuffer);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, false, 0, 0);
GL.DrawElements(BeginMode.Triangles, indices.Length, DrawElementsType.UnsignedInt, indices);
GL.DisableVertexAttribArray(0);
};
toggle.Toggled += (s, a) =>
{
view.HasRenderLoop = toggle.IsToggled;
};
var stack = new StackLayout
{
Padding = new Size(20, 20),
Children = { view, toggle}
};
Content = stack;
}
private void SetShaderSource()
{
m_szVertexShader[0] = "void main()" +
"{" +
"gl_Position = ftransform();" +
"}";
m_szFragmentShader[0] = "void main()" +
"{" +
"gl_FragColor = vec4(1.0,0.0,0.0,1.0);" +
"}";
}
private void CreateShader()
{
SetShaderSource();
int nVertexShaderSourceLength = m_szVertexShader[0].Length;
int nFragmentShaderLength = m_szFragmentShader[0].Length;
m_nVertexShaderHandle = GL.CreateShader(ShaderType.VertexShader);
m_nFragmentShaderHandle = GL.CreateShader(ShaderType.FragmentShader);
GL.ShaderSource(m_nVertexShaderHandle, 1, m_szVertexShader, new int[] { nVertexShaderSourceLength });
GL.ShaderSource(m_nFragmentShaderHandle, 1, m_szFragmentShader, new int[] { nVertexShaderSourceLength });
GL.CompileShader(m_nVertexShaderHandle);
GL.CompileShader(m_nFragmentShaderHandle);
string szVertexShaderLog = GL.GetShaderInfoLog(m_nVertexShaderHandle);
string szFragmentShaderLog = GL.GetShaderInfoLog(m_nFragmentShaderHandle);
m_nProgram = GL.CreateProgram();
GL.AttachShader(m_nProgram, m_nVertexShaderHandle);
GL.AttachShader(m_nProgram, m_nFragmentShaderHandle);
GL.LinkProgram(m_nProgram);
}
}
}
My shaders must be incorrect, I get the same output if I replace them with junk strings, so the system must be defaulting to something.
I do have compile errors when I call GetShaderInfoLog, but my shaders are so trivial that I can't see the issue:
Thanks very much for any help, (been stuck on this for a while now. Have done plenty of OpenGL on desktop and with WebGL before, but not having any luck yet with mobile).
ADDITION: Thanks very much for you replies. I have replaced tha shaders with compliant ones, unfortunately I still have a black square:
(In the image it says mediump, I have also tried highp and lowp but I still get the same results).
I now get a different error on compilation of the vertex shader, and no error for the fragment shader - although I do have one warning for the latter:
Thank you for any further advice you may be able to offer.
These shaders do not meet any GLSL ES specification. See OpenGL ES Shading Language 1.00 Specification respectively. OpenGL ES Shading Language 3.00 Specification.
See a working shader with a vertex shader:
attribute vec3 a_position;
void main()
{
gl_Position = vec4(a_position, 1.0);
}
and fragment shader
precision mediump float;
void main()
{
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
I recommend to verify if the shader compiles with out errors:
GL.CompileShader(m_nVertexShaderHandle);
string infoLogFrag = GL.GetShaderInfoLog(m_nVertexShaderHandle);
if (infoLogFrag != System.String.Empty)
{
System.Console.WriteLine(infoLogFrag);
}
GL.CompileShader(m_nFragmentShaderHandle);
string infoLogVert = GL.GetShaderInfoLog(m_nFragmentShaderHandle);
if (infoLogVert != System.String.Empty)
{
System.Console.WriteLine(infoLogVert);
}
And the program links without errors:
GL.LinkProgram(m_nProgram);
string infoLogProg = GL.GetProgramInfoLog(m_nProgram );
if (infoLogProg != System.String.Empty)
{
System.Console.WriteLine(infoLogProg);
}

Plotting 10 data sets on OpenTK

I have 10 sets of data saved in arrays. For example A1 = [1,2 ,3], A2 = [5,6,7]...etc. I am using OpenTk GameWindow to plot it as I have a big data set (10million+ per data set). For now I know how to plot 1 data set as following:
public GraphWIndow (Data Dataset[0]) : base(800, 600, default, "Data Analyzer", GameWindowFlags.Default, default,4,0, default)
{
test= EditVertices(Dataset[0]);
}
private float[] EditVertices(List<float> list)
{
float[] list2d = new float[2*list.Count()];
for(int i =0; i<list.Count(); i++)
{
list2d[2*i]=-1+ i*(((float)2/(float)list.Count()));
list2d[2*i +1] =((float)(list[i]-list.Min())/(float)(list.Max()-list.Min()));
}
return list2d;
}
protected override void OnLoad(EventArgs e)
{
CursorVisible = true;
shader = new Shader("shader.vert", "shader.frag");
shader.Use();
VertexArrayObject = GL.GenVertexArray();
VertexBufferObject = GL.GenBuffer();
GL.BindVertexArray(VertexArrayObject);
GL.BindBuffer(BufferTarget.ArrayBuffer, VertexBufferObject);
GL.BufferData(BufferTarget.ArrayBuffer, test.Count()*sizeof(float), test, BufferUsageHint.StaticDraw);
var vertexLocation = shader.GetAttribLocation("aPosition");
GL.EnableVertexAttribArray(vertexLocation);
GL.VertexAttribPointer( vertexLocation, 2, VertexAttribPointerType.Float, false, 2*sizeof(float), 0);
GL.EnableVertexAttribArray(0);
GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
GL.BindVertexArray(0);
base.OnLoad(e);
}
protected override void OnRenderFrame(FrameEventArgs e)
{
GL.ClearColor(Color4.DarkBlue);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
GL.BindVertexArray(VertexArrayObject);
GL.DrawArrays(PrimitiveType.LineStrip, 0, test.Count());
GL.PointSize(10);
SwapBuffers();
base.OnRenderFrame(e);
}
shader.vert
#version 400 core
in vec2 aPosition;
void main()
{
gl_Position = vec4(aPosition, 0.0f, 1.0f);
}
I kind of know how I will draw the 10 sets. I will just add an offset to every dataset and combine them in one array(test array) and then pass it to shader.vert. The thing is I don't know what to do with the x coordinates. I don't want to keep adding it to test array, which is shown in EditVertices function, as it will significantly increase its size especially that I am dealing with a huge size of data. Is there a way where I can pass x coordinates separately somehow but at the same time relate it to all data sets? as in they all share the same x coordinates but different Y values. I hope my question is clear.
Is there a way where I can pass x coordinates separately somehow but at the same time relate it to all data sets?
Use 2 attributes of type float, one for the x coordinate and another one for the y coordinate:
#version 400 core
in float aPositionX;
in float aPositionY;
void main()
{
gl_Position = vec4(aPositionX, aPositionY, 0.0, 1.0);
}
var vertexLocationX = shader.GetAttribLocation("aPositionX");
var vertexLocationY = shader.GetAttribLocation("aPositionY");
Now you can use the same X coordinate for all data sets. Create 1 array of vertex attribute data for the X coordinates:
float[] vertexX = new float[2*list.Count()];
for(int i =0; i < list.Count(); i++)
vertexX[i] = (float)(-1 + i * 2/list.Count());
The array for the Y coordinates has to be created separately for each data set
float[] vertexY = new float[2*list.Count()];
for(int i =0; i < list.Count(); i++)
vertexY[i] = (float)((vertexY[i]-list.Min() / (list.Max()-list.Min());
You have to create 2 separate Vertex Buffer Objects:
VertexBufferObjectX = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, VertexBufferObjectX);
GL.BufferData(BufferTarget.ArrayBuffer, vertexX.Count()*sizeof(float), vertexX, BufferUsageHint.StaticDraw);
VertexBufferObjectY = GL.GenBuffer();
GL.BindBuffer(BufferTarget.ArrayBuffer, VertexBufferObjectY);
GL.BufferData(BufferTarget.ArrayBuffer, vertexY.Count()*sizeof(float), vertexY, BufferUsageHint.StaticDraw);
And you have to adapt the specification of the Vertex Array Object:
VertexArrayObject = GL.GenVertexArray();
GL.BindVertexArray(VertexArrayObject);
GL.BindBuffer(BufferTarget.ArrayBuffer, VertexBufferObjectX);
GL.VertexAttribPointer(vertexLocationX, 1, VertexAttribPointerType.Float, false, 0, 0);
GL.BindBuffer(BufferTarget.ArrayBuffer, VertexBufferObjectY);
GL.VertexAttribPointer(vertexLocationY, 1, VertexAttribPointerType.Float, false, 0, 0);
GL.EnableVertexAttribArray(vertexLocationX);
GL.EnableVertexAttribArray(vertexLocationY);
GL.BindVertexArray(0);

HLSL modify depth in pixel shader

I need to render an image (with depth) which I get from outside. I can construct two textures and pass them into a shader with no problem (I can verify values sampled in a pixel shader being correct).
Here's how my HLSL looks like:
// image texture
Texture2D m_TextureColor : register(t0);
// depth texture with values [0..1]
Texture2D<float> m_TextureDepth : register(t1);
// sampler to forbid linear filtering since we're dealing with pixels
SamplerState m_TextureSampler { Filter = MIN_MAG_MIP_POINT; };
struct VS_IN
{
float4 position : POSITION;
float2 texcoord : TEXCOORD;
};
struct VS_OUT
{
float4 position : SV_POSITION;
float2 texcoord : TEXCOORD0;
};
struct PS_OUT
{
float4 color : COLOR0;
float depth : DEPTH0;
};
VS_OUT VS(VS_IN input)
{
VS_OUT output = (VS_OUT)0;
output.position = input.position;
output.texcoord = input.texcoord;
return output;
}
PS_OUT PS(VS_OUT input) : SV_Target
{
PS_OUT output = (PS_OUT)0;
output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
// I want to modify depth of the pixel,
// but it looks like it has no effect on depth no matter what I set here
output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
return output;
}
I construct vertex buffer from those (with PrimitiveTopology.TriangleStrip) where first argument Vector4 is position and second argument Vector2 is texture coordinate:
new[]
{
new Vertex(new Vector4(-1, -1, 0.5f, 1), new Vector2(0, 1)),
new Vertex(new Vector4(-1, 1, 0.5f, 1), new Vector2(0, 0)),
new Vertex(new Vector4(1, -1, 0.5f, 1), new Vector2(1, 1)),
new Vertex(new Vector4(1, 1, 0.5f, 1), new Vector2(1, 0)),
}
Everything works just fine: I'm seeing my image, I can sample depth from depth texture and construct something visual from it (that's how I can verify that
depth values I'm sampling are correct). However I can't figure out how to modify pixel's depth so that it would be eaten properly when the depth-test would be happening. Because at the moment it all depends on what kind of z value I set as my vertex position.
This is how I'm setting up DirectX11 (I'm using SharpDX and C#):
var swapChainDescription = new SwapChainDescription
{
BufferCount = 1,
ModeDescription = new ModeDescription(bufferSize.Width, bufferSize.Height, new Rational(60, 1), Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = HostHandle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput,
};
var swapChainFlags = DeviceCreationFlags.None | DeviceCreationFlags.BgraSupport;
SharpDX.Direct3D11.Device.CreateWithSwapChain(DriverType.Hardware, swapChainFlags, swapChainDescription, out var device, out var swapchain);
Setting back buffer and depth/stencil buffer:
// color buffer
using (var textureColor = SwapChain.GetBackBuffer<Texture2D>(0))
{
TextureColorResourceView = new RenderTargetView(Device, textureColor);
}
// depth buffer
using (var textureDepth = new Texture2D(Device, new Texture2DDescription
{
Format = Format.D32_Float,
ArraySize = 1,
MipLevels = 1,
Width = BufferSize.Width,
Height = BufferSize.Height,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Default,
BindFlags = BindFlags.DepthStencil,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None
}))
{
TextureDepthResourceView = new DepthStencilView(Device, textureDepth);
}
DeviceContext.OutputMerger.SetTargets(TextureDepthResourceView, TextureColorResourceView);
Preparing depth stencil state:
var description = DepthStencilStateDescription.Default();
description.DepthComparison = Comparison.LessEqual;
description.IsDepthEnabled = true;
description.DepthWriteMask = DepthWriteMask.All;
DepthState = new DepthStencilState(Device, description);
And using it:
DeviceContext.OutputMerger.SetDepthStencilState(DepthState);
This is how I construct my color/depth textures I'm sending to shader:
public static (ShaderResourceView resource, Texture2D texture) CreateTextureDynamic(this Device device, System.Drawing.Size size, Format format)
{
var textureDesc = new Texture2DDescription
{
MipLevels = 1,
Format = format,
Width = size.Width,
Height = size.Height,
ArraySize = 1,
BindFlags = BindFlags.ShaderResource,
Usage = ResourceUsage.Dynamic,
SampleDescription = new SampleDescription(1, 0),
CpuAccessFlags = CpuAccessFlags.Write,
};
var texture = new Texture2D(device, textureDesc);
return (new ShaderResourceView(device, texture), texture);
}
Also since I need to update them frequently:
public static void UpdateResource(this Texture2D texture, int[] buffer, System.Drawing.Size size)
{
var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
dataStream.Dispose();
texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}
public static void UpdateResource(this Texture2D texture, float[] buffer, System.Drawing.Size size)
{
var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
dataStream.Dispose();
texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}
I also googled a lot about this, found similar posts like this: https://www.gamedev.net/forums/topic/573961-how-to-set-depth-value-in-pixel-shader/ however couldn't managed solve it on my side.
Thanks in advance!
To write to the depth buffer, you need to target the SV_Depth system-value semantic. So your pixel shader output struct would look more like the following:
struct PS_OUT
{
float4 color : SV_Target;
float depth : SV_Depth;
};
And the shader would not specify SV_Target as in your example (the SV_ outputs are defined within the struct). So it would look like:
PS_OUT PS(VS_OUT input)
{
PS_OUT output = (PS_OUT)0;
output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
// Now that output.depth is defined with SV_Depth, and you have depth-write enabled,
// this should write to the depth buffer.
output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
return output;
}
Note that you may incur some performance penalties on explicitly writing to depth (specifically on AMD hardware) since that forces a bypass of their early-depth hardware optimization. All future draw calls using that depth buffer will have early-Z optimizations disabled, so it's generally a good idea to perform the depth-write operation as late as possible.

OpenTK - VertexBufferObject doesn't draw anything

I'm trying to learn how to draw with VBOs in C# OpenTK - following examples like http://www.opentk.com/node/2292 and VBOs Using Interleaved Vertices in C#.
I'm pretty sure I want the interleaved single array method like this, with a neat struct for each vertex. I got the code to compile with no errors, but it simply draws a blank brown screen, no white triangle. I'm sure I've made a stupid error, please help me learn from it!
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using OpenTK;
using OpenTK.Graphics;
using OpenTK.Graphics.OpenGL;
using OpenTK.Input;
using System.Drawing;
using System.Runtime.InteropServices;
namespace VBOTest2
{
[StructLayout(LayoutKind.Sequential)]
public struct Vertex
{
public Vector3 Position;
public byte[] Colour;
public Vertex(byte[] colour, Vector3 position)
{
Colour = colour;
Position = position;
}
public static readonly int Stride = Marshal.SizeOf(default(Vertex));
}
public class VBOTest2 : GameWindow
{
uint vbo;
public VBOTest2() :
base(1, 1, new GraphicsMode(32, 24, 8, 0), "Test")
{
Width = 1500;
Height = 800;
VSync = VSyncMode.On;
ClientSize = new Size(1500, 800);
this.Location = new System.Drawing.Point(100, 300);
GL.Viewport(0, 0, Width, Height);
}
void CreateVertexBuffer()
{
Vertex[] vertices = new Vertex[3];
vertices[0] = new Vertex(new byte[]{255,255,255,255}, new Vector3(-1f, -1f, 0f));
vertices[1] = new Vertex(new byte[] { 255, 255, 255, 255 }, new Vector3(1f, -1f, 0f));
vertices[2] = new Vertex(new byte[] { 255, 255, 255, 255 }, new Vector3(0f, 1f, 0f));
GL.GenBuffers(1, out vbo);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo);
GL.BufferData<Vertex>(BufferTarget.ArrayBuffer, (IntPtr)Vertex.Stride, vertices, BufferUsageHint.StaticDraw);
}
protected override void OnLoad(EventArgs e)
{
GL.ClearColor(Color.Brown);
CreateVertexBuffer();
}
protected override void OnRenderFrame(FrameEventArgs e)
{
base.OnRenderFrame(e);
GL.Clear(ClearBufferMask.ColorBufferBit);
GL.EnableClientState(ArrayCap.VertexArray);
GL.EnableClientState(ArrayCap.ColorArray);
GL.BindBuffer(BufferTarget.ArrayBuffer, vbo);
GL.VertexPointer(3, VertexPointerType.Float, Vertex.Stride, (IntPtr)(0));
GL.ColorPointer(4, ColorPointerType.UnsignedByte, Vertex.Stride, (IntPtr)(3 * sizeof(float)));
GL.DrawArrays(PrimitiveType.Triangles, 0, 3);
//release buffer
GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
GL.DisableClientState(ArrayCap.VertexArray);
GL.DisableClientState(ArrayCap.ColorArray);
SwapBuffers();
}
}
}

Categories

Resources