Weird cube shape - C#

I'm trying to create a cube using both indices and vertices. I'm able to draw it, but it looks kinda weird.
Here's my code. The problem has something to do with either the vertices or the indices, but I'm not sure which:
public void Draw(BasicEffect effect)
{
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(cubeVertexBuffer);
device.Indices = iBuffer;
device.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, 8, 0, 12);
}
}
private void SetUpIndices()
{
indices = new short[36];
indices[0] = 0;
indices[1] = 3;
indices[2] = 2;
indices[3] = 2;
indices[4] = 1;
indices[5] = 0;
indices[6] = 4;
indices[7] = 7;
indices[8] = 6;
indices[9] = 6;
indices[10] = 5;
indices[11] = 4;
indices[12] = 1;
indices[13] = 2;
indices[14] = 6;
indices[15] = 6;
indices[16] = 5;
indices[17] = 1;
indices[18] = 4;
indices[19] = 7;
indices[20] = 3;
indices[21] = 3;
indices[22] = 0;
indices[23] = 4;
indices[24] = 4;
indices[25] = 0;
indices[26] = 1;
indices[27] = 1;
indices[28] = 5;
indices[29] = 4;
indices[30] = 3;
indices[31] = 7;
indices[32] = 6;
indices[33] = 6;
indices[34] = 2;
indices[35] = 3;
iBuffer = new IndexBuffer(device, typeof(short), 36, BufferUsage.WriteOnly);
iBuffer.SetData(indices);
}
private void SetUpVertices()
{
vertices = new VertexPositionColor[8];
vertices[0] = new VertexPositionColor(new Vector3(0, 0, 0), color);
vertices[1] = new VertexPositionColor(new Vector3(0, 1, 0), color);
vertices[2] = new VertexPositionColor(new Vector3(1, 1, 0), color);
vertices[3] = new VertexPositionColor(new Vector3(1, 0, 0), color);
vertices[4] = new VertexPositionColor(new Vector3(0, 0, -1), color);
vertices[5] = new VertexPositionColor(new Vector3(0, 1, -1), color);
vertices[6] = new VertexPositionColor(new Vector3(1, 1, -1), color);
vertices[7] = new VertexPositionColor(new Vector3(1, 0, -1), color);
cubeVertexBuffer = new VertexBuffer(device, typeof(VertexPositionColor), 8, BufferUsage.WriteOnly);
cubeVertexBuffer.SetData<VertexPositionColor>(vertices);
}

I could make a wild guess and say it's because of the messed-up order of vertices in your indices (I'll call them triangles from here on).
Usually in 3D engines you have to set up the order of vertices in each triangle so that they are all wound the same way - i.e. clockwise or counter-clockwise - when you look at them from outside the shape they form.
Speaking mathematically, the normals of all the triangles in your shape should point either inward or outward consistently. The direction of the normal tells the 3D engine which triangles to draw - the engine can do half the work if it draws triangles on one side only, since in 99.99% of cases the inside of a solid object is never seen by the user.
In your case, look at the triangles 032 and 476 - they should be either 032/467 or 023/476. And so on for the remaining faces.
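For illustration, here is a sketch of the front and back faces from your SetUpIndices rewound so that both normals point outward (the remaining four faces need the same treatment); the vertex numbering is the one from your SetUpVertices:
//Front face (z = 0): wound so the normal points towards +z, out of the cube
indices[0] = 0; indices[1] = 3; indices[2] = 2;
indices[3] = 2; indices[4] = 1; indices[5] = 0;
//Back face (z = -1): winding reversed relative to the front so the normal points towards -z
indices[6] = 4; indices[7] = 6; indices[8] = 7;
indices[9] = 4; indices[10] = 5; indices[11] = 6;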

Related

Cube created by vertices and triangles as mesh renders different than normal cube?

For something I want to do with these cubes I need to create the mesh myself, so that I know which vertices are which and can manipulate the geometry.
But why is the shading of my scripted cube (on the left) so different from the Unity one?
Both use the same material.
I guess it has something to do with the Phong shading?
This is the code I use to create my own cube:
private void CreateCube()
{
GameObject cubeObject;
cubeObject = Instantiate(prefab, new Vector3(0, 0, 0), Quaternion.identity);
Vector3[] vertices;
int[] triangles;
vertices = new Vector3[8];
// BOTTOM VERTICES
vertices[0] = new Vector3(0, 0, 0); //BBL
vertices[1] = new Vector3(1, 0, 0); //BBR
vertices[2] = new Vector3(0, 0, 1); //BTL
vertices[3] = new Vector3(1, 0, 1); //BTR
// TOP VERTICES
vertices[4] = new Vector3(0, 1, 0); //TBL
vertices[5] = new Vector3(1, 1, 0); //TBR
vertices[6] = new Vector3(0, 1, 1); //TTL
vertices[7] = new Vector3(1, 1, 1); //TTR
triangles = new int[36];
// BOTTOM TRIANGLES
triangles[0] = 1; //BBR
triangles[1] = 2; //BTL
triangles[2] = 0; //BBL
triangles[3] = 3; //BTR
triangles[4] = 2; //BTL
triangles[5] = 1; //BBR
// TOP TRIANGLES
triangles[6] = 4; //TBL
triangles[7] = 6; //TTL
triangles[8] = 7; //TTR
triangles[9] = 5; //TBR
triangles[10] = 4; //TBL
triangles[11] = 7; //TTR
// BACK TRIANGLES
triangles[12] = 5; //TBR
triangles[13] = 0; //BBL
triangles[14] = 4; //TBL
triangles[15] = 1; //BBR
triangles[16] = 0; //BBL
triangles[17] = 5; //TBR
// FRONT TRIANGLES
triangles[18] = 6; //TTL
triangles[19] = 2; //BTL
triangles[20] = 3; //BTR
triangles[21] = 6; //TTL
triangles[22] = 3; //BTR
triangles[23] = 7; //TTR
// LEFT TRIANGLES
triangles[24] = 6; //TTL
triangles[25] = 4; //TBL
triangles[26] = 2; //BTL
triangles[27] = 0; //BBL
triangles[28] = 2; //BTL
triangles[29] = 4; //TBL
// RIGHT TRIANGLES
triangles[30] = 1; //BBR
triangles[31] = 5; //TBR
triangles[32] = 7; //TTR
triangles[33] = 7; //TTR
triangles[34] = 3; //BTR
triangles[35] = 1; //BBR
Mesh cubeMesh = new Mesh();
cubeObject.GetComponent<MeshFilter>().sharedMesh = cubeMesh;
cubeMesh.Clear();
cubeMesh.vertices = vertices;
cubeMesh.triangles = triangles;
cubeMesh.RecalculateNormals();
}
The prefab is just a GameObject with an empty Mesh Filter and a Mesh Renderer with a material attached (Default-Material).

Vectors of a combined mesh have x,y,z values equal to zero

I am trying to combine two meshes. My code is shown below. The CombineInstance array 'combine' has the correct values in its vertices. However, the combined mesh 'RetMesh' has all of its vertices equal to (0,0,0), and I don't understand what I am doing wrong.
I've read the tutorials I could find on the internet and still can't see my mistake.
void Start () {
Mesh MM2 = testmeshfunc();
GameObject GO;
GO = new GameObject("MeshFromMPC", typeof(MeshFilter), typeof(MeshRenderer));
GO.gameObject.transform.localScale = new Vector3(1, 1, 1);
GO.gameObject.GetComponent<MeshFilter>().mesh = MM2;
}
Mesh testmeshfunc()
{
Mesh RetMesh = new Mesh();
Vector3[] V3A = new Vector3[4];
Vector3[] V3B = new Vector3[4];
int[] TrPtsA = new int[6];
int[] TrPtsB = new int[6];
Mesh mesha = new Mesh();
Mesh meshb = new Mesh();
var combine = new CombineInstance[2];
V3A[0] = new Vector3(0, 0, 0);
V3A[1] = new Vector3(0, 1, 0);
V3A[2] = new Vector3(1, 1, 0);
V3A[3] = new Vector3(1, 0, 0);
TrPtsA[0] = 0;
TrPtsA[1] = 1;
TrPtsA[2] = 2;
TrPtsA[0] = 2;
TrPtsA[1] = 1;
TrPtsA[2] = 3;
mesha.vertices = V3A;
mesha.triangles = TrPtsA;
V3B[0] = new Vector3(0, 0, 0);
V3B[1] = new Vector3(0, 0, 1);
V3B[2] = new Vector3(1, 0, 1);
V3B[3] = new Vector3(1, 0, 0);
TrPtsB[0] = 0;
TrPtsB[1] = 1;
TrPtsB[2] = 2;
TrPtsB[0] = 2;
TrPtsB[1] = 1;
TrPtsB[2] = 3;
meshb.vertices = V3B;
meshb.triangles = TrPtsB;
combine[0].mesh = mesha;
combine[1].mesh = meshb;
RetMesh.RecalculateBounds();
RetMesh.RecalculateNormals();
RetMesh.RecalculateTangents();
RetMesh.CombineMeshes(combine);
return RetMesh;
}
I am using Unity 2017.
First of all, the triangle index buffer is set up wrong. Since you are drawing a plane, the front face should be wound clockwise. This is yours:
V3A[0] = new Vector3(0, 0, 0);
V3A[1] = new Vector3(0, 1, 0);
V3A[2] = new Vector3(1, 1, 0);
V3A[3] = new Vector3(1, 0, 0);
TrPtsA[0] = 0;
TrPtsA[1] = 1;
TrPtsA[2] = 2;
TrPtsA[0] = 2;
TrPtsA[1] = 1;
TrPtsA[2] = 3;
mesha.vertices = V3A;
mesha.triangles = TrPtsA;
V3B[0] = new Vector3(0, 0, 0);
V3B[1] = new Vector3(0, 0, 1);
V3B[2] = new Vector3(1, 0, 1);
V3B[3] = new Vector3(1, 0, 0);
TrPtsB[0] = 0;
TrPtsB[1] = 1;
TrPtsB[2] = 2;
TrPtsB[0] = 2;
TrPtsB[1] = 1;
TrPtsB[2] = 3;
The code should look like this instead; I changed the winding of the index buffer to clockwise, and note that the second triangle has to go into indices 3-5 - assigning TrPtsA[0]..TrPtsA[2] twice simply overwrites the first triangle. If you don't wind the faces clockwise, they won't appear on screen because of back-face culling.
V3A[0] = new Vector3(0, 0, 0);
V3A[1] = new Vector3(0, 1, 0);
V3A[2] = new Vector3(1, 1, 0);
V3A[3] = new Vector3(1, 0, 0);
TrPtsA[0] = 0;
TrPtsA[1] = 1;
TrPtsA[2] = 2;
TrPtsA[3] = 2;
TrPtsA[4] = 3;
TrPtsA[5] = 1;
mesha.vertices = V3A;
mesha.triangles = TrPtsA;
V3B[0] = new Vector3(0, 0, 0);
V3B[1] = new Vector3(0, 0, 1);
V3B[2] = new Vector3(1, 0, 1);
V3B[3] = new Vector3(1, 0, 0);
TrPtsB[0] = 0;
TrPtsB[1] = 1;
TrPtsB[2] = 2;
TrPtsB[3] = 2;
TrPtsB[4] = 3;
TrPtsB[5] = 1;
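Beyond the winding, the all-zero vertices are most likely caused by the unset CombineInstance.transform: CombineInstance is a struct, so each transform defaults to the zero matrix, and CombineMeshes (with its default useMatrices = true) then multiplies every vertex by zero. A sketch of the missing setup, which also moves the Recalculate* calls to after the combine so they operate on the finished geometry:
combine[0].mesh = mesha;
combine[0].transform = Matrix4x4.identity; //the default Matrix4x4 is all zeros, collapsing every vertex to (0,0,0)
combine[1].mesh = meshb;
combine[1].transform = Matrix4x4.identity;
RetMesh.CombineMeshes(combine);
RetMesh.RecalculateBounds();
RetMesh.RecalculateNormals();
RetMesh.RecalculateTangents();
return RetMesh;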

OpenGL - Geometry shader shadow mapping pass performing terribly

I'm calculating shadows for a number of point lights using Variance Shadow Mapping. All 6 faces of the cubemap are rendered in a single pass with a geometry shader, this repeats for each light source, and the whole lot is stored in a cubemap array. This all runs fine, 16 lights at 60fps no problem.
Chasing further optimisation, I tried to move the entire process into a single geometry shader pass, only to hit the 113-vertex output limit of my hardware. Out of curiosity I decided to render only 4 lights (72 emitted vertices), and to my surprise the frame rate dropped to 24fps.
So why do 16 lights across 16 render passes perform significantly better than 4 lights in a single pass?
The code is essentially identical.
#version 400 core
layout(triangles) in;
layout (triangle_strip, max_vertices=18) out;
uniform int lightID;
out vec4 frag_position;
uniform mat4 projectionMatrix;
uniform mat4 shadowTransforms[6];
void main()
{
for(int face = 0; face < 6; face++)
{
gl_Layer = face + (lightID * 6);
for(int i=0; i<3; i++)
{
frag_position = shadowTransforms[face] * gl_in[i].gl_Position;
gl_Position = projectionMatrix * shadowTransforms[face] * gl_in[i].gl_Position;
EmitVertex();
}
EndPrimitive();
}
}
versus
#version 400 core
layout(triangles) in;
layout (triangle_strip, max_vertices=72) out;
out vec4 frag_position;
uniform mat4 projectionMatrix;
uniform mat4 shadowTransforms[24];
void main()
{
for (int lightSource = 0; lightSource < 4; lightSource++)
{
for(int face = 0; face < 6; face++)
{
gl_Layer = face + (lightSource * 6);
for(int i=0; i<3; i++)
{
frag_position = shadowTransforms[gl_Layer] * gl_in[i].gl_Position;
gl_Position = projectionMatrix * shadowTransforms[gl_Layer] * gl_in[i].gl_Position;
EmitVertex();
}
EndPrimitive();
}
}
}
And
public void ShadowMapsPass(Shader shader)
{
// Setup
GL.UseProgram(shader.ID);
GL.Viewport(0, 0, CubeMapArray.size, CubeMapArray.size);
// Clear the cubemap array data from the previous frame
GL.BindFramebuffer(FramebufferTarget.Framebuffer, shadowMapArray.FBO_handle);
GL.ClearColor(Color.White);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
for (int j = 0; j < lights.Count; j++)
{
// Create the light's view matrices
List<Matrix4> shadowTransforms = new List<Matrix4>();
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(1, 0, 0), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(-1, 0, 0), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 1, 0), new Vector3(0, 0, 1)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, -1, 0), new Vector3(0, 0, -1)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 0, 1), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 0, -1), new Vector3(0, -1, 0)));
// Send uniforms to the shader
for (int i = 0; i < 6; i++)
{
Matrix4 shadowTransform = shadowTransforms[i];
GL.UniformMatrix4(shader.getUniformID("shadowTransforms[" + i + "]"), false, ref shadowTransform);
}
GL.Uniform1(shader.getUniformID("lightID"), j);
DrawScene(shader, false);
}
}
versus
public void ShadowMapsPass(Shader shader)
{
// Setup
GL.UseProgram(shader.ID);
GL.Viewport(0, 0, CubeMapArray.size, CubeMapArray.size);
// Clear the cubemap array data from the previous frame
GL.BindFramebuffer(FramebufferTarget.Framebuffer, shadowMapArray.FBO_handle);
GL.ClearColor(Color.White);
GL.Clear(ClearBufferMask.ColorBufferBit | ClearBufferMask.DepthBufferBit);
// Create the light's view matrices
List<Matrix4> shadowTransforms = new List<Matrix4>();
for (int j = 0; j < lights.Count; j++)
{
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(1, 0, 0), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(-1, 0, 0), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 1, 0), new Vector3(0, 0, 1)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, -1, 0), new Vector3(0, 0, -1)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 0, 1), new Vector3(0, -1, 0)));
shadowTransforms.Add(Matrix4.LookAt(lights[j].position, lights[j].position + new Vector3(0, 0, -1), new Vector3(0, -1, 0)));
}
// Send uniforms to the shader
for (int i = 0; i < shadowTransforms.Count; i++)
{
Matrix4 shadowTransform = shadowTransforms[i];
GL.UniformMatrix4(shader.getUniformID("shadowTransforms[" + i + "]"), false, ref shadowTransform);
}
DrawScene(shader, false);
}
I'd guess there are fewer opportunities for parallel execution in the second form. The first version of the geometry shader emits 18 vertices and must be executed 4 times, but those 4 executions can run in parallel. The second version emits all 72 vertices one after the other, serially, in a single invocation.
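As a hedged middle ground, GL 4.0 geometry shaders also support instancing via layout(invocations = N), which lets the hardware run the per-face iterations as parallel invocations instead of a serial loop; a sketch of the single-light shader reworked this way, using the same uniforms as above:
#version 400 core
layout(triangles, invocations = 6) in; //one invocation per cubemap face
layout(triangle_strip, max_vertices = 3) out;
uniform int lightID;
uniform mat4 projectionMatrix;
uniform mat4 shadowTransforms[6];
out vec4 frag_position;
void main()
{
    for (int i = 0; i < 3; i++)
    {
        //write gl_Layer per vertex: outputs are undefined after EmitVertex()
        gl_Layer = gl_InvocationID + (lightID * 6);
        frag_position = shadowTransforms[gl_InvocationID] * gl_in[i].gl_Position;
        gl_Position = projectionMatrix * frag_position;
        EmitVertex();
    }
    EndPrimitive();
}
Since the spec guarantees at least 32 geometry shader invocations, the 4-light case (24 faces) should also fit in one instanced pass with invocations = 24 and gl_Layer = gl_InvocationID.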

Display 3d model as 3D mesh object using wpf 3D graphics

I am working on the C# .NET platform with WPF 3D graphics.
Here is the flow of the code:
1) I take the depth data from the Kinect and pass it to a function which calculates 3D points:
private void display3DView()
{
while(loop_run)
{
using ( DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
{
if (depthFrame == null) continue;
Point3DCollection PointCloud ;
depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);
float[,] ImageArray = new float[320, 240];
short [ ,] depth = new short[240,320];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j <320; j++)
{
depth[i,j]= depthImagePixels[j+i *320].Depth;
ImageArray[i,j] =(float)depth[i,j]/(float)1000;
}
}
PointCloud =Calculate_PointCloud(ImageArray);
viewModel(PointCloud);
}
}
}
2) I calculate the 3D points from the camera parameters and the depth data of the Kinect camera:
private Point3DCollection Calculate_PointCloud(float[,] ImageArray)
{
Point3DCollection PointCloud = new Point3DCollection();
float x_coodinate;
float y_coordinate;
float z_coordinate;
float thresholdvalue = 2.0f;
for (int i = 0; i < 239; ++i)
{
for (int j = 0; j < 319; ++j)
{
if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue && Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue && Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
{
z_coordinate = ImageArray[i, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point1 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point1);
z_coordinate = ImageArray[i, j + 1];
x_coodinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point2 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point2);
z_coordinate = ImageArray[i + 1, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point3 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point3);
}
}
}
return PointCloud;
}
3) Here I convert the points into a set of triangles, with normal information for each 3D point, give those triangles to a 3D mesh object, and render the mesh using a Viewport3D control:
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
Camera1.NearPlaneDistance = 100;
Camera1.FieldOfView = 10;
Camera1.Position = new Point3D(0, 0, 1);
Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.UpDirection = new Vector3D(0, 1, 0);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for(int i =0; i<points.Count; i+=3)
{
Triatomesh.addTriangleToMesh(points[i],points[i + 1], points[i + 2], tmesh, combinedvertices);
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
modelGroup.Children.Add(msheet);
modelGroup.Children.Add(DirLight1);
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
4) Here is the function which takes three 3D points and adds them to the 3D mesh object as a triangle, calculating a normal for each point:
public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
MeshGeometry3D mesh, bool combine_vertices)
{
Vector3D normal = CalculateNormal(p0, p1, p2);
if (combine_vertices)
{
addPointCombined(p0, mesh, normal);
addPointCombined(p1, mesh, normal);
addPointCombined(p2, mesh, normal);
}
else
{
mesh.Positions.Add(p0);
mesh.Positions.Add(p1);
mesh.Positions.Add(p2);
//mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
}
}
public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2) //static
{
Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
return Vector3D.CrossProduct(v0, v1);
}
public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
{
bool found = false;
int i = 0;
foreach (Point3D p in mesh.Positions)
{
if (p.Equals(point))
{
found = true;
mesh.TriangleIndices.Add(i);
mesh.Positions.Add(point);
mesh.Normals.Add(normal);
break;
}
i++;
}
if (!found)
{
mesh.Positions.Add(point);
mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
}
}
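One detail worth flagging: CalculateNormal returns the raw cross product, whose length varies with triangle size. I am not certain WPF renormalizes for you, so if the shading looks uneven, normalizing is a cheap safeguard - a sketch:
public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2)
{
    Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
    Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
    Vector3D n = Vector3D.CrossProduct(v0, v1);
    //skip degenerate (zero-area) triangles to avoid normalizing a zero vector
    if (n.Length > 0) n.Normalize();
    return n;
}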
5) Here is my XAML code
<Window x:Class="PointCloud3DView.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
<Grid Height="1130" Width="1626">
<Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
Width="967" Background="Black" />
</Grid>
</Window>
The problem is that I am not able to get the 3D model displayed on the WPF screen. Could anyone please go through the whole code, help me understand where I went wrong, and suggest corrections?
Thanks in advance
I have been experimenting with WPF 3D for several weeks now and have learned some tough lessons :)
I do not have time to check and try the whole code right now, as I am at work. However, I would try three things:
I am not sure about the direction of your camera. It is at (0,0,1), looking along the vector (-1,-1,-1), which means it is focused on the point (-1,-1,0). And that is kinda strange... Try positioning the camera further away (depending on the scale of your model), like (0,10,0) or even further, and focus it on (0,0,0) or wherever the central point of your model is:
Camera1.Position = new Point3D(0, 10, 0);
Camera1.LookDirection = new Point3D(0,0,0) - Camera1.Position;
Also remove the directional light (as it uses normals, and if they are wrong nothing will be shown) and try an ambient light instead. Note that your directional light's direction (1,1,1) is exactly opposite to your looking direction (-1,-1,-1).
Try swapping the order of points in the triangle indices (WPF renders only one side of the mesh, so the model might be there but inside out) - instead of 0,1,2 try 0,2,1.
If nothing helps I will try your code once I get home.
/edited later/
I tried your code on a simple triangle, rewrote it according to my tips, and it worked. There are some comments and two tips on how to clean up your code a bit :)
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
//Camera1.NearPlaneDistance = 100; //close objects will not be displayed with this option
Camera1.FieldOfView = 10;
//Camera1.Position = new Point3D(0, 0, 1);
//Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.Position = new Point3D(0, 0, 10);
Camera1.LookDirection = new Point3D(0, 0, 0) - Camera1.Position; //focus camera on real center of your model (0,0,0) in this case
Camera1.UpDirection = new Vector3D(0, 1, 0);
//you can use constructor to create Camera instead of assigning its properties like:
//PerspectiveCamera Camera1 = new PerspectiveCamera(new Point3D(0,0,10), new Vector3D(0,0,-1), new Vector3D(0,1,0), 10);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for (int i = 0; i < points.Count; i += 3)
{
Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);
//I swapped the order of the vertices; you may try both options with your model
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
//you can use constructor to create GeometryModel3D instead of assigning its properties like:
//msheet = new GeometryModel3D(tmesh, new DiffuseMaterial(new SolidColorBrush(Colors.White)));
modelGroup.Children.Add(msheet);
//use AmbientLight instead of a directional light
modelGroup.Children.Add(new AmbientLight(Colors.White));
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
And the Point3DCollection I used as a parameter (instead of the Kinect input):
Point3DCollection points = new Point3DCollection();
points.Add(new Point3D(0.5, 0, 0.5));
points.Add(new Point3D(0.5, -0.5, -0.5));
points.Add(new Point3D(-0.5, -0.1, -0.5));
viewModel(points);

Vertex Cube Center of Rotation

How do I find the center of rotation for a cube made using vertex buffers?
The cube is currently rotating around a vertex, and I've been stuck all week trying to figure out how to adjust it to rotate around its center.
Here is my code for rendering a cube:
class RenderCube
{
KeyboardState currentKeys;
GamePadState currentGamepad;
//Transform later to have static v and i buffers.
private VertexBuffer vBuffer;
public VertexBuffer VBuffer
{ get { return vBuffer; } set { vBuffer = value; } }
private IndexBuffer iBuffer;
public IndexBuffer IBuffer
{ get { return iBuffer; } set { iBuffer = value; } }
private BasicEffect bEffect;
public BasicEffect BEffect
{ get { return bEffect; } set { bEffect = value; } }
private Matrix world;
public Matrix World
{ get { return world; } set { world = value; } }
private Matrix view;
public Matrix View
{ get { return view; } set { view = value; } }
private Matrix projection;
private Matrix Projection
{ get { return projection; } set { projection = value; } }
private Color color;
public Color Color
{ get { return color; } set { color = value; } }
private Vector3 position;
public Vector3 Position
{ get { return position; } set { position = value; } }
//Need to change this eventually to use textures.
private VertexPositionColor[] vertices;
short[] indices;
private GraphicsDevice device;
//constructors!
public RenderCube(Color col, Vector3 pos, GraphicsDevice dev)
{
device = dev;
this.color = col;
this.position = pos;
SetUpVertices();
SetUpIndices();
world = Matrix.CreateTranslation(position);
//world = Matrix.CreateTranslation(0, 0, 0);
bEffect = new BasicEffect(device);
bEffect.World = world;
bEffect.VertexColorEnabled = true;
//bEffect.EnableDefaultLighting();
}
public void Render(Camera cam)
{
bEffect.View = cam.view;
bEffect.Projection = cam.projection;
bEffect.World *= cam.rotX;
bEffect.World *= cam.rotY;
bEffect.World *= cam.rotZ;
var rotationCenter = new Vector3(0.5f, 0.5f, 0.5f);
device.SetVertexBuffer(vBuffer);
device.Indices = IBuffer;
foreach (EffectPass pass in bEffect.CurrentTechnique.Passes)
{
pass.Apply();
device.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, 8, 0, 12);
}
}
/// <summary>
/// Sets up the vertices for a cube using 8 unique vertices.
/// Build order is front face then back face; within each face:
/// lower left, upper left, upper right, lower right.
/// </summary>
private void SetUpVertices()
{
vertices = new VertexPositionColor[8];
//front left bottom corner
vertices[0] = new VertexPositionColor(new Vector3(0, 0, 0), color);
//front left upper corner
vertices[1] = new VertexPositionColor(new Vector3(0, 100, 0), color);
//front right upper corner
vertices[2] = new VertexPositionColor(new Vector3(100, 100, 0), color);
//front lower right corner
vertices[3] = new VertexPositionColor(new Vector3(100, 0, 0), color);
//back left lower corner
vertices[4] = new VertexPositionColor(new Vector3(0, 0, -100), color);
//back left upper corner
vertices[5] = new VertexPositionColor(new Vector3(0, 100, -100), color);
//back right upper corner
vertices[6] = new VertexPositionColor(new Vector3(100, 100, -100), color);
//back right lower corner
vertices[7] = new VertexPositionColor(new Vector3(100, 0, -100), color);
vBuffer = new VertexBuffer(device, typeof(VertexPositionColor), 8, BufferUsage.WriteOnly);
vBuffer.SetData<VertexPositionColor>(vertices);
}
/// <summary>
/// Sets up the indices for a cube. Has 36 positions that match up
/// to the element numbers of the vertices created earlier.
/// Valid range is 0-7 for each value.
/// </summary>
private void SetUpIndices()
{
indices = new short[36];
//Front face
//bottom right triangle
indices[0] = 0;
indices[1] = 3;
indices[2] = 2;
//top left triangle
indices[3] = 2;
indices[4] = 1;
indices[5] = 0;
//back face
//bottom right triangle
indices[6] = 4;
indices[7] = 7;
indices[8] = 6;
//top left triangle
indices[9] = 6;
indices[10] = 5;
indices[11] = 4;
//Top face
//bottom right triangle
indices[12] = 1;
indices[13] = 2;
indices[14] = 6;
//top left triangle
indices[15] = 6;
indices[16] = 5;
indices[17] = 1;
//bottom face
//bottom right triangle
indices[18] = 4;
indices[19] = 7;
indices[20] = 3;
//top left triangle
indices[21] = 3;
indices[22] = 0;
indices[23] = 4;
//left face
//bottom right triangle
indices[24] = 4;
indices[25] = 0;
indices[26] = 1;
//top left triangle
indices[27] = 1;
indices[28] = 5;
indices[29] = 4;
//right face
//bottom right triangle
indices[30] = 3;
indices[31] = 7;
indices[32] = 6;
//top left triangle
indices[33] = 6;
indices[34] = 2;
indices[35] = 3;
iBuffer = new IndexBuffer(device, IndexElementSize.SixteenBits, indices.Length, BufferUsage.WriteOnly); //the third argument is the index count, not a size in bytes
iBuffer.SetData(indices);
}
}
The basic idea is to introduce a translation matrix that pushes the cube's center to the origin, perform the rotation, and then undo the translation:
public void Render(Camera cam)
{
//...
//push the cube to the origin
bEffect.World *= Matrix.CreateTranslation(-50, -50, 50);
//perform the rotation
bEffect.World *= cam.rotX;
bEffect.World *= cam.rotY;
bEffect.World *= cam.rotZ;
//undo the translation
bEffect.World *= Matrix.CreateTranslation(50, 50, -50);
//...
}
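Equivalently, you can rebuild the world matrix from scratch each frame instead of accumulating multiplications into bEffect.World, which avoids cumulative floating-point drift; a sketch, assuming the camera's rotation matrices and the 100-unit cube from the question:
//rotate about the cube's local center, then place it in the world
Matrix rotation = cam.rotX * cam.rotY * cam.rotZ;
Vector3 center = new Vector3(50, 50, -50); //center of the 100-unit cube
bEffect.World = Matrix.CreateTranslation(-center)
              * rotation
              * Matrix.CreateTranslation(center)
              * Matrix.CreateTranslation(position);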
