Display 3D model as a 3D mesh object using WPF 3D graphics - C#

I am working on the C# .NET platform with WPF 3D graphics.
Here is the flow of the code:
1) I take the depth data from the Kinect and pass it to a function that calculates 3D points.
private void display3DView()
{
while(loop_run)
{
using ( DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
{
if (depthFrame == null) continue;
Point3DCollection PointCloud ;
depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);
float[,] ImageArray = new float[240, 320]; // 240 rows x 320 columns, matching the loops below
short[,] depth = new short[240, 320];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j <320; j++)
{
depth[i,j]= depthImagePixels[j+i *320].Depth;
ImageArray[i,j] =(float)depth[i,j]/(float)1000;
}
}
PointCloud =Calculate_PointCloud(ImageArray);
viewModel(PointCloud);
}
}
}
2) I calculate the 3D points from the camera parameters and the depth data of the Kinect camera:
private Point3DCollection Calculate_PointCloud(float[,] ImageArray)
{
Point3DCollection PointCloud = new Point3DCollection();
float x_coodinate;
float y_coordinate;
float z_coordinate;
float thresholdvalue = 2.0f;
for (int i = 0; i < 239; ++i)
{
for (int j = 0; j < 319; ++j)
{
if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue && Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue && Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
{
z_coordinate = ImageArray[i, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point1 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point1);
z_coordinate = ImageArray[i, j + 1];
x_coodinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point2 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point2);
z_coordinate = ImageArray[i + 1, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point3 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point3);
}
}
}
return PointCloud;
}
3) Here I convert the points into a set of triangles, compute a normal for each triangle, add the triangles to a 3D mesh object, and render that mesh using a Viewport3D control:
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
Camera1.NearPlaneDistance = 100;
Camera1.FieldOfView = 10;
Camera1.Position = new Point3D(0, 0, 1);
Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.UpDirection = new Vector3D(0, 1, 0);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for(int i =0; i<points.Count; i+=3)
{
Triatomesh.addTriangleToMesh(points[i],points[i + 1], points[i + 2], tmesh, combinedvertices);
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
modelGroup.Children.Add(msheet);
modelGroup.Children.Add(DirLight1);
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
4) Here is the function that takes three 3D points, calculates the triangle's normal, and adds the triangle to the 3D mesh object:
public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
MeshGeometry3D mesh, bool combine_vertices)
{
Vector3D normal = CalculateNormal(p0, p1, p2);
if (combine_vertices)
{
addPointCombined(p0, mesh, normal);
addPointCombined(p1, mesh, normal);
addPointCombined(p2, mesh, normal);
}
else
{
mesh.Positions.Add(p0);
mesh.Positions.Add(p1);
mesh.Positions.Add(p2);
//mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
}
}
public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2) //static
{
Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
return Vector3D.CrossProduct(v0, v1);
}
public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
{
bool found = false;
int i = 0;
foreach (Point3D p in mesh.Positions)
{
if (p.Equals(point))
{
found = true;
mesh.TriangleIndices.Add(i);
mesh.Positions.Add(point);
mesh.Normals.Add(normal);
break;
}
i++;
}
if (!found)
{
mesh.Positions.Add(point);
mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
}
}
5) Here is my XAML code
<Window x:Class="PointCloud3DView.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
<Grid Height="1130" Width="1626">
<Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
Width="967" Background="Black" />
</Grid>
</Window>
The problem is that the 3D model is not displayed on the WPF screen. Could anyone go through the whole code, help me understand where I went wrong, and suggest corrections?
Thanks in advance

I have been experimenting with WPF 3D for several weeks now and learned some tough lessons :)
I do not have time to check and try the whole code now as I am at work. However, I would try three things:
I am not sure about the direction of your camera. It is at (0, 0, 1), looking along the vector (-1, -1, -1), which means it is aimed at the point (-1, -1, 0). And that is kinda strange... Try positioning the camera further away (depending on the scale of your model), like (0, 10, 0) or even further, and aim it at (0, 0, 0) or wherever the central point of your model is:
Camera1.Position = new Point3D(0, 10, 0);
Camera1.LookDirection = new Point3D(0,0,0) - Camera1.Position;
Also, remove the directional light (it relies on normals, and if they are wrong nothing will be shown) and try an ambient light instead. Note too that your directional light's vector (1, 1, 1) is exactly opposite to your look direction (-1, -1, -1).
Try swapping the order of points in the triangle indices (WPF renders only one side of each triangle, so the model might be there but inside out) - instead of 0,1,2 try 0,2,1.
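For example, the second and third tips amount to something like this (a minimal sketch reusing the names from your viewModel method; the triangle call goes inside your existing for loop):
// tip 2: an ambient light does not rely on normals
modelGroup.Children.Add(new AmbientLight(Colors.White));
// tip 3: reverse the winding by passing each triangle's points in the opposite order
Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);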
If nothing helps I will try your code once I get home.
/edited later/
I tried your code on a simple triangle, rewrote it according to my tips, and it worked. There are some comments and two tips on how to clean up your code a bit :)
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
//Camera1.NearPlaneDistance = 100; //close object will not be displayed with this option
Camera1.FieldOfView = 10;
//Camera1.Position = new Point3D(0, 0, 1);
//Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.Position = new Point3D(0, 0, 10);
Camera1.LookDirection = new Point3D(0, 0, 0) - Camera1.Position; //focus camera on real center of your model (0,0,0) in this case
Camera1.UpDirection = new Vector3D(0, 1, 0);
//you can use constructor to create Camera instead of assigning its properties like:
//PerspectiveCamera Camera1 = new PerspectiveCamera(new Point3D(0,0,10), new Vector3D(0,0,-1), new Vector3D(0,1,0), 10);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for (int i = 0; i < points.Count; i += 3)
{
Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);
//I swapped the order of the vertices; you may try both options with your model
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
//you can use constructor to create GeometryModel3D instead of assigning its properties like:
//msheet = new GeometryModel3D(tmesh, new DiffuseMaterial(new SolidColorBrush(Colors.White)));
modelGroup.Children.Add(msheet);
//use AmbientLight instead of a directional light
modelGroup.Children.Add(new AmbientLight(Colors.White));
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
And the Point3DCollection I used as the parameter (instead of the Kinect input):
Point3DCollection points = new Point3DCollection();
points.Add(new Point3D(0.5, 0, 0.5));
points.Add(new Point3D(0.5, -0.5, -0.5));
points.Add(new Point3D(-0.5, -0.1, -0.5));
viewModel(points);

Related

Creating a 2D Circular Mesh in Unity

I currently have a "CreateMesh" script that can be added as a component to an object with a Mesh Renderer and a Mesh Filter. It creates a 2D mesh, along with a polygon collider matching the mesh's dimensions, when a "MeshType" variable is set to either "tri" or "box" (for a triangle and a rectangle mesh respectively). I now want to add the ability to create a circular mesh as well, but from some research I've realised this isn't as simple as I first thought, and I'm yet to find anything that helps.
This is the code I have for the box and triangle meshes:
public float width = 5f;
public float height = 5f;
public string meshType;
public PolygonCollider2D polyCollider;
void Start()
{
polyCollider = GetComponent<PolygonCollider2D>();
}
// Update is called once per frame
void Update () {
if (meshType == "tri")
{
TriangleMesh(width, height);
}
if (meshType == "box")
{
BoxMesh(width, height);
}
}
void TriangleMesh(float width, float height)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//Verticies
Vector3[] verticies = new Vector3[3]
{
new Vector3(0,0,0), new Vector3(width, 0, 0), new Vector3(0, height, 0)
};
//Triangles
int[] tri = new int[3];
tri[0] = 0;
tri[1] = 2;
tri[2] = 1;
//normals
Vector3[] normals = new Vector3[3];
normals[0] = -Vector3.forward;
normals[1] = -Vector3.forward;
normals[2] = -Vector3.forward;
//UVs
Vector2[] uv = new Vector2[3];
uv[0] = new Vector2(0, 0);
uv[1] = new Vector2(1, 0);
uv[2] = new Vector2(0, 1);
//initialise
mesh.vertices = verticies;
mesh.triangles = tri;
mesh.normals = normals;
mesh.uv = uv;
//setting up collider
polyCollider.pathCount = 1;
Vector2[] path = new Vector2[3]
{
new Vector2(0,0), new Vector2(0, height), new Vector2(width, 0)
};
polyCollider.SetPath(0, path);
}
void BoxMesh(float width, float height)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//Verticies
Vector3[] verticies = new Vector3[4]
{
new Vector3(0,0,0), new Vector3(0, height, 0), new Vector3(width, height, 0), new Vector3(width, 0, 0)
};
//Triangles
int[] tri = new int[6];
tri[0] = 0;
tri[1] = 1;
tri[2] = 3;
tri[3] = 1;
tri[4] = 2;
tri[5] = 3;
//normals
Vector3[] normals = new Vector3[4];
normals[0] = -Vector3.forward;
normals[1] = -Vector3.forward;
normals[2] = -Vector3.forward;
normals[3] = -Vector3.forward;
//UVs
Vector2[] uv = new Vector2[4];
uv[0] = new Vector2(0, 0);
uv[1] = new Vector2(0, 1);
uv[2] = new Vector2(1, 1);
uv[3] = new Vector2(1, 0);
//initialise
mesh.vertices = verticies;
mesh.triangles = tri;
mesh.normals = normals;
mesh.uv = uv;
//setting up collider
polyCollider.pathCount = 1;
Vector2[] path = new Vector2[4]
{
new Vector2(0,0), new Vector2(0, height), new Vector2(width, height), new Vector2(width, 0)
};
polyCollider.SetPath(0, path);
}
So essentially I want a function that I could call in the update method that would simply create a circular mesh. E.g:
void Update () {
if (meshType == "tri")
{
TriangleMesh(width, height);
}
if (meshType == "box")
{
BoxMesh(width, height);
}
if (meshType == "circle")
{
CircleMesh(radius);
}
}
The solution I've managed to find involves creating a regular polygon of n sides with a large value of n. I have a function called PolyMesh which creates a regular polygon mesh with n sides and a given radius.
Generating the vertices
For each vertex of a regular polygon with n sides, the coordinates relative to the centre of the polygon are given by x = r*sin(θ) and y = r*cos(θ) with θ = 2πi/n, i.e. x = r*sin(2πi/n) and y = r*cos(2πi/n), where i iterates from 0 to n-1. We can therefore build a list of vertices and convert it to an array afterwards:
//verticies
List<Vector3> verticiesList = new List<Vector3> { };
float x;
float y;
for (int i = 0; i < n; i ++)
{
x = radius * Mathf.Sin((2 * Mathf.PI * i) / n);
y = radius * Mathf.Cos((2 * Mathf.PI * i) / n);
verticiesList.Add(new Vector3(x, y, 0f));
}
Vector3[] verticies = verticiesList.ToArray();
Generating the triangles
A regular polygon of n sides can be split into n-2 triangles that all fan out from the same vertex (for example, a hexagon splits into the triangles 0-1-2, 0-2-3, 0-3-4 and 0-4-5). So we can generate each triangle as follows:
//triangles
List<int> trianglesList = new List<int> { };
for(int i = 0; i < (n-2); i++)
{
trianglesList.Add(0);
trianglesList.Add(i+1);
trianglesList.Add(i+2);
}
int[] triangles = trianglesList.ToArray();
Generating the Normals
Since this is a 2d object we can have every normal as -Vector3.forward like so:
//normals
List<Vector3> normalsList = new List<Vector3> { };
for (int i = 0; i < verticies.Length; i++)
{
normalsList.Add(-Vector3.forward);
}
Vector3[] normals = normalsList.ToArray();
Generating the collider
We could just use a circle collider with the same radius but in order to make this function work for a polygon of a smaller value of n we must use a PolygonCollider2D. Since the vertices are already in order in the vertices array we can simply use them as the paths for our PolygonCollider2D.
//polyCollider
polyCollider.pathCount = 1;
List<Vector2> pathList = new List<Vector2> { };
for (int i = 0; i < n; i++)
{
pathList.Add(new Vector2(verticies[i].x, verticies[i].y));
}
Vector2[] path = pathList.ToArray();
polyCollider.SetPath(0, path);
The complete code should look like this:
public PolygonCollider2D polyCollider;
void Start()
{
polyCollider = GetComponent<PolygonCollider2D>();
}
void PolyMesh(float radius, int n)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//verticies
List<Vector3> verticiesList = new List<Vector3> { };
float x;
float y;
for (int i = 0; i < n; i ++)
{
x = radius * Mathf.Sin((2 * Mathf.PI * i) / n);
y = radius * Mathf.Cos((2 * Mathf.PI * i) / n);
verticiesList.Add(new Vector3(x, y, 0f));
}
Vector3[] verticies = verticiesList.ToArray();
//triangles
List<int> trianglesList = new List<int> { };
for(int i = 0; i < (n-2); i++)
{
trianglesList.Add(0);
trianglesList.Add(i+1);
trianglesList.Add(i+2);
}
int[] triangles = trianglesList.ToArray();
//normals
List<Vector3> normalsList = new List<Vector3> { };
for (int i = 0; i < verticies.Length; i++)
{
normalsList.Add(-Vector3.forward);
}
Vector3[] normals = normalsList.ToArray();
//initialise
mesh.vertices = verticies;
mesh.triangles = triangles;
mesh.normals = normals;
//polyCollider
polyCollider.pathCount = 1;
List<Vector2> pathList = new List<Vector2> { };
for (int i = 0; i < n; i++)
{
pathList.Add(new Vector2(verticies[i].x, verticies[i].y));
}
Vector2[] path = pathList.ToArray();
polyCollider.SetPath(0, path);
}
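To hook this into the Update pattern from the question, the "circle" branch could simply call PolyMesh (this assumes a radius field alongside width and height; the vertex count of 40 is an arbitrary choice, increase it for a smoother edge):
if (meshType == "circle")
{
    PolyMesh(radius, 40);
}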
An introduction to meshes
I have less than 50 reputation, so I can't just comment on Tom Ryan's answer.
With that said, beware that his solution doesn't include the UVs for the mesh. Here is that addition:
//uvs
Vector2[] uvs = new Vector2[vertices.Length];
for (int i = 0; i < uvs.Length; i++)
{
uvs[i] = new Vector2(vertices[i].x / (radius*2) + 0.5f, vertices[i].y / (radius*2) + 0.5f);
}
// Later...
mesh.uv = uvs;

Marching Cubes generating holes in mesh

I'm working on a Marching Cubes implementation in Unity. My code is based on Paul Bourke's code, with a lot of modifications. I check whether a block at a position is null; if it is, a debug texture is placed on it.
This is my MC script
public class MarchingCubes
{
private World world;
private Chunk chunk;
private List<Vector3> vertices = new List<Vector3> ();
private List<Vector3> normals = new List<Vector3> ();
private Vector3[] ns;
private List<int> triangles = new List<int> ();
private List<Vector2> uvs = new List<Vector2> ();
private Vector3[] positions = new Vector3[8];
private float[] corners = new float[8];
private Vector3i size = new Vector3i (16, 128, 16);
Vector3[] vertlist = new Vector3[12];
private float isolevel = 1f;
private float Corner (Vector3i pos)
{
int x = pos.x;
int y = pos.y;
int z = pos.z;
if (x < size.x && z < size.z) {
return chunk.GetValue (x, y, z);
} else {
int ix = chunk.X, iz = chunk.Z;
int rx = chunk.region.x, rz = chunk.region.z;
if (x >= size.x) {
ix++;
x = 0;
}
if (z >= size.z) {
iz++;
z = 0;
}
return chunk.region.GetChunk (ix, iz).GetValue (x, y, z);
}
}
Block block;
public Mesh MarchChunk (World world, Chunk chunk, Mesh mesh)
{
this.world = world;
this.chunk = chunk;
vertices.Clear ();
triangles.Clear ();
uvs.Clear ();
for (int x = 0; x < size.x; x++) {
for (int y = 1; y < size.y - 2; y++) {
for (int z = 0; z < size.z; z++) {
block = chunk.GetBlock (x, y, z);
int cubeIndex = 0;
for (int i = 0; i < corners.Length; i++) {
corners [i] = Corner (new Vector3i (x, y, z) + offset [i]);
positions [i] = (new Vector3i (x, y, z) + offset [i]).ToVector3 ();
if (corners [i] < isolevel)
cubeIndex |= (1 << i);
}
if (eTable [cubeIndex] == 0)
continue;
for (int i = 0; i < vertlist.Length; i++) {
if ((eTable [cubeIndex] & 1 << i) == 1 << i)
vertlist [i] = LinearInt (positions [eCons [i, 0]], positions [eCons [i, 1]], corners [eCons [i, 0]], corners [eCons [i, 1]]);
}
for (int i = 0; triTable [cubeIndex, i] != -1; i += 3) {
int index = vertices.Count;
vertices.Add (vertlist [triTable [cubeIndex, i]]);
vertices.Add (vertlist [triTable [cubeIndex, i + 1]]);
vertices.Add (vertlist [triTable [cubeIndex, i + 2]]);
float tec = (0.125f);
Vector2 uvBase = block != null ? block.UV : new Vector2 ();
uvs.Add (uvBase);
uvs.Add (uvBase + new Vector2 (0, tec));
uvs.Add (uvBase + new Vector2 (tec, tec));
triangles.Add (index + 0);
triangles.Add (index + 1);
triangles.Add (index + 2);
}
}
}
}
if (mesh == null)
mesh = new Mesh ();
mesh.Clear ();
mesh.vertices = vertices.ToArray ();
mesh.triangles = triangles.ToArray ();
mesh.uv = uvs.ToArray ();
mesh.RecalculateNormals ();
return mesh;
}
bool IsBitSet (int b, int pos)
{
return ((b & pos) == pos);
}
Vector3 LinearInt (Vector3 p1, Vector3 p2, float v1, float v2)
{
Vector3 p;
p.x = p1.x + (isolevel - v1) * (p2.x - p1.x) / (v2 - v1);
p.y = p1.y + (isolevel - v1) * (p2.y - p1.y) / (v2 - v1);
p.z = p1.z + (isolevel - v1) * (p2.z - p1.z) / (v2 - v1);
return p;
}
private static int[,] eCons = new int[12, 2] {
{ 0, 1 },
{ 1, 2 },
{ 2, 3 },
{ 3, 0 },
{ 4, 5 },
{ 5, 6 },
{ 6, 7 },
{ 7, 4 },
{ 0, 4 },
{ 1, 5 },
{ 2, 6 },
{ 3, 7 }
};
private static Vector3i[] offset = new Vector3i[8] {
new Vector3i (0, 0, 1),
new Vector3i (1, 0, 1),
new Vector3i (1, 0, 0),
new Vector3i (0, 0, 0),
new Vector3i (0, 1, 1),
new Vector3i (1, 1, 1),
new Vector3i (1, 1, 0),
new Vector3i (0, 1, 0)
};
}
I didn't put the tables in the sample, because they are the same as the ones in Bourke's code.
EDIT:
What I have figured out so far is that the cells' values at the blue triangles are 0, so they don't have to be triangulated, but the cells' values underneath them are 1, and because of this a top triangle is created to close the mesh.

Basic SAT collision algorithm

Everything I know about vector projection so far is from the internet, so I am a little confused. First of all, I assume that when we project the vertices of a polygon onto an axis we get the scalar projection, as opposed to the vector projection, and that the formula for this is (A.B) / |A|. However, in this tutorial they simply use the dot product to project onto the axis. Is this because the vector and the axis are perpendicular or something? Nonetheless, I had a go at writing a crude version in C#, but it doesn't seem to return correct results:
Rectangle r1 = new Rectangle(300, 200, 50, 50);
Rectangle r2 = new Rectangle(340, 240, 50, 50);
bool areColliding(Rectangle r1, Rectangle r2)
{
/* Using clockwise labelling
*
* B*
* . .
* . .
* A * C*
* . .
* . .
* D*
*
*/
//Calculate vectors and normals of Rectangle 1
Point r1A = r1.Location;
Point r1B = new Point(r1.Location.X + r1.Width, r1.Location.Y);
Point r1C = new Point(r1.Location.X, r1.Location.Y + r1.Height);
Point r1D = new Point(r1.Location.X + r1.Width, r1.Location.Y + r1.Height);
Vector2 r1AB = new Vector2(r1B.X - r1A.X, r1B.Y - r1A.Y);
Vector2 r1BC = new Vector2(r1C.X - r1B.X, r1C.Y - r1B.Y);
Vector2 r1CD = new Vector2(r1D.X - r1C.X, r1D.Y - r1C.Y);
Vector2 r1DA = new Vector2(r1A.X - r1D.X, r1A.Y - r1D.Y);
Vector2 r1AB_Normal = getNormal(r1AB);
Vector2 r1BC_Normal = getNormal(r1BC);
Vector2 r1CD_Normal = getNormal(r1CD);
Vector2 r1DA_Normal = getNormal(r1DA);
Point[] r1Points = {r1A, r1B, r1C, r1D};
Vector2[] Axes1 = { r1AB_Normal, r1BC_Normal, r1CD_Normal, r1DA_Normal };
//Calculate vectors and normals of Rectangle 2
Point r2A = r2.Location;
Point r2B = new Point(r2.Location.X + r2.Width, r2.Location.Y);
Point r2C = new Point(r2.Location.X, r2.Location.Y + r2.Height);
Point r2D = new Point(r2.Location.X + r2.Width, r2.Location.Y + r2.Height);
Vector2 r2AB = new Vector2(r2B.X - r2A.X, r2B.Y - r2A.Y);
Vector2 r2BC = new Vector2(r2C.X - r2B.X, r2C.Y - r2B.Y);
Vector2 r2CD = new Vector2(r2D.X - r2C.X, r2D.Y - r2C.Y);
Vector2 r2DA = new Vector2(r2A.X - r2D.X, r2A.Y - r2D.Y);
Vector2 r2AB_Normal = getNormal(r2AB);
Vector2 r2BC_Normal = getNormal(r2BC);
Vector2 r2CD_Normal = getNormal(r2CD);
Vector2 r2DA_Normal = getNormal(r2DA);
Point[] r2Points = { r2A, r2B, r2C, r2D };
Vector2[] Axes2 = { r2AB_Normal, r2BC_Normal, r2CD_Normal, r2DA_Normal };
//Start projecting each vertex on to each axis
for (int i = 0; i < Axes1.Length; i++)
{
float r1Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r1Max = float.NaN;
for (int p = 1; p < r1Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r1Min)
{
r1Min = dot;
}
}
float r2Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r2Max = float.NaN;
for (int p = 1; p < r2Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r2Min)
{
r2Min = dot;
}
}
if (r1Min < r2Max)
{
return true;
}
}
for (int i = 0; i < Axes2.Length; i++)
{
float r1Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r1Max = float.NaN;
for (int p = 1; p < r1Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r1Min)
{
r1Min = dot;
}
}
float r2Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r2Max = float.NaN;
for (int p = 1; p < r2Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r2Min)
{
r2Min = dot;
}
}
if (r1Min < r2Max)
{
return true;
}
}
return false;
}
Vector2 getNormal(Vector2 v)
{
return new Vector2(-v.Y, v.X);
}

Weird cube shape

I'm trying to create a cube with both indices and vertices. I'm able to draw them, but they look kinda weird.
Here's my code. It has something to do with either the vertices or indices, but I'm not sure which:
public void Draw(BasicEffect effect)
{
foreach (EffectPass pass in effect.CurrentTechnique.Passes)
{
pass.Apply();
device.SetVertexBuffer(cubeVertexBuffer);
device.Indices = iBuffer;
device.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, 8, 0, 12);
}
}
private void SetUpIndices()
{
indices = new short[36];
indices[0] = 0;
indices[1] = 3;
indices[2] = 2;
indices[3] = 2;
indices[4] = 1;
indices[5] = 0;
indices[6] = 4;
indices[7] = 7;
indices[8] = 6;
indices[9] = 6;
indices[10] = 5;
indices[11] = 4;
indices[12] = 1;
indices[13] = 2;
indices[14] = 6;
indices[15] = 6;
indices[16] = 5;
indices[17] = 1;
indices[18] = 4;
indices[19] = 7;
indices[20] = 3;
indices[21] = 3;
indices[22] = 0;
indices[23] = 4;
indices[24] = 4;
indices[25] = 0;
indices[26] = 1;
indices[27] = 1;
indices[28] = 5;
indices[29] = 4;
indices[30] = 3;
indices[31] = 7;
indices[32] = 6;
indices[33] = 6;
indices[34] = 2;
indices[35] = 3;
iBuffer = new IndexBuffer(device, typeof(short), 36, BufferUsage.WriteOnly);
iBuffer.SetData(indices);
}
private void SetUpVertices()
{
vertices = new VertexPositionColor[8];
vertices[0] = new VertexPositionColor(new Vector3(0, 0, 0), color);
vertices[1] = new VertexPositionColor(new Vector3(0, 1, 0), color);
vertices[2] = new VertexPositionColor(new Vector3(1, 1, 0), color);
vertices[3] = new VertexPositionColor(new Vector3(1, 0, 0), color);
vertices[4] = new VertexPositionColor(new Vector3(0, 0, -1), color);
vertices[5] = new VertexPositionColor(new Vector3(0, 1, -1), color);
vertices[6] = new VertexPositionColor(new Vector3(1, 1, -1), color);
vertices[7] = new VertexPositionColor(new Vector3(1, 0, -1), color);
cubeVertexBuffer = new VertexBuffer(device, typeof(VertexPositionColor), 8, BufferUsage.WriteOnly);
cubeVertexBuffer.SetData<VertexPositionColor>(vertices);
}
My wild guess would be that it's because of the messed-up order of vertices in your indices (I'll call them triangles from here on).
Usually in 3D engines you have to set up the order of vertices within each triangle so that they are all ordered the same way - i.e. clockwise or counter-clockwise - when looked at from outside the shape they form.
Speaking mathematically, all triangle normals in your shape should point either inward or outward. The direction of the normal tells the 3D engine which side to draw - the engine can do half the work if it draws only one side of each triangle, since the inside of a solid object is, in 99.99% of cases, never seen by the user.
In your case, look at the index triples 0,3,2 and 4,7,6 - they should be either 0,3,2 / 4,6,7 or 0,2,3 / 4,7,6. And so on.
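For instance, keeping the front face as 0,3,2 and reversing the back face to match, the first two faces from SetUpIndices would become something like this (a sketch of one of the two options, assuming the vertex layout from SetUpVertices; the remaining faces need the same consistency check):
indices[0] = 0; indices[1] = 3; indices[2] = 2;   // front face, unchanged
indices[3] = 2; indices[4] = 1; indices[5] = 0;
indices[6] = 4; indices[7] = 6; indices[8] = 7;   // back face, winding reversed (was 4,7,6)
indices[9] = 4; indices[10] = 5; indices[11] = 6; // (was 6,5,4)
As a quick sanity check (standard XNA 4 API), you can also temporarily disable back-face culling before drawing; if the cube suddenly looks right with culling off, the index order is the problem:
device.RasterizerState = RasterizerState.CullNone;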

Irregular triangles vertex index

At last I have something displayed. I switched to using graphics.GraphicsDevice.DrawIndexedPrimitives ... Next problem... Only one triangle is displayed. The data set is about 200 triangles. I formatted the data coming in to make sure every three consecutive vectors form a triangle face. These are irregular triangles forming an irregular shape. I don't fully understand the indexing of the vertices. It looks like every 3 indices form a triangle, and if that is so then the indices match the data coming in. I did this:
int i4 = -1;
indices = new int[xData1.Count];
for (int i2 = 0; i2 < xData1.Count; i2++)
{
i4++;
cubeVertices[i4].Position = new Vector3((float)xData1[i2][0], (float)xData1[i2][1], (float)xData1[i2][2]);
cubeVertices[i4].Color = Color.LawnGreen;
indices[i4] = i4;
}
making the indices match the vertices coming in. Then I used Reimer's normal calculation to provide the normals; this is probably wrong, as his example was using 6 vertices per index (I think!), like this:
for (int i = 0; i < cubeVertices.Length; i++)
cubeVertices[i].Normal = new Vector3(0, 0, 0);
for (int i = 0; i < indices.Length / 3; i++)
{
int index1 = indices[i * 3];
int index2 = indices[i * 3 + 1];
int index3 = indices[i * 3 + 2];
Vector3 side1 = cubeVertices[index1].Position - cubeVertices[index3].Position;
Vector3 side2 = cubeVertices[index1].Position - cubeVertices[index2].Position;
Vector3 normal = Vector3.Cross(side1, side2);
cubeVertices[index1].Normal += normal;
cubeVertices[index2].Normal += normal;
cubeVertices[index3].Normal += normal;
}
for (int i = 0; i < cubeVertices.Length; i++)
cubeVertices[i].Normal.Normalize();
How many things do I need to fix here? I am only seeing 1 out of a couple of hundred triangles :(
Thanks for your patience.
public struct VertexPositionColorNormal
{
public Vector3 Position;
public Color Color;
public Vector3 Normal;
public readonly static VertexDeclaration VertexDeclaration = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Color, VertexElementUsage.Color, 0),
new VertexElement(sizeof(float) * 3 + 4, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0)
);
}
...
private void CopyToBuffers()
{
vertexBuffer = new VertexBuffer(graphics.GraphicsDevice, VertexPositionColorNormal.VertexDeclaration,
cubeVertices.Length, BufferUsage.WriteOnly);
vertexBuffer.SetData(cubeVertices);
myIndexBuffer = new IndexBuffer(graphics.GraphicsDevice, typeof(int), indices.Length, BufferUsage.WriteOnly);
myIndexBuffer.SetData(indices);
}
....
foreach (EffectPass pass in basicEffect.CurrentTechnique.Passes)
{
basicEffect.World = world;
basicEffect.View = view;
basicEffect.Projection = proj;
pass.Apply();
graphics.GraphicsDevice.Indices = myIndexBuffer;
graphics.GraphicsDevice.SetVertexBuffer(vertexBuffer);
graphics.GraphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0,
cubeVertices.Length, 0, indices.Length / 3);
Your normal calculation is correct, and even if it were wrong, the only consequence would be that your triangles receive the wrong lighting.
You're using indices that exactly match the order of the vertices coming in, which is in itself redundant. If you stop setting the indices altogether and use DrawPrimitives instead, with the same primitive count, does that make a difference?
Other than that, are you sure that the data you're giving it is valid? Are the vertex positions correctly set?
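For reference, the non-indexed call would look roughly like this (a sketch reusing vertexBuffer and cubeVertices from the code above; no index buffer is set):
graphics.GraphicsDevice.SetVertexBuffer(vertexBuffer);
graphics.GraphicsDevice.DrawPrimitives(PrimitiveType.TriangleList, 0, cubeVertices.Length / 3);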
