Creating a 2D Circular Mesh in Unity - C#

I currently have a "CreateMesh" script that can be added as a component of an object with a Mesh Renderer and a Mesh Filter. A 2D mesh is created, along with a polygon collider matching the mesh's dimensions, when a "MeshType" variable is set to either "tri" or "box" (for a triangle or rectangle mesh respectively). I also want to add the ability to create a circular mesh, but from some research I've realised this isn't as simple as I first thought, and I'm yet to find anything that helps.
This is the code I have for the box and triangle meshes:
public float width = 5f;
public float height = 5f;
public string meshType;
public PolygonCollider2D polyCollider;
void Start()
{
polyCollider = GetComponent<PolygonCollider2D>();
}
// Update is called once per frame
void Update () {
if (meshType == "tri")
{
TriangleMesh(width, height);
}
if (meshType == "box")
{
BoxMesh(width, height);
}
}
void TriangleMesh(float width, float height)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//Verticies
Vector3[] verticies = new Vector3[3]
{
new Vector3(0,0,0), new Vector3(width, 0, 0), new Vector3(0, height, 0)
};
//Triangles
int[] tri = new int[3];
tri[0] = 0;
tri[1] = 2;
tri[2] = 1;
//normals
Vector3[] normals = new Vector3[3];
normals[0] = -Vector3.forward;
normals[1] = -Vector3.forward;
normals[2] = -Vector3.forward;
//UVs
Vector2[] uv = new Vector2[3];
uv[0] = new Vector2(0, 0);
uv[1] = new Vector2(1, 0);
uv[2] = new Vector2(0, 1);
//initialise
mesh.vertices = verticies;
mesh.triangles = tri;
mesh.normals = normals;
mesh.uv = uv;
//setting up collider
polyCollider.pathCount = 1;
Vector2[] path = new Vector2[3]
{
new Vector2(0,0), new Vector2(0, height), new Vector2(width, 0)
};
polyCollider.SetPath(0, path);
}
void BoxMesh(float width, float height)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//Verticies
Vector3[] verticies = new Vector3[4]
{
new Vector3(0,0,0), new Vector3(0, height, 0), new Vector3(width, height, 0), new Vector3(width, 0, 0)
};
//Triangles
int[] tri = new int[6];
tri[0] = 0;
tri[1] = 1;
tri[2] = 3;
tri[3] = 1;
tri[4] = 2;
tri[5] = 3;
//normals
Vector3[] normals = new Vector3[4];
normals[0] = -Vector3.forward;
normals[1] = -Vector3.forward;
normals[2] = -Vector3.forward;
normals[3] = -Vector3.forward;
//UVs
Vector2[] uv = new Vector2[4];
uv[0] = new Vector2(0, 0);
uv[1] = new Vector2(0, 1);
uv[2] = new Vector2(1, 1);
uv[3] = new Vector2(1, 0);
//initialise
mesh.vertices = verticies;
mesh.triangles = tri;
mesh.normals = normals;
mesh.uv = uv;
//setting up collider
polyCollider.pathCount = 1;
Vector2[] path = new Vector2[4]
{
new Vector2(0,0), new Vector2(0, height), new Vector2(width, height), new Vector2(width, 0)
};
polyCollider.SetPath(0, path);
}
So essentially I want a function that I could call in the Update method that would simply create a circular mesh, e.g.:
void Update () {
if (meshType == "tri")
{
TriangleMesh(width, height);
}
if (meshType == "box")
{
BoxMesh(width, height);
}
if (meshType == "circle")
{
CircleMesh(radius);
}
}

The solution I've managed to find involves creating a regular polygon of n sides with a large value of n. I have a function called PolyMesh which creates a regular polygon mesh with n sides and a given radius.
Generating the vertices
For each vertex of a regular polygon with n sides, the coordinates relative to the centre of the polygon are given by x = r*sin(θ) and y = r*cos(θ), where θ = 2πi/n and i iterates from 0 to n-1; therefore x = r*sin(2πi/n) and y = r*cos(2πi/n). For example, with n = 4 and r = 1 this gives the vertices (0, 1), (1, 0), (0, -1) and (-1, 0). We can therefore have a list which has vertices assigned to it and is then converted to an array afterwards:
//verticies
List<Vector3> verticiesList = new List<Vector3> { };
float x;
float y;
for (int i = 0; i < n; i ++)
{
x = radius * Mathf.Sin((2 * Mathf.PI * i) / n);
y = radius * Mathf.Cos((2 * Mathf.PI * i) / n);
verticiesList.Add(new Vector3(x, y, 0f));
}
Vector3[] verticies = verticiesList.ToArray();
Generating the triangles
A given regular polygon of n sides can be split into n-2 triangles fanned out from the same vertex. So we can generate each triangle as follows:
//triangles
List<int> trianglesList = new List<int> { };
for(int i = 0; i < (n-2); i++)
{
trianglesList.Add(0);
trianglesList.Add(i+1);
trianglesList.Add(i+2);
}
int[] triangles = trianglesList.ToArray();
Generating the Normals
Since this is a 2D object we can set every normal to -Vector3.forward, like so:
//normals
List<Vector3> normalsList = new List<Vector3> { };
for (int i = 0; i < verticies.Length; i++)
{
normalsList.Add(-Vector3.forward);
}
Vector3[] normals = normalsList.ToArray();
Generating the collider
We could just use a circle collider with the same radius, but to make this function also work for polygons with a small value of n we use a PolygonCollider2D. Since the vertices are already in order in the vertices array, we can simply use them as the path for our PolygonCollider2D.
//polyCollider
polyCollider.pathCount = 1;
List<Vector2> pathList = new List<Vector2> { };
for (int i = 0; i < n; i++)
{
pathList.Add(new Vector2(verticies[i].x, verticies[i].y));
}
Vector2[] path = pathList.ToArray();
polyCollider.SetPath(0, path);
The complete code should look like this:
public PolygonCollider2D polyCollider;
void Start()
{
polyCollider = GetComponent<PolygonCollider2D>();
}
void PolyMesh(float radius, int n)
{
MeshFilter mf = GetComponent<MeshFilter>();
Mesh mesh = new Mesh();
mf.mesh = mesh;
//verticies
List<Vector3> verticiesList = new List<Vector3> { };
float x;
float y;
for (int i = 0; i < n; i ++)
{
x = radius * Mathf.Sin((2 * Mathf.PI * i) / n);
y = radius * Mathf.Cos((2 * Mathf.PI * i) / n);
verticiesList.Add(new Vector3(x, y, 0f));
}
Vector3[] verticies = verticiesList.ToArray();
//triangles
List<int> trianglesList = new List<int> { };
for(int i = 0; i < (n-2); i++)
{
trianglesList.Add(0);
trianglesList.Add(i+1);
trianglesList.Add(i+2);
}
int[] triangles = trianglesList.ToArray();
//normals
List<Vector3> normalsList = new List<Vector3> { };
for (int i = 0; i < verticies.Length; i++)
{
normalsList.Add(-Vector3.forward);
}
Vector3[] normals = normalsList.ToArray();
//initialise
mesh.vertices = verticies;
mesh.triangles = triangles;
mesh.normals = normals;
//polyCollider
polyCollider.pathCount = 1;
List<Vector2> pathList = new List<Vector2> { };
for (int i = 0; i < n; i++)
{
pathList.Add(new Vector2(verticies[i].x, verticies[i].y));
}
Vector2[] path = pathList.ToArray();
polyCollider.SetPath(0, path);
}
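To hook this up to the Update method from the question, the call could look something like this (a sketch: radius and n are assumed public fields, where a larger n gives a closer approximation to a circle):
public float radius = 5f;
public int n = 100; //number of sides; larger n = smoother circle
void Update()
{
    if (meshType == "circle")
    {
        PolyMesh(radius, n);
    }
}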
An introduction to meshes

I have less than 50 reputation and so I can't just comment on @Tom Ryan's answer.
With that said, beware that his solution doesn't include the UVs for the mesh. Here is that addition (using the verticies array from his PolyMesh function):
//uvs - map x and y from [-radius, radius] to [0, 1]
Vector2[] uvs = new Vector2[verticies.Length];
for (int i = 0; i < uvs.Length; i++)
{
uvs[i] = new Vector2(verticies[i].x / (radius*2) + 0.5f, verticies[i].y / (radius*2) + 0.5f);
}
// Later...
mesh.uv = uvs;

Related

Why cube mesh becomes a plane when in high resolution?

I use the following code to generate a cube as a single mesh. My purpose is to generate a sphere from it by normalizing, as I have shown in the commented line (I just have to do the same to the corresponding statements in the lines that follow). The problem is that the mesh changes from a cube to a flat plane as I keep increasing the resolution (the public int resolution parameter).
(This code was inspired by the video https://youtu.be/QN39W020LqU, but I am using the technique in my own way, as given by the following code, so that I can generate a single mesh instead of a combination of 6 meshes; this is required for my work.)
[code=CSharp]
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
public class Sc_Planet : MonoBehaviour
{
[Range(2, 512)]
public int resolution = 2;
[Range(2, 256)]
public int radius = 10;
MeshFilter meshFilter;
void OnValidate()
{
Initialize();
}
void Initialize()
{
if (meshFilter == null)
{
GameObject meshObj = new GameObject("mesh_Planet");
meshObj.transform.parent = transform;
meshObj.AddComponent<MeshRenderer>().sharedMaterial = new Material(Shader.Find("Standard"));
meshFilter = meshObj.AddComponent<MeshFilter>();
meshFilter.sharedMesh = new Mesh();
}
int xmax = resolution + 1;
int ymax = resolution + 1;
float dx = 1.0f / resolution;
float dy = 1.0f / resolution;
Vector3[] vertsTop = new Vector3[xmax * ymax];
Vector3[] vertsRight = new Vector3[xmax * ymax];
Vector3[] vertsFront = new Vector3[xmax * ymax];
Vector3[] vertsBottom = new Vector3[xmax * ymax];
Vector3[] vertsLeft = new Vector3[xmax * ymax];
Vector3[] vertsBack = new Vector3[xmax * ymax];
for (int y = 0; y < ymax; y++)
{
for (int x = 0; x < xmax; x++)
{
float px = dx * x - 0.5f;
float py = dy * y - 0.5f;
int t = x + y * xmax;
//vertsTop[t] = new Vector3(py, 0.5f, px).normalized * radius;
vertsTop[t] = new Vector3(py, 0.5f, px);
vertsRight[t] = new Vector3(px, py, 0.5f);
vertsFront[t] = new Vector3(0.5f, px, py);
vertsBottom[t] = new Vector3(px, -0.5f, py);
vertsLeft[t] = new Vector3(py, px, -0.5f);
vertsBack[t] = new Vector3(-0.5f, py, px);
}
}
List<int> trianglesList = new List<int>();
for (int y = 0; y < ymax - 1; ++y)
{
for (int x = 0; x < xmax; ++x)
{
if (x % xmax != xmax - 1)
{
int f = x + y * xmax;
trianglesList.Add(f);
trianglesList.Add(f + 1);
trianglesList.Add(f + 1 + xmax);
trianglesList.Add(f);
trianglesList.Add(f + 1 + xmax);
trianglesList.Add(f + xmax);
}
}
}
List<Vector3> verts = new List<Vector3>();
Dictionary<Vector3, int> vdict = new Dictionary<Vector3, int>();
List<int> triangles = new List<int>();
int nextIndex = 0;
void addFace(Vector3 [] in_verts, List<int> in_triangles)
{
for(int i = 0; i < in_verts.Length; ++i)
{
if (!vdict.ContainsKey(in_verts[i]))
{
vdict.Add(in_verts[i], nextIndex);
verts.Add(in_verts[i]);
++nextIndex;
}
}
for(int i = 0; i < in_triangles.Count; ++i)
{
triangles.Add(vdict[in_verts[in_triangles[i]]]);
}
}
addFace(vertsTop, trianglesList);
addFace(vertsRight, trianglesList);
addFace(vertsFront, trianglesList);
addFace(vertsBottom, trianglesList);
addFace(vertsLeft, trianglesList);
addFace(vertsBack, trianglesList);
var mesh = meshFilter.sharedMesh;
mesh.Clear();
mesh.vertices = verts.ToArray();
mesh.triangles = triangles.ToArray();
mesh.RecalculateNormals();
}
}
[/code]
This code works in Blender (I used Python to script it in Blender, and it works very well at any resolution).
The only problem is that when I use it in Unity, the meshes become weird, as shown in the images attached below.
(Screenshots were attached at Resolution = 96, 122, 182 and 344, showing the mesh degrading as the resolution increases.)
Why is this happening?
How should I correct it?
(I have also posted this in unity forums: Why cube mesh becomes a plane when in high resolution?)
Ok, I found the answer. This exceeds the vertex limit of Unity's default 16-bit mesh index buffer. I had to change the mesh to a 32-bit index format to correct it.
Details are in this documentation page: https://docs.unity3d.com/ScriptReference/Rendering.IndexFormat.html
I just had to add this line:
mesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
That was it.
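For context: a 16-bit index buffer can only address 65,535 vertices, and at resolution = 344 the code above generates 6 * 345 * 345 = 714,150 vertex entries before deduplication, far past that limit. In Initialize() the fix slots in where the mesh is filled (a minimal sketch; setting indexFormat before assigning the vertex and triangle arrays is the safe order):
var mesh = meshFilter.sharedMesh;
mesh.Clear();
//switch from the default IndexFormat.UInt16 so the mesh can hold more than 65,535 vertices
mesh.indexFormat = UnityEngine.Rendering.IndexFormat.UInt32;
mesh.vertices = verts.ToArray();
mesh.triangles = triangles.ToArray();
mesh.RecalculateNormals();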

How can I adjust rotation based on another gameObject in Unity?

I'm trying to make a chess game in Augmented Reality. I wrote a script which places the chessboard on the plane in AR. Then I created a mesh with 64 squares which match the chessboard tiles. I have a problem placing the mesh so it matches my chessboard (screenshots). I think I should rotate the mesh around the Y axis, but I wasn't able to do that.
placing chessboard:
GameObject placedObject = Instantiate(objectToPlace, placementPose.position, placementPose.rotation * Quaternion.Euler(-90f, 0f, 0f));
script that creates and places mesh:
private float yAdjust = 0F;
private Vector3 adjust; //offset applied to every square, set in GenerateSquares
private Vector3 boardCenter = GameObject.Find("Interaction").GetComponent<TapToPlaceObject>().placementPose.position;
private void GenerateSquares(float squareSize)
{
adjust = new Vector3(-4 * squareSize, 0, -4 * squareSize) + boardCenter;
squares = new GameObject[8,8];
for (int i = 0; i < 8; i++)
{
for (int j = 0; j < 8; j++)
{
squares[i, j] = CreateSquare(squareSize,i,j);
}
}
}
private GameObject CreateSquare(float squareSize, int i, int j)
{
GameObject square = new GameObject(string.Format("{0},{1}", i, j));
square.transform.parent = transform;
Vector3[] vertices = new Vector3[4];
vertices[0] = new Vector3(i * squareSize, yAdjust, j * squareSize) + adjust;
vertices[1] = new Vector3(i * squareSize, yAdjust, (j + 1) * squareSize) + adjust;
vertices[2] = new Vector3((i + 1) * squareSize, yAdjust, j * squareSize) + adjust;
vertices[3] = new Vector3((i + 1) * squareSize, yAdjust, (j + 1) * squareSize) + adjust;
int[] triangles = new int[] { 0, 1, 2, 1, 3, 2 };
Mesh mesh = new Mesh();
square.AddComponent<MeshFilter>().mesh = mesh;
square.AddComponent<MeshRenderer>().material = squareMaterial;
//square.transform.rotation = Quaternion.Euler(new Vector3(0, boardRotation.eulerAngles.y, 0));
//square.transform.rotation = boardRotation;
mesh.vertices = vertices;
mesh.triangles = triangles;
square.AddComponent<BoxCollider>();
return square;
}
screenshot of my problem
You could probably try simply appending another
* Quaternion.Euler(0f, 45f, 0f)
to the rotation.
In general though, for your squares there is Transform.SetParent, which allows you to pass the optional parameter worldPositionStays as false, which is probably what you want to do here. Setting
transform.parent = parent
equals calling
transform.SetParent(parent);
which equals calling
transform.SetParent(parent, true);
so the objects keep their original world-space orientation and position.
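So for the squares, a minimal sketch of the alternative (assuming each square should keep its local placement relative to the board rather than its world pose) would be:
//worldPositionStays = false keeps the square's local position/rotation relative to the parent board
square.transform.SetParent(transform, false);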
However, I would actually rather recommend that you create the board once in edit mode and make the entire board a prefab. Then, when you spawn the board, you only need to place and rotate one single object, and all the children will already be correctly placed and rotated within the board.

Split Texture using a Curved Line in Unity3D C#

I have a texture that I want to slice into 2 parts, using a Vector2 array.
I have all the Vector2 points for the curved line.
Question
How can I slice the texture into 2 parts using the curved line of points?
Alternative Solutions/Questions
How can I 'pixel' fill a Vector2[] shape to create a Texture?
My attempts
1) Generating Vector2 points to create a square, with the top part being the curve edge. This looked promising, but when I tried generating a Mesh, the point sorting was incorrect.
2) Dynamically creating a PolygonCollider2D mimicking the bottom part of the sliced texture - this had the same issue as attempt 1, the point ordering. So when converting the collider to a mesh, it obviously had the same result as attempt 1.
In the picture below:
The red line simulates my Vector2 array
The gray+green square is the texture 1024 x 1024 pixels
The green area is the target area I want
This makes a mesh in the shape you want (but with jagged edges on top); hopefully that is a step in the right direction. The Vector2 points[] array contains your red line. It should be sorted by the x coordinate, and all the numbers should be between 0 and 1. The object needs a MeshFilter and a MeshRenderer with your texture.
using UnityEngine;
[RequireComponent(typeof(MeshFilter))]
[RequireComponent(typeof(MeshRenderer))]
public class createMesh : MonoBehaviour {
void Start () {
Vector2[] points = new Vector2[4];
points [0] = new Vector2 (0, .5f);
points [1] = new Vector2 (.33f, 1f);
points [2] = new Vector2 (.66f, .5f);
points [3] = new Vector2 (1, 1f);
MeshFilter mf = GetComponent<MeshFilter> ();
Mesh mesh = new Mesh();
Vector3[] verticies = new Vector3[points.Length * 2];
int[] triangles = new int[(points.Length - 1)*6];
Vector3[] normals = new Vector3[points.Length * 2];
Vector2[] uv = new Vector2[points.Length * 2];
int vIndex = 0;
int tIndex = 0;
int nIndex = 0;
int uvIndex = 0;
for (int i = 0; i< points.Length; i++) {
Vector3 topVert = points[i];
Vector3 bottomVert = topVert;
bottomVert.y = 0;
verticies[vIndex++]= bottomVert;
verticies[vIndex++]=topVert;
//uv
uv[uvIndex++] = bottomVert;
uv[uvIndex++] = topVert;
//normals
normals[nIndex++] = -Vector3.forward;
normals[nIndex++] = -Vector3.forward;
if (i<points.Length - 1) {
//triangles
triangles[tIndex++] = (i)*2;
triangles[tIndex++] = (i)*2+1;
triangles[tIndex++] = (i)*2+2;
triangles[tIndex++] = (i)*2+2;
triangles[tIndex++] = (i)*2+1;
triangles[tIndex++] = (i)*2+3;
}
}
mesh.vertices = verticies;
mesh.triangles = triangles;
mesh.normals = normals;
mesh.uv = uv;
mf.mesh = mesh;
}
}
Bonus: here's a way to do it with just the texture. To use this, the texture has to be set to Advanced, with read/write enabled, in the import settings. This method uses 0 to 1023 (or however large your texture is) for coordinates, and should work for numbers outside that range too.
using UnityEngine;
using System.Collections;
public class tex2d : MonoBehaviour {
public Vector2[] points;
void Start () {
MeshRenderer mr;
Texture2D t2d;
Texture2D newTex = new Texture2D (1024, 1024);
mr = GetComponent<MeshRenderer> ();
t2d = mr.material.GetTexture (0) as Texture2D;
MakeTex (points, t2d, ref newTex, 1024);
mr.material.SetTexture (0, newTex);
}
void MakeTex(Vector2[] pnts, Texture2D inputTex, ref Texture2D outputTex, int size){
Color bgcolor = new Color (1, 0, 1, 1);
for (int i=0; i<(pnts.Length-1); i++) {
Vector2 p1=pnts[i];
Vector2 p2=pnts[i+1];
//skip points that are out of range
if ((p1.x <0 && p2.x <0) || (p1.x > size && p2.x>size)) continue;
for (int x =(int)p1.x; x<(int)p2.x; x++) {
if (x<0) continue;
if (x>=size) break;
float interpX = (x-p1.x)/(p2.x-p1.x);
int interpY = (int) ((p2.y-p1.y)*interpX + p1.y);
for (int y=0; y<interpY; y++) {
outputTex.SetPixel(x,y,inputTex.GetPixel(x,y));
}
for (int y= interpY; y<size; y++) {
outputTex.SetPixel(x,y,bgcolor);
}
}
}
outputTex.Apply ();
}
}

Display 3d model as 3D mesh object using wpf 3D graphics

I am working on the C# .NET platform with WPF 3D graphics.
Here is the flow of the code:
1) I take the depth data from Kinect and pass it to a function which calculates 3D points:
private void display3DView()
{
while(loop_run)
{
using ( DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
{
if (depthFrame == null) continue;
Point3DCollection PointCloud ;
depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);
float[,] ImageArray = new float[240, 320]; //note: dimensions must be [240, 320] to match the indexing below
short [ ,] depth = new short[240,320];
for (int i = 0; i < 240; i++)
{
for (int j = 0; j <320; j++)
{
depth[i,j]= depthImagePixels[j+i *320].Depth;
ImageArray[i,j] =(float)depth[i,j]/(float)1000;
}
}
PointCloud =Calculate_PointCloud(ImageArray);
viewModel(PointCloud);
}
}
}
2) I calculate the 3D points using the camera parameters and the depth data of the Kinect camera:
private Point3DCollection Calculate_PointCloud(float[,] ImageArray)
{
Point3DCollection PointCloud = new Point3DCollection();
float x_coodinate;
float y_coordinate;
float z_coordinate;
float thresholdvalue = 2.0f;
for (int i = 0; i < 239; ++i)
{
for (int j = 0; j < 319; ++j)
{
if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue && Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue && Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
{
z_coordinate = ImageArray[i, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point1 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point1);
z_coordinate = ImageArray[i, j + 1];
x_coodinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point2 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point2);
z_coordinate = ImageArray[i + 1, j];
x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
Point3D point3 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
PointCloud.Add(point3);
}
}
}
return PointCloud;
}
3) Here I convert the points into a set of triangles, with a normal for each 3D point, give those triangles to a 3D mesh object, and render the mesh using a Viewport3D control:
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
Camera1.NearPlaneDistance = 100;
Camera1.FieldOfView = 10;
Camera1.Position = new Point3D(0, 0, 1);
Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.UpDirection = new Vector3D(0, 1, 0);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for(int i =0; i<points.Count; i+=3)
{
Triatomesh.addTriangleToMesh(points[i],points[i + 1], points[i + 2], tmesh, combinedvertices);
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
modelGroup.Children.Add(msheet);
modelGroup.Children.Add(DirLight1);
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
4) Here is the function which takes three 3D points and adds them to the 3D mesh object as a triangle, calculating a normal for each point:
public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
MeshGeometry3D mesh, bool combine_vertices)
{
Vector3D normal = CalculateNormal(p0, p1, p2);
if (combine_vertices)
{
addPointCombined(p0, mesh, normal);
addPointCombined(p1, mesh, normal);
addPointCombined(p2, mesh, normal);
}
else
{
mesh.Positions.Add(p0);
mesh.Positions.Add(p1);
mesh.Positions.Add(p2);
//mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
// mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
mesh.Normals.Add(normal);
}
}
public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2) //static
{
Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);
Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);
return Vector3D.CrossProduct(v0, v1);
}
public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
{
bool found = false;
int i = 0;
foreach (Point3D p in mesh.Positions)
{
if (p.Equals(point))
{
found = true;
mesh.TriangleIndices.Add(i);
mesh.Positions.Add(point);
mesh.Normals.Add(normal);
break;
}
i++;
}
if (!found)
{
mesh.Positions.Add(point);
mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
mesh.Normals.Add(normal);
}
}
5) Here is my XAML code:
<Window x:Class="PointCloud3DView.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">
<Grid Height="1130" Width="1626">
<Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
Width="967" Background="Black" />
</Grid>
</Window>
The problem is that I am not able to get the 3D model displayed on the WPF screen. Could anyone please go through the whole code, help me understand where I went wrong, and suggest corrections?
Thanks in advance
I have been experimenting with WPF 3D for several weeks now and have learned some tough lessons :)
I do not have time to check and try the whole code now, as I am at work. However, I would try three things:
I am not sure about the position of your camera. It is at (0, 0, 1), looking along the vector (-1, -1, -1), which means it is aimed at the point (-1, -1, 0). And that is kinda strange... Try positioning the camera further away (depending on the scale of your model), like (0, 0, 10) or even further, and focus it on (0, 0, 0) or wherever the central point of your model is:
Camera1.Position = new Point3D(0, 0, 10);
Camera1.LookDirection = new Point3D(0,0,0) - Camera1.Position;
Also remove the directional light (as it uses normals, and if they are wrong nothing will be shown) and try an ambient light instead. Note that your directional lighting vector (1, 1, 1) is exactly opposite to your looking direction (-1, -1, -1).
Try swapping the order of points in the triangle indices (WPF renders only one side of a mesh, so the model might be there but inside out) - instead of 0, 1, 2 try 0, 2, 1.
If nothing helps I will try your code once I get home.
/edited later/
I tried your code on a simple triangle and rewrote it according to my tips, and it worked. There are some comments and two tips on how to clean up your code a bit :)
private void viewModel(Point3DCollection points)
{
DirectionalLight DirLight1 = new DirectionalLight();
DirLight1.Color = Colors.White;
DirLight1.Direction = new Vector3D(1, 1, 1);
PerspectiveCamera Camera1 = new PerspectiveCamera();
Camera1.FarPlaneDistance = 8000;
//Camera1.NearPlaneDistance = 100; //close object will not be displayed with this option
Camera1.FieldOfView = 10;
//Camera1.Position = new Point3D(0, 0, 1);
//Camera1.LookDirection = new Vector3D(-1, -1, -1);
Camera1.Position = new Point3D(0, 0, 10);
Camera1.LookDirection = new Point3D(0, 0, 0) - Camera1.Position; //focus camera on real center of your model (0,0,0) in this case
Camera1.UpDirection = new Vector3D(0, 1, 0);
//you can use constructor to create Camera instead of assigning its properties like:
//PerspectiveCamera Camera1 = new PerspectiveCamera(new Point3D(0,0,10), new Vector3D(0,0,-1), new Vector3D(0,1,0), 10);
bool combinedvertices = true;
TriangleModel Triatomesh = new TriangleModel();
MeshGeometry3D tmesh = new MeshGeometry3D();
GeometryModel3D msheet = new GeometryModel3D();
Model3DGroup modelGroup = new Model3DGroup();
ModelVisual3D modelsVisual = new ModelVisual3D();
Viewport3D myViewport = new Viewport3D();
for (int i = 0; i < points.Count; i += 3)
{
Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);
//I swapped the order of the vertices - you may try both options with your model
}
msheet.Geometry = tmesh;
msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
//you can use constructor to create GeometryModel3D instead of assigning its properties like:
//msheet = new GeometryModel3D(tmesh, new DiffuseMaterial(new SolidColorBrush(Colors.White)));
modelGroup.Children.Add(msheet);
//use AmbientLight instead of directional light
modelGroup.Children.Add(new AmbientLight(Colors.White));
modelsVisual.Content = modelGroup;
myViewport.IsHitTestVisible = false;
myViewport.Camera = Camera1;
myViewport.Children.Add(modelsVisual);
canvas1.Children.Add(myViewport);
myViewport.Height = canvas1.Height;
myViewport.Width = canvas1.Width;
Canvas.SetTop(myViewport, 0);
Canvas.SetLeft(myViewport, 0);
}
And the Point3DCollection I used as the parameter (instead of the Kinect input):
Point3DCollection points = new Point3DCollection();
points.Add(new Point3D(0.5, 0, 0.5));
points.Add(new Point3D(0.5, -0.5, -0.5));
points.Add(new Point3D(-0.5, -0.1, -0.5));
viewModel(points);

Basic SAT collision algorithm

Everything I know about vector projection so far is from the internet, so I am a little confused. First of all, I assume that when we project the vertices of a polygon onto an axis, we get the scalar projection as opposed to the vector projection, and the formula for this is (A.B) / |A|. Now in this tutorial, they simply use the dot product to project onto the axis. Is this because the vector and the axis are perpendicular, or something? Nonetheless, I had a go at writing a crude version in C#, but it doesn't seem to return correct results:
Rectangle r1 = new Rectangle(300, 200, 50, 50);
Rectangle r2 = new Rectangle(340, 240, 50, 50);
bool areColliding(Rectangle r1, Rectangle r2)
{
/* Using clockwise labelling
*
* B*
* . .
* . .
* A * C*
* . .
* . .
* D*
*
*/
//Calculate vectors and normals of Rectangle 1
Point r1A = r1.Location;
Point r1B = new Point(r1.Location.X + r1.Width, r1.Location.Y);
Point r1C = new Point(r1.Location.X, r1.Location.Y + r1.Height);
Point r1D = new Point(r1.Location.X + r1.Width, r1.Location.Y + r1.Height);
Vector2 r1AB = new Vector2(r1B.X - r1A.X, r1B.Y - r1A.Y);
Vector2 r1BC = new Vector2(r1C.X - r1B.X, r1C.Y - r1B.Y);
Vector2 r1CD = new Vector2(r1D.X - r1C.X, r1D.Y - r1C.Y);
Vector2 r1DA = new Vector2(r1A.X - r1D.X, r1A.Y - r1D.Y);
Vector2 r1AB_Normal = getNormal(r1AB);
Vector2 r1BC_Normal = getNormal(r1BC);
Vector2 r1CD_Normal = getNormal(r1CD);
Vector2 r1DA_Normal = getNormal(r1DA);
Point[] r1Points = {r1A, r1B, r1C, r1D};
Vector2[] Axes1 = { r1AB_Normal, r1BC_Normal, r1CD_Normal, r1DA_Normal };
//Calculate vectors and normals of Rectangle 2
Point r2A = r2.Location;
Point r2B = new Point(r2.Location.X + r2.Width, r2.Location.Y);
Point r2C = new Point(r2.Location.X, r2.Location.Y + r2.Height);
Point r2D = new Point(r2.Location.X + r2.Width, r2.Location.Y + r2.Height);
Vector2 r2AB = new Vector2(r2B.X - r2A.X, r2B.Y - r2A.Y);
Vector2 r2BC = new Vector2(r2C.X - r2B.X, r2C.Y - r2B.Y);
Vector2 r2CD = new Vector2(r2D.X - r2C.X, r2D.Y - r2C.Y);
Vector2 r2DA = new Vector2(r2A.X - r2D.X, r2A.Y - r2D.Y);
Vector2 r2AB_Normal = getNormal(r2AB);
Vector2 r2BC_Normal = getNormal(r2BC);
Vector2 r2CD_Normal = getNormal(r2CD);
Vector2 r2DA_Normal = getNormal(r2DA);
Point[] r2Points = { r2A, r2B, r2C, r2D };
Vector2[] Axes2 = { r2AB_Normal, r2BC_Normal, r2CD_Normal, r2DA_Normal };
//Start projecting each vertex on to each axis
for (int i = 0; i < Axes1.Length; i++)
{
float r1Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r1Max = float.NaN;
for (int p = 1; p < r1Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r1Min)
{
r1Min = dot;
}
}
float r2Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r2Max = float.NaN;
for (int p = 1; p < r2Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r2Min)
{
r2Min = dot;
}
}
if (r1Min < r2Max)
{
return true;
}
}
for (int i = 0; i < Axes2.Length; i++)
{
float r1Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r1Max = float.NaN;
for (int p = 1; p < r1Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r1Min)
{
r1Min = dot;
}
}
float r2Min = Vector2.Dot(Axes1[i], new Vector2(r1Points[0].X, r1Points[0].Y));
float r2Max = float.NaN;
for (int p = 1; p < r2Points.Length; p++)
{
float dot = Vector2.Dot(Axes1[i], new Vector2(r1Points[p].X, r1Points[p].Y));
if (dot < r2Min)
{
r2Min = dot;
}
}
if (r1Min < r2Max)
{
return true;
}
}
return false;
}
Vector2 getNormal(Vector2 v)
{
return new Vector2(-v.Y, v.X);
}
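For what it's worth, here is a sketch of how the projection and overlap test is usually structured (note this is not the code above: that code never fills in r1Max/r2Max, projects r2 using r1Points, and returns true on the first overlapping axis instead of returning false on the first separating axis). The plain dot product works as the projection here because leaving the axis unnormalised just scales every projection on that axis by the same factor, which doesn't change the overlap comparison:
void Project(Vector2 axis, Point[] points, out float min, out float max)
{
    //project every vertex onto the axis and track the smallest and largest values
    min = max = Vector2.Dot(axis, new Vector2(points[0].X, points[0].Y));
    for (int p = 1; p < points.Length; p++)
    {
        float dot = Vector2.Dot(axis, new Vector2(points[p].X, points[p].Y));
        if (dot < min) min = dot;
        if (dot > max) max = dot;
    }
}
bool ProjectionsOverlap(Vector2[] axes, Point[] r1Points, Point[] r2Points)
{
    foreach (Vector2 axis in axes)
    {
        Project(axis, r1Points, out float r1Min, out float r1Max);
        Project(axis, r2Points, out float r2Min, out float r2Max);
        //a gap on any axis is a separating axis, so the shapes cannot be colliding
        if (r1Max < r2Min || r2Max < r1Min) return false;
    }
    return true;
}
The rectangles collide only if no separating axis exists among the axes of both shapes, i.e. ProjectionsOverlap(Axes1, r1Points, r2Points) && ProjectionsOverlap(Axes2, r1Points, r2Points).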
