3D camera rotates the object, but not the camera in space. Implementation problem - c#

I'm learning how to implement the 3D spaces. Now i want implement 3D camera for moving around my designed 3D space.
The problem is that camera rotate the 3D objects. But not the camera in space as need to be. I can not understand what i'm doing wrong.
Code of the Camera class:
public class Camera3D
{
public Vector3D Pos { get; set; }
public Vector3D Target { get; set; }
public Vector3D Up { get; set; }
public double CameraSpeed { get; set; }
public Camera3D(Vector3D pos, Vector3D target, Vector3D up)
{
Pos = pos;
Target = target;
Up = up;
Pos.Normalize();
Target.Normalize();
Up.Normalize();
}
public Camera3D()
{
Pos = new Vector3D(0, 0, 3);
Target = new Vector3D(0, 0, 0);
Up = new Vector3D(0, 1, 0);
}
private Vector3D GetCameraDirection(Vector3D pos, Vector3D target)
{
Vector3D dir = pos - target;
dir.Normalize();
return dir;
}
private Vector3D GetRight(Vector3D pos, Vector3D target, Vector3D up)
{
Vector3D right = Vector3D.CrossProduct(Up, GetCameraDirection(pos, up));
right.Normalize();
return right;
}
private Vector3D GetCameraUp(Vector3D pos, Vector3D target, Vector3D up)
{
Vector3D vup = Vector3D.CrossProduct(GetCameraDirection(pos, target), GetRight(pos, target, up));
return vup;
}
public Matrix3D LookAt(Vector3D pos, Vector3D target, Vector3D up)
{
// translation matrix for camera
Matrix3D translation = new Matrix3D
(
1, 0, 0, -Pos.X,
0, 1, 0, -Pos.Y,
0, 0, 1, -Pos.Z,
0, 0, 0, 1
);
Vector3D vRight = GetRight(pos, target, up);
Vector3D vUp = GetCameraUp(pos, target, up);
Vector3D vDir = GetCameraDirection(pos, target);
Matrix3D m = new Matrix3D
(
vRight.X, vRight.Y, vRight.Z, 0,
vUp.X, vUp.Y, vUp.Z, 0,
vDir.X, vDir.Y, vDir.Z, 0,
0, 0, 0, 1
);
return m * translation;
}
}
Some useful fields and constructor of main window
// just temporary field for test rotating Camera
double a = 0;
Vector3D Translate = new Vector3D(1, 1, 1);
Vector3D Rotation = new Vector3D(0, 0, 0);
Vector3D Scaling = new Vector3D(1, 1, 1);
CTransform3D cTransform; // class for transform operations
Camera3D c = new Camera3D();
public MainWindow()
{
InitializeComponent();
cTransform = new CTransform3D(Rotation, Translate, Scaling);
cTransform.Camera = c; // camera for transformations
// load the mesh
o.Mesh.LoadFromObjectFile("D:\\de\\test\\ship.obj");
}
The main method that draw objects(here is all transformations, projection, and camera transformations too):
public void Draw()
{
try
{
// util classes
MatrixUtil mu = new MatrixUtil();
cv.Children.Clear(); // clear canvas
foreach (var triangle in o.Mesh.Triangles)
{
Triangle tProjected = new Triangle(), tTransformed = new Triangle(), tViewed = new Triangle();
// just init for right compile
tProjected.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
tTransformed.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
tViewed.Points = new Vector3D[3] { new Vector3D(0, 0, 0), new Vector3D(0, 0, 0), new Vector3D(0, 0, 0) };
// project and transform matrices
Matrix3D ProjectionMatrix = cTransform.ProjectionMatrix(5, Width, Height, 0.1, 1000); // just projection matrix
Matrix3D TransformMatrix = RotationMatrices.RotatationYMatrix(Rotation.Y); // rotate y
Matrix3D CameraMatrix = cTransform.GetCameraTransform(); // camera matrix that contains only LookAt
c.Pos = new Vector3D(Math.Sin(a), 0, Math.Cos(a)); // change position of camera
// transform points
tTransformed.Points[0] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[0]);
tTransformed.Points[1] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[1]);
tTransformed.Points[2] = mu.Matrix_MulVector(TransformMatrix, triangle.Points[2]);
// multiple camera matrix to transformed points
tViewed.Points[0] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[0]);
tViewed.Points[1] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[1]);
tViewed.Points[2] = mu.Matrix_MulVector(CameraMatrix, tTransformed.Points[2]);
// project the object
tProjected.Points[0] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[0]);
tProjected.Points[1] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[1]);
tProjected.Points[2] = mu.Matrix_MulVector(ProjectionMatrix, tViewed.Points[2]);
// just draw the result
new CTriangle(
triangle.Points[0].X + o.Position.X, triangle.Points[0].Y + o.Position.Y ,
triangle.Points[1].X + o.Position.X, triangle.Points[1].Y + o.Position.Y ,
triangle.Points[2].X + o.Position.X, triangle.Points[2].Y + o.Position.Y ,
cv, Brushes.Gray, Brushes.Black);
}
a+= .1;
Rotation.X += 0.05f;
Rotation.Y += 0.05f;
Rotation.Z += 0.05f;
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString());
}
}
Projection matrix method from CTransform class:
public Matrix3D ProjectionMatrix(double fov, double width, double height, double near, double far)
{
int fAspectRatio = (int)(width / height);
double fFovRad = 1.0f / Math.Tan(fov * 0.5f / 180.0f * 3.14159f);
return new Matrix3D
(
fAspectRatio * fFovRad, 0, 0, 0,
0, fFovRad, 0, 0,
0, 0, far / (far - near), 1,
0, 0, (-far * near) / (far - near), 1
);
}
GetCameraTransform() Method from CTransform class:
public Matrix3D GetCameraTransform()
{
Matrix3D cameraLookAt = Camera.LookAt(camera.Pos, camera.Target, camera.Up);
return cameraLookAt;
}
Fields of camera from CTransform class:
private Camera3D camera;
public Camera3D Camera { get { return camera; } set { camera = value; } }
Multiple Matrix to Vector method from MatrixUtil class for transformations about points of triangle:
public Vector3D Matrix_MulVector(Matrix3D m, Vector3D v)
{
Vector3D res = new Vector3D();
// additional W component of vector usually will 1. So just add offsets
res.X = v.X * m.M11 + v.Y * m.M21 + v.Z * m.M31 + m.OffsetX;
res.Y = v.X * m.M12 + v.Y * m.M22 + v.Z * m.M32 + m.OffsetY;
res.Z = v.X * m.M13 + v.Y * m.M23 + v.Z * m.M33 + m.OffsetZ;
return res;
}
I know its hard to understand all that mess. But i need help in understanding how the 3D camera works in 3D graphic. And how to implement it in my designed 3D space.
Tried change the order of multiplication matrix and triangles in all pipeline. Got more troubles.
Change the translation camera matrix. Result: nothing.
Change the LookAt method. Result: nothing.
Checked and tried to change some other matrices about transform the object. Result: nothing.

Related

Meshes created from code don't have a position unity

So, I tried to create a grid so that I can instantiate objects on it. I check for the position of said hit object (one of the squares I created) and then set the instantiated object to that position. Problem is, the squares I created with code don't have a position and are all set to 0, 0, 0.
{
GameObject tileObject = new GameObject(string.Format("{0}, {1}", x, y));
tileObject.transform.parent = transform;
Mesh mesh = new Mesh();
tileObject.AddComponent<MeshFilter>().mesh = mesh;
tileObject.AddComponent<MeshRenderer>().material = tileMaterial;
Vector3[] vertices = new Vector3[4];
vertices[0] = new Vector3(x * tileSize, 0, y * tileSize);
vertices[1] = new Vector3(x * tileSize, 0, (y +1) * tileSize);
vertices[2] = new Vector3((x +1) * tileSize, 0, y * tileSize);
vertices[3] = new Vector3((x +1) * tileSize, 0, (y +1) * tileSize);
int[] tris = new int[] { 0, 1, 2, 1, 3, 2 };
mesh.vertices = vertices;
mesh.triangles = tris;
mesh.RecalculateNormals();
tileObject.layer = LayerMask.NameToLayer("Tile");
tileObject.AddComponent<BoxCollider>();
//var xPos = Mathf.Round(x);
//var yPos = Mathf.Round(y);
//tileObject.gameObject.transform.position = new Vector3(xPos , 0f, yPos);
return tileObject;
}```
As said your issue is that you leave all tiles on the position 0,0,0 and only set their vertices to the desired world space positions.
You would rather want to keep your vertices local like e.g.
// I would use the offset of -0.5f so the mesh is centered at the transform pivot
// Also no need to recreate the arrays everytime, you can simply reference the same ones
private readonly Vector3[] vertices = new Vector3[4]
{
new Vector3(-0.5f, 0, -0.5f);
new Vector3(-0.5f, 0, 0.5f);
new Vector3(0.5f, 0, -0.5f);
new Vector3(0.5f, 0, 0.5f);
};
private readonly int[] tris = new int[] { 0, 1, 2, 1, 3, 2 };
and then in your method do
GameObject tileObject = new GameObject($"{x},{y}");
tileObject.transform.parent = transform;
tileObject.localScale = new Vector3 (tileSize, 1, tileSize);
tileObject.localPosition = new Vector3(x * tileSize, 0, y * tileSize);
The latter depends of course on your needs. Actually I would prefer to have the tiles also centered around the grid object so something like e.g.
// The "-0.5f" is for centering the tile itself correctly
// The "-gridWith/2f" makes the entire grid centered around the parent
tileObject.localPosition = new Vector3((x - 0.5f - gridWidth/2f) * tileSize, 0, (y - 0.5f - gridHeight/2f) * tileSize);
In order to later find out which tile you are standing on (e.g. via raycasts, collisions, etc) I would then rather use a dedicated component and simply tell it it's coordinates like e.g.
// Note that Tile is a built-in type so you would want to avoid confusion
public class MyTile : MonoBehaviour
{
public Vector2Int GridPosition;
}
and then while generating your grid you would simply add
var tile = tileObject.AddComponent<MyTile>();
tile.GridPosition = new Vector2Int(x,y);
while you can still also access its transform.position to get the actual world space center of the tiles

How to check if device has been rotated on all axis in Unity

I want to check in Unity if the device has been rotated on all of it's axis.
So, I am reading the rotation of all the axis.
What should I do in order to validate for example that the user has "flipped" his device over the X-axis? I need to check the value, and see that they contain 0, 90, 180 and 270 degrees in a loop.
Here is part of my code:
void Update () {
float X = Input.acceleration.x;
float Y = Input.acceleration.y;
float Z = Input.acceleration.z;
xText.text = ((Mathf.Atan2(Y, Z) * 180 / Mathf.PI)+180).ToString();
yText.text = ((Mathf.Atan2(X, Z) * 180 / Mathf.PI)+180).ToString();
zText.text = ((Mathf.Atan2(X, Y) * 180 / Mathf.PI)+180).ToString();
}
The accelerometer only tells you if the acceleration of the device changes. So you will have values if the device started moving, or stopped moving. You can't retrieve its orientation from that.
Instead you need to use the gyroscope of the device. Most device have one nowadays.
Fortunately, Unity supports the gyroscope through the Gyroscope class
Simply using
Input.gyro.attitude
Will give you the orientation of the device in space, in the form of a quaternion.
To check the angles, use the eulerAngles function, for instance, is the device flipped in the x axis:
Vector3 angles = Input.gyro.attitude.eulerAngles;
bool xFlipped = angles.x > 180;
Be careful, you might have to invert some values if you want to apply the rotation in Unity (because it depend which orientation the devices uses for positive values, left or right)
// The Gyroscope is right-handed. Unity is left handed.
// Make the necessary change to the camera.
private static Quaternion GyroToUnity(Quaternion q)
{
return new Quaternion(q.x, q.y, -q.z, -q.w);
}
Here is the full example from the doc (Unity version 2017.3), in case the link above is broken. It shows how to read value from the gyroscope, and apply them to an object in Unity.
// Create a cube with camera vector names on the faces.
// Allow the device to show named faces as it is oriented.
using UnityEngine;
public class ExampleScript : MonoBehaviour
{
// Faces for 6 sides of the cube
private GameObject[] quads = new GameObject[6];
// Textures for each quad, should be +X, +Y etc
// with appropriate colors, red, green, blue, etc
public Texture[] labels;
void Start()
{
// make camera solid colour and based at the origin
GetComponent<Camera>().backgroundColor = new Color(49.0f / 255.0f, 77.0f / 255.0f, 121.0f / 255.0f);
GetComponent<Camera>().transform.position = new Vector3(0, 0, 0);
GetComponent<Camera>().clearFlags = CameraClearFlags.SolidColor;
// create the six quads forming the sides of a cube
GameObject quad = GameObject.CreatePrimitive(PrimitiveType.Quad);
quads[0] = createQuad(quad, new Vector3(1, 0, 0), new Vector3(0, 90, 0), "plus x",
new Color(0.90f, 0.10f, 0.10f, 1), labels[0]);
quads[1] = createQuad(quad, new Vector3(0, 1, 0), new Vector3(-90, 0, 0), "plus y",
new Color(0.10f, 0.90f, 0.10f, 1), labels[1]);
quads[2] = createQuad(quad, new Vector3(0, 0, 1), new Vector3(0, 0, 0), "plus z",
new Color(0.10f, 0.10f, 0.90f, 1), labels[2]);
quads[3] = createQuad(quad, new Vector3(-1, 0, 0), new Vector3(0, -90, 0), "neg x",
new Color(0.90f, 0.50f, 0.50f, 1), labels[3]);
quads[4] = createQuad(quad, new Vector3(0, -1, 0), new Vector3(90, 0, 0), "neg y",
new Color(0.50f, 0.90f, 0.50f, 1), labels[4]);
quads[5] = createQuad(quad, new Vector3(0, 0, -1), new Vector3(0, 180, 0), "neg z",
new Color(0.50f, 0.50f, 0.90f, 1), labels[5]);
GameObject.Destroy(quad);
}
// make a quad for one side of the cube
GameObject createQuad(GameObject quad, Vector3 pos, Vector3 rot, string name, Color col, Texture t)
{
Quaternion quat = Quaternion.Euler(rot);
GameObject GO = Instantiate(quad, pos, quat);
GO.name = name;
GO.GetComponent<Renderer>().material.color = col;
GO.GetComponent<Renderer>().material.mainTexture = t;
GO.transform.localScale += new Vector3(0.25f, 0.25f, 0.25f);
return GO;
}
protected void Update()
{
GyroModifyCamera();
}
protected void OnGUI()
{
GUI.skin.label.fontSize = Screen.width / 40;
GUILayout.Label("Orientation: " + Screen.orientation);
GUILayout.Label("input.gyro.attitude: " + Input.gyro.attitude);
GUILayout.Label("iphone width/font: " + Screen.width + " : " + GUI.skin.label.fontSize);
}
/********************************************/
// The Gyroscope is right-handed. Unity is left handed.
// Make the necessary change to the camera.
void GyroModifyCamera()
{
transform.rotation = GyroToUnity(Input.gyro.attitude);
}
private static Quaternion GyroToUnity(Quaternion q)
{
return new Quaternion(q.x, q.y, -q.z, -q.w);
}
}

Vertex Cube Center of Rotation

How do i find center of rotation for a cube made using vertex buffers?
The cube is currently rotating on a vertex and I've been stuck all week trying to figure out how to adjust it to the center.
Here is my code for rendering a cube:
class RenderCube
{
KeyboardState currentKeys;
GamePadState currentGamepad;
//Transform later to have static v and i buffers.
private VertexBuffer vBuffer;
public VertexBuffer VBuffer
{ get { return vBuffer; } set { vBuffer = value; } }
private IndexBuffer iBuffer;
public IndexBuffer IBuffer
{ get { return iBuffer; } set { iBuffer = value; } }
private BasicEffect bEffect;
public BasicEffect BEffect
{ get { return bEffect; } set { bEffect = value; } }
private Matrix world;
public Matrix World
{ get { return world; } set { world = value; } }
private Matrix view;
public Matrix View
{ get { return view; } set { view = value; } }
private Matrix projection;
private Matrix Projection
{ get { return projection; } set { projection = value; } }
private Color color;
public Color Color
{ get { return color; } set { color = value; } }
private Vector3 position;
public Vector3 Position
{ get { return position; } set { position = value; } }
//Need to change this eventually to use textures.
private VertexPositionColor[] vertices;
short[] indices;
private GraphicsDevice device;
//constructors!
public RenderCube(Color col, Vector3 pos, GraphicsDevice dev)
{
device = dev;
this.color = col;
this.position = pos;
SetUpVertices();
SetUpIndices();
world = Matrix.CreateTranslation(position);
//world = Matrix.CreateTranslation(0, 0, 0);
bEffect = new BasicEffect(device);
bEffect.World = world;
bEffect.VertexColorEnabled = true;
//bEffect.EnableDefaultLighting();
}
public void Render(Camera cam)
{
bEffect.View = cam.view;
bEffect.Projection = cam.projection;
bEffect.World *= cam.rotX;
bEffect.World *= cam.rotY;
bEffect.World *= cam.rotZ;
var rotationCenter = new Vector3(0.5f, 0.5f, 0.5f);
device.SetVertexBuffer(vBuffer);
device.Indices = IBuffer;
foreach (EffectPass pass in bEffect.CurrentTechnique.Passes)
{
pass.Apply();
device.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0, 8, 0, 12);
}
}
/// <summary>
/// Sets up the vertices for a cube using 8 unique vertices.
/// Build order is front to back, left to up to right to down.
/// </summary>
private void SetUpVertices()
{
vertices = new VertexPositionColor[8];
//front left bottom corner
vertices[0] = new VertexPositionColor(new Vector3(0, 0, 0), color);
//front left upper corner
vertices[1] = new VertexPositionColor(new Vector3(0, 100, 0), color);
//front right upper corner
vertices[2] = new VertexPositionColor(new Vector3(100, 100, 0), color);
//front lower right corner
vertices[3] = new VertexPositionColor(new Vector3(100, 0, 0), color);
//back left lower corner
vertices[4] = new VertexPositionColor(new Vector3(0, 0, -100), color);
//back left upper corner
vertices[5] = new VertexPositionColor(new Vector3(0, 100, -100), color);
//back right upper corner
vertices[6] = new VertexPositionColor(new Vector3(100, 100, -100), color);
//back right lower corner
vertices[7] = new VertexPositionColor(new Vector3(100, 0, -100), color);
vBuffer = new VertexBuffer(device, typeof(VertexPositionColor), 8, BufferUsage.WriteOnly);
vBuffer.SetData<VertexPositionColor>(vertices);
}
/// <summary>
/// Sets up the indices for a cube. Has 36 positions that match up
/// to the element numbers of the vertices created earlier.
/// Valid range is 0-7 for each value.
/// </summary>
private void SetUpIndices()
{
indices = new short[36];
//Front face
//bottom right triangle
indices[0] = 0;
indices[1] = 3;
indices[2] = 2;
//top left triangle
indices[3] = 2;
indices[4] = 1;
indices[5] = 0;
//back face
//bottom right triangle
indices[6] = 4;
indices[7] = 7;
indices[8] = 6;
//top left triangle
indices[9] = 6;
indices[10] = 5;
indices[11] = 4;
//Top face
//bottom right triangle
indices[12] = 1;
indices[13] = 2;
indices[14] = 6;
//top left triangle
indices[15] = 6;
indices[16] = 5;
indices[17] = 1;
//bottom face
//bottom right triangle
indices[18] = 4;
indices[19] = 7;
indices[20] = 3;
//top left triangle
indices[21] = 3;
indices[22] = 0;
indices[23] = 4;
//left face
//bottom right triangle
indices[24] = 4;
indices[25] = 0;
indices[26] = 1;
//top left triangle
indices[27] = 1;
indices[28] = 5;
indices[29] = 4;
//right face
//bottom right triangle
indices[30] = 3;
indices[31] = 7;
indices[32] = 6;
//top left triangle
indices[33] = 6;
indices[34] = 2;
indices[35] = 3;
iBuffer = new IndexBuffer(device, IndexElementSize.SixteenBits, sizeof(short) * indices.Length, BufferUsage.WriteOnly);
iBuffer.SetData(indices);
}
}
The basic idea is to introduce a translation matrix that pushes the cube to the origin, perform the rotation and undo the translation:
public void Render(Camera cam)
{
//...
//push the cube to the origin
bEffect.World *= Matrix.CreateTranslation(-50, -50, 50);
//perform the rotation
bEffect.World *= cam.rotX;
bEffect.World *= cam.rotY;
bEffect.World *= cam.rotZ;
//undo the translation
bEffect.World *= Matrix.CreateTranslation(50, 50, -50);
//...

Matrix transformations to recreate camera "Look At" functionality

Summary:
I'm given a series of points in 3D space, and I want to analyze them from any viewing angle. I'm trying to figure out how to reproduce the "Look At" functionality of OpenGL in WPF. I want the mouse move X,Y to manipulate the Phi and Theta Spherical Coordinates (respectively) of the camera so that I as I move my mouse, the camera appears to orbit around the center of mass (generally the origin) of the point cloud, which will represent the target of the Look At
What I've done:
I have made the following code, but so far it isn't doing what I want:
internal static Matrix3D CalculateLookAt(Vector3D eye, Vector3D at = new Vector3D(), Vector3D up = new Vector3D())
{
if (Math.Abs(up.Length - 0.0) < double.Epsilon) up = new Vector3D(0, 1, 0);
var zaxis = (at - eye);
zaxis.Normalize();
var xaxis = Vector3D.CrossProduct(up, zaxis);
xaxis.Normalize();
var yaxis = Vector3D.CrossProduct(zaxis, xaxis);
return new Matrix3D(
xaxis.X, yaxis.X, zaxis.X, 0,
xaxis.Y, yaxis.Y, zaxis.Y, 0,
xaxis.Z, yaxis.Z, zaxis.Z, 0,
Vector3D.DotProduct(xaxis, -eye), Vector3D.DotProduct(yaxis, -eye), Vector3D.DotProduct(zaxis, -eye), 1
);
}
I got the algorithm from this link: http://msdn.microsoft.com/en-us/library/bb205342(VS.85).aspx
I then apply the returned matrix to all of the points using this:
var vector = new Vector3D(p.X, p.Y, p.Z);
var projection = Vector3D.Multiply(vector, _camera); // _camera is the LookAt Matrix
if (double.IsNaN(projection.X)) projection.X = 0;
if (double.IsNaN(projection.Y)) projection.Y = 0;
if (double.IsNaN(projection.Z)) projection.Z = 0;
return new Point(
(dispCanvas.ActualWidth * projection.X / 320),
(dispCanvas.ActualHeight * projection.Y / 240)
);
I am calculating the center of all the points as the at vector, and I've been setting my initial eye vector at (center.X,center.Y,center.Z + 100) which is plenty far away from all the points
I then take the mouse move and apply the following code to get the Spherical Coordinates and put that into the CalculateLookAt function:
var center = GetCenter(_points);
var pos = e.GetPosition(Canvas4); //e is of type MouseButtonEventArgs
var delta = _previousPoint - pos;
double r = 100;
double theta = delta.Y * Math.PI / 180;
double phi = delta.X * Math.PI / 180;
var x = r * Math.Sin(theta) * Math.Cos(phi);
var y = r * Math.Cos(theta);
var z = -r * Math.Sin(theta) * Math.Sin(phi);
_camera = MathHelper.CalculateLookAt(new Vector3D(center.X * x, center.Y * y, center.Z * z), new Vector3D(center.X, center.Y, center.Z));
UpdateCanvas(); // Redraws the points on the canvas using the new _camera values
Conclusion:
This does not make the camera orbit around the points. So either my understanding of how to use the Look At function is off, or my math is incorrect.
Any help would be very much appreciated.
Vector3D won't transform in affine space. The Vector3D won't translate because it is a vector, which doesn't exist in affine space (i.e. 3D vector space with a translation component), only in vector space. You need a Point3D:
var m = new Matrix3D(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
10, 10, 10, 1);
var v = new Point3D(1, 1, 1);
var r = Point3D.Multiply(v, m); // 11,11,11
Note your presumed answer is also incorrect, as it should be 10 + 1 for each component, since your vector is [1,1,1].
Well, it turns out that the Matrix3D libraries have some interesting issues.
I noticed that Vector3D.Multiply(vector, matrix) would not translate the vector.
For example:
var matrixTest = new Matrix3D(
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
10, 10, 10, 1
);
var vectorTest = new Vector3D(1, 1, 1);
var result = Vector3D.Multiply(vectorTest, matrixTest);
// result = {1,1,1}, should be {11,11,11}
I ended up having to rewrite some of the basic matrix math functions in order for the code to work.
Everything was working except for the logic side, it was the basic math (handled by the Matrix3D library) that was the problem.
Here is the fix. Replace all Vector3D.Multiply method calls with this:
public static Vector3D Vector3DMultiply(Vector3D vector, Matrix3D matrix)
{
return new Vector3D(
vector.X * matrix.M11 + vector.Y * matrix.M12 + vector.Z * matrix.M13 + matrix.OffsetX,
vector.X * matrix.M21 + vector.Y * matrix.M22 + vector.Z * matrix.M23 + matrix.OffsetY,
vector.X * matrix.M31 + vector.Y * matrix.M32 + vector.Z * matrix.M33 + matrix.OffsetZ
);
}
And everything works!

Drawing a textured quad using XNA

I'm attempting to render a textured quad using the example located here.
I can successfully render the quad, but the texture information appears to be lost. The quad takes the color of the underlying texture, though.
I've checked the obvious problems ("Does the BasicEffect rendering the quad have the TextureEnabled property set to true?") and can't immediately see the problem.
Code below:
public class Quad
{
public VertexPositionNormalTexture[] Vertices;
public Vector3 Origin;
public Vector3 Up;
public Vector3 Normal;
public Vector3 Left;
public Vector3 UpperLeft;
public Vector3 UpperRight;
public Vector3 LowerLeft;
public Vector3 LowerRight;
public int[] Indexes;
public Quad(Vector3 origin, Vector3 normal, Vector3 up,
float width, float height)
{
this.Vertices = new VertexPositionNormalTexture[4];
this.Indexes = new int[6];
this.Origin = origin;
this.Normal = normal;
this.Up = up;
// Calculate the quad corners
this.Left = Vector3.Cross(normal, this.Up);
Vector3 uppercenter = (this.Up * height / 2) + origin;
this.UpperLeft = uppercenter + (this.Left * width / 2);
this.UpperRight = uppercenter - (this.Left * width / 2);
this.LowerLeft = this.UpperLeft - (this.Up * height);
this.LowerRight = this.UpperRight - (this.Up * height);
this.FillVertices();
}
private void FillVertices()
{
Vector2 textureUpperLeft = new Vector2(0.0f, 0.0f);
Vector2 textureUpperRight = new Vector2(1.0f, 0.0f);
Vector2 textureLowerLeft = new Vector2(0.0f, 1.0f);
Vector2 textureLowerRight = new Vector2(1.0f, 1.0f);
for (int i = 0; i < this.Vertices.Length; i++)
{
this.Vertices[i].Normal = this.Normal;
}
this.Vertices[0].Position = this.LowerLeft;
this.Vertices[0].TextureCoordinate = textureLowerLeft;
this.Vertices[1].Position = this.UpperLeft;
this.Vertices[1].TextureCoordinate = textureUpperLeft;
this.Vertices[2].Position = this.LowerRight;
this.Vertices[2].TextureCoordinate = textureLowerRight;
this.Vertices[3].Position = this.UpperRight;
this.Vertices[3].TextureCoordinate = textureUpperRight;
this.Indexes[0] = 0;
this.Indexes[1] = 1;
this.Indexes[2] = 2;
this.Indexes[3] = 2;
this.Indexes[4] = 1;
this.Indexes[5] = 3;
}
}
this.quadEffect = new BasicEffect(this.GraphicsDevice, null);
this.quadEffect.AmbientLightColor = new Vector3(0.8f, 0.8f, 0.8f);
this.quadEffect.LightingEnabled = true;
this.quadEffect.World = Matrix.Identity;
this.quadEffect.View = this.View;
this.quadEffect.Projection = this.Projection;
this.quadEffect.TextureEnabled = true;
this.quadEffect.Texture = someTexture;
this.quad = new Quad(Vector3.Zero, Vector3.UnitZ, Vector3.Up, 2, 2);
this.quadVertexDecl = new VertexDeclaration(this.GraphicsDevice, VertexPositionNormalTexture.VertexElements);
public override void Draw(GameTime gameTime)
{
this.GraphicsDevice.Textures[0] = this.SpriteDictionary["B1S1I800"];
this.GraphicsDevice.VertexDeclaration = quadVertexDecl;
quadEffect.Begin();
foreach (EffectPass pass in quadEffect.CurrentTechnique.Passes)
{
pass.Begin();
GraphicsDevice.DrawUserIndexedPrimitives<VertexPositionNormalTexture>(
PrimitiveType.TriangleList,
beamQuad.Vertices, 0, 4,
beamQuad.Indexes, 0, 2);
pass.End();
}
quadEffect.End();
}
From what I can see, this should work. The only thing I can imagine, which isn't in this code, is that the loading of the texture goes wrong somewhere. I also can't quite visualize what you mean that the quad has the underlying color of the texture? Do you have a screenshot for us?
Also, if something does show up, a very distorted version of your texture for example, it could be possible that the rendering of other stuff has effect on the rendering of the quad. For example if you draw the quad while the graphicsdevice has another vertex declaration on it, or if the previous thing rendered set some exotic rendering state, or if you're drawing the quad within the drawing code of something else. Try isolating this code, into a fresh project or something, or disable the rendering of everything else.

Categories

Resources