I have a very strange problem with my project. I am trying to send an index number with my vertices and use that number in the HLSL shader.
But when I set this number to a value like 1, the shader receives a wide spread of values from it instead: some negative, some between 0 and 1, and some above 1. The same thing happens even when I pass in a nonsensical number like 10000.
(I am using C# with SlimDX, and this is written against pixel shader 2.0; I tried 3.0 as well.)
int c = 0;
int testValue = 10000;
for (int i = 0; i < tris.Length; i++)
{
    vertices[c].Position = tris[i].p0;
    vertices[c].Normal = tris[i].normal;
    vertices[c].Index = testValue;
    vertices[c++].Color = Color.LightBlue.ToArgb();

    vertices[c].Position = tris[i].p1;
    vertices[c].Normal = tris[i].normal;
    vertices[c].Index = testValue;
    vertices[c++].Color = Color.LightBlue.ToArgb();

    vertices[c].Position = tris[i].p2;
    vertices[c].Normal = tris[i].normal;
    vertices[c].Index = testValue;
    vertices[c++].Color = Color.LightBlue.ToArgb();
}
This is how I create my vertices. Everything works as it should except the Index value: the colors, normals, and positions all come through correctly.
Here is the HLSL code:
VertexToPixel IndexedRenderedVS( float4 inPos : POSITION, float4 inColor : COLOR0, float3 inNormal : NORMAL, int inIndex : TEXCOORD0)
{
    VertexToPixel Output = (VertexToPixel)0;
    float4x4 xWorldViewProjection = mul(xWorld, xViewProjection);

    Output.Position = mul(inPos, xWorldViewProjection);

    if (inIndex < 0)
        Output.Color = float4(1, 0, 0, 1);
    if (inIndex > 0 && inIndex < 1)
        Output.Color = float4(0, 1, 0, 1);
    if (inIndex > 1)
        Output.Color = float4(0, 0, 1, 1);

    //Output.Color = inColor;

    return Output;
}

PixelToFrame IndexedRenderedPS(VertexToPixel PSIN)
{
    PixelToFrame Output = (PixelToFrame)0;
    Output.Color = PSIN.Color;
    return Output;
}
And finally, my VertexFormat struct:
internal Vector3 Position;
internal int Color;
internal Vector3 Normal;
internal int Index;
internal static VertexElement[] VertexElements = new VertexElement[]
{
new VertexElement(0, 0, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.Position, 0),
new VertexElement(0, sizeof(float) * 3, DeclarationType.Color, DeclarationMethod.Default, DeclarationUsage.Color, 0),
new VertexElement(0, sizeof(float) * 7, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.Normal, 0),
new VertexElement(0, sizeof(float) * 10, DeclarationType.Float1, DeclarationMethod.Default, DeclarationUsage.TextureCoordinate, 0),
VertexElement.VertexDeclarationEnd
};
internal static VertexFormat Format
{
get { return VertexFormat.Position | VertexFormat.Diffuse | VertexFormat.Normal | VertexFormat.Texture1; }
}
With this code and an insane index value (10000), I get the same picture no matter what value I pass in: 0, 1, or even negative numbers. It simply does not care what I give it.
I get this picture:
Does anybody have any idea where I went wrong? I simply can't find where I misplaced a value. I have tried a huge number of vertex declarations and changed everything inside the shader, and now I have officially run out of ideas.
Any help is appreciated. Thank you :)
In DirectX, texture coordinates are always floats, usually a float2, but sometimes a float3 or float4 (you can specify a float1, but if you do you'll actually get a float2 in the assembly and the runtime will throw away the second channel). You're typing it on the CPU side as an int, and then in the vertex description as a float1, and finally in the shader as an int. I would recommend typing all of these as float2 to start.
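A minimal sketch of that change on the C# side, keeping the field order from the question. The struct name here is made up, and the byte offsets are simply recomputed from the sizes of the fields shown in the question, so adjust them if your real struct has more members:

internal struct IndexedVertex
{
    internal Vector3 Position;   // offset 0,  12 bytes
    internal int Color;          // offset 12,  4 bytes (packed ARGB)
    internal Vector3 Normal;     // offset 16, 12 bytes
    internal Vector2 Index;      // offset 28,  8 bytes; second channel can stay 0

    internal static VertexElement[] VertexElements = new VertexElement[]
    {
        new VertexElement(0,  0, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.Position, 0),
        new VertexElement(0, 12, DeclarationType.Color,  DeclarationMethod.Default, DeclarationUsage.Color, 0),
        new VertexElement(0, 16, DeclarationType.Float3, DeclarationMethod.Default, DeclarationUsage.Normal, 0),
        new VertexElement(0, 28, DeclarationType.Float2, DeclarationMethod.Default, DeclarationUsage.TextureCoordinate, 0),
        VertexElement.VertexDeclarationEnd
    };
}

The HLSL parameter would then be declared as float2 inIndex : TEXCOORD0 and compared against float literals.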
I am making a cubic voxel game. I have chunks, the world, blocks, and mesh generation done, but there is one problem: I could not get the texturing to work.
All I need is to add a texture to each side of a 3D mesh (the texture of every side is different!). I have seen some implementations, but it is hard to read somebody else's code (I tried to use them, but it didn't work), and my own attempts produced no results.
Can anybody explain how to do this?
Here is my current code:
[ExecuteInEditMode]
[RequireComponent(typeof(MeshFilter))]
[RequireComponent(typeof(MeshRenderer))]
public class Chunk : MonoBehaviour
{
    private ushort[] _voxels = new ushort[16 * 16 * 16];
    private MeshFilter meshFilter;

    private Vector3[] cubeVertices = new[] {
        new Vector3 (0, 0, 0),
        new Vector3 (1, 0, 0),
        new Vector3 (1, 1, 0),
        new Vector3 (0, 1, 0),
        new Vector3 (0, 1, 1),
        new Vector3 (1, 1, 1),
        new Vector3 (1, 0, 1),
        new Vector3 (0, 0, 1),
    };

    private int[] cubeTriangles = new[] {
        // Front
        0, 2, 1,
        0, 3, 2,
        // Top
        2, 3, 4,
        2, 4, 5,
        // Right
        1, 2, 5,
        1, 5, 6,
        // Left
        0, 7, 4,
        0, 4, 3,
        // Back
        5, 4, 7,
        5, 7, 6,
        // Bottom
        0, 6, 7,
        0, 1, 6
    };

    public ushort this[int x, int y, int z]
    {
        get { return _voxels[x * 16 * 16 + y * 16 + z]; }
        set { _voxels[x * 16 * 16 + y * 16 + z] = value; }
    }

    void Start()
    {
        meshFilter = GetComponent<MeshFilter>();
    }

    private void Update()
    {
        GenerateMesh();
    }

    public void GenerateMesh()
    {
        Mesh mesh = new Mesh();
        List<Vector3> vertices = new List<Vector3>();
        List<int> triangles = new List<int>();

        for (var x = 0; x < 16; x++)
        {
            for (var y = 0; y < 16; y++)
            {
                for (var z = 0; z < 16; z++)
                {
                    var voxelType = this[x, y, z];
                    if (voxelType == 0)
                        continue;

                    var pos = new Vector3(x, y, z);
                    var verticesPos = vertices.Count;

                    foreach (var vert in cubeVertices)
                        vertices.Add(pos + vert);
                    foreach (var tri in cubeTriangles)
                        triangles.Add(verticesPos + tri);
                }
            }
        }

        mesh.SetVertices(vertices);
        mesh.SetTriangles(triangles.ToArray(), 0);
        meshFilter.mesh = mesh;
    }
}
NOTE: This is a repost with many edits so that it focuses on one problem and has a better explanation. Sorry about that.
Like your SetVertices() and SetTriangles() calls, you can call SetUVs() with a list of the UV coordinates of each vertex on your texture.
The UV list size must match the vertex list size!
UV coordinates are expressed as Vector2 values between 0 and 1.
For example, to apply the whole texture to the front face of your cube, the first 4 UVs would look like this:
private Vector2[] cubeUVs = new[] {
new Vector2 (0, 0),
new Vector2 (1, 0),
new Vector2 (1, 1),
new Vector2 (0, 1),
...
}
...
mesh.SetUVs(0, cubeUVs);
If your texture is not square, it will be stretched.
You should also call RecalculateBounds() and RecalculateNormals() at the end of your GenerateMesh() method to avoid some issues later.
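Putting those pieces together with the GenerateMesh() from the question, a sketch could look like this. It assumes a cubeUVs array with exactly one Vector2 per entry of cubeVertices; with the shared 8-vertex cube the per-face mapping will not be perfect, which is what the EDIT below addresses:

public void GenerateMesh()
{
    Mesh mesh = new Mesh();
    List<Vector3> vertices = new List<Vector3>();
    List<int> triangles = new List<int>();
    List<Vector2> uvs = new List<Vector2>();    // one entry per vertex

    for (var x = 0; x < 16; x++)
    {
        for (var y = 0; y < 16; y++)
        {
            for (var z = 0; z < 16; z++)
            {
                var voxelType = this[x, y, z];
                if (voxelType == 0)
                    continue;

                var pos = new Vector3(x, y, z);
                var verticesPos = vertices.Count;

                foreach (var vert in cubeVertices)
                    vertices.Add(pos + vert);
                foreach (var uv in cubeUVs)          // cubeUVs: one Vector2 per cube vertex
                    uvs.Add(uv);
                foreach (var tri in cubeTriangles)
                    triangles.Add(verticesPos + tri);
            }
        }
    }

    mesh.SetVertices(vertices);
    mesh.SetTriangles(triangles.ToArray(), 0);
    mesh.SetUVs(0, uvs);          // UV count matches vertex count
    mesh.RecalculateBounds();
    mesh.RecalculateNormals();
    meshFilter.mesh = mesh;
}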
EDIT
If you really want a different texture file for each side of the cube, then the cleanest and most performant solution in my opinion is to assign a different vertex color to each side of your cube, e.g. (1,0,0), (0,1,0), (0,0,1), (1,1,0), (1,0,1) and (0,1,1).
However, you will have to duplicate all your vertices 3 times, because the vertex color is bound to a vertex and each vertex of a cube belongs to 3 sides.
(You still have to set the UVs as described previously, but each side gets the whole texture instead of only a part of it.)
Then you will have to create a custom shader that takes 6 textures as input (one for each side).
In the fragment function, you select the right texture color according to the vertex color.
You could do this with a few ifs to select the texture, but it will not be very performant:
float3 finalColor;

if (vertexColor.r > 0.5f && vertexColor.g < 0.5f && vertexColor.b < 0.5f)
{
    finalColor = tex2D(_TopTexture, input.uv);
}
else if (...)
{
    ...
}
...
Or, if you want more performance (with a lot of cubes), you can instead use multiplications to select the right texture:
float3 topTexColor = tex2D(_TopTexture, input.uv) * vertexColor.r * (1.0f - vertexColor.g) * (1.0f - vertexColor.b);
float3 frontTexColor = ...;
...
float3 finalColor = topTexColor + frontTexColor + ...;
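On the C# side, the vertex colors could be filled in alongside the vertices, for example like this. This is a sketch only: it assumes the cube has already been rebuilt with 24 duplicated vertices (4 per face, emitted face by face), and faceColors and the face order are made up for illustration:

// Hypothetical per-face marker colors; the order must match the order in which
// the duplicated face vertices are emitted.
Color[] faceColors =
{
    new Color(1, 0, 0), new Color(0, 1, 0), new Color(0, 0, 1),
    new Color(1, 1, 0), new Color(1, 0, 1), new Color(0, 1, 1),
};

List<Color> colors = new List<Color>();

// For each voxel you emit: 6 faces * 4 duplicated vertices per face,
// all four corners of a face carrying that face's marker color.
for (int face = 0; face < 6; face++)
    for (int corner = 0; corner < 4; corner++)
        colors.Add(faceColors[face]);

// Once vertices, triangles and UVs are set:
mesh.SetColors(colors);   // must be the same length as the vertex list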
I need to render an image (with depth) that I get from outside. I can construct two textures and pass them into a shader with no problem (I can verify that the values sampled in the pixel shader are correct).
Here is what my HLSL looks like:
// image texture
Texture2D m_TextureColor : register(t0);
// depth texture with values [0..1]
Texture2D<float> m_TextureDepth : register(t1);
// sampler to forbid linear filtering since we're dealing with pixels
SamplerState m_TextureSampler { Filter = MIN_MAG_MIP_POINT; };
struct VS_IN
{
float4 position : POSITION;
float2 texcoord : TEXCOORD;
};
struct VS_OUT
{
float4 position : SV_POSITION;
float2 texcoord : TEXCOORD0;
};
struct PS_OUT
{
float4 color : COLOR0;
float depth : DEPTH0;
};
VS_OUT VS(VS_IN input)
{
VS_OUT output = (VS_OUT)0;
output.position = input.position;
output.texcoord = input.texcoord;
return output;
}
PS_OUT PS(VS_OUT input) : SV_Target
{
PS_OUT output = (PS_OUT)0;
output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
// I want to modify depth of the pixel,
// but it looks like it has no effect on depth no matter what I set here
output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
return output;
}
I construct the vertex buffer from these (with PrimitiveTopology.TriangleStrip), where the first argument (a Vector4) is the position and the second (a Vector2) is the texture coordinate:
new[]
{
new Vertex(new Vector4(-1, -1, 0.5f, 1), new Vector2(0, 1)),
new Vertex(new Vector4(-1, 1, 0.5f, 1), new Vector2(0, 0)),
new Vertex(new Vector4(1, -1, 0.5f, 1), new Vector2(1, 1)),
new Vertex(new Vector4(1, 1, 0.5f, 1), new Vector2(1, 0)),
}
Everything works just fine: I see my image, and I can sample depth from the depth texture and construct something visual from it (that is how I verify that the depth values I am sampling are correct). However, I can't figure out how to modify the pixel's depth so that it is used properly when the depth test happens. At the moment it all depends on what z value I set as my vertex position.
This is how I'm setting up DirectX11 (I'm using SharpDX and C#):
var swapChainDescription = new SwapChainDescription
{
BufferCount = 1,
ModeDescription = new ModeDescription(bufferSize.Width, bufferSize.Height, new Rational(60, 1), Format.R8G8B8A8_UNorm),
IsWindowed = true,
OutputHandle = HostHandle,
SampleDescription = new SampleDescription(1, 0),
SwapEffect = SwapEffect.Discard,
Usage = Usage.RenderTargetOutput,
};
var swapChainFlags = DeviceCreationFlags.None | DeviceCreationFlags.BgraSupport;
SharpDX.Direct3D11.Device.CreateWithSwapChain(DriverType.Hardware, swapChainFlags, swapChainDescription, out var device, out var swapchain);
Setting back buffer and depth/stencil buffer:
// color buffer
using (var textureColor = SwapChain.GetBackBuffer<Texture2D>(0))
{
TextureColorResourceView = new RenderTargetView(Device, textureColor);
}
// depth buffer
using (var textureDepth = new Texture2D(Device, new Texture2DDescription
{
Format = Format.D32_Float,
ArraySize = 1,
MipLevels = 1,
Width = BufferSize.Width,
Height = BufferSize.Height,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Default,
BindFlags = BindFlags.DepthStencil,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None
}))
{
TextureDepthResourceView = new DepthStencilView(Device, textureDepth);
}
DeviceContext.OutputMerger.SetTargets(TextureDepthResourceView, TextureColorResourceView);
Preparing depth stencil state:
var description = DepthStencilStateDescription.Default();
description.DepthComparison = Comparison.LessEqual;
description.IsDepthEnabled = true;
description.DepthWriteMask = DepthWriteMask.All;
DepthState = new DepthStencilState(Device, description);
And using it:
DeviceContext.OutputMerger.SetDepthStencilState(DepthState);
This is how I construct the color/depth textures I'm sending to the shader:
public static (ShaderResourceView resource, Texture2D texture) CreateTextureDynamic(this Device device, System.Drawing.Size size, Format format)
{
var textureDesc = new Texture2DDescription
{
MipLevels = 1,
Format = format,
Width = size.Width,
Height = size.Height,
ArraySize = 1,
BindFlags = BindFlags.ShaderResource,
Usage = ResourceUsage.Dynamic,
SampleDescription = new SampleDescription(1, 0),
CpuAccessFlags = CpuAccessFlags.Write,
};
var texture = new Texture2D(device, textureDesc);
return (new ShaderResourceView(device, texture), texture);
}
Also since I need to update them frequently:
public static void UpdateResource(this Texture2D texture, int[] buffer, System.Drawing.Size size)
{
var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
dataStream.Dispose();
texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}
public static void UpdateResource(this Texture2D texture, float[] buffer, System.Drawing.Size size)
{
var dataBox = texture.Device.ImmediateContext.MapSubresource(texture, 0, MapMode.WriteDiscard, MapFlags.None, out var dataStream);
Parallel.For(0, size.Height, rowIndex => Marshal.Copy(buffer, size.Width * rowIndex, dataBox.DataPointer + dataBox.RowPitch * rowIndex, size.Width));
dataStream.Dispose();
texture.Device.ImmediateContext.UnmapSubresource(texture, 0);
}
I also googled a lot about this and found similar posts, like this one: https://www.gamedev.net/forums/topic/573961-how-to-set-depth-value-in-pixel-shader/ , but I couldn't manage to solve it on my side.
Thanks in advance!
To write to the depth buffer, you need to target the SV_Depth system-value semantic. So your pixel shader output struct would look more like the following:
struct PS_OUT
{
float4 color : SV_Target;
float depth : SV_Depth;
};
And the shader would not specify SV_Target as in your example (the SV_ outputs are defined within the struct). So it would look like:
PS_OUT PS(VS_OUT input)
{
PS_OUT output = (PS_OUT)0;
output.color = m_TextureColor.SampleLevel(m_TextureSampler, input.texcoord, 0);
// Now that output.depth is defined with SV_Depth, and you have depth-write enabled,
// this should write to the depth buffer.
output.depth = m_TextureDepth.SampleLevel(m_TextureSampler, input.texcoord, 0);
return output;
}
Note that you may incur some performance penalties on explicitly writing to depth (specifically on AMD hardware) since that forces a bypass of their early-depth hardware optimization. All future draw calls using that depth buffer will have early-Z optimizations disabled, so it's generally a good idea to perform the depth-write operation as late as possible.
This is C# WPF using SharpDX 4.0.
I'm trying to update a dynamic texture on each render loop using a color buffer generated from a library. I'm seeing an issue where the resulting texture doesn't match the expected bitmap: the texture appears to be wider than expected, or the format is larger than expected.
var surfaceWidth = 200; var surfaceHeight = 200;
var pixelBytes = surfaceWidth * surfaceHeight * 4;
//Set up the color buffer and byte array to stream to the texture
_colorBuffer = new int[surfaceWidth * surfaceHeight];
_textureStreamBytes = new byte[pixelBytes]; //160000 length
//Create the texture to update
_scanTexture = new Texture2D(Device, new Texture2DDescription()
{
Format = Format.B8G8R8A8_UNorm,
ArraySize = 1,
MipLevels = 1,
Width = SurfaceWidth,
Height = SurfaceHeight,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Dynamic,
BindFlags = BindFlags.ShaderResource,
CpuAccessFlags = CpuAccessFlags.Write,
OptionFlags = ResourceOptionFlags.None,
});
_scanResourceView= new ShaderResourceView(Device, _scanTexture);
context.PixelShader.SetShaderResource(0, _scanResourceView);
And on render I populate the color buffer and write to the texture.
protected void Render()
{
    Device.ImmediateContext.ClearRenderTargetView(
        RenderTargetView, new SharpDX.Mathematics.Interop.RawColor4(0.8f, 0.8f, 0, 1));

    Library.GenerateColorBuffer(ref _colorBuffer);
    System.Buffer.BlockCopy(_colorBuffer, 0, depthPixels, 0, depthPixels.Length);
    _parent.DrawBitmap(ref _colorBuffer);

    DataBox databox = context.MapSubresource(_scanTexture, 0, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out DataStream stream);
    if (!databox.IsEmpty)
        stream.Write(_textureStreamBytes, 0, _textureStreamBytes.Length);
    context.UnmapSubresource(_scanTexture, 0);

    context.Draw(4, 0);
}
Sampler creation and setting before the above happens:
var sampler = new SamplerState(_device, new SamplerStateDescription()
{
Filter = SharpDX.Direct3D11.Filter.MinMagMipLinear,
AddressU = TextureAddressMode.Wrap,
AddressV = TextureAddressMode.Wrap,
AddressW = TextureAddressMode.Wrap,
BorderColor = SharpDX.Color.Blue,
ComparisonFunction = Comparison.Never,
MaximumAnisotropy = 1,
MipLodBias = 0,
MinimumLod = 0,
MaximumLod = 0,
});
context = _device.ImmediateContext;
context.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleStrip;
context.VertexShader.Set(vertexShader);
context.Rasterizer.SetViewport(new Viewport(0, 0, SurfaceWidth, SurfaceHeight, 0.0f, 1.0f));
context.PixelShader.Set(pixelShader);
context.PixelShader.SetSampler(0, sampler);
context.OutputMerger.SetTargets(depthView, _renderTargetView);
And shader (using a full screen triangle with no vertices):
SamplerState pictureSampler;
Texture2D picture;
struct PS_IN
{
float4 pos : SV_POSITION;
float2 tex : TEXCOORD;
};
PS_IN VS(uint vI : SV_VERTEXID)
{
float2 texcoord = float2(vI & 1,vI >> 1); //you can use these for texture coordinates later
PS_IN output = (PS_IN)0;
output.pos = float4((texcoord.x - 0.5f) * 2, -(texcoord.y - 0.5f) * 2, 0, 1);
output.tex = texcoord;
return output;
}
float4 PS(PS_IN input) : SV_Target
{
return picture.Sample(pictureSampler, input.tex);
}
What I'm seeing is:
_colorBuffer length: 40000 (200 width * 200 height)
_textureStreamBytes length: 160000 (200 * 200 * 4 bytes)
Stream from databox: Length = 179200, a difference of 19200 bytes / 4800 pixels.
This translates to 24 rows of 200-pixel width; in other words, the texture is 24 pixels wider than expected. But debugging shows width/height as 200.
Image showing the issue. Left is rendered view, right is bitmap
Does anyone know what I'm doing wrong here? Or things that should/could be done differently?
Thank you.
P.S. I've got this working correctly in OpenGL using a similar process, but I need to get it working for DirectX:
gl.TexSubImage2D(OpenGL.GL_TEXTURE_2D, 0, 0, 0, (int)width, (int)height, OpenGL.GL_RGBA, OpenGL.GL_UNSIGNED_BYTE, colorBuffer);
From experimenting, it appears that multiples of 32 are needed for both width and height. For example, 100 * 128 will still cause an issue, even though 128 is a multiple of 32. Instead I'm using:
var newHeight = (int)(initialHeight / 32) * 32;
var newWidth = (int)(initialWidth / 32) * 32;
I'm not sure if the root issue is my own mistake, a SharpDX issue, or a DirectX issue. The other way I see to solve this is to add padding to the pixel array to account for the difference in length at the end of each row, as sketched below.
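A sketch of that row-padding idea, reusing the mapping pattern already shown in the question (the names _scanTexture, _textureStreamBytes, SurfaceWidth and SurfaceHeight come from the question; Marshal.Copy needs System.Runtime.InteropServices). Instead of writing the whole byte array in one call, copy one tightly packed row at a time and advance the destination by the RowPitch reported by MapSubresource:

// Copy row by row so each destination row starts at a RowPitch boundary,
// which accounts for any padding the driver adds at the end of a row.
DataBox databox = context.MapSubresource(_scanTexture, 0, MapMode.WriteDiscard,
    SharpDX.Direct3D11.MapFlags.None, out DataStream stream);

int bytesPerRow = SurfaceWidth * 4;                        // tightly packed source row
for (int row = 0; row < SurfaceHeight; row++)
{
    Marshal.Copy(_textureStreamBytes, row * bytesPerRow,   // source offset (packed)
        databox.DataPointer + row * databox.RowPitch,      // destination offset (padded)
        bytesPerRow);
}

context.UnmapSubresource(_scanTexture, 0);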
In my application I have a problem with passing my world, view and projection matrices to the shader. I have set up a little engine to perform those tasks, and I am using PIX and Visual Studio to debug what I get as output.
First, here is the code that relates to the vertices and indices:
Rendering.Geometry.MeshGeometry<uint> geom = Rendering.Geometry.MeshGeometry<uint>.Create(device);
var elem = Rendering.Geometry.VertexElement.CreatePosition3D(device);
float[] vertices = new float[9]
{
0, 0, -3,
0, 0, 3,
0, 5, 0,
};
elem.DataStream.WriteRange(vertices);
geom.AddVertexElement(elem);
var triangle = geom.Triangles.AddFace();
triangle.P1 = 0;
triangle.P2 = 1;
triangle.P3 = 2;
The geometry seems to be correct, because when I debug my draw call in PIX I get the correct values for the vertices: (0/0/-3), (0/0/3), (0/5/0). So I think the index buffer, vertex buffer, input layout and primitive topology are all set up correctly.
In PIX there is that interesting Pre-VS / Post-VS view. Pre-VS, as I said, everything looks fine: the vertices are correct and in the right order. When I go to Post-VS and debug a vertex, I end up in my shader, where I can step through the instructions.
What is not correct are the matrices passed to it via the constant buffer. Here is my shader:
cbuffer MatrixBuffer
{
float4x4 worldMatrix;
float4x4 viewMatrix;
float4x4 projectionMatrix;
};
struct VertexInputType
{
float4 position : POSITION;
};
struct PixelInputType
{
float4 position : SV_POSITION;
};
PixelInputType BasicEffectVS(VertexInputType input)
{
PixelInputType output = (PixelInputType)0;
float4x4 worldViewProj = worldMatrix * viewMatrix * projectionMatrix;
output.position = mul(input.position, worldViewProj);
output.position.w = 1.0f;
return output;
}
When I look at the three matrices in PIX, I see that, except for the worldMatrix, they have completely wrong values; viewMatrix and projectionMatrix even contain NaN. The way I set the matrices in my application is the following:
basicEffect.WorldMatrix = SlimDX.Matrix.Identity;
basicEffect.ViewMatrix = SlimDX.Matrix.Transpose(SlimDX.Matrix.LookAtLH(new SlimDX.Vector3(20, 5, 0), new SlimDX.Vector3(0, 5, 0), new SlimDX.Vector3(0, 1, 0)));
basicEffect.ProjectionMatrix = SlimDX.Matrix.Transpose(SlimDX.Matrix.PerspectiveFovLH((float)Math.PI / 4, ((float)f.ClientSize.Width / f.ClientSize.Height), 1.0f, 100.0f));
Debugging them in Visual Studio gives me the correct values. I then follow the SetValue call on the shader until I get to the actual writing of the bytes, and everything is fine there!
The buffer is created the following way:
holder.buffer = new SlimDX.Direct3D11.Buffer(mShader.Device, new BufferDescription()
{
BindFlags = BindFlags.ConstantBuffer,
SizeInBytes = buffer.Description.Size,
Usage = ResourceUsage.Dynamic,
CpuAccessFlags = CpuAccessFlags.Write
});
Even worse: if I add another matrix parameter to my shader and set a hardcoded matrix for that value, like:
Matrix mat = new Matrix()
{
M11 = 1,
M12 = 2,
...
};
then in PIX I get exactly the values I expect. So my function that sets values on the shader must be right.
Does anyone have an idea where this comes from?
Make sure you remove this line:
output.position.w = 1.0f;
This is the projective component; since you already multiplied by your projection matrix, you need to send it to the pixel shader as it is.
Also, I would be quite careful with all the transposes; I'm not sure they are really needed.
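As an experiment along those lines, you could try passing the matrices without the explicit transposes. This is just the question's own setup code with the Transpose calls removed, so whether it is the right thing depends on how your effect and constant buffer expect the data to be laid out:

// Same setup as in the question, but without Matrix.Transpose, to test whether
// the transposes are what is scrambling viewMatrix / projectionMatrix.
basicEffect.WorldMatrix = SlimDX.Matrix.Identity;
basicEffect.ViewMatrix = SlimDX.Matrix.LookAtLH(
    new SlimDX.Vector3(20, 5, 0), new SlimDX.Vector3(0, 5, 0), new SlimDX.Vector3(0, 1, 0));
basicEffect.ProjectionMatrix = SlimDX.Matrix.PerspectiveFovLH(
    (float)Math.PI / 4, (float)f.ClientSize.Width / f.ClientSize.Height, 1.0f, 100.0f);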
At last I have something displayed. I switched to using graphics.GraphicsDevice.DrawIndexedPrimitives. Next problem: only one triangle is displayed, even though the data set is about 200 triangles. I formatted the incoming data to make sure every three consecutive vectors form a triangle face; these are irregular triangles forming an irregular shape. I don't fully understand the indexing of the vertices. It looks like each group of 3 indices forms a triangle, and if that is so, the indices match the data coming in. I did this:
int i4 = -1;
indices = new int[xData1.Count];
for (int i2 = 0; i2 < xData1.Count; i2++)
{
    i4++;
    cubeVertices[i4].Position = new Vector3((float)xData1[i2][0], (float)xData1[i2][1], (float)xData1[i2][2]);
    cubeVertices[i4].Color = Color.LawnGreen;
    indices[i4] = i4;
}
making the indices match the vertices coming in. Then I used Reimers' normal calculation to provide normals; this is probably wrong, as his example was using 6 vertices per index (I think!), like this:
for (int i = 0; i < cubeVertices.Length; i++)
    cubeVertices[i].Normal = new Vector3(0, 0, 0);

for (int i = 0; i < indices.Length / 3; i++)
{
    int index1 = indices[i * 3];
    int index2 = indices[i * 3 + 1];
    int index3 = indices[i * 3 + 2];

    Vector3 side1 = cubeVertices[index1].Position - cubeVertices[index3].Position;
    Vector3 side2 = cubeVertices[index1].Position - cubeVertices[index2].Position;
    Vector3 normal = Vector3.Cross(side1, side2);

    cubeVertices[index1].Normal += normal;
    cubeVertices[index2].Normal += normal;
    cubeVertices[index3].Normal += normal;
}

for (int i = 0; i < cubeVertices.Length; i++)
    cubeVertices[i].Normal.Normalize();
How many things do I need to fix here? I am only seeing 1 out of a couple of hundred triangles. :(
Thanks for your patience.
public struct VertexPositionColorNormal
{
public Vector3 Position;
public Color Color;
public Vector3 Normal;
public readonly static VertexDeclaration VertexDeclaration = new VertexDeclaration
(
new VertexElement(0, VertexElementFormat.Vector3, VertexElementUsage.Position, 0),
new VertexElement(sizeof(float) * 3, VertexElementFormat.Color, VertexElementUsage.Color, 0),
new VertexElement(sizeof(float) * 3 + 4, VertexElementFormat.Vector3, VertexElementUsage.Normal, 0)
);
}
...
private void CopyToBuffers()
{
vertexBuffer = new VertexBuffer(graphics.GraphicsDevice, VertexPositionColorNormal.VertexDeclaration,
cubeVertices.Length, BufferUsage.WriteOnly);
vertexBuffer.SetData(cubeVertices);
myIndexBuffer = new IndexBuffer(graphics.GraphicsDevice, typeof(int), indices.Length, BufferUsage.WriteOnly);
myIndexBuffer.SetData(indices);
}
....
foreach (EffectPass pass in basicEffect.CurrentTechnique.Passes)
{
basicEffect.World = world;
basicEffect.View = view;
basicEffect.Projection = proj;
pass.Apply();
graphics.GraphicsDevice.Indices = myIndexBuffer;
graphics.GraphicsDevice.SetVertexBuffer(vertexBuffer);
graphics.GraphicsDevice.DrawIndexedPrimitives(PrimitiveType.TriangleList, 0, 0,
cubeVertices.Length, 0, indices.Length / 3);
Your normal calculation is correct, and even if it were wrong, the only thing that would happen is that your triangles would receive the wrong lighting.
You're using indices which exactly match the order of the vertices coming in, which is in itself redundant. If you switch to not setting the indices at all and instead use DrawPrimitives with the same primitive count (see the sketch below), does that make a difference?
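A minimal sketch of that non-indexed draw, reusing the vertex buffer and effect from the question. DrawPrimitives here is the standard XNA overload, and cubeVertices.Length / 3 is the primitive count under the assumption that every three consecutive vertices form one triangle:

foreach (EffectPass pass in basicEffect.CurrentTechnique.Passes)
{
    pass.Apply();
    graphics.GraphicsDevice.SetVertexBuffer(vertexBuffer);

    // No index buffer: the vertices are drawn in the order they were added,
    // three vertices per triangle.
    graphics.GraphicsDevice.DrawPrimitives(PrimitiveType.TriangleList, 0, cubeVertices.Length / 3);
}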
Other than that, are you sure that the data you're giving it is valid? Are the vertex positions correctly set?