I'm trying to use SharpGL to render in a WPF application using gl.VertexPointer() and gl.DrawArrays(), but I cannot get it to render a square. The background clears to green and I can see the FPS drawn at the bottom left of the WPF panel. When I add the code for the square, the FPS text disappears as well and I just have a blank screen.
I'm doing the exact same thing that I have in a C++ project, where it works just fine. I don't know what I'm missing or doing incorrectly.
XAML
<Window x:Class="NodePlusPlus.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:local="clr-namespace:NodePlusPlus"
xmlns:gl="clr-namespace:SharpGL.WPF;assembly=SharpGL.WPF"
mc:Ignorable="d"
Title="MainWindow" Height="900" Width="1600" Background="#FF202020">
<Grid x:Name="MainGrid">
<gl:OpenGLControl x:Name="glPanel" DrawFPS="True" />
</Grid>
</Window>
C#
public MainWindow()
{
InitializeComponent();
glPanel.OpenGLInitialized += glPanel_OpenGLInitialized;
glPanel.OpenGLDraw += glPanel_OpenGLDraw;
glPanel.Resized += glPanel_Resized;
}
private void glPanel_Resized(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
OpenGL gl = args.OpenGL;
gl.Ortho2D(0, Width, 0, Height);
}
private void glPanel_OpenGLInitialized(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
OpenGL gl = args.OpenGL;
gl.Hint(OpenGL.GL_PERSPECTIVE_CORRECTION_HINT, OpenGL.GL_FASTEST);
gl.ShadeModel(OpenGL.GL_SMOOTH);
gl.Enable(OpenGL.GL_CULL_FACE);
gl.Enable(OpenGL.GL_TEXTURE_2D);
gl.Enable(OpenGL.GL_BLEND);
gl.Enable(OpenGL.GL_DEPTH_TEST);
gl.BlendFunc(OpenGL.GL_SRC_ALPHA, OpenGL.GL_ONE_MINUS_SRC_ALPHA);
gl.EnableClientState(OpenGL.GL_VERTEX_ARRAY);
gl.EnableClientState(OpenGL.GL_TEXTURE_COORD_ARRAY);
gl.ClearColor(0.0f, 1.0f, 0.0f, 0.1f);
}
private void glPanel_OpenGLDraw(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
OpenGL gl = args.OpenGL;
// Clear the color and depth buffers
gl.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
gl.LoadIdentity();
// Build a rectangle
float Width = 100.0f;
float Height = 100.0f;
float[] vertices = new float[18];
vertices[0] = 0; vertices[1] = 0; vertices[2] = 0.0f;
vertices[3] = 0; vertices[4] = Height; vertices[5] = 0.0f;
vertices[6] = Width; vertices[7] = Height; vertices[8] = 0.0f;
vertices[9] = 0; vertices[10] = 0; vertices[11] = 0.0f;
vertices[12] = Width; vertices[13] = Height; vertices[14] = 0.0f;
vertices[15] = Width; vertices[16] = 0; vertices[17] = 0.0f;
// If I remove this whole Begin()-End() section, it will render the OpenGL FPS. With this draw code here, it does not render the FPS.
gl.Begin(OpenGL.GL_TRIANGLES);
gl.Translate(0.0f, 0.0f, 0.0f);
gl.Color(0.0f, 0.0f, 0.0f, 1.0f);
gl.VertexPointer(3, 0, vertices);
gl.DrawArrays(OpenGL.GL_TRIANGLES, 0, 6);
gl.End();
gl.Flush();
}
With Draw code...
Without Draw code...
As mentioned by @BDL, glBegin() and glEnd() should not be used in this case; gl.DrawArrays() must not be called inside a Begin()/End() pair.
As mentioned by @Rabbid76, glEnableClientState(OpenGL.GL_TEXTURE_COORD_ARRAY) should be removed, since I am not supplying texture coordinates.
Thanks for the help, guys!
Removing gl.Enable(OpenGL.GL_CULL_FACE) was also necessary in this specific example to draw my square: as @Rabbid76 mentioned, the default front-face winding order is counter-clockwise, and my vertices wind clockwise. (A reordered, counter-clockwise version is sketched below.)
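For reference, a minimal sketch (my own, not part of the original answers) of the same square with counter-clockwise winding, which would let gl.Enable(OpenGL.GL_CULL_FACE) stay enabled; it reuses the Width and Height locals from glPanel_OpenGLDraw:
float[] ccwVertices =
{
    0.0f,  0.0f,   0.0f,   // bottom-left
    Width, 0.0f,   0.0f,   // bottom-right
    Width, Height, 0.0f,   // top-right
    0.0f,  0.0f,   0.0f,   // bottom-left
    Width, Height, 0.0f,   // top-right
    0.0f,  Height, 0.0f    // top-left
};
gl.VertexPointer(3, 0, ccwVertices);
gl.DrawArrays(OpenGL.GL_TRIANGLES, 0, 6);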
End result
private void glPanel_OpenGLInitialized(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
OpenGL gl = args.OpenGL;
gl.Hint(OpenGL.GL_PERSPECTIVE_CORRECTION_HINT, OpenGL.GL_FASTEST);
gl.ShadeModel(OpenGL.GL_SMOOTH);
gl.Enable(OpenGL.GL_TEXTURE_2D);
gl.Enable(OpenGL.GL_BLEND);
gl.Enable(OpenGL.GL_DEPTH_TEST);
gl.BlendFunc(OpenGL.GL_SRC_ALPHA, OpenGL.GL_ONE_MINUS_SRC_ALPHA);
gl.EnableClientState(OpenGL.GL_VERTEX_ARRAY);
gl.ClearColor(0.0f, 1.0f, 0.0f, 0.1f);
}
private void glPanel_OpenGLDraw(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
OpenGL gl = args.OpenGL;
// Clear the color and depth buffers
gl.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
gl.LoadIdentity();
// Build a rectangle
float Width = 100.0f;
float Height = 100.0f;
float[] vertices = new float[18];
vertices[0] = 0; vertices[1] = 0; vertices[2] = 0.0f;
vertices[3] = 0; vertices[4] = Height; vertices[5] = 0.0f;
vertices[6] = Width; vertices[7] = Height; vertices[8] = 0.0f;
vertices[9] = 0; vertices[10] = 0; vertices[11] = 0.0f;
vertices[12] = Width; vertices[13] = Height; vertices[14] = 0.0f;
vertices[15] = Width; vertices[16] = 0; vertices[17] = 0.0f;
gl.Translate(0.0f, 0.0f, 0.0f);
gl.Color(0.0f, 0.0f, 0.0f, 1.0f);
gl.VertexPointer(3, 0, vertices);
gl.DrawArrays(OpenGL.GL_TRIANGLES, 0, 6);
gl.Flush();
}
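One more detail worth flagging (a suggestion of mine, not something from the answers above): the Resized handler in the question calls gl.Ortho2D() without selecting the projection matrix or resetting it first, so each resize multiplies a new ortho matrix onto whatever matrix is current. Depending on how SharpGL's OpenGLControl manages the projection internally, a more explicit handler could look like this sketch, assuming one unit should map to one pixel of the control:
private void glPanel_Resized(object sender, SharpGL.WPF.OpenGLRoutedEventArgs args)
{
    OpenGL gl = args.OpenGL;
    gl.MatrixMode(OpenGL.GL_PROJECTION);
    gl.LoadIdentity();
    // Map one unit to one pixel, origin at the bottom-left of the control.
    gl.Ortho2D(0, glPanel.ActualWidth, 0, glPanel.ActualHeight);
    gl.MatrixMode(OpenGL.GL_MODELVIEW);
    gl.LoadIdentity();
}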
Related
I started studying SharpGL on WPF and wanted to learn how to display an image. I found a tutorial that showed how to do it, but after adapting the code, the image is not displayed; instead a solid red image is rendered. What could be wrong?
Wpf screen
Link to Download Project
Texture texture = new Texture();
private void openGLControl1_OpenGLInitialized(object sender, EventArgs e)
{
// Get the OpenGL object, for quick access.
SharpGL.OpenGL gl = this.openGLControl1.OpenGL;
// A bit of extra initialisation here, we have to enable textures.
gl.Enable(OpenGL.GL_TEXTURE_2D);
// Create our texture object from a file. This creates the texture for OpenGL.
texture.Create(gl, @"C:\Users\user\Desktop\Crate.bmp");
}
private void OpenGLControl_OpenGLDraw(object sender, OpenGLRoutedEventArgs args)
{
// System.Threading.Thread.Sleep(1000);
SharpGL.OpenGL gl = this.openGLControl1.OpenGL;
gl.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
texture.Bind(gl);
gl.LoadIdentity();
gl.Translate(0, 0, -1f);
gl.Begin(OpenGL.GL_QUADS);
var startX = 0 - 0 / 2;
var startY = 0 - 0 / 2;
float width = 100;
float height = 100;
gl.TexCoord(0, 1); gl.Vertex(new[] { startX, startY });
gl.TexCoord(1, 1); gl.Vertex(new[] { startX + width, startY });
gl.TexCoord(1, 0); gl.Vertex(new[] { startX + width, startY + height });
gl.TexCoord(0, 0); gl.Vertex(new[] { startX, startY + height });
gl.End();
gl.Flush();
}
private void OpenGLControl_Resized(object sender, OpenGLRoutedEventArgs args)
{
SharpGL.OpenGL gl = this.openGLControl1.OpenGL;
gl.MatrixMode(OpenGL.GL_PROJECTION);
gl.LoadIdentity();
float w = 450;
float h = w * 0.5625f;
gl.Viewport(0, 0, (int)w, (int)h);
gl.MatrixMode(OpenGL.GL_MODELVIEW);
}
My image appears too large when it is rendered using SharpGL. How do I load it properly? The image's dimensions are only 313 x 79 pixels, but it occupies almost the whole screen when it renders.
I got this code from CodePlex. The example given shows how to render images in 3D (the name of the project is NativeTexturesSample):
https://sharpgl.codeplex.com/downloads/get/614989. I managed to make it render in 2D, but I think I'm not doing it correctly.
private void openGLControl1_OpenGLDraw(object sender, RenderEventArgs e)
{
const int screenWidth = 1920;
const int screenHeight = 1080;
SharpGL.OpenGL gl = this.openGLControl1.OpenGL;
gl.MatrixMode(OpenGL.GL_PROJECTION);
gl.Ortho2D(0, screenWidth, screenHeight, 0);
gl.Disable(OpenGL.GL_DEPTH_TEST);
gl.LoadIdentity();
texture.Create(gl, @"C:\image\footerlogo.bmp");
texture.Bind(gl);
gl.Begin(OpenGL.GL_QUADS);
gl.TexCoord(0.0f, 1.0f); gl.Vertex(-2.0f, 0.0f);
gl.TexCoord(1.0f, 1.0f); gl.Vertex(1.0f, 0.0f);
gl.TexCoord(1.0f, 0.0f); gl.Vertex(1.0f, 1.0f);
gl.TexCoord(0.0f, 0.0f); gl.Vertex(-2.0f, 1.0f);
gl.End();
gl.Flush();
}
Try this code:
public partial class SharpGLForm : Form
{
private bool TexturesInitialised = false;
private float rotation = 0.0f;
private float Scale = 1;
private Bitmap gImage1;
private System.Drawing.Imaging.BitmapData gbitmapdata;
private uint[] gtexture = new uint[1];
/// <summary>
/// Initializes a new instance of the <see cref="SharpGLForm"/> class.
/// </summary>
public SharpGLForm()
{
InitializeComponent();
}
private void InitialiseTexture(ref OpenGL gl)
{
gImage1 = new Bitmap(@"C:\Jess1.bmp");// Environment.GetFolderPath(Environment.SpecialFolder.MyPictures)
Rectangle rect = new Rectangle(0, 0, gImage1.Width, gImage1.Height);
gbitmapdata = gImage1.LockBits(rect, System.Drawing.Imaging.ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format24bppRgb);
gl.GenTextures(1, gtexture);
gl.BindTexture(OpenGL.GL_TEXTURE_2D, gtexture[0]);
// Upload the pixel data while the bits are still locked, then unlock.
gl.TexImage2D(OpenGL.GL_TEXTURE_2D, 0, (int)OpenGL.GL_RGB8, gImage1.Width, gImage1.Height, 0, OpenGL.GL_BGR_EXT, OpenGL.GL_UNSIGNED_BYTE, gbitmapdata.Scan0);
gImage1.UnlockBits(gbitmapdata);
uint[] array = new uint[] { OpenGL.GL_NEAREST };
gl.TexParameterI(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MIN_FILTER, array);
gl.TexParameterI(OpenGL.GL_TEXTURE_2D, OpenGL.GL_TEXTURE_MAG_FILTER, array);
TexturesInitialised = true;
}
private void openGLControl_OpenGLDraw(object sender, RenderEventArgs e)
{
// Get the OpenGL object.
OpenGL gl = openGLControl.OpenGL;
// Clear the color and depth buffer.
gl.Clear(OpenGL.GL_COLOR_BUFFER_BIT | OpenGL.GL_DEPTH_BUFFER_BIT);
// Load the identity matrix.
gl.LoadIdentity();
if (!TexturesInitialised)
{
InitialiseTexture(ref gl);
}
gl.Enable(OpenGL.GL_TEXTURE_2D);
gl.BindTexture(OpenGL.GL_TEXTURE_2D, gtexture[0]);
gl.Color(1.0f, 1.0f, 1.0f, 0.1f); //Must have, weirdness!
gl.Begin(OpenGL.GL_QUADS);
gl.TexCoord(1.0f, 1.0f);
gl.Vertex(gImage1.Width, gImage1.Height, 1.0f);
gl.TexCoord(0.0f, 1.0f);
gl.Vertex(0.0f, gImage1.Height, 1.0f);
gl.TexCoord(0.0f, 0.0f);
gl.Vertex(0.0f, 0.0f, 1.0f);
gl.TexCoord(1.0f, 0.0f);
gl.Vertex(gImage1.Width, 0.0f, 1.0f);
gl.End();
gl.Disable(OpenGL.GL_TEXTURE_2D);
}
private void openGLControl_OpenGLInitialized(object sender, EventArgs e)
{
OpenGL gl = openGLControl.OpenGL;
gl.ClearColor(0, 0, 0, 0);
}
private void openGLControl_Resized(object sender, EventArgs e)
{
OpenGL gl = openGLControl.OpenGL;
gl.MatrixMode(OpenGL.GL_PROJECTION);
gl.LoadIdentity();
gl.Perspective(60.0f, (double)Width / (double)Height, 0.01, 10000.0);
gl.LookAt(0, 0, -500, 0, 0, 0, 0, 1, 0);
gl.MatrixMode(OpenGL.GL_MODELVIEW);
}
}
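If the goal is for the image to appear at its native pixel size, one alternative to the perspective setup above (a sketch under my own assumption that a 1:1 pixel mapping is wanted; not tested against this project) is an orthographic projection in pixel units. The quad drawn at gImage1.Width by gImage1.Height then covers exactly that many pixels, provided it is drawn at a depth inside the near/far range (e.g. z = 0):
private void openGLControl_Resized(object sender, EventArgs e)
{
    OpenGL gl = openGLControl.OpenGL;
    gl.MatrixMode(OpenGL.GL_PROJECTION);
    gl.LoadIdentity();
    // One unit = one pixel, origin at the top-left of the control.
    gl.Ortho(0, openGLControl.Width, openGLControl.Height, 0, -1, 1);
    gl.MatrixMode(OpenGL.GL_MODELVIEW);
    gl.LoadIdentity();
}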
I'm playing around with the Platformer Starter Kit, and so far I've added horizontal and vertical "camera" movement, and I'm trying to add in a parallaxing background. The problem is that after two background layers it stops showing the rest of them. I'm very new to XNA and need a little help :). Here's a picture of the problem:
Here's the code. Please tell me if you need more :)
Layer classes:
class Layer
{
public Texture2D[] Textures { get; private set; }
public float ScrollRate { get; private set; }
public Layer(ContentManager content, string basePath, float scrollRate)
{
// Assumes each layer only has 3 segments.
Textures = new Texture2D[3];
for (int i = 0; i < 3; ++i)
Textures[i] = content.Load<Texture2D>(basePath + "_" + i);
ScrollRate = scrollRate;
}
public void Draw(SpriteBatch spriteBatch, float cameraPosition, float cameraPositionYAxis)
{
// Assume each segment is the same width.
int segmentWidth = Textures[0].Width;
// Calculate which segments to draw and how much to offset them.
float x = cameraPosition * ScrollRate;
float y = ScrollRate;
int leftSegment = (int)Math.Floor(x / segmentWidth);
int rightSegment = leftSegment + 1;
x = (x / segmentWidth - leftSegment) * -segmentWidth;
spriteBatch.Draw(Textures[leftSegment % Textures.Length], new Vector2(x, -y), Color.White);
spriteBatch.Draw(Textures[rightSegment % Textures.Length], new Vector2(x + segmentWidth, -y), Color.White);
}
}
Here's the Draw method in my Level.cs, along with my ScrollCamera (I don't know if ScrollCamera has anything to do with it):
public void Draw(GameTime gameTime, SpriteBatch spriteBatch)
{
ScrollCamera(spriteBatch.GraphicsDevice.Viewport);
Matrix cameraTransformYAxis = Matrix.CreateTranslation(-cameraPosition, -cameraPositionYAxis, 0.0f);
spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.AlphaBlend, SamplerState.LinearClamp,
DepthStencilState.Default, RasterizerState.CullCounterClockwise, null, cameraTransformYAxis);
//added this foreach loop
foreach (var layer in layers)
{
layer.Draw(spriteBatch, cameraPosition, cameraPositionYAxis);
}
DrawTiles(spriteBatch);
Player.Draw(gameTime, spriteBatch);
foreach (Enemy enemy in enemies)
{
enemy.Draw(gameTime, spriteBatch);
}
spriteBatch.End();
}
private void ScrollCamera(Viewport viewport)
{
#if ZUNE
const float ViewMargin = 0.4f;
#else
const float ViewMargin = 0.5f;
#endif
float marginWidth = viewport.Width * ViewMargin;
float marginLeft = cameraPosition + marginWidth;
float marginRight = cameraPosition + viewport.Width - marginWidth;
const float TopMargin = 0.4f;
const float BottomMargin = 0.4f;
float marginTop = cameraPositionYAxis + viewport.Height * TopMargin;
float marginBottom = cameraPositionYAxis + viewport.Height - viewport.Height * BottomMargin;
// float maxCameraPositionYOffset = Tile.Height * Height - viewport.Height;
float CameraMovement = 0.0f;
if (Player.Position.X < marginLeft)
CameraMovement = Player.Position.X - marginLeft;
else if (Player.Position.X > marginRight)
CameraMovement = Player.Position.X - marginRight;
// Update the camera scrolling, but prevent it from going outside the map
float maxCameraPosition = Tile.Width * Width - viewport.Width;
cameraPosition = MathHelper.Clamp(cameraPosition + CameraMovement, 0.0f, maxCameraPosition);
float cameraMovementY = 0.0f;
if (Player.Position.Y < marginTop) //above the top margin
cameraMovementY = Player.Position.Y - marginTop;
else if (Player.Position.Y > marginBottom) //below the bottom margin
cameraMovementY = Player.Position.Y - marginBottom;
float maxCameraPositionYOffset = Tile.Height * Height - viewport.Height;
cameraPositionYAxis = MathHelper.Clamp(cameraPositionYAxis + cameraMovementY, 0.0f, maxCameraPositionYOffset);
}
And I think that's it. Please tell me if you need more code :)
You want to use linear wrapping. There's an excellent blog post on it right here. This assumes, of course, that your texture tiles perfectly. You simply need to set the linear wrap sampler state; code example below:
// Use this one instead!
spriteBatch.Begin(SpriteSortMode.Deferred, null, SamplerState.LinearWrap, null, null);
spriteBatch.Draw(texture, position, new Rectangle(-scrollX, -scrollY, texture.Width, texture.Height), Color.White);
spriteBatch.End();
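As a rough illustration of how that could be folded into the Layer class above (a sketch only: the signature and the single seamlessly-tiling texture per layer are my assumptions, and this batch needs its own Begin/End pair rather than the one already opened in Level.Draw):
public void Draw(SpriteBatch spriteBatch, float cameraPosition)
{
    // Scroll the source rectangle by the layer's parallax factor; with
    // LinearWrap the texture repeats for as long as the rectangle is wide.
    int scrollX = (int)(cameraPosition * ScrollRate);
    Texture2D texture = Textures[0];
    int viewWidth = spriteBatch.GraphicsDevice.Viewport.Width;
    spriteBatch.Begin(SpriteSortMode.Deferred, null, SamplerState.LinearWrap, null, null);
    spriteBatch.Draw(texture, Vector2.Zero,
        new Rectangle(scrollX, 0, viewWidth, texture.Height), Color.White);
    spriteBatch.End();
}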
I am working with Bitmap in C# and wondering how to convert a color PNG image to only one color. I want all the visible colors in the image to become white. The parts that are transparent should remain transparent. I am going to display these against a grey background.
If the image doesn't use the alpha channel for transparency, then the following will do:
Bitmap image; // assumed to be loaded already, e.g. from your PNG file
for (int x = 0; x < image.Width; x++)
{
for (int y = 0; y < image.Height; y++)
{
if (image.GetPixel(x, y) != Color.Transparent)
{
image.SetPixel(x, y, Color.White);
}
}
}
The other answers were helpful and got me going, thanks a lot. I couldn't make them work, though; I'm not sure why. I also found that I wanted to keep the original alpha value of each pixel, which keeps the edges smooth. This is what I came up with.
for (int x = 0; x < bitmap.Width; x++)
{
for (int y = 0; y < bitmap.Height; y++)
{
Color bitColor = bitmap.GetPixel(x, y);
//Sets all the pixels to white but with the original alpha value
bitmap.SetPixel(x, y, Color.FromArgb(bitColor.A, 255, 255, 255));
}
}
Here is a screen dump of the result magnified a few times (original on top):
(source: codeodyssey.se)
SetPixel is just about the slowest possible way to do that. You can use a ColorMatrix instead:
var newImage = new Bitmap(original.Width, original.Height,
original.PixelFormat);
using (var g = Graphics.FromImage(newImage)) {
var matrix = new ColorMatrix(new[] {
new float[] { 1.0f, 0.0f, 0.0f, 0.0f, 0.0f },
new float[] { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f },
new float[] { 0.0f, 0.0f, 1.0f, 0.0f, 0.0f },
new float[] { 0.0f, 0.0f, 0.0f, 1.0f, 0.0f },
new float[] { 1.0f, 1.0f, 1.0f, 0.0f, 1.0f }
});
var attributes = new ImageAttributes();
attributes.SetColorMatrix(matrix);
g.DrawImage(original,
new Rectangle(0, 0, original.Width, original.Height),
0, 0, original.Width, original.Height,
GraphicsUnit.Pixel, attributes);
}
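For completeness, a small usage sketch (the MakeAllWhite wrapper name is made up here; it is just the snippet above returning newImage). The last row of the matrix adds 1.0 to the red, green and blue channels, which clamps every pixel to white, while the fourth column leaves the alpha channel untouched:
// Hypothetical helper wrapping the ColorMatrix snippet above.
using (var original = new Bitmap("input.png"))
using (var white = MakeAllWhite(original))
{
    white.Save("output.png", System.Drawing.Imaging.ImageFormat.Png);
}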
Try the following code:
void Test()
{
Bitmap bmp = new Bitmap(50, 50);//you will load it from file or resource
Color c = Color.Green;//transparent color
//loop height and width.
// YOU MAY HAVE TO CONVERT IT TO Height X VerticalResolution and
// Width X HorizontalResolution
for (int i = 0; i < bmp.Height; i++)
{
for (int j = 0; j < bmp.Width; j++)
{
var p = bmp.GetPixel(j, i); // get the pixel at this point
// if the pixel color does not equal the transparent color
if (!c.Equals(Color.FromArgb(p.ToArgb())))
{
//set it to white
bmp.SetPixel(j,i,Color.White);
}
}
}
}
PS: this is not tested and in no way optimized
I'm attempting to render a textured quad using the example located here.
I can successfully render the quad, but the texture detail appears to be lost; the quad does take on the overall color of the underlying texture, though.
I've checked the obvious problems ("Does the BasicEffect rendering the quad have the TextureEnabled property set to true?") and can't immediately see the problem.
Code below:
public class Quad
{
public VertexPositionNormalTexture[] Vertices;
public Vector3 Origin;
public Vector3 Up;
public Vector3 Normal;
public Vector3 Left;
public Vector3 UpperLeft;
public Vector3 UpperRight;
public Vector3 LowerLeft;
public Vector3 LowerRight;
public int[] Indexes;
public Quad(Vector3 origin, Vector3 normal, Vector3 up,
float width, float height)
{
this.Vertices = new VertexPositionNormalTexture[4];
this.Indexes = new int[6];
this.Origin = origin;
this.Normal = normal;
this.Up = up;
// Calculate the quad corners
this.Left = Vector3.Cross(normal, this.Up);
Vector3 uppercenter = (this.Up * height / 2) + origin;
this.UpperLeft = uppercenter + (this.Left * width / 2);
this.UpperRight = uppercenter - (this.Left * width / 2);
this.LowerLeft = this.UpperLeft - (this.Up * height);
this.LowerRight = this.UpperRight - (this.Up * height);
this.FillVertices();
}
private void FillVertices()
{
Vector2 textureUpperLeft = new Vector2(0.0f, 0.0f);
Vector2 textureUpperRight = new Vector2(1.0f, 0.0f);
Vector2 textureLowerLeft = new Vector2(0.0f, 1.0f);
Vector2 textureLowerRight = new Vector2(1.0f, 1.0f);
for (int i = 0; i < this.Vertices.Length; i++)
{
this.Vertices[i].Normal = this.Normal;
}
this.Vertices[0].Position = this.LowerLeft;
this.Vertices[0].TextureCoordinate = textureLowerLeft;
this.Vertices[1].Position = this.UpperLeft;
this.Vertices[1].TextureCoordinate = textureUpperLeft;
this.Vertices[2].Position = this.LowerRight;
this.Vertices[2].TextureCoordinate = textureLowerRight;
this.Vertices[3].Position = this.UpperRight;
this.Vertices[3].TextureCoordinate = textureUpperRight;
this.Indexes[0] = 0;
this.Indexes[1] = 1;
this.Indexes[2] = 2;
this.Indexes[3] = 2;
this.Indexes[4] = 1;
this.Indexes[5] = 3;
}
}
this.quadEffect = new BasicEffect(this.GraphicsDevice, null);
this.quadEffect.AmbientLightColor = new Vector3(0.8f, 0.8f, 0.8f);
this.quadEffect.LightingEnabled = true;
this.quadEffect.World = Matrix.Identity;
this.quadEffect.View = this.View;
this.quadEffect.Projection = this.Projection;
this.quadEffect.TextureEnabled = true;
this.quadEffect.Texture = someTexture;
this.quad = new Quad(Vector3.Zero, Vector3.UnitZ, Vector3.Up, 2, 2);
this.quadVertexDecl = new VertexDeclaration(this.GraphicsDevice, VertexPositionNormalTexture.VertexElements);
public override void Draw(GameTime gameTime)
{
this.GraphicsDevice.Textures[0] = this.SpriteDictionary["B1S1I800"];
this.GraphicsDevice.VertexDeclaration = quadVertexDecl;
quadEffect.Begin();
foreach (EffectPass pass in quadEffect.CurrentTechnique.Passes)
{
pass.Begin();
GraphicsDevice.DrawUserIndexedPrimitives<VertexPositionNormalTexture>(
PrimitiveType.TriangleList,
beamQuad.Vertices, 0, 4,
beamQuad.Indexes, 0, 2);
pass.End();
}
quadEffect.End();
}
From what I can see, this should work. The only thing I can imagine, which isn't in this code, is that the loading of the texture goes wrong somewhere. I also can't quite visualize what you mean by the quad taking on the underlying color of the texture. Do you have a screenshot for us?
Also, if something does show up, a very distorted version of your texture for example, it could be that the rendering of other things is affecting the rendering of the quad: for example, if you draw the quad while the graphics device has another vertex declaration set, if the previous draw left some exotic render state behind, or if you're drawing the quad inside the drawing code of something else. Try isolating this code into a fresh project, or disable the rendering of everything else.
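If it helps, here is a hedged sketch (XNA 3.1 style, matching the question's API; a diagnostic aid rather than a confirmed fix) of resetting render state that a previous SpriteBatch or other draw may have changed, placed just before the quad is drawn:
// Sketch only: restore state an earlier draw may have left behind.
GraphicsDevice.RenderState.CullMode = CullMode.None;           // rule out winding problems
GraphicsDevice.RenderState.DepthBufferEnable = true;
GraphicsDevice.RenderState.AlphaBlendEnable = false;
GraphicsDevice.RenderState.AlphaTestEnable = false;
GraphicsDevice.SamplerStates[0].AddressU = TextureAddressMode.Clamp;
GraphicsDevice.SamplerStates[0].AddressV = TextureAddressMode.Clamp;
GraphicsDevice.VertexDeclaration = quadVertexDecl;             // as in the Draw method above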