I'm playing around with the Platformer Starter Kit, and so far I've added horizontal and vertical "camera" movement. Now I'm trying to add in a parallaxing background. The problem is that after two background layers, it stops showing the rest of them. I'm very new to XNA and need a little help :). Here's a picture of the problem:
Here's the code. Please tell me if you need any more :)
The Layer class:
class Layer
{
    public Texture2D[] Textures { get; private set; }
    public float ScrollRate { get; private set; }

    public Layer(ContentManager content, string basePath, float scrollRate)
    {
        // Assumes each layer only has 3 segments.
        Textures = new Texture2D[3];
        for (int i = 0; i < 3; ++i)
            Textures[i] = content.Load<Texture2D>(basePath + "_" + i);
        ScrollRate = scrollRate;
    }

    public void Draw(SpriteBatch spriteBatch, float cameraPosition, float cameraPositionYAxis)
    {
        // Assume each segment is the same width.
        int segmentWidth = Textures[0].Width;

        // Calculate which segments to draw and how much to offset them.
        float x = cameraPosition * ScrollRate;
        float y = ScrollRate;
        int leftSegment = (int)Math.Floor(x / segmentWidth);
        int rightSegment = leftSegment + 1;
        x = (x / segmentWidth - leftSegment) * -segmentWidth;

        spriteBatch.Draw(Textures[leftSegment % Textures.Length], new Vector2(x, -y), Color.White);
        spriteBatch.Draw(Textures[rightSegment % Textures.Length], new Vector2(x + segmentWidth, -y), Color.White);
    }
}
Here's the Draw method in my Level.cs, along with my ScrollCamera method (I don't know if ScrollCamera has anything to do with it):
public void Draw(GameTime gameTime, SpriteBatch spriteBatch)
{
    ScrollCamera(spriteBatch.GraphicsDevice.Viewport);
    Matrix cameraTransformYAxis = Matrix.CreateTranslation(-cameraPosition, -cameraPositionYAxis, 0.0f);
    spriteBatch.Begin(SpriteSortMode.Immediate, BlendState.AlphaBlend, SamplerState.LinearClamp,
        DepthStencilState.Default, RasterizerState.CullCounterClockwise, null, cameraTransformYAxis);

    // Added this foreach loop.
    foreach (var layer in layers)
    {
        layer.Draw(spriteBatch, cameraPosition, cameraPositionYAxis);
    }

    DrawTiles(spriteBatch);
    Player.Draw(gameTime, spriteBatch);
    foreach (Enemy enemy in enemies)
    {
        enemy.Draw(gameTime, spriteBatch);
    }
    spriteBatch.End();
}
private void ScrollCamera(Viewport viewport)
{
#if ZUNE
    const float ViewMargin = 0.4f;
#else
    const float ViewMargin = 0.5f;
#endif
    float marginWidth = viewport.Width * ViewMargin;
    float marginLeft = cameraPosition + marginWidth;
    float marginRight = cameraPosition + viewport.Width - marginWidth;

    const float TopMargin = 0.4f;
    const float BottomMargin = 0.4f;
    float marginTop = cameraPositionYAxis + viewport.Height * TopMargin;
    float marginBottom = cameraPositionYAxis + viewport.Height - viewport.Height * BottomMargin;

    // float maxCameraPositionYOffset = Tile.Height * Height - viewport.Height;
    float CameraMovement = 0.0f;
    if (Player.Position.X < marginLeft)
        CameraMovement = Player.Position.X - marginLeft;
    else if (Player.Position.X > marginRight)
        CameraMovement = Player.Position.X - marginRight;

    // Update the camera scrolling, but prevent it from moving outside the map.
    float maxCameraPosition = Tile.Width * Width - viewport.Width;
    cameraPosition = MathHelper.Clamp(cameraPosition + CameraMovement, 0.0f, maxCameraPosition);

    float cameraMovementY = 0.0f;
    if (Player.Position.Y < marginTop) // Above the top margin.
        cameraMovementY = Player.Position.Y - marginTop;
    else if (Player.Position.Y > marginBottom) // Below the bottom margin.
        cameraMovementY = Player.Position.Y - marginBottom;

    float maxCameraPositionYOffset = Tile.Height * Height - viewport.Height;
    cameraPositionYAxis = MathHelper.Clamp(cameraPositionYAxis + cameraMovementY, 0.0f, maxCameraPositionYOffset);
}
And I think that's it. Please tell me if you need any more code :)
You want to use linear wrapping. There's an excellent blog post on it right here. This assumes, of course, that your texture tiles perfectly. You simply need to set the linear wrap sampler state; code example below:
// Use this one instead!
spriteBatch.Begin(SpriteSortMode.Deferred, null, SamplerState.LinearWrap, null, null);
spriteBatch.Draw(texture, position, new Rectangle(-scrollX, -scrollY, texture.Width, texture.Height), Color.White);
spriteBatch.End();
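Applied to the Layer class from the question, the same idea lets one seamlessly tiling texture per layer replace the manual segment math: draw it in its own Begin/End with SamplerState.LinearWrap (as in the snippet above, without the camera matrix) and offset the source rectangle by the scrolled amount. A minimal sketch, assuming each layer is a single texture that tiles perfectly (here Textures[0]):

// Sketch only: call this inside a Begin(...) that passes SamplerState.LinearWrap.
// The wrap sampler repeats the texture for any source X outside
// [0, texture.Width], so no segment juggling is needed.
public void Draw(SpriteBatch spriteBatch, float cameraPosition)
{
    Texture2D texture = Textures[0];
    int scrollX = (int)(cameraPosition * ScrollRate);
    spriteBatch.Draw(texture, Vector2.Zero,
        new Rectangle(scrollX, 0, spriteBatch.GraphicsDevice.Viewport.Width, texture.Height),
        Color.White);
}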
I am trying to achieve something similar to "How to increase (animate) the width of the square on both ends" in Unity. How can I determine the scale by which to increase the width (of the sprite) for it to fill the whole screen width?
UPDATE
Below is the Swift code I implemented for expanding the sprite width to take up the full screen width:
func expandEnemy() {
    spritePosBeforeScaleX = CGPointMake((enemy?.sprite.position.x)!, (enemy?.sprite.anchorPoint.y)!)
    enemy?.sprite.anchorPoint = CGPointMake((enemy?.sprite.position.x)! / self.size.width, (enemy?.sprite.anchorPoint.y)!)
    let enemyScalingAction = SKAction.scaleXTo(self.size.width / (enemy?.sprite.size.width)!, duration: 1.0)
    enemy!.sprite.runAction(enemyScalingAction)
    delay(0.1)
    {
        center = CGPointMake(enemy!.sprite.size.width / 2 - (enemy!.sprite.size.width * enemy!.sprite.anchorPoint.x), enemy!.sprite.size.height / 2 - (enemy!.sprite.size.height * enemy!.sprite.anchorPoint.y))
        enemy!.sprite.physicsBody = SKPhysicsBody(rectangleOfSize: enemy!.sprite.size, center: center)
    }
}
It all depends on the aspect ratio of the screen and the size of the object with the SpriteRenderer. You need to scale up the GameObject that holds the SpriteRenderer by a factor that takes both into consideration.
[ExecuteInEditMode]
public class SpriteToScreen : MonoBehaviour
{
    public float sprw = 256f;
    public float sprh = 256f;
    float unitspp = 100f;
    public float scrw = 0f;
    public float scrh = 0f;
    public float aspect = 0f;
    public float spr_aspect = 1f;
    public float factorY = 0.017578125f;

    public void Update()
    {
        scrw = Screen.width;
        scrh = Screen.height;
        aspect = scrw / scrh;

        unitspp = this.GetComponent<SpriteRenderer>().sprite.pixelsPerUnit;
        sprw = this.GetComponent<SpriteRenderer>().sprite.bounds.size.x * unitspp;
        sprh = this.GetComponent<SpriteRenderer>().sprite.bounds.size.y * unitspp;
        spr_aspect = sprw / sprh;

        this.transform.localScale = new Vector3((1152f / sprh * aspect) / spr_aspect,
                                                1152f / sprh,
                                                1);
    }
}
You can scale a UI Image to full screen along the x-axis. Just get the RectTransform, then modify its sizeDelta property to match the screen size of the device or the size of the Canvas.
The function below can scale a Unity UI Image to full screen in the x, y, or both x and y axes. The Image to scale must be under a Canvas. Assign a Sprite to the Source Image of the Image component, and the code below should work.
// Attach the UI Image to scale in the Editor here
public GameObject image;
To Scale:
Scale in X-axis Full Screen in 3 seconds:
StartCoroutine(scaleToFullScreen(image, 0, 3f));
Scale in Y-axis Full Screen in 3 seconds:
StartCoroutine(scaleToFullScreen(image, 1, 3f));
Scale in X-axis AND Y-axis Full Screen in 3 seconds:
StartCoroutine(scaleToFullScreen(image, 2, 3f));
The Scale function:
bool isScaling = false;

IEnumerator scaleToFullScreen(GameObject imageToScale, int scaleType, float byTime)
{
    if (isScaling)
    {
        yield break;
    }

    if (scaleType < 0 || scaleType > 2)
    {
        Debug.Log("Invalid ScaleType. Valid Scale Types X:0, Y:1, XandY:2");
        yield break;
    }

    isScaling = true;

    Canvas canvas = imageToScale.GetComponentInParent<Canvas>();
    float x, y;
    if (canvas != null)
    {
        x = canvas.pixelRect.width;
        y = canvas.pixelRect.height;
    }
    else
    {
        x = Screen.width;
        y = Screen.height;
    }

    RectTransform rect = imageToScale.GetComponent<RectTransform>();
    if (rect == null)
    {
        rect = imageToScale.AddComponent<RectTransform>();
    }

    // Center the position of the image so that it will be resized equally.
    rect.anchoredPosition3D = new Vector3(0, 0, rect.anchoredPosition3D.z);

    // The default size.
    Vector2 originalScale = rect.sizeDelta;

    // The new size we want to scale the image to.
    Vector2 newScale = originalScale;
    if (scaleType == 0)
    {
        newScale.x = x;
    }
    else if (scaleType == 1)
    {
        newScale.y = y;
    }
    else if (scaleType == 2)
    {
        newScale.x = x;
        newScale.y = y;
    }

    float counter = 0;
    while (counter < byTime)
    {
        counter += Time.deltaTime;
        rect.sizeDelta = Vector2.Lerp(originalScale, newScale, counter / byTime);
        yield return null;
    }

    isScaling = false;
}
This should hopefully be a simple question. So I finally figured out how to render stuff in 3D in OpenTK. Great! Only problem is, it doesn't quite look how I expect. I'm drawing a sphere using the Polar method, and drawing using PrimitiveType.Polygon.
Here's the algorithm for calculating the coordinates. What I'm doing is stepping through each phi then theta in the sphere, incrementally adding more adjacent quads to my final point list:
Point 1: Theta1, Phi1
Point 2: Theta1, Phi2
Point 3: Theta2, Phi2
Point 4: Theta2, Phi1
protected static RegularPolygon3D _create_unit(int n)
{
    List<Vector3> pts = new List<Vector3>();
    float theta = 0.0f;
    float theta2 = 0.0f;
    float phi = 0.0f;
    float phi2 = 0.0f;
    float segments = n;
    float cosT = 0.0f;
    float cosT2 = 0.0f;
    float cosP = 0.0f;
    float cosP2 = 0.0f;
    float sinT = 0.0f;
    float sinT2 = 0.0f;
    float sinP = 0.0f;
    float sinP2 = 0.0f;
    List<Vector3> current = new List<Vector3>(4);

    for (float lat = 0; lat < segments; lat++)
    {
        phi = (float)Math.PI * (lat / segments);
        phi2 = (float)Math.PI * ((lat + 1.0f) / segments);
        cosP = (float)Math.Cos(phi);
        cosP2 = (float)Math.Cos(phi2);
        sinP = (float)Math.Sin(phi);
        sinP2 = (float)Math.Sin(phi2);

        for (float lon = 0; lon < segments; lon++)
        {
            current = new List<Vector3>(4);
            theta = TWO_PI * (lon / segments);
            theta2 = TWO_PI * ((lon + 1.0f) / segments);
            cosT = (float)Math.Cos(theta);
            cosT2 = (float)Math.Cos(theta2);
            sinT = (float)Math.Sin(theta);
            sinT2 = (float)Math.Sin(theta2);

            current.Add(new Vector3(cosT * sinP, sinT * sinP, cosP));
            current.Add(new Vector3(cosT * sinP2, sinT * sinP2, cosP2));
            current.Add(new Vector3(cosT2 * sinP2, sinT2 * sinP2, cosP2));
            current.Add(new Vector3(cosT2 * sinP, sinT2 * sinP, cosP));

            pts.AddRange(current);
        }
    }

    var rtn = new RegularPolygon3D(pts);
    rtn.Translation = Vector3.ZERO;
    rtn.Scale = Vector3.ONE;
    return rtn;
}
And so my Sphere class looks like this:
public class Sphere : RegularPolygon3D
{
    public static Sphere Create(Vector3 center, float radius)
    {
        var rp = RegularPolygon3D.Create(30, center, radius);
        return new Sphere(rp);
    }

    private Sphere(RegularPolygon3D polygon) : base(polygon)
    {
    }
}
I should also mention that the color of this sphere is not constant. In 2 dimensions, I have this code that works great for gradients. In 3D... not so much. That's why my sphere has multiple colors. The way the 2D gradient code works is that there is a list of colors coming from a class I created called GeometryColor. When the polygon is rendered, every vertex gets colored based on the list of colors within GeometryColor. So if there are 3 colors the user wishes to gradient between, and there are 6 vertices (a hexagon), then the code assigns the first 2 vertices color 1, the next 2 color 2, and the last 2 color 3. The following code shows how the color for a vertex is calculated.
public ColorLibrary.sRGB GetVertexFillColor(int index)
{
    var pct = ((float)index + 1.0f) / (float)Vertices.Count;
    var colorIdx = (int)Math.Round((FillColor.Colors.Count - 1.0f) * pct);
    return FillColor.Colors[colorIdx];
}
Anyway, here's the output I'm getting...hope somebody can see my error...
Thanks.
Edit: If I only use ONE vertex color (i.e. instead of my array of 4 different colors), then I get a completely smooth sphere... although without lighting and such, it's hard to tell it's anything but a circle, lol.
Edit: ...so somehow my sphere is slightly see-through... even though all my alphas are set to 1.0f and I'm doing depth testing:
GL.DepthMask(true);
GL.Enable(EnableCap.DepthTest);
GL.ClearDepth(1.0f);
GL.DepthFunc(DepthFunction.Lequal);
Final edit: OK, it has SOMETHING to do with my vertices I'm guessing, because when I use PrimitiveType.Quads it works perfectly....
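That fits: _create_unit emits its points four per face, which is exactly the layout PrimitiveType.Quads consumes, whereas PrimitiveType.Polygon treats the entire vertex list as the outline of one single convex polygon. A minimal sketch of the difference, assuming the point list is already bound as a vertex array:

// Polygon: ALL vertices form ONE polygon outline; wrong for a list of quads.
GL.DrawArrays(PrimitiveType.Polygon, 0, pts.Count);

// Quads: vertices are consumed four at a time, one face each, which
// matches how _create_unit builds its list.
GL.DrawArrays(PrimitiveType.Quads, 0, pts.Count);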
I'm using the following code to produce a vignette effect on an image. As you can see below, it works quite well. I'd like to be able to adjust the inner spread of the vignette (i.e. make the middle brighter and shorten the gradient), but the maths has got the better of me. Could anyone give me some pointers, with an explanation?
protected override void Apply(ImageBase target,
                              ImageBase source,
                              Rectangle targetRectangle,
                              Rectangle sourceRectangle,
                              int startY, int endY)
{
    int startX = sourceRectangle.X;
    int endX = sourceRectangle.Right;
    Color color = this.Color;
    Vector2 centre = Rectangle.Center(targetRectangle);
    float rX = this.RadiusX > 0 ? this.RadiusX : targetRectangle.Width / 2f;
    float rY = this.RadiusY > 0 ? this.RadiusY : targetRectangle.Height / 2f;
    float maxDistance = (float)Math.Sqrt(rX * rX + rY * rY);

    Parallel.For(
        startY,
        endY,
        y =>
        {
            for (int x = startX; x < endX; x++)
            {
                float distance = Vector2.Distance(centre, new Vector2(x, y));
                Color sourceColor = target[x, y];
                target[x, y] = Color.Lerp(sourceColor, color, .9f * distance / maxDistance);
            }
        });
}
Original Image
Vignette Effect
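One way to brighten the middle and shorten the gradient is to remap the normalized distance against an inner radius before lerping: pixels inside the inner radius keep the source color entirely, and the falloff is compressed into the band between the inner and outer radius. A sketch of the inner loop under that assumption (innerRadius is a hypothetical new parameter, here half of maxDistance):

// Hypothetical inner spread: 0 reproduces the original behaviour,
// larger values keep more of the centre untouched.
float innerRadius = 0.5f * maxDistance;

for (int x = startX; x < endX; x++)
{
    float distance = Vector2.Distance(centre, new Vector2(x, y));

    // Remap [innerRadius, maxDistance] onto [0, 1] and clamp, so the
    // gradient only runs between the inner and outer radius.
    float t = (distance - innerRadius) / (maxDistance - innerRadius);
    t = Math.Min(Math.Max(t, 0f), 1f);

    Color sourceColor = target[x, y];
    target[x, y] = Color.Lerp(sourceColor, color, .9f * t);
}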
I am creating a spinning galaxy made of blocks for the stars/systems.
I have been fiddling with this for a few days and have this so far:
public int numberArms = 6;
public int numberStars = 1000;
public float galaxyRadius = 500f;
public int spread = 100;

float fHatRandom(float fRange)
{
    float fArea = 4 * Mathf.Atan(6.0f);
    float fP = fArea * Random.value;
    return Mathf.Tan(fP / 4) * fRange / 6.0f;
}

float fLineRandom(float fRange)
{
    float fArea = fRange * fRange / 2;
    float fP = fArea * Random.value;
    return fRange - Mathf.Sqrt(fRange * fRange - 2 * fP);
}

// Use this for initialization
void Start()
{
    Random.seed = 100;
    int starsPerArm = numberStars / numberArms;
    float fAngularSpread = spread / numberArms;
    float fArmAngle = (360 / numberArms);

    for (int arm = 0; arm < numberArms; arm++)
    {
        for (int i = 0; i < starsPerArm; i++)
        {
            float fR = fHatRandom(galaxyRadius);
            float fQ = fLineRandom(fAngularSpread);
            float fK = 1;
            float fA = numberArms * (fArmAngle);
            float fX = fR * Mathf.Cos(Mathf.Deg2Rad * (fA + fR * fK + fQ));
            float fY = fR * Mathf.Sin(Mathf.Deg2Rad * (fA + fR * fK + fQ));
            Vector3 starPos = new Vector3(fX, fY, arm * 4);

            Collider[] colliders = Physics.OverlapSphere(starPos, 1);
            if (colliders.Length == 0)
            {
                GameObject star = GameObject.CreatePrimitive(PrimitiveType.Cube);
                star.transform.position = starPos;
                star.transform.parent = transform;
            }
            else
            {
                i--; // Because they overlapped, we try again.
            }
        }
    }
}
As it works right now, it creates the spiral arms of the galaxy just fine. But as you can see, I just set the position of each arm so it stacks on the other arms, because I cannot for the life of me figure out how to get them to rotate around the center; for that matter, my center seems to be off.
I admittedly have the math skills of a gnat and have been fumbling my way through this. Can someone help correct the math and get the arms/center where they belong?
Shouldn't that:
float fA = numberArms * (fArmAngle);
be
float fA = arm * (fArmAngle);
Just saying...
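If that is the culprit, each arm then gets its own base rotation around the center, and the arm * 4 Z offset can go away so all arms share one plane. A minimal sketch of the corrected star placement (same variable names as the code above):

// Give each arm its own base angle around the galactic center:
// arm 0 at 0 degrees, arm 1 at fArmAngle degrees, and so on.
float fA = arm * fArmAngle;
float fX = fR * Mathf.Cos(Mathf.Deg2Rad * (fA + fR * fK + fQ));
float fY = fR * Mathf.Sin(Mathf.Deg2Rad * (fA + fR * fK + fQ));

// Keep all arms in the same plane instead of stacking them along Z.
Vector3 starPos = new Vector3(fX, fY, 0f);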
I'm attempting to render a textured quad using the example located here.
I can successfully render the quad, but the texture information appears to be lost. The quad takes the color of the underlying texture, though.
I've checked the obvious problems ("Does the BasicEffect rendering the quad have the TextureEnabled property set to true?") and can't immediately see the problem.
Code below:
public class Quad
{
    public VertexPositionNormalTexture[] Vertices;
    public Vector3 Origin;
    public Vector3 Up;
    public Vector3 Normal;
    public Vector3 Left;
    public Vector3 UpperLeft;
    public Vector3 UpperRight;
    public Vector3 LowerLeft;
    public Vector3 LowerRight;
    public int[] Indexes;

    public Quad(Vector3 origin, Vector3 normal, Vector3 up,
                float width, float height)
    {
        this.Vertices = new VertexPositionNormalTexture[4];
        this.Indexes = new int[6];
        this.Origin = origin;
        this.Normal = normal;
        this.Up = up;

        // Calculate the quad corners.
        this.Left = Vector3.Cross(normal, this.Up);
        Vector3 uppercenter = (this.Up * height / 2) + origin;
        this.UpperLeft = uppercenter + (this.Left * width / 2);
        this.UpperRight = uppercenter - (this.Left * width / 2);
        this.LowerLeft = this.UpperLeft - (this.Up * height);
        this.LowerRight = this.UpperRight - (this.Up * height);

        this.FillVertices();
    }

    private void FillVertices()
    {
        Vector2 textureUpperLeft = new Vector2(0.0f, 0.0f);
        Vector2 textureUpperRight = new Vector2(1.0f, 0.0f);
        Vector2 textureLowerLeft = new Vector2(0.0f, 1.0f);
        Vector2 textureLowerRight = new Vector2(1.0f, 1.0f);

        for (int i = 0; i < this.Vertices.Length; i++)
        {
            this.Vertices[i].Normal = this.Normal;
        }

        this.Vertices[0].Position = this.LowerLeft;
        this.Vertices[0].TextureCoordinate = textureLowerLeft;
        this.Vertices[1].Position = this.UpperLeft;
        this.Vertices[1].TextureCoordinate = textureUpperLeft;
        this.Vertices[2].Position = this.LowerRight;
        this.Vertices[2].TextureCoordinate = textureLowerRight;
        this.Vertices[3].Position = this.UpperRight;
        this.Vertices[3].TextureCoordinate = textureUpperRight;

        this.Indexes[0] = 0;
        this.Indexes[1] = 1;
        this.Indexes[2] = 2;
        this.Indexes[3] = 2;
        this.Indexes[4] = 1;
        this.Indexes[5] = 3;
    }
}
this.quadEffect = new BasicEffect(this.GraphicsDevice, null);
this.quadEffect.AmbientLightColor = new Vector3(0.8f, 0.8f, 0.8f);
this.quadEffect.LightingEnabled = true;
this.quadEffect.World = Matrix.Identity;
this.quadEffect.View = this.View;
this.quadEffect.Projection = this.Projection;
this.quadEffect.TextureEnabled = true;
this.quadEffect.Texture = someTexture;
this.quad = new Quad(Vector3.Zero, Vector3.UnitZ, Vector3.Up, 2, 2);
this.quadVertexDecl = new VertexDeclaration(this.GraphicsDevice, VertexPositionNormalTexture.VertexElements);
public override void Draw(GameTime gameTime)
{
    this.GraphicsDevice.Textures[0] = this.SpriteDictionary["B1S1I800"];
    this.GraphicsDevice.VertexDeclaration = quadVertexDecl;
    quadEffect.Begin();
    foreach (EffectPass pass in quadEffect.CurrentTechnique.Passes)
    {
        pass.Begin();
        GraphicsDevice.DrawUserIndexedPrimitives<VertexPositionNormalTexture>(
            PrimitiveType.TriangleList,
            beamQuad.Vertices, 0, 4,
            beamQuad.Indexes, 0, 2);
        pass.End();
    }
    quadEffect.End();
}
From what I can see, this should work. The only thing I can imagine, which isn't in this code, is that the loading of the texture goes wrong somewhere. I also can't quite visualize what you mean by the quad taking on the underlying color of the texture. Do you have a screenshot for us?
Also, if something does show up, a very distorted version of your texture for example, it could be that the rendering of other things is affecting the rendering of the quad. For example, you may be drawing the quad while the graphics device still has another vertex declaration set, the previous thing rendered may have set some exotic render state, or you may be drawing the quad within the drawing code of something else. Try isolating this code into a fresh project, or disable the rendering of everything else.
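If isolating it into a fresh project is too much work, a cheaper first test is to restore the states you depend on right before drawing the quad. A sketch for the XNA 3.x-era API the code above uses (only an assumption about what a previous SpriteBatch pass may have changed):

// Undo state changes a previous SpriteBatch.End() or other draw code
// may have left behind before rendering the quad.
GraphicsDevice.RenderState.DepthBufferEnable = true;
GraphicsDevice.RenderState.AlphaBlendEnable = false;
GraphicsDevice.RenderState.AlphaTestEnable = false;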