I am writing code for image rotation, but the performance is very poor.
With a 10000 x 10000 image it takes about 100 minutes; I want to get this down to under 1-3 minutes, but I don't know how to improve the code.
The code is meant for very large images (over 4 GB), so it processes the image one line at a time.
I attach the full code. First I apply the x-y shift transformation, and after that the angle (rotation) transformation.
Here is the code. The environment is .NET Framework 4.6.2, 64-bit.
Test code: I used the "emgucv3" and "speedycoding" libraries from NuGet.
The Flatten method is from speedycoding, and Image<Gray, byte> is from emgucv3.
Image<Gray,byte> img = new Image<Gray, byte>(imgpath);
PointD[] posdList = new PointD[4]
{
new PointD(-1024,48),
new PointD(-3264,0),
new PointD(639,-2016),
new PointD(3119,1696)
};
var data = img.Data.TointArray();
var dataW = img.Data.GetLength(1);
var dataH = img.Data.GetLength(0);
var strideW = dataW % 4;
var strideH = dataH % 4;
posdList = posdList.Select( x => new PointD( x.X * ratio + dataW/2, x.Y * ratio+dataH/2 ) ).ToArray();
img = img.Resize( dataW + ( strideW == 0 ? 0 : 4 - strideW ), dataH + ( strideH == 0 ? 0 : 4 - strideH ), Inter.Cubic );
data = img.Data.TointArray();
dataW = img.Data.GetLength(1);
dataH = img.Data.GetLength(0);
var srcdata = img.Data.Flatten();
byte[][] temprers;
using (MemoryStream mstream = new MemoryStream(srcdata))
{
temprers = Run(mstream, dataW, dataH, posdList);
}
This is the main Run method:
public static byte[][] Run(MemoryStream bytestream, int w, int h, PointD[] pos3)
{
try
{
Stopwatch stw = new Stopwatch();
var srcPos = new AffinePos(pos3[0], pos3[1], pos3[2], pos3[3]);
var trsData = srcPos.ToTrnsData(w, h);
var xc = trsData.XSrcCnter;
var yc = trsData.YSrcCnter;
var xmax = Math.Max(w - xc, xc) * 2;
var ymax = Math.Max(h - yc, yc) * 2;
byte[][] R1 = CreateJagged(ymax, xmax);
for (int i = 0; i < h; i++)
{
var data = new byte[w];
bytestream.Seek(i * w, SeekOrigin.Begin);
bytestream.Read(data, 0, w);
var reshaped = data.Reshape(1, w).ToJagged();
xytransform(ref R1, trsData, reshaped, i, xmax, ymax);
}
var w1 = R1[0].Length;
var h1 = R1.Length;
var degree = trsData.Angle * 180 / Math.PI;
double radian = trsData.Angle;
double cosRadian = Cos(radian);
double sinRadian = Sin(radian);
int newWidth = (int)(w1 * Abs(cosRadian) + h1 * Abs(sinRadian));
int newHeight = (int)(h1 * Abs(cosRadian) + w1 * Abs(sinRadian));
Console.WriteLine( "Create Jagged (sec)" );
stw.Start();
byte[][] R2 = CreateJagged(newWidth, newHeight);
stw.Stop();
Console.WriteLine( stw.ElapsedMilliseconds / 1000 );
Console.WriteLine( "Length : {0} / (ms)", R1.Length );
for (int i = 0; i < R1.Length; i++)
{
var reshaped = R1[i].Reshape(1, w1).ToJagged();
Stopwatch stwinnter = new Stopwatch();
stwinnter.Start();
rotateGeneral(ref R2, trsData, reshaped, i, w1, h1);
stwinnter.Stop();
Console.WriteLine( stwinnter.ElapsedMilliseconds );
}
return R2;
}
catch (Exception ex)
{
Console.WriteLine(ex.ToString());
return null;
}
}
public static void xytransform(ref byte[][] target, TrnsData data, byte[][] src, int hidx, int xmax, int ymax)
{
var h = src.GetLength(0);
var w = src[0].GetLength(0);
var tcenterX = (int)xmax / 2;
var tcenterY = (int)ymax / 2;
var xshift = (int)(data.XSrcCnter - tcenterX);
var yshift = (int)(data.YSrcCnter - tcenterY);
for (int j = 0; j < h; j++)
{
for (int i = 0; i < w; i++)
{
if (i - xshift >= 0
&& j - yshift >= 0
&& i - xshift < target[0].Length
&& j - yshift < target.Length)
{
var yidx = (j - yshift + hidx).ToString();
var xidx = (i - xshift).ToString();
if (j - yshift + hidx * h < target.Length
&& i - xshift < target[0].Length)
{ target[j - yshift + hidx * h][i - xshift] = src[j][i]; }
}
}
}
}
public static void rotateGeneral(ref byte[][] target, TrnsData data, byte[][] G, int hidx, int oldw, int oldh)
{
double radian = data.Angle;
var newWidth = target[0].Length;
var newHeight = target.Length;
double cosRadian = Cos(radian);
double sinRadian = Sin(radian);
int centerX = oldw / 2;
int centerY = oldh / 2;
int diffX = (newWidth - oldw) / 2;
int diffY = (newHeight - oldh) / 2;
double sourceX, sourceY;
int isourceX, isourceY;
int isourceX2, isourceY2;
int h = G.Length;
var temptarget = target;
Parallel.For( 0, target.Length ,
y =>
{
for (int x = 0; x < temptarget[0].Length; x++)
{
var dx = x - (centerX + diffX);
var dy = y - (centerY + diffY);
var xres = dx * cosRadian - dy * sinRadian;
var yres = dx * sinRadian + dy * cosRadian;
var srcX = xres + centerX;
var srcY = yres - hidx * h + centerY;
var isourceX_ = (int)Math.Round( srcX );
var isourceY_ = (int)Math.Round( srcY );
//try
//{
if (isourceY_ < G.Length
&& isourceY_ >= 0
&& isourceX_ < G[0].Length
&& isourceX_ >= 0)
{
temptarget[y][x] = G[isourceY_][isourceX_];
}
//}
//catch (Exception es)
//{
// Console.WriteLine( es.ToString() );
//}
}
} );
target = temptarget;
}
The TrnsData struct is this:
public struct TrnsData
{
public double Innerw;
public double Innterh;
public int H;
public int W;
public int XSrcCnter;
public int YSrcCnter;
public int XShift;
public int YShift;
public int dX;
public int dY;
public double Angle; // radian
}
public class AffinePos
{
public PointD LB;
public PointD LT;
public PointD RT;
public PointD RB;
public AffinePos(double x1, double y1, double x2, double y2, double x3, double y3, double x4, double y4)
{
LB = new PointD(x1, y1);
LT = new PointD(x2, y2);
RT = new PointD(x3, y3);
RB = new PointD(x4, y4);
}
public AffinePos(PointD lb, PointD lt, PointD rt, PointD rb)
{
LB = lb;
LT = lt;
RT = rt;
RB = rb;
}
}
This is the extension method that creates the TrnsData:
public static TrnsData ToTrnsData
(this AffinePos srcPos, int w, int h)
{
var innerh = PosL2(srcPos.LB, srcPos.LT);
var innerw = PosL2(srcPos.RT, srcPos.LT);
var trgPos = srcPos.MoveToCenter(new PointD(w / 2.0, h / 2.0)); // ok
var fcenterX = srcPos.GetCenter().X;
var fcenterY = srcPos.GetCenter().Y;
var lcenterX = trgPos.GetCenter().X;
var lcenterY = trgPos.GetCenter().Y;
var xshift = lcenterX - fcenterX;
var yshift = lcenterY - fcenterY;
var dx = srcPos.LT.X - srcPos.RT.X;
var dy = srcPos.LT.Y - srcPos.RT.Y;
double radian;
if (Abs( dx) < 0.0001)
{
radian = 1.5708;
}
else if (Abs(dy) < 0.0001)
{
radian = 3.14159;
}
else
{
radian = Math.Atan(dy / dx);
}
return new TrnsData()
{
H = h,
W = w,
XShift = (int)xshift,
YShift = (int)yshift,
Innerw = innerw,
Innterh = innerh,
XSrcCnter = (int)fcenterX,
YSrcCnter = (int)fcenterY,
dX = (int)dx,
dY = (int)dy,
Angle = radian,
};
}
And an additional helper method:
public static byte[][] CreateJagged(int h, int w)
{
byte[][] output = new byte[h][];
for (int i = 0; i < h; i++)
{
output[i] = new byte[w];
}
return output;
}
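To clarify what the two steps are doing, here is a minimal sketch (not my actual code; ShiftRotate and its parameters are just placeholders) of the shift and rotation folded into a single nearest-neighbour inverse mapping over flat, row-major byte arrays:
// Minimal sketch: shift and rotation combined into one inverse-mapped pass.
// src is the source image as a row-major byte[srcW * srcH]; dst must be
// preallocated as byte[dstW * dstH]. Requires System and System.Threading.Tasks.
public static void ShiftRotate(byte[] src, int srcW, int srcH,
                               byte[] dst, int dstW, int dstH,
                               double shiftX, double shiftY, double angle)
{
    double cos = Math.Cos(angle), sin = Math.Sin(angle);
    double scx = srcW / 2.0, scy = srcH / 2.0;   // source centre
    double dcx = dstW / 2.0, dcy = dstH / 2.0;   // destination centre
    Parallel.For(0, dstH, y =>
    {
        int row = y * dstW;
        double dy = y - dcy;
        for (int x = 0; x < dstW; x++)
        {
            double dx = x - dcx;
            // Map the destination pixel back into source coordinates
            // (same angle convention as rotateGeneral above), then undo the shift.
            double sx = dx * cos - dy * sin + scx - shiftX;
            double sy = dx * sin + dy * cos + scy - shiftY;
            int isx = (int)Math.Round(sx);
            int isy = (int)Math.Round(sy);
            if (isx >= 0 && isx < srcW && isy >= 0 && isy < srcH)
                dst[row + x] = src[isy * srcW + isx];
        }
    });
}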
Related
I'm trying to draw a triangle through three points with known coordinates, and fill the area enclosed within the triangle.
I wrote this algorithm to do all of this, but the code is slow.
Can anyone suggest an easier and faster way to draw a triangle?
I got the line-drawing algorithm from this site, but I can no longer find the original post to credit it, sorry.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Drawing;
using System.Windows.Forms;
public partial class Form1 : Form
{
int Screen_height;
int Screen_width;
List<int> Pointsx = new List<int>(new int[] { });
List<int> Pointsy = new List<int>(new int[] { });
List<int> edge_one_Tranglex= new List<int>(new int[] { });
List<int> edge_one_Trangley = new List<int>(new int[] { });
List<int> edge_two_Tranglex = new List<int>(new int[] { });
List<int> edge_two_Trangley = new List<int>(new int[] { });
List<int> edge_three_Tranglex = new List<int>(new int[] { });
List<int> edge_three_Trangley = new List<int>(new int[] { });
int edge = 1;
Bitmap bmp;
int start = 0;
int center_x;
int center_y;
public Form1()
{
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e)
{
Screen_height = panel1.Height;
Screen_width = panel1.Width;
Console.WriteLine(" " + Screen_height + "," + Screen_width);
}
private void panel1_Paint(object sender, PaintEventArgs e)
{
if (start == 0)
{
var sw = new Stopwatch();
sw.Start();
bmp = new Bitmap(panel1.Width, panel1.Height);
panel1.BackgroundImage = (Image)bmp;
panel1.BackgroundImageLayout = ImageLayout.None;
//from x to x2 and from y to y2
//D_line(100, 10, -100, 20);
D_Triangle(-300, 10, 100, 20, 100, -100);
sw.Stop();
Console.WriteLine("" + sw.Elapsed);
start += 1;
}
}
public void D_line(int x, int y, int x2, int y2)
{
center_x = Screen_width / 2;
center_y = Screen_height / 2;
line(center_x + x, center_y - y, center_x + x2, center_y - y2);
}
public void line(int x, int y, int x2, int y2)
{
int w = x2 - x;
int h = y2 - y;
int dx1 = 0, dy1 = 0, dx2 = 0, dy2 = 0;
if (w < 0) dx1 = -1; else if (w > 0) dx1 = 1;
if (h < 0) dy1 = -1; else if (h > 0) dy1 = 1;
if (w < 0) dx2 = -1; else if (w > 0) dx2 = 1;
int longest = Math.Abs(w);
int shortest = Math.Abs(h);
if (!(longest > shortest))
{
longest = Math.Abs(h);
shortest = Math.Abs(w);
if (h < 0) dy2 = -1; else if (h > 0) dy2 = 1;
dx2 = 0;
}
int numerator = longest >> 1;
for (int i = 0; i <= longest; i++)
{
//putpixel(x, y, color);
bmp.SetPixel(x, y, Color.Red);
//my code
if (edge == 1)
{
edge_one_Tranglex.Add(x);
edge_one_Trangley.Add(y);
}
if (edge == 2)
{
edge_two_Tranglex.Add(x);
edge_two_Trangley.Add(y);
}
if (edge == 3)
{
edge_three_Tranglex.Add(x);
edge_three_Trangley.Add(y);
}
if (edge >= 4)
{
if (!Pointsx.Contains(x) || !Pointsy.Contains(y))
{
Pointsx.Add(x);
Pointsy.Add(y);
}
}
numerator += shortest;
if (!(numerator < longest))
{
numerator -= longest;
x += dx1;
y += dy1;
}
else
{
x += dx2;
y += dy2;
}
}
edge++;
// edge_two_Trangle.ForEach(p => Console.WriteLine(p));
}
void D_Triangle(int x1, int y1, int x2, int y2, int x3, int y3)
{
D_line(x1, y1, x2, y2);
D_line(x2, y2, x3, y3);
D_line(x3, y3, x1, y1);
int a = edge_two_Tranglex.Count();
for (int i = 1; i < a - 1; i++)
{
line(center_x + x1, center_y - y1, edge_two_Tranglex[i], edge_two_Trangley[i]);
}
}
}
I am trying to use libtiff.net to read elevation data from a GeoTIFF file.
So far I have mostly just been able to read metadata from the file, using the example on libtiff.net's web page.
But I do not understand how to read the elevation data. I first tried reading with Tiff.ReadScanline() as described here, but the file I have seems to be stored differently (probably in tiles, if I understand it correctly).
Here is the metadata, as far as I have been able to read it (the TIFF file is from the Danish terrain elevation data set):
Tiff c:\Users***\DTM_1km_6170_500.tif, page 0 has following tags set:
IMAGEWIDTH System.Int32 : 2500
IMAGELENGTH System.Int32 : 2500
BITSPERSAMPLE System.Int16 : 32
COMPRESSION BitMiracle.LibTiff.Classic.Compression : ADOBE_DEFLATE
PHOTOMETRIC BitMiracle.LibTiff.Classic.Photometric : MINISBLACK
STRIPOFFSETS System.UInt64[] : System.UInt64[]
SAMPLESPERPIXEL System.Int16 : 1
STRIPBYTECOUNTS System.UInt64[] : System.UInt64[]
PLANARCONFIG BitMiracle.LibTiff.Classic.PlanarConfig : CONTIG
PREDICTOR BitMiracle.LibTiff.Classic.Predictor : FLOATINGPOINT
TILEWIDTH System.Int32 : 256
TILELENGTH System.Int32 : 256
TILEOFFSETS System.UInt64[] : System.UInt64[]
TILEBYTECOUNTS System.UInt64[] : System.UInt64[]
SAMPLEFORMAT BitMiracle.LibTiff.Classic.SampleFormat : IEEEFP
DATATYPE System.Int16 : 3
GEOTIFF_MODELPIXELSCALETAG System.Int32 : 3 GEOTIFF_MODELPIXELSCALETAG
System.Byte[] : Ù?Ù?
GEOTIFF_MODELTIEPOINTTAG System.Int32 : 6 GEOTIFF_MODELTIEPOINTTAG
System.Byte[] : A ^WA
34735 System.Int32 : 36 34735 System.Byte[] :
± ± #° èd )#
34736 System.Int32 : 3 34736 System.Byte[] :
34737 System.Int32 : 30 34737 System.Byte[] : ETRS89 / UTM zone
32N|ETRS89|
42113 System.Int32 : 6 42113 System.Byte[] : -9999
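So the TILEWIDTH/TILELENGTH tags suggest the file is tiled. As a quick programmatic check (a small sketch, separate from my main code), libtiff.net also exposes Tiff.IsTiled():
using (Tiff tiff = Tiff.Open(@"DTM_1km_6170_500.tif", "r"))
{
    if (tiff == null)
        return;
    // Tiled files must be read with ReadTile/ReadEncodedTile;
    // ReadScanline only works for files organised in strips.
    Console.WriteLine(tiff.IsTiled() ? "tiled" : "stripped");
}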
The code I have written so far is as follows:
using System;
using BitMiracle.LibTiff.Classic;
namespace GeoTIFFReader
{
public class GeoTIFF
{
private double[,] heightmap;
private double dx;
private double dy;
private double startx;
private double starty;
public GeoTIFF(string fn)
{
using (Tiff tiff = Tiff.Open(fn, "r"))
{
if (tiff == null)
{
// Error - could not open
return;
}
int width = tiff.GetField(TiffTag.IMAGEWIDTH)[0].ToInt();
int height = tiff.GetField(TiffTag.IMAGELENGTH)[0].ToInt();
heightmap = new double[width, height];
FieldValue[] modelPixelScaleTag = tiff.GetField(TiffTag.GEOTIFF_MODELPIXELSCALETAG);
FieldValue[] modelTiePointTag = tiff.GetField(TiffTag.GEOTIFF_MODELTIEPOINTTAG);
byte[] modelPixelScale = modelPixelScaleTag[1].GetBytes();
dx = BitConverter.ToDouble(modelPixelScale, 0);
dy = BitConverter.ToDouble(modelPixelScale, 8) * -1;
byte[] modelTransformation = modelTiePointTag[1].GetBytes();
double originLon = BitConverter.ToDouble(modelTransformation, 24);
double originLat = BitConverter.ToDouble(modelTransformation, 32);
startx = originLon + dx / 2.0;
starty = originLat + dy / 2.0;
double curx = startx;
double cury = starty;
FieldValue[] bitsPerSampleTag = tiff.GetField(TiffTag.BITSPERSAMPLE);
FieldValue[] tilewtag = tiff.GetField(TiffTag.TILEWIDTH);
FieldValue[] tilehtag = tiff.GetField(TiffTag.TILELENGTH);
int tilew = tilewtag[0].ToInt();
int tileh = tilehtag[0].ToInt();
var tile = new byte[tilew*tileh];
//var scanline = new byte[tiff.ScanlineSize()]; Does not work... wrong format
for (int il = 0; il < height; il++)
{
//tiff.ReadScanline(scanline, il); // Load il'th line of data
for (int ir = 0; ir < width; ir++)
{
// Here I would like to read each pixel data that contains elevation in gray-scale in f32 as far I as I understand from metadata
//object value = scanline[ir];
//heightmap[ir, il] = double.Parse(value.ToString());
}
}
Console.WriteLine(heightmap.ToString());
}
}
}
}
So if anyone knows how to extract this data, that would be very much appreciated.
I stumbled across some hints that led me to an answer to the specific question:
int tileSize = tiff.TileSize();
for (int iw = 0; iw < nWidth; iw += tilew)
{
for (int ih = 0; ih < nHeight; ih += tileh)
{
byte[] buffer = new byte[tileSize];
tiff.ReadTile(buffer, 0, iw, ih, 0, 0);
for (int itw = 0; itw < tilew; itw++)
{
int iwhm = ih + itw;
if (iwhm > nWidth - 1)
{
break;
}
for (int ith = 0; ith < tileh; ith++)
{
int iyhm = iw + ith;
if (iyhm > nHeight - 1)
{
break;
}
heightMap[iwhm, iyhm] =
BitConverter.ToSingle(buffer, (itw * tileh + ith) * 4);
}
}
}
}
EDIT 2018-09-20:
@Graviton - Sorry for the long response time, but here is the complete class I have used, with a constructor that takes the filename as input and populates the heightMap (though @Nazonokaizijin's version looks a bit nicer and slimmer):
using System;
using BitMiracle.LibTiff.Classic;
using System.IO;
namespace GeoTIFFReader
{
public class GeoTIFF
{
private float[,] heightMap;
public float[,] HeightMap
{
get { return heightMap; }
private set { heightMap = value; }
}
private int nWidth;
public int NWidth
{
get { return nWidth; }
private set { nWidth = value; }
}
private int nHeight;
public int NHeight
{
get { return nHeight; }
private set { nHeight = value; }
}
private double dW;
public double DW
{
get { return dW; }
private set { dW = value; }
}
private double dH;
public double DH
{
get { return dH; }
private set { dH = value; }
}
private double startW;
public double StartW
{
get { return startW; }
private set { startW = value; }
}
private double startH;
public double StartH
{
get { return startH; }
private set { startH = value; }
}
public GeoTIFF(string fn)
{
using (Tiff tiff = Tiff.Open(fn, "r"))
{
if (tiff == null)
{
// Error - could not open
return;
}
nWidth = tiff.GetField(TiffTag.IMAGEWIDTH)[0].ToInt();
nHeight = tiff.GetField(TiffTag.IMAGELENGTH)[0].ToInt();
heightMap = new float[nWidth, nHeight];
FieldValue[] modelPixelScaleTag = tiff.GetField(TiffTag.GEOTIFF_MODELPIXELSCALETAG);
FieldValue[] modelTiePointTag = tiff.GetField(TiffTag.GEOTIFF_MODELTIEPOINTTAG);
byte[] modelPixelScale = modelPixelScaleTag[1].GetBytes();
dW = BitConverter.ToDouble(modelPixelScale, 0);
dH = BitConverter.ToDouble(modelPixelScale, 8) * -1;
byte[] modelTransformation = modelTiePointTag[1].GetBytes();
double originLon = BitConverter.ToDouble(modelTransformation, 24);
double originLat = BitConverter.ToDouble(modelTransformation, 32);
startW = originLon + dW / 2.0;
startH = originLat + dH / 2.0;
FieldValue[] tileByteCountsTag = tiff.GetField(TiffTag.TILEBYTECOUNTS);
long[] tileByteCounts = tileByteCountsTag[0].TolongArray();
FieldValue[] bitsPerSampleTag = tiff.GetField(TiffTag.BITSPERSAMPLE);
int bytesPerSample = bitsPerSampleTag[0].ToInt() / 8;
FieldValue[] tilewtag = tiff.GetField(TiffTag.TILEWIDTH);
FieldValue[] tilehtag = tiff.GetField(TiffTag.TILELENGTH);
int tilew = tilewtag[0].ToInt();
int tileh = tilehtag[0].ToInt();
int tileWidthCount = nWidth / tilew;
int remainingWidth = nWidth - tileWidthCount * tilew;
if (remainingWidth > 0)
{
tileWidthCount++;
}
int tileHeightCount = nHeight / tileh;
int remainingHeight = nHeight - tileHeightCount * tileh;
if (remainingHeight > 0)
{
tileHeightCount++;
}
int tileSize = tiff.TileSize();
for (int iw = 0; iw < nWidth; iw += tilew)
{
for (int ih = 0; ih < nHeight; ih += tileh)
{
byte[] buffer = new byte[tileSize];
tiff.ReadTile(buffer, 0, iw, ih, 0, 0);
for (int itw = 0; itw < tilew; itw++)
{
int iwhm = ih + itw;
if (iwhm > nWidth - 1)
{
break;
}
for (int ith = 0; ith < tileh; ith++)
{
int iyhm = iw + ith;
if (iyhm > nHeight - 1)
{
break;
}
heightMap[iwhm, iyhm] =
BitConverter.ToSingle(buffer, (itw * tileh + ith) * 4);
}
}
}
}
}
}
}
}
You can do something like this:
private void ReadTiff()
{
Tiff tiff = Tiff.Open("myfile.tif", "r");
if (tiff == null)
return;
//Get the image size
int imageWidth = tiff.GetField(TiffTag.IMAGEWIDTH)[0].ToInt();
int imageHeight = tiff.GetField(TiffTag.IMAGELENGTH)[0].ToInt();
//Get the tile size
int tileWidth = tiff.GetField(TiffTag.TILEWIDTH)[0].ToInt();
int tileHeight = tiff.GetField(TiffTag.TILELENGTH)[0].ToInt();
int tileSize = tiff.TileSize();
//Pixel depth
int depth = tileSize / (tileWidth * tileHeight);
byte[] buffer = new byte[tileSize];
for (int y = 0; y < imageHeight; y += tileHeight)
{
for (int x = 0; x < imageWidth; x += tileWidth)
{
//Read the value and store to the buffer
tiff.ReadTile(buffer, 0, x, y, 0, 0);
for (int kx = 0; kx < tileWidth; kx++)
{
for (int ky = 0; ky < tileHeight; ky++)
{
//Calculate the index in the buffer
int startIndex = (kx + tileWidth * ky) * depth;
if (startIndex >= buffer.Length)
continue;
//Calculate pixel index
int pixelX = x + kx;
int pixelY = y + ky;
if (pixelX >= imageWidth || pixelY >= imageHeight)
continue;
//Get the value for the target pixel
double value = BitConverter.ToSingle(buffer, startIndex);
}
}
}
}
tiff.Close();
}
There is a C# library complementary to LibTiff that performs the elevation query given a latitude/longitude, available at GeoTiffCOG:
GeoTiff geoTiff = new GeoTiff(file_tiff);
double value = geoTiff.GetElevationAtLatLon(latitude, longitude);
It also supports Cloud Optimized GeoTIFF (COG) by passing a URI as the argument.
I believe the problem is the PREDICTOR. Instead of storing the data values directly, the file stores the differences between them, encoded with the FLOATINGPOINT predictor before compression. This was proprietary to Adobe. I have been searching for code to decode PREDICTOR_FLOATINGPOINT myself. I think this GitHub link may have library code that will read and decode it.
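Roughly, undoing that predictor for one decompressed row would look something like the sketch below (my reading of the TIFF floating-point predictor scheme, assuming one 32-bit sample per pixel; a libtiff-based reader normally does this step for you):
// Sketch: undo the floating-point predictor (TIFF predictor 3) for a single
// row of 'width' 32-bit samples. The row bytes are stored plane-by-plane
// (most significant byte plane first) and horizontally differenced.
static float[] DecodeFloatPredictorRow(byte[] row, int width)
{
    const int bytesPerSample = 4;
    int rowBytes = width * bytesPerSample;
    // 1) Undo the horizontal differencing: each byte is a delta from the previous byte.
    for (int i = 1; i < rowBytes; i++)
        row[i] = (byte)(row[i] + row[i - 1]);
    // 2) Re-interleave the byte planes into little-endian floats.
    var result = new float[width];
    var sample = new byte[bytesPerSample];
    for (int i = 0; i < width; i++)
    {
        for (int b = 0; b < bytesPerSample; b++)
            sample[bytesPerSample - 1 - b] = row[b * width + i];
        result[i] = BitConverter.ToSingle(sample, 0);
    }
    return result;
}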
I am trying to make a meteor like the one in this video,
but I can only get something like this:
This is my simplex noise:
public class PerlinNoise{
int B = 256;
int[] m_perm = new int[B+B];
Texture2D m_permTex;
public int octava;
public float frequencia, amplitud;
public PerlinNoise(int seed, float frec, float amp, int oct)
{
octava = oct;
amplitud = amp;
frequencia = frec;
UnityEngine.Random.seed = seed;
int i, j, k;
for (i = 0 ; i < B ; i++)
{
m_perm[i] = i;
}
while (--i != 0)
{
k = m_perm[i];
j = UnityEngine.Random.Range(0, B);
m_perm[i] = m_perm[j];
m_perm[j] = k;
}
for (i = 0 ; i < B; i++)
{
m_perm[B + i] = m_perm[i];
}
}
float FADE(float t) { return t * t * t * ( t * ( t * 6.0f - 15.0f ) + 10.0f ); }
float LERP(float t, float a, float b) { return (a) + (t)*((b)-(a)); }
float GRAD1(int hash, float x )
{
int h = hash & 15;
float grad = 1.0f + (h & 7);
if ((h&8) != 0) grad = -grad;
return ( grad * x );
}
float GRAD2(int hash, float x, float y)
{
int h = hash & 7;
float u = h<4 ? x : y;
float v = h<4 ? y : x;
return (((h&1) != 0)? -u : u) + (((h&2) != 0) ? -2.0f*v : 2.0f*v);
}
float GRAD3(int hash, float x, float y , float z)
{
int h = hash & 15;
float u = h<8 ? x : y;
float v = (h<4) ? y : (h==12 || h==14) ? x : z;
return (((h&1) != 0)? -u : u) + (((h&2) != 0)? -v : v);
}
float Noise1D( float x )
{
//returns a noise value between -0.5 and 0.5
int ix0, ix1;
float fx0, fx1;
float s, n0, n1;
ix0 = (int)Mathf.Floor(x); // Integer part of x
fx0 = x - ix0; // Fractional part of x
fx1 = fx0 - 1.0f;
ix1 = ( ix0+1 ) & 0xff;
ix0 = ix0 & 0xff; // Wrap to 0..255
s = FADE(fx0);
n0 = GRAD1(m_perm[ix0], fx0);
n1 = GRAD1(m_perm[ix1], fx1);
return 0.188f * LERP( s, n0, n1);
}
public float Noise2D( float x, float y )
{
int ix0, iy0, ix1, iy1;
float fx0, fy0, fx1, fy1, s, t, nx0, nx1, n0, n1;
ix0 = (int)Mathf.Floor(x);
iy0 = (int)Mathf.Floor(y);
fx0 = x - ix0;
fy0 = y - iy0;
fx1 = fx0 - 1.0f;
fy1 = fy0 - 1.0f;
ix1 = (ix0 + 1) & 0xff; // Wrap to 0..255
iy1 = (iy0 + 1) & 0xff;
ix0 = ix0 & 0xff;
iy0 = iy0 & 0xff;
t = FADE( fy0 );
s = FADE( fx0 );
nx0 = GRAD2(m_perm[ix0 + m_perm[iy0]], fx0, fy0);
nx1 = GRAD2(m_perm[ix0 + m_perm[iy1]], fx0, fy1);
n0 = LERP( t, nx0, nx1 );
nx0 = GRAD2(m_perm[ix1 + m_perm[iy0]], fx1, fy0);
nx1 = GRAD2(m_perm[ix1 + m_perm[iy1]], fx1, fy1);
n1 = LERP(t, nx0, nx1);
return 0.507f * LERP( s, n0, n1 );
}
float Noise3D( float x, float y, float z )
{
//returns a noise value between -1.5 and 1.5
int ix0, iy0, ix1, iy1, iz0, iz1;
float fx0, fy0, fz0, fx1, fy1, fz1;
float s, t, r;
float nxy0, nxy1, nx0, nx1, n0, n1;
ix0 = (int)Mathf.Floor( x ); // Integer part of x
iy0 = (int)Mathf.Floor( y ); // Integer part of y
iz0 = (int)Mathf.Floor( z ); // Integer part of z
fx0 = x - ix0; // Fractional part of x
fy0 = y - iy0; // Fractional part of y
fz0 = z - iz0; // Fractional part of z
fx1 = fx0 - 1.0f;
fy1 = fy0 - 1.0f;
fz1 = fz0 - 1.0f;
ix1 = ( ix0 + 1 ) & 0xff; // Wrap to 0..255
iy1 = ( iy0 + 1 ) & 0xff;
iz1 = ( iz0 + 1 ) & 0xff;
ix0 = ix0 & 0xff;
iy0 = iy0 & 0xff;
iz0 = iz0 & 0xff;
r = FADE( fz0 );
t = FADE( fy0 );
s = FADE( fx0 );
nxy0 = GRAD3(m_perm[ix0 + m_perm[iy0 + m_perm[iz0]]], fx0, fy0, fz0);
nxy1 = GRAD3(m_perm[ix0 + m_perm[iy0 + m_perm[iz1]]], fx0, fy0, fz1);
nx0 = LERP( r, nxy0, nxy1 );
nxy0 = GRAD3(m_perm[ix0 + m_perm[iy1 + m_perm[iz0]]], fx0, fy1, fz0);
nxy1 = GRAD3(m_perm[ix0 + m_perm[iy1 + m_perm[iz1]]], fx0, fy1, fz1);
nx1 = LERP( r, nxy0, nxy1 );
n0 = LERP( t, nx0, nx1 );
nxy0 = GRAD3(m_perm[ix1 + m_perm[iy0 + m_perm[iz0]]], fx1, fy0, fz0);
nxy1 = GRAD3(m_perm[ix1 + m_perm[iy0 + m_perm[iz1]]], fx1, fy0, fz1);
nx0 = LERP( r, nxy0, nxy1 );
nxy0 = GRAD3(m_perm[ix1 + m_perm[iy1 + m_perm[iz0]]], fx1, fy1, fz0);
nxy1 = GRAD3(m_perm[ix1 + m_perm[iy1 + m_perm[iz1]]], fx1, fy1, fz1);
nx1 = LERP( r, nxy0, nxy1 );
n1 = LERP( t, nx0, nx1 );
return 0.936f * LERP( s, n0, n1 );
}
public float FractalNoise1D(float x, int octNum, float frq, float amp)
{
float gain = 1.0f;
float sum = 0.0f;
for(int i = 0; i < octNum; i++)
{
sum += Noise1D(x*gain/frq) * amp/gain;
gain *= 2.0f;
}
return sum;
}
public float FractalNoise2D(float x, float y, int octNum, float frq, float amp)
{
float gain = 1.0f;
float sum = 0.0f;
for(int i = 0; i < octNum; i++)
{
sum += Noise2D(x*gain/frq, y*gain/frq) * amp/gain;
gain *= 2.0f;
}
return sum;
}
public int Noise2D(Vector3Int v3) {
return Mathf.RoundToInt(FractalNoise2D(v3.x,v3.z,octava,frequencia,amplitud));
}
public int Noise2D(int x, int z)
{
return Mathf.RoundToInt(FractalNoise2D(x, z, octava, frequencia, amplitud));
}
public float FractalNoise3D(float x, float y, float z, int octNum, float frq, float amp)
{
float gain = 1.0f;
float sum = 0.0f;
for(int i = 0; i < octNum; i++)
{
sum += Noise3D(x*gain/frq, y*gain/frq, z*gain/frq) * amp/gain;
gain *= 2.0f;
}
return sum;
}
public void LoadPermTableIntoTexture()
{
m_permTex = new Texture2D(256, 1, TextureFormat.Alpha8, false);
m_permTex.filterMode = FilterMode.Point;
m_permTex.wrapMode = TextureWrapMode.Clamp;
for(int i = 0; i < 256; i++)
{
float v = (float)m_perm[i] / 255.0f;
m_permTex.SetPixel(i, 0, new Color(0,0,0,v));
}
m_permTex.Apply();
}
}
This is my implementation:
public void Resimulate() {
PerlinNoise per = new PerlinNoise(seed, frec, amp, octa);
for (int x = 0; x < TamX; x++)
{
for (int y = 0; y < TamY; y++)
{
for (int z = 0; z < TamZ; z++)
{
if (per.FractalNoise3D(x, y, z, octa, frec, amp)>0)
{
lista.Add(new Bloque(new Vector3Int(x, y, z)));
}
}
}
}
}
I found information on various pages and blogs, and here, but nothing that worked for me. If anyone has any information I would greatly appreciate your help.
Thank you for helping me.
First, it's important to note that the code you have is not Simplex noise! It's the older "Perlin noise" algorithm that tends to exhibit visually significant directional artifacts.
I would suggest avoiding both Perlin noise and Simplex noise altogether. Perlin because of the directional artifacts, and Simplex because you run into patent issues with the 3D implementation.
There's an algorithm I've designed recently to get around both of those issues, that's starting to be adopted by a fair number of game developers, called OpenSimplex noise. Code: https://gist.github.com/KdotJPG/b1270127455a94ac5d19
(EDIT: Here's a C# port: https://gist.github.com/omgwtfgames/601497972e4e30fd9c5f)
Do something like this:
const double S = 24; //Sparsity of noise features
public void Resimulate() {
//To get fractal noise, ideally you want multiple instances of noise so you don't get odd behavior near (0,0,0)
OpenSimplexNoise n1 = new OpenSimplexNoise(seed);
OpenSimplexNoise n2 = new OpenSimplexNoise(seed+1);
OpenSimplexNoise n3 = new OpenSimplexNoise(seed+2);
for (int x = 0; x < TamX; x++) {
for (int y = 0; y < TamY; y++) {
for (int z = 0; z < TamZ; z++) {
double n = (n1.eval(x / S, y / S, z / S)
+ n2.eval(x / S / 2, y / S / 2, z / S / 2) * .5
+ n3.eval(x / S / 4, y / S / 4, z / S / 4) * .25)
/ (1 + .5 + .25);
double dx = (TamX / 2.0 - x);
double dy = (TamY / 2.0 - y);
double dz = (TamZ / 2.0 - z);
double d = dx*dx + dy*dy + dz*dz;
//d is now your squared distance from the center.
//n is your fractal noise value
//you want to combine d and n to form some threshold that
//determines whether or not there is a block.
//threshold = d / C1 + n + C2 > 0 where C1 and C2 are constants you choose
}
}
}
}
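For completeness, the threshold mentioned in the comments could be dropped into the innermost loop roughly like this (C1 and C2 are tuning constants you pick, and lista/Bloque/Vector3Int come from your own code):
const double C1 = 1500.0; // example value: larger C1 => larger solid radius
const double C2 = -0.5;   // example value: shifts the surface outward/inward
// The threshold from the comments above; flip the comparison if the solid
// and empty regions come out inverted for your constants.
if (d / C1 + n + C2 > 0)
{
    lista.Add(new Bloque(new Vector3Int(x, y, z)));
}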
EDIT: Here's a visual difference between Perlin and OpenSimplex:
Left is noise(x, y, 0) grayscale
Next is noise(x, y, 0) > 0 ? white : black
Next is |noise(x, y, 0)| > 0.1 ? white : black
Next is noise(x, y, 0.5) grayscale
My problem is that clicks only get registered in the lower right corner, and in some cases not even there. It seems to get worse the further you stray from (0,0): the farther away you are, the worse it gets.
public void Render(SpriteBatch B, Camera C)
{
Vector2 firstSquare = new Vector2(C.Position.X / 32, C.Position.Y / 32);
int firstX = (int)firstSquare.X;
int firstY = (int)firstSquare.Y;
Vector2 squareOffset = new Vector2(C.Position.X % 32, C.Position.Y % 32);
int offsetX = (int)squareOffset.X;
int offsetY = (int)squareOffset.Y;
for (int y = 0; y < 16; y++)
{
for (int x = 0; x < 26; x++)
{
Tile T = GetTile(x + firstX, y + firstY);
if (T == null)
{
continue;
}
T.RenderWithCamera(B,new Vector2((x*32)-offsetX,(y*32)-offsetY));
}
}
}
public void CheckClick(float mx, float my,Camera C)
{
Vector2 firstSquare = new Vector2(C.Position.X / 32, C.Position.Y / 32);
int x = (int)firstSquare.X;
int y = (int)firstSquare.Y;
Vector2 squareOffset = new Vector2(C.Position.X % 32, C.Position.Y % 32);
int offsetX = (int)squareOffset.X;
int offsetY = (int)squareOffset.Y;
int vx = (int)mx / 32;
int vy = (int)my / 32;
float x1 = vx + x;
float y1 = vy + y;
int maxX, maxY;
maxX = C.Width / 32;
maxY = C.Height / 32;
Console.WriteLine("MAX_X:" + maxX + "MAX_Y:" + maxY);
Tile T = GetTile(x1, y1);
Rectangle A = new Rectangle((int)mx, (int)my, 1, 1);
if (T == null)
{ Console.WriteLine("No Tile found"); return; }
if (T.IsInside(A))
{
Console.WriteLine("Not inside?");
Tile S = null;
S = new Wall((int)x1, (int)y1, 0);
if (S != null)
{
tiles.Add(S);
tiles2[(int)T.pos.X, (int)T.pos.Y] = S;
}
}
Console.WriteLine("Clicked Tile at X:" + T.pos.X + "Y:" + T.pos.Y);
}
public bool IsInside(Rectangle B) // TILE
{
Rectangle rectA = new Rectangle((int)Last_pos.X, (int)Last_pos.Y, icon.Width, icon.Height);
Console.WriteLine("A:" + rectA.X + "A.y:" + rectA.Y + "B.X:" + B.X + "B.Y:" + B.Y);
if(rectA.Intersects(B))
{
return true;
}
else
return false;
}
Here's how I like to handle clicking a tilemap.
int xTile = (int)Math.Floor((double)(Mouse.X + CameraBounds.left) / Tile.width);
int yTile = (int)Math.Floor((double)(Mouse.Y + CameraBounds.top) / Tile.height);
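A slightly fuller sketch of the same idea (Tile size, Camera and GetTile here stand in for your own members) that also behaves correctly for negative world coordinates:
// Convert a screen-space mouse position into tile indices, taking the camera
// offset into account. Math.Floor (rather than an int cast) keeps the mapping
// consistent when the resulting world coordinate is negative.
public Tile TileUnderMouse(float mouseX, float mouseY, Camera camera)
{
    int xTile = (int)Math.Floor((mouseX + camera.Position.X) / 32.0);
    int yTile = (int)Math.Floor((mouseY + camera.Position.Y) / 32.0);
    return GetTile(xTile, yTile);
}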
Is there any open source C# code or library to present a graphical waveform given a byte array?
This is as open source as it gets:
public static void DrawNormalizedAudio(ref float[] data, PictureBox pb,
Color color)
{
Bitmap bmp;
if (pb.Image == null)
{
bmp = new Bitmap(pb.Width, pb.Height);
}
else
{
bmp = (Bitmap)pb.Image;
}
int BORDER_WIDTH = 5;
int width = bmp.Width - (2 * BORDER_WIDTH);
int height = bmp.Height - (2 * BORDER_WIDTH);
using (Graphics g = Graphics.FromImage(bmp))
{
g.Clear(Color.Black);
Pen pen = new Pen(color);
int size = data.Length;
for (int iPixel = 0; iPixel < width; iPixel++)
{
// determine start and end points within WAV
int start = (int)((float)iPixel * ((float)size / (float)width));
int end = (int)((float)(iPixel + 1) * ((float)size / (float)width));
float min = float.MaxValue;
float max = float.MinValue;
for (int i = start; i < end; i++)
{
float val = data[i];
min = val < min ? val : min;
max = val > max ? val : max;
}
int yMax = BORDER_WIDTH + height - (int)((max + 1) * .5 * height);
int yMin = BORDER_WIDTH + height - (int)((min + 1) * .5 * height);
g.DrawLine(pen, iPixel + BORDER_WIDTH, yMax,
iPixel + BORDER_WIDTH, yMin);
}
}
pb.Image = bmp;
}
This function will produce something like this:
This takes an array of samples in floating-point format (where all sample values range from -1 to +1). If your original data is actually in the form of a byte[] array, you'll have to do a little bit of work to convert it to float[]. Let me know if you need that, too.
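For example, a call with 16-bit PCM samples could look like this (the PCM parsing below is just an assumption about your data, not part of the method above):
// Convert headerless 16-bit mono PCM into floats in [-1, 1] and draw it.
byte[] raw = System.IO.File.ReadAllBytes("test.pcm");
float[] samples = new float[raw.Length / 2];
for (int i = 0; i < samples.Length; i++)
    samples[i] = BitConverter.ToInt16(raw, i * 2) / 32768f;
DrawNormalizedAudio(ref samples, pictureBox1, Color.Lime);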
Update: since the question technically asked for something to render a byte array, here are a couple of helper methods:
public float[] FloatArrayFromStream(System.IO.MemoryStream stream)
{
return FloatArrayFromByteArray(stream.GetBuffer());
}
public float[] FloatArrayFromByteArray(byte[] input)
{
float[] output = new float[input.Length / 4];
for (int i = 0; i < output.Length; i++)
{
output[i] = BitConverter.ToSingle(input, i * 4);
}
return output;
}
Update 2: I forgot there's a better way to do this:
public float[] FloatArrayFromByteArray(byte[] input)
{
float[] output = new float[input.Length / 4];
Buffer.BlockCopy(input, 0, output, 0, input.Length);
return output;
}
I'm just so in love with for loops, I guess.
I modified MusiGenesis's solution a little bit.
This gave me a much better result, especially with house music :)
public static Bitmap DrawNormalizedAudio(List<float> data, Color foreColor, Color backColor, Size imageSize)
{
Bitmap bmp = new Bitmap(imageSize.Width, imageSize.Height);
int BORDER_WIDTH = 0;
float width = bmp.Width - (2 * BORDER_WIDTH);
float height = bmp.Height - (2 * BORDER_WIDTH);
using (Graphics g = Graphics.FromImage(bmp))
{
g.Clear(backColor);
Pen pen = new Pen(foreColor);
float size = data.Count;
for (float iPixel = 0; iPixel < width; iPixel += 1)
{
// determine start and end points within WAV
int start = (int)(iPixel * (size / width));
int end = (int)((iPixel + 1) * (size / width));
if (end > data.Count)
end = data.Count;
float posAvg, negAvg;
averages(data, start, end, out posAvg, out negAvg);
float yMax = BORDER_WIDTH + height - ((posAvg + 1) * .5f * height);
float yMin = BORDER_WIDTH + height - ((negAvg + 1) * .5f * height);
g.DrawLine(pen, iPixel + BORDER_WIDTH, yMax, iPixel + BORDER_WIDTH, yMin);
}
}
return bmp;
}
private static void averages(List<float> data, int startIndex, int endIndex, out float posAvg, out float negAvg)
{
posAvg = 0.0f;
negAvg = 0.0f;
int posCount = 0, negCount = 0;
for (int i = startIndex; i < endIndex; i++)
{
if (data[i] > 0)
{
posCount++;
posAvg += data[i];
}
else
{
negCount++;
negAvg += data[i];
}
}
if (posCount > 0)
posAvg /= posCount;
if (negCount > 0)
negAvg /= negCount;
}
With adapted code from robby, and using Graphics.FillClosedCurve / DrawClosedCurve with antialiasing, I get a pretty good-looking result.
Here's the code:
using System;
using System.Drawing;
using System.Drawing.Drawing2D;
namespace Soundfingerprinting.Audio.Services
{
public static class AudioVisualizationService
{
public class WaveVisualizationConfiguration
{
public Nullable<Color> AreaColor { get; set; }
public Nullable<Color> EdgeColor { get; set; }
public int EdgeSize { get; set; }
public Nullable<Rectangle> Bounds { get; set; }
public double Overlap { get; set; }
public int Step { get; set; }
}
public static void DrawWave(float[] data, Bitmap bitmap, WaveVisualizationConfiguration config = null)
{
Color areaColor = Color.FromArgb(0x7F87CEFA);// Color.LightSkyBlue; semi transparent
Color edgeColor = Color.DarkSlateBlue;
int edgeSize = 2;
int step = 2;
double overlap = 0.10f; // would better use a windowing function
Rectangle bounds = Rectangle.FromLTRB(0, 0, bitmap.Width, bitmap.Height);
if (config != null)
{
edgeSize = config.EdgeSize;
if (config.AreaColor.HasValue)
areaColor = config.AreaColor.GetValueOrDefault();
if (config.EdgeColor.HasValue)
edgeColor = config.EdgeColor.GetValueOrDefault();
if (config.Bounds.HasValue)
bounds = config.Bounds.GetValueOrDefault();
step = Math.Max(1, config.Step);
overlap = config.Overlap;
}
float width = bounds.Width;
float height = bounds.Height;
using (Graphics g = Graphics.FromImage(bitmap))
{
Pen edgePen = new Pen(edgeColor);
edgePen.LineJoin = LineJoin.Round;
edgePen.Width = edgeSize;
Brush areaBrush = new SolidBrush(areaColor);
float size = data.Length;
PointF[] topCurve = new PointF[(int)width / step];
PointF[] bottomCurve = new PointF[(int)width / step];
int idx = 0;
for (float iPixel = 0; iPixel < width; iPixel += step)
{
// determine start and end points within WAV
int start = (int)(iPixel * (size / width));
int end = (int)((iPixel + step) * (size / width));
int window = end - start;
start -= (int)(overlap * window);
end += (int)(overlap * window);
if (start < 0)
start = 0;
if (end > data.Length)
end = data.Length;
float posAvg, negAvg;
averages(data, start, end, out posAvg, out negAvg);
float yMax = height - ((posAvg + 1) * .5f * height);
float yMin = height - ((negAvg + 1) * .5f * height);
float xPos = iPixel + bounds.Left;
if (idx >= topCurve.Length)
idx = topCurve.Length - 1;
topCurve[idx] = new PointF(xPos, yMax);
bottomCurve[bottomCurve.Length - idx - 1] = new PointF(xPos, yMin);
idx++;
}
PointF[] curve = new PointF[topCurve.Length * 2];
Array.Copy(topCurve, curve, topCurve.Length);
Array.Copy(bottomCurve, 0, curve, topCurve.Length, bottomCurve.Length);
g.InterpolationMode = InterpolationMode.HighQualityBicubic;
g.SmoothingMode = SmoothingMode.AntiAlias;
g.FillClosedCurve(areaBrush, curve, FillMode.Winding, 0.15f);
if (edgeSize > 0)
g.DrawClosedCurve(edgePen, curve, 0.15f, FillMode.Winding);
}
}
private static void averages(float[] data, int startIndex, int endIndex, out float posAvg, out float negAvg)
{
posAvg = 0.0f;
negAvg = 0.0f;
int posCount = 0, negCount = 0;
for (int i = startIndex; i < endIndex; i++)
{
if (data[i] > 0)
{
posCount++;
posAvg += data[i];
}
else
{
negCount++;
negAvg += data[i];
}
}
if (posCount > 0)
posAvg /= posCount;
if (negCount > 0)
negAvg /= negCount;
}
}
}
In NAudio, there is code to draw audio waveforms in both WinForms and WPF. Have a look at the demo projects for examples of how to use it.
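For example (based on the WinForms demo projects; verify the control name and properties against your NAudio version, as this is an assumption rather than a quote from the docs), the WaveViewer control can be pointed at a wave stream:
// Assumes a NAudio.Gui.WaveViewer control named waveViewer1 dropped on a form,
// as in the NAudio WinForms demo.
var reader = new NAudio.Wave.WaveFileReader("test.wav");
waveViewer1.SamplesPerPixel = 128;   // horizontal zoom
waveViewer1.WaveStream = reader;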
I've been a fan of ZedGraph for many years and have used it to display all kinds of data in various projects.
The following sample code graphs an array of doubles varying between -1 and 1:
void DisplayWaveGraph(ZedGraphControl graphControl, double[] waveData)
{
var pane = graphControl.GraphPane;
pane.Chart.Border.IsVisible = false;
pane.Chart.Fill.IsVisible = false;
pane.Fill.Color = Color.Black;
pane.Margin.All = 0;
pane.Title.IsVisible = false;
pane.XAxis.IsVisible = false;
pane.XAxis.Scale.Max = waveData.Length - 1;
pane.XAxis.Scale.Min = 0;
pane.YAxis.IsVisible = false;
pane.YAxis.Scale.Max = 1;
pane.YAxis.Scale.Min = -1;
var timeData = Enumerable.Range(0, waveData.Length)
.Select(i => (double) i)
.ToArray();
pane.AddCurve(null, timeData, waveData, Color.Lime, SymbolType.None);
graphControl.AxisChange();
}
The above sample mimics the style of an audio editor by suppressing the axes and changing the colors to produce the following: