I have implemented a bank of oriented bandpass filters described in this article.
See the last paragraph of the section named "2.1 Pre-processing".
We selected 12 non-overlapping filters to analyze 12 different directions, each rotated 15° from the next.
I am having the following issue:
The filter bank was supposed to generate 12 filtered images, but in reality I am getting only 3 outputs, as seen in the following snapshot:
Source Code:
Here is the complete VS2013 solution as a zipped file.
Here is the most relevant part of the source code,
public class KassWitkinFunction
{
/*
* tx = centerX * cos
* ty = centerY * sin
*
* u* = cos . (u + tx) + sin . (v + ty)
* v* = - sin . (u + tx) + cos . (v + ty)
*
*/
//#region MyRegion
public static double tx(int centerX, double theta)
{
double costheta = Math.Cos(theta);
double txx = centerX * costheta;
return txx;
}
public static double ty(int centerY, double theta)
{
double sintheta = Math.Sin(theta);
double tyy = centerY * sintheta;
return tyy;
}
public static double uStar(double u, double v, int centerX, int centerY, double theta)
{
double txx = tx(centerX, theta);
double tyy = ty(centerY, theta);
double sintheta = Math.Sin(theta);
double costheta = Math.Cos(theta);
double cosThetaUTx = costheta * (u + txx);
double sinThetaVTy = sintheta * (v + tyy);
double returns = cosThetaUTx + sinThetaVTy;
return returns;
}
//#endregion
public static double vStar(double u, double v, int centerX, int centerY, double theta)
{
double txx = tx(centerX, theta);
double tyy = ty(centerY, theta);
double sintheta = Math.Sin(theta);
double costheta = Math.Cos(theta);
double sinThetaUTx = (-1) * sintheta * (u + txx);
double cosThetaVTy = costheta * (v + tyy);
double returns = sinThetaUTx + cosThetaVTy;
return returns;
}
public static double ApplyFilterOnOneCoord(double u, double v, double Du, double Dv, int CenterX, int CenterY, double Theta, int N)
{
double uStar = KassWitkinFunction.uStar(u, v, CenterX, CenterY, Theta);
double vStar = KassWitkinFunction.vStar(u, v, CenterX, CenterY, Theta);
double uStarDu = uStar / Du;
double vStarDv = vStar / Dv;
double sqrt = Math.Sqrt(uStarDu + vStarDv);
double _2n = 2 * N;
double pow = Math.Pow(sqrt, _2n);
double div = 1 + 0.414 * pow;
double returns = 1/div;
return returns;
}
}
public class KassWitkinKernel
{
public readonly int N = 4;
public int Width { get; set; }
public int Height { get; set; }
public double[,] Kernel { get; private set; }
public double[,] PaddedKernel { get; private set; }
public double Du { get; set; }
public double Dv { get; set; }
public int CenterX { get; set; }
public int CenterY { get; set; }
public double ThetaInRadian { get; set; }
public void SetKernel(double[,] value)
{
Kernel = value;
Width = Kernel.GetLength(0);
Height = Kernel.GetLength(1);
}
public void Pad(int newWidth, int newHeight)
{
double[,] temp = (double[,])Kernel.Clone();
PaddedKernel = ImagePadder.Pad(temp, newWidth, newHeight);
}
public Bitmap ToBitmap()
{
return ImageDataConverter.ToBitmap(Kernel);
}
public Bitmap ToBitmapPadded()
{
return ImageDataConverter.ToBitmap(PaddedKernel);
}
public Complex[,] ToComplex()
{
return ImageDataConverter.ToComplex(Kernel);
}
public Complex[,] ToComplexPadded()
{
return ImageDataConverter.ToComplex(PaddedKernel);
}
public void Compute()
{
Kernel = new double[Width, Height];
for (int i = 0; i < Width; i++)
{
for (int j = 0; j < Height; j++)
{
Kernel[i, j] = (double)KassWitkinFunction.ApplyFilterOnOneCoord(i, j,
Du,
Dv,
CenterX,
CenterY,
ThetaInRadian,
N);
//Data[i, j] = r.NextDouble();
}
}
string str = string.Empty;
}
}
public class KassWitkinBandpassFilter
{
public Bitmap Apply(Bitmap image, KassWitkinKernel kernel)
{
Complex[,] cImagePadded = ImageDataConverter.ToComplex(image);
Complex[,] cKernelPadded = kernel.ToComplexPadded();
Complex[,] convolved = Convolution.Convolve(cImagePadded, cKernelPadded);
return ImageDataConverter.ToBitmap(convolved);
}
}
public class KassWitkinFilterBank
{
private List<KassWitkinKernel> Kernels;
public int NoOfFilters { get; set; }
public double FilterAngle { get; set; }
public int WidthWithPadding { get; set; }
public int HeightWithPadding { get; set; }
public int KernelDimension { get; set; }
public KassWitkinFilterBank()
{}
public List<Bitmap> Apply(Bitmap bitmap)
{
Kernels = new List<KassWitkinKernel>();
double degrees = FilterAngle;
KassWitkinKernel kernel;
for (int i = 0; i < NoOfFilters; i++)
{
kernel = new KassWitkinKernel();
kernel.Width = KernelDimension;
kernel.Height = KernelDimension;
kernel.CenterX = (kernel.Width) / 2;
kernel.CenterY = (kernel.Height) / 2;
kernel.Du = 2;
kernel.Dv = 2;
kernel.ThetaInRadian = Tools.DegreeToRadian(degrees);
kernel.Compute();
kernel.Pad(WidthWithPadding, HeightWithPadding);
Kernels.Add(kernel);
degrees += degrees;
}
List<Bitmap> list = new List<Bitmap>();
foreach (KassWitkinKernel k in Kernels)
{
Bitmap image = (Bitmap)bitmap.Clone();
Complex[,] cImagePadded = ImageDataConverter.ToComplex(image);
Complex[,] cKernelPadded = k.ToComplexPadded();
Complex[,] convolved = Convolution.Convolve(cImagePadded, cKernelPadded);
Bitmap temp = ImageDataConverter.ToBitmap(convolved);
list.Add(temp);
}
return list;
}
}
As I pointed out earlier in comments, most of the filter outputs are blank because they contain NaNs. These are caused by
the implementation of equations (1) and (2) from
your reference article.
Getting in touch with the original authors would probably give you the best chance of reproducing the original results, but at the very least you could ensure that no NaNs are produced with:
double arg = uStarDu + vStarDv;
double div = 1 + 0.414 * Math.Pow(Math.Abs(arg), N);
On the other hand, given that the general form of the equation is reminiscent of a Butterworth filter (together with the mention of bandpass filtering), and given the seemingly unnecessary square root followed by exponentiation (which suggests either a missed obvious simplification, or more likely in my opinion an error in the rendering of the equation), I would suggest to instead use the following equation:
H(u, v) = 1 / sqrt( 1 + (u*/Du + v*/Dv)^(2N) )
with
u* = cos(θ)·(u − centerX) + sin(θ)·(v − centerY)
v* = −sin(θ)·(u − centerX) + cos(θ)·(v − centerY)
where (centerX, centerY) is the center of the image. This could be implemented with:
public static double uStar(double u, double v, int centerX, int centerY, double theta)
{
double sintheta = Math.Sin(theta);
double costheta = Math.Cos(theta);
return costheta * (u - centerX) + sintheta * (v - centerY);
}
public static double vStar(double u, double v, int centerX, int centerY, double theta)
{
double sintheta = Math.Sin(theta);
double costheta = Math.Cos(theta);
return (-1) * sintheta * (u - centerX) + costheta * (v - centerY);
}
public static double ApplyFilterOnOneCoord(double u, double v, double Du, double Dv, int CenterX, int CenterY, double Theta, int N)
{
double uStarDu = KassWitkinFunction.uStar(u, v, CenterX, CenterY, Theta) / Du;
double vStarDv = KassWitkinFunction.vStar(u, v, CenterX, CenterY, Theta) / Dv;
double arg = uStarDu + vStarDv;
double div = Math.Sqrt(1 + Math.Pow(arg, 2*N));;
return 1/div;
}
Now you must realize that those equations are given for the filter representation in the frequency domain, whereas your Convolution.Convolve expects the filter kernel to be provided in the spatial domain (despite the core of the computation being done in the frequency domain).
The easiest way to apply these filters (and still get the correct padding in the spatial domain) is to:
- set the frequency domain kernel size to the padded size, and compute the kernel in the frequency domain at that size
- transform the frequency domain kernel to the spatial domain
- zero out the kernel where the padding would have been added
- transform the kernel back to the frequency domain
This can be achieved with the following modified version of KassWitkinKernel.Pad:
private Complex[,] cPaddedKernel;
public void Pad(int unpaddedWidth, int unpaddedHeight, int newWidth, int newHeight)
{
Complex[,] unpaddedKernelFrequencyDomain = ImageDataConverter.ToComplex((double[,])Kernel.Clone());
FourierTransform ftInverse = new FourierTransform();
ftInverse.InverseFFT(FourierShifter.RemoveFFTShift(unpaddedKernelFrequencyDomain));
Complex[,] cKernel = FourierShifter.FFTShift(ftInverse.GrayscaleImageComplex);
int startPointX = (int)Math.Ceiling((double)(newWidth - unpaddedWidth) / (double)2) - 1;
int startPointY = (int)Math.Ceiling((double)(newHeight - unpaddedHeight) / (double)2) - 1;
for (int j = 0; j < newHeight; j++)
{
for (int i=0; i<startPointX; i++)
{
cKernel[i, j] = 0;
}
for (int i = startPointX + unpaddedWidth; i < newWidth; i++)
{
cKernel[i, j] = 0;
}
}
for (int i = startPointX; i < startPointX + unpaddedWidth; i++)
{
for (int j = 0; j < startPointY; j++)
{
cKernel[i, j] = 0;
}
for (int j = startPointY + unpaddedHeight; j < newHeight; j++)
{
cKernel[i, j] = 0;
}
}
FourierTransform ftForward = new FourierTransform(cKernel); ftForward.ForwardFFT();
cPaddedKernel = ftForward.FourierImageComplex;
}
public Complex[,] ToComplexPadded()
{
return cPaddedKernel;
}
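Note that the signature of Pad has changed, so the call in KassWitkinKernel's consumer KassWitkinFilterBank.Apply would presumably become:
kernel.Pad(KernelDimension, KernelDimension, WidthWithPadding, HeightWithPadding);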
Later, when computing the convolution, you would skip the FFT for the kernel.
Note that you could similarly avoid recomputing the image's FFT for every filter in the filter bank.
If you precompute the FFT of the image, the remaining computations required to get the convolution
are reduced to the frequency domain multiplication and the final inverse transform:
public partial class Convolution
{
public static Complex[,] ConvolveInFrequencyDomain(Complex[,] fftImage, Complex[,] fftKernel)
{
Complex[,] convolve = null;
int imageWidth = fftImage.GetLength(0);
int imageHeight = fftImage.GetLength(1);
int maskWidth = fftKernel.GetLength(0);
int maskHeight = fftKernel.GetLength(1);
if (imageWidth == maskWidth && imageHeight == maskHeight)
{
Complex[,] fftConvolved = new Complex[imageWidth, imageHeight];
for (int j = 0; j < imageHeight; j++)
{
for (int i = 0; i < imageWidth; i++)
{
fftConvolved[i, j] = fftImage[i, j] * fftKernel[i, j];
}
}
FourierTransform ftForConv = new FourierTransform();
ftForConv.InverseFFT(fftConvolved);
convolve = FourierShifter.FFTShift(ftForConv.GrayscaleImageComplex);
Rescale(convolve);
}
else
{
throw new Exception("padding needed");
}
return convolve;
}
}
Which would be used in KassWitkinFilterBank.Apply as follows:
Bitmap image = (Bitmap)bitmap.Clone();
Complex[,] cImagePadded = ImageDataConverter.ToComplex(image);
FourierTransform ftForImage = new FourierTransform(cImagePadded); ftForImage.ForwardFFT();
Complex[,] fftImage = ftForImage.FourierImageComplex;
foreach (KassWitkinKernel k in Kernels)
{
Complex[,] cKernelPadded = k.ToComplexPadded();
Complex[,] convolved = Convolution.ConvolveInFrequencyDomain(fftImage, cKernelPadded);
Bitmap temp = ImageDataConverter.ToBitmap(convolved);
list.Add(temp);
}
So that should get you past the bump indicated in your question.
Of course, if the intention is to reproduce the results of the paper, you still have other hurdles to get over.
The first one is to actually use a sharpened image as input to the filter bank.
While you do this, you may want to first smooth the edges of the image to avoid generating a strong edge all
around the image, which would skew the results of the line detection algorithm.
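As a minimal sketch of one way to do that edge smoothing, assuming a simple raised-cosine taper over a band of pixels along each border (the TaperBorders and RampWeight names and the border width are illustrative, not from the paper):
public static double[,] TaperBorders(double[,] image, int border)
{
    int w = image.GetLength(0);
    int h = image.GetLength(1);
    double[,] result = new double[w, h];
    for (int i = 0; i < w; i++)
    {
        for (int j = 0; j < h; j++)
        {
            // separable weight: 1 in the interior, fading to 0 at the borders
            result[i, j] = image[i, j] * RampWeight(i, w, border) * RampWeight(j, h, border);
        }
    }
    return result;
}
private static double RampWeight(int pos, int size, int border)
{
    int d = Math.Min(pos, size - 1 - pos); // distance to the nearest edge
    if (d >= border) return 1.0;
    return 0.5 * (1 - Math.Cos(Math.PI * d / border)); // raised-cosine ramp
}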
The problem is here:
public static double ApplyFilterOnOneCoord(double u, double v, double Du, double Dv, int CenterX, int CenterY, double Theta, int N)
{
double uStar = KassWitkinFunction.uStar(u, v, CenterX, CenterY, Theta);
double vStar = KassWitkinFunction.vStar(u, v, CenterX, CenterY, Theta);
double uStarDu = uStar / Du;
double vStarDv = vStar / Dv;
double sqrt = Math.Sqrt(uStarDu + vStarDv);
double _2n = 2 * N;
double pow = Math.Pow(sqrt, _2n);
if (!double.IsNaN(sqrt) && Math.Abs(pow - Math.Pow(uStarDu + vStarDv, N)) > 1e-7)
{
//execution will never reach here!!
}
pow = Math.Pow(uStarDu + vStarDv, N);
double div = 1 + 0.414 * pow;
double returns = 1 / div;
return returns;
}
What I don't understand is why we should take the square root before computing Math.Pow, especially when we know that the power is an even number: for non-negative arguments the result is identical, since (√x)^(2N) = x^N. The only thing the square root does (besides making the code more complex and slower) is generate NaN for negative values.
I'm not sure the entire computation is right yet, but with this change all 12 filtered images appear!
This is used in pre-processing, and is claimed to come from the paper by Kass and Witkin. I tried to read the original paper, but the scan quality is very low and it is hard to read. Do you happen to have a link to a better quality scan of their [15] reference?
Related
Project: intelligence scissors.
The first part of the project is to load an image (in the form of an RGBPixel 2D array) and then construct a graph of weights, to be used later to determine the shortest path between 2 points on the image (the 2 points will be determined by an anchor and a free point; in short, I will have the source and the destination points).
I have a function that opens the image and returns a ready-made RGBPixel 2D array. Now that the image is loaded, I want to construct the graph. To do that, I should use a function called CalculatePixelEnergies. Here is the code:
public static Vector2D CalculatePixelEnergies(int x, int y, RGBPixel[,] ImageMatrix)
{
if (ImageMatrix == null) throw new Exception("image is not set!");
Vector2D gradient = CalculateGradientAtPixel(x, y, ImageMatrix);
double gradientMagnitude = Math.Sqrt(gradient.X * gradient.X + gradient.Y * gradient.Y);
double edgeAngle = Math.Atan2(gradient.Y, gradient.X);
double rotatedEdgeAngle = edgeAngle + Math.PI / 2.0;
Vector2D energy = new Vector2D();
energy.X = Math.Abs(gradientMagnitude * Math.Cos(rotatedEdgeAngle));
energy.Y = Math.Abs(gradientMagnitude * Math.Sin(rotatedEdgeAngle));
return energy;
}
This function uses CalculateGradientAtPixel. Here is the code, in case you want it:
private static Vector2D CalculateGradientAtPixel(int x, int y, RGBPixel[,] ImageMatrix)
{
Vector2D gradient = new Vector2D();
RGBPixel mainPixel = ImageMatrix[y, x];
double pixelGrayVal = 0.21 * mainPixel.red + 0.72 * mainPixel.green + 0.07 * mainPixel.blue;
if (y == GetHeight(ImageMatrix) - 1)
{
//boundary pixel.
for (int i = 0; i < 3; i++)
{
gradient.Y = 0;
}
}
else
{
RGBPixel downPixel = ImageMatrix[y + 1, x];
double downPixelGrayVal = 0.21 * downPixel.red + 0.72 * downPixel.green + 0.07 * downPixel.blue;
gradient.Y = pixelGrayVal - downPixelGrayVal;
}
if (x == GetWidth(ImageMatrix) - 1)
{
//boundary pixel.
gradient.X = 0;
}
else
{
RGBPixel rightPixel = ImageMatrix[y, x + 1];
double rightPixelGrayVal = 0.21 * rightPixel.red + 0.72 * rightPixel.green + 0.07 * rightPixel.blue;
gradient.X = pixelGrayVal - rightPixelGrayVal;
}
return gradient;
}
In my graph construction code, I decided to make a 2D double array to hold the weights. Here is what I do, but it seems to be a wrong construction:
public static double [,] calculateWeights(RGBPixel[,] ImageMatrix)
{
double[,] weights = new double[1000, 1000];
int height = ImageOperations.GetHeight(ImageMatrix);
int width = ImageOperations.GetWidth(ImageMatrix);
for (int y = 0; y < height - 1; y++)
{
for (int x = 0; x < width - 1; x++)
{
Vector2D e;
e = ImageOperations.CalculatePixelEnergies(x, y, ImageMatrix);
weights[y + 1, x] = 1 / e.X;
weights[y, x + 1] = 1 / e.Y;
}
}
return weights;
}
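For reference, here is a hypothetical sketch of the graph the project describes, as an explicit edge list: every pixel is a node connected to its right and bottom neighbours, weighted by the inverse of the corresponding energy. The Edge type and the epsilon guarding against zero energies are my own additions, not part of the project skeleton, and whether e.X or e.Y belongs to the horizontal edge depends on how the energies are defined:
public struct Edge
{
    public int FromX, FromY, ToX, ToY;
    public double Weight;
}
public static List<Edge> BuildEdgeList(RGBPixel[,] ImageMatrix)
{
    const double eps = 1e-9; // avoids infinite weights when an energy is zero
    int height = ImageOperations.GetHeight(ImageMatrix);
    int width = ImageOperations.GetWidth(ImageMatrix);
    List<Edge> edges = new List<Edge>();
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            Vector2D e = ImageOperations.CalculatePixelEnergies(x, y, ImageMatrix);
            if (x + 1 < width) // edge to the right neighbour
                edges.Add(new Edge { FromX = x, FromY = y, ToX = x + 1, ToY = y, Weight = 1 / (e.X + eps) });
            if (y + 1 < height) // edge to the bottom neighbour
                edges.Add(new Edge { FromX = x, FromY = y, ToX = x, ToY = y + 1, Weight = 1 / (e.Y + eps) });
        }
    }
    return edges;
}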
An example image:
Another example image:
Here we have the spatial-domain implementation of the Gabor filter. But I need to implement a Gabor filter in the frequency domain, for performance reasons.
I have found the Frequency Domain equation of Gabor Filter:
I am actually in doubt about the correctness and/or applicability of this formula.
Source Code
So, I have implemented the following:
public partial class GaborFfftForm : Form
{
private double Gabor(double u, double v, double f0, double theta, double a, double b)
{
double rad = Math.PI / 180 * theta;
double uDash = u * Math.Cos(rad) + v * Math.Sin(rad);
double vDash = (-1) * u * Math.Sin(rad) + v * Math.Cos(rad);
return Math.Exp((-1) * Math.PI * Math.PI * ((uDash - f0) / (a * a)) + (vDash / (b * b)));
}
public Array2d<Complex> GaborKernelFft(int sizeX, int sizeY, double f0, double theta, double a, double b)
{
int halfX = sizeX / 2;
int halfY = sizeY / 2;
Array2d<Complex> kernel = new Array2d<Complex>(sizeX, sizeY);
for (int u = -halfX; u < halfX; u++)
{
for (int v = -halfY; v < halfY; v++)
{
double g = Gabor(u, v, f0, theta, a, b);
kernel[u + halfX, v + halfY] = new Complex(g, 0);
}
}
return kernel;
}
public GaborFfftForm()
{
InitializeComponent();
Bitmap image = DataConverter2d.ReadGray(StandardImage.LenaGray);
Array2d<double> dImage = DataConverter2d.ToDouble(image);
int newWidth = Tools.ToNextPowerOfTwo(dImage.Width) * 2;
int newHeight = Tools.ToNextPowerOfTwo(dImage.Height) * 2;
double u0 = 0.2;
double v0 = 0.2;
double alpha = 10;//1.5;
double beta = alpha;
Array2d<Complex> kernel2d = GaborKernelFft(newWidth, newHeight, u0, v0, alpha, beta);
dImage.PadTo(newWidth, newHeight);
Array2d<Complex> cImage = DataConverter2d.ToComplex(dImage);
Array2d<Complex> fImage = FourierTransform.ForwardFft(cImage);
// FFT convolution .................................................
Array2d<Complex> fOutput = new Array2d<Complex>(newWidth, newHeight);
for (int x = 0; x < newWidth; x++)
{
for (int y = 0; y < newHeight; y++)
{
fOutput[x, y] = fImage[x, y] * kernel2d[x, y];
}
}
Array2d<Complex> cOutput = FourierTransform.InverseFft(fOutput);
Array2d<double> dOutput = Rescale2d.Rescale(DataConverter2d.ToDouble(cOutput));
//dOutput.CropBy((newWidth-image.Width)/2, (newHeight - image.Height)/2);
Bitmap output = DataConverter2d.ToBitmap(dOutput, image.PixelFormat);
Array2d<Complex> cKernel = FourierTransform.InverseFft(kernel2d);
cKernel = FourierTransform.RemoveFFTShift(cKernel);
Array2d<double> dKernel = DataConverter2d.ToDouble(cKernel);
Array2d<double> dRescaledKernel = Rescale2d.Rescale(dKernel);
Bitmap kernel = DataConverter2d.ToBitmap(dRescaledKernel, image.PixelFormat);
pictureBox1.Image = image;
pictureBox2.Image = kernel;
pictureBox3.Image = output;
}
}
Just concentrate on the algorithmic steps at this time.
I have generated a Gabor kernel in the frequency domain. Since the kernel is already in the frequency domain, I didn't apply the FFT to it, whereas the image is FFT-ed. Then I multiplied the kernel and the image to achieve FFT convolution. The result is then inverse-FFTed and converted back to a Bitmap as usual.
Output
The kernel looks okay, but the filter output doesn't look very promising (or does it?).
The orientation (theta) doesn't have any effect on the kernel.
The calculation/formula frequently suffers from divide-by-zero exceptions when the parameter values change.
How can I fix these problems?
Oh, and also:
what do the parameters α and β represent?
what should be the appropriate value of f₀?
Update:
I have modified my code according to @Cris Luengo's answer.
private double Gabor(double u, double v, double u0, double v0, double a, double b)
{
double p = (-2) * Math.PI * Math.PI;
double q = (u-u0)/(a*a);
double r = (v - v0) / (b * b);
return Math.Exp(p * (q + r));
}
public Array2d<Complex> GaborKernelFft(int sizeX, int sizeY, double u0, double v0, double a, double b)
{
double xx = sizeX;
double yy = sizeY;
double halfX = (xx - 1) / xx;
double halfY = (yy - 1) / yy;
Array2d<Complex> kernel = new Array2d<Complex>(sizeX, sizeY);
for (double u = 0; u <= halfX; u += 0.1)
{
for (double v = 0; v <= halfY; v += 0.1)
{
double g = Gabor(u, v, u0, v0, a, b);
int x = (int)(u * 10);
int y = (int)(v * 10);
kernel[x,y] = new Complex(g, 0);
}
}
return kernel;
}
where,
double u0 = 0.2;
double v0 = 0.2;
double alpha = 10;//1.5;
double beta = alpha;
I am not sure whether this is a good output.
There seems to be a typo in the equation for the Gabor filter that you found. The Gabor filter is a translated Gaussian in the frequency domain. Hence, it needs to have u² and v² in the exponent.
Equation (2) in your link seems more sensible, but still misses a 2:
exp( -2(πσ)² (u-f₀)² )
This is the 1D case; it is the filter we want to use in the direction θ. We now multiply it, in the perpendicular direction v, with a non-shifted Gaussian. I set α and β to be the inverses of the two sigmas:
exp( -2(π/α)² (u-f₀)² ) · exp( -2(π/β)² v² ) = exp( -2π²( ((u-f₀)/α)² + (v/β)² ) )
You should implement the above equation with u and v rotated over θ, as you already do.
Also, u and v should run from -0.5 to 0.5, not from -sizeX/2 to sizeX/2. And that is assuming your FFT sets the origin in the middle of the image, which is not common. Typically the FFT algorithms set the origin in a corner of the image. So you should probably have your u and v run from 0 to (sizeX-1)/sizeX instead.
With a corrected implementation as above, you should set f₀ to be between 0 and 0.5 (try 0.2 to start with), and α and β should be small enough that the Gaussian doesn't reach the 0 frequency (you want the filter to be 0 there).
In the frequency domain, your filter will look like a rotated Gaussian away from the origin.
In the spatial domain, the magnitude of your filter should look again like a Gaussian. The imaginary component should look like this (picture links to Wikipedia page I found it on):
(i.e. it is anti-symmetric (odd) in the θ direction), possibly with more lobes depending on α, β and f₀. The real component should be similar but symmetric (even), with a maximum in the middle. Note that after IFFT, you might need to shift the origin from the top-left corner to the middle of the image (Google "fftshift").
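For reference, a minimal fftshift sketch in terms of the Array2d&lt;Complex&gt; type used above, assuming only its constructor, indexer, and Width/Height properties (the half-size circular shift is the usual convention):
public static Array2d<Complex> FftShift(Array2d<Complex> data)
{
    int w = data.Width;
    int h = data.Height;
    Array2d<Complex> shifted = new Array2d<Complex>(w, h);
    for (int x = 0; x < w; x++)
    {
        for (int y = 0; y < h; y++)
        {
            // circular shift by half the size in each dimension
            shifted[(x + w / 2) % w, (y + h / 2) % h] = data[x, y];
        }
    }
    return shifted;
}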
Note that if you set α and β to be equal, the rotation of the u-v plane is irrelevant. In this case, you can use cartesian coordinates instead of polar coordinates to define the frequency. That is, instead of defining f₀ and θ as parameters, you define u₀ and v₀. In the exponent you then replace u-f₀ with u-u₀, and v with v-v₀.
The code after the edit of the question misses the square again. I would write the code as follows:
private double Gabor(double u, double v, double u0, double v0, double a, double b)
{
double p = (-2) * Math.PI * Math.PI;
double q = (u-u0)/a;
double r = (v - v0)/b;
return Math.Exp(p * (q*q + r*r));
}
public Array2d<Complex> GaborKernelFft(int sizeX, int sizeY, double u0, double v0, double a, double b)
{
double halfX = sizeX / 2;
double halfY = sizeY / 2;
Array2d<Complex> kernel = new Array2d<Complex>(sizeX, sizeY);
for (double y = 0; y < sizeY; y++)
{
double v = y / sizeY;
// v -= HalfY; // whether this is necessary or not depends on your FFT
for (double x = 0; x < sizeX; x++)
{
double u = x / sizeX;
// u -= HalfX; // whether this is necessary or not depends on your FFT
double g = Gabor(u, v, u0, v0, a, b);
kernel[(int)x, (int)y] = new Complex(g, 0);
}
}
return kernel;
}
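A hypothetical call, reusing newWidth and newHeight from the question's code and the starting value suggested above (u₀ = 0.2, v₀ = 0 corresponds to f₀ = 0.2 along θ = 0; the width 0.1 for α and β is an arbitrary small value that keeps the Gaussian away from the zero frequency):
Array2d<Complex> kernel2d = GaborKernelFft(newWidth, newHeight, 0.2, 0.0, 0.1, 0.1);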
I am using a solution I've found in this post:
Polygon area calculation using Latitude and Longitude generated from Cartesian space and a world file
There is something wrong, because the values I am getting are not real. For example, we know a football field should have around 5,300 square meters, right? But the calculation is giving 5,759,154.21.
This is the code:
private static double CalculatePolygonArea(IList<Position> coordinates)
{
double area = 0;
if (coordinates.Count > 2)
{
for (var i = 0; i < coordinates.Count - 1; i++)
{
Position p1 = coordinates[i];
Position p2 = coordinates[i + 1];
area += (ConvertToRadian(p2.Longitude) - ConvertToRadian(p1.Longitude)) * (2 + Math.Sin(ConvertToRadian(p1.Latitude)) + Math.Sin(ConvertToRadian(p2.Latitude)));
}
area = area * 6378137 * 6378137 / 2;
}
return Math.Abs(area);
}
private static double ConvertToRadian(double input)
{
return input * Math.PI / 180;
}
What can be wrong here? Any help?
The area calculation you are using is just plain wrong.... :-/
I use the SphericalUtil.ComputeSignedArea method from Google's Android Maps Utils.
Note: Google's Java code for that is under Apache License Version 2.0, and I converted it to C#.
Looking up that football field in one of my apps, I get 4,461: not quite the actual 5,531, but not bad for using Google Maps photos...
Here is just the ComputeSignedArea:
public static class SphericalUtil
{
const double EARTH_RADIUS = 6371009;
static double ToRadians(double input)
{
return input / 180.0 * Math.PI;
}
public static double ComputeSignedArea(IList<LatLng> path)
{
return ComputeSignedArea(path, EARTH_RADIUS);
}
static double ComputeSignedArea(IList<LatLng> path, double radius)
{
int size = path.Count;
if (size < 3) { return 0; }
double total = 0;
var prev = path[size - 1];
double prevTanLat = Math.Tan((Math.PI / 2 - ToRadians(prev.Latitude)) / 2);
double prevLng = ToRadians(prev.Longitude);
foreach (var point in path)
{
double tanLat = Math.Tan((Math.PI / 2 - ToRadians(point.Latitude)) / 2);
double lng = ToRadians(point.Longitude);
total += PolarTriangleArea(tanLat, lng, prevTanLat, prevLng);
prevTanLat = tanLat;
prevLng = lng;
}
return total * (radius * radius);
}
static double PolarTriangleArea(double tan1, double lng1, double tan2, double lng2)
{
double deltaLng = lng1 - lng2;
double t = tan1 * tan2;
return 2 * Math.Atan2(t * Math.Sin(deltaLng), 1 + t * Math.Cos(deltaLng));
}
}
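A minimal usage sketch with hypothetical coordinates, assuming a LatLng type that exposes Latitude and Longitude and is constructed as LatLng(lat, lng):
var path = new List<LatLng>
{
    new LatLng(38.8976, -77.0387),
    new LatLng(38.8976, -77.0375),
    new LatLng(38.8984, -77.0375),
    new LatLng(38.8984, -77.0387)
};
double areaInSquareMeters = Math.Abs(SphericalUtil.ComputeSignedArea(path));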
I know that what I am asking for exists with Google Maps, but I'm working with Xamarin.Forms.Map, so I have to make it on my own.
However, I know how to get the center of my points (the POI, Point of Interest), but I don't know how to determine the zoom of the camera.
I searched on the web, and from this post I got redirected to the Haversine algorithm.
However, I tried the code given but it doesn't work. I know how to find the POI and the two farthest points, but I can't determine the zoom.
Any idea please? :/
Note: here is the code, if you want to see what I tried.
#region Camera focus method
private static void OnCustomPinsPropertyChanged(BindableObject bindable, object oldValue, object newValue)
{
CustomMap customMap = ((CustomMap)bindable);
if (customMap.CameraFocusParameter == CameraFocusReference.OnPins)
{
List<Position> PositionPins = new List<Position>();
bool onlyOnePointPresent;
foreach (CustomPin pin in (newValue as List<CustomPin>))
{
PositionPins.Add(pin.Position);
}
Position CentralPosition = GetCentralPosition(PositionPins);
if (PositionPins.Count > 1)
{
Position[] FarestPoints = GetTwoFarestPointsOfCenterPointReference(PositionPins, CentralPosition);
customMap.CameraFocus = GetPositionAndZoomLevelForCameraAboutPositions(FarestPoints);
onlyOnePointPresent = false;
}
else
{
customMap.CameraFocus = new CameraFocusData() { Position = CentralPosition };
onlyOnePointPresent = true;
}
customMap.MoveToRegion(MapSpan.FromCenterAndRadius(customMap.CameraFocus.Position,
(!onlyOnePointPresent) ? (customMap.CameraFocus.Distance) : (new Distance(5))));
}
}
public static Position GetCentralPosition(List<Position> positions)
{
if (positions.Count == 1)
{
foreach (Position pos in positions)
{
return (pos);
}
}
double lat = 0;
double lng = 0;
foreach (var pos in positions)
{
lat += pos.Latitude;
lng += pos.Longitude;
}
var total = positions.Count;
lat = lat / total;
lng = lng / total;
return new Position(lat, lng);
}
public class DataCalc
{
public Position Pos { get; set; }
public double Distance { get; set; }
}
public static Position[] GetTwoFarestPointsOfCenterPointReference(List<Position> farestPosition, Position centerPosition)
{
Position[] FarestPos = new Position[2];
List<DataCalc> dataCalc = new List<DataCalc>();
Debug.WriteLine("So the center is on [{0}]/[{1}]", centerPosition.Latitude, centerPosition.Longitude);
foreach (Position pos in farestPosition)
{
dataCalc.Add(new DataCalc()
{
Pos = pos,
Distance = Math.Sqrt(Math.Pow(pos.Latitude - centerPosition.Latitude, 2) + Math.Pow(pos.Longitude - centerPosition.Longitude, 2))
});
}
DataCalc First = new DataCalc() { Distance = 0 };
foreach (DataCalc dc in dataCalc)
{
if (dc.Distance > First.Distance)
{
First = dc;
}
}
Debug.WriteLine("The farest one is on [{0}]/[{1}]", First.Pos.Latitude, First.Pos.Longitude);
DataCalc Second = new DataCalc() { Distance = 0 };
foreach (DataCalc dc in dataCalc)
{
if (dc.Distance > Second.Distance
&& (dc.Pos.Latitude != First.Pos.Latitude && dc.Pos.Longitude != First.Pos.Longitude))
{
Second = dc;
}
}
Debug.WriteLine("the second is on [{0}]/[{1}]", Second.Pos.Latitude, Second.Pos.Longitude);
FarestPos[0] = First.Pos;
FarestPos[1] = Second.Pos;
return (FarestPos);
}
public class CameraFocusData
{
public Position Position { get; set; }
public Distance Distance { get; set; }
}
//HAVERSINE
public static CameraFocusData GetPositionAndZoomLevelForCameraAboutPositions(Position[] FarestPoints)
{
double earthRadius = 6371000; // meters
Position pos1 = FarestPoints[0];
Position pos2 = FarestPoints[1];
double latitud1Radianes = pos1.Latitude * (Math.PI / 180.0);
double latitud2Radianes = pos2.Latitude * (Math.PI / 180.0);
double longitud1Radianes = pos2.Longitude * (Math.PI / 180.0);
double longitud2Radianes = pos2.Longitude * (Math.PI / 180.0);
double deltaLatitud = (pos2.Latitude - pos1.Latitude) * (Math.PI / 180.0);
double deltaLongitud = (pos2.Longitude - pos1.Longitude) * (Math.PI / 180.0);
double sum1 = Math.Sin(deltaLatitud / 2) * Math.Sin(deltaLatitud / 2);
double sum2 = Math.Cos(latitud1Radianes) * Math.Cos(latitud2Radianes) * Math.Sin(deltaLongitud / 2) * Math.Sin(deltaLongitud / 2);
var a = sum1 + sum2;
var c = 2 * Math.Atan2(Math.Sqrt(a), Math.Sqrt(1 - a));
var distance = earthRadius * c;
/* lt is the latitude of the midpoint
* lng is the longitude of the midpoint */
var Bx = Math.Cos(latitud2Radianes) * Math.Cos(deltaLongitud);
var By = Math.Cos(latitud2Radianes) * Math.Sin(deltaLongitud);
var lt = Math.Atan2(Math.Sin(latitud1Radianes) + Math.Sin(latitud2Radianes),
Math.Sqrt((Math.Cos(latitud1Radianes) + Bx) * (Math.Cos(latitud2Radianes) + Bx) + By * By)); // latitude of the midpoint
var lng = longitud1Radianes + Math.Atan2(By, Math.Cos(longitud1Radianes) + Bx); // longitude of the midpoint
Debug.WriteLine("the final pos of the camera is on [{0}]/[{1}]", lt, lng);
return (new CameraFocusData() { Position = new Position(lt, lng), Distance = new Distance(distance + 0.2) });
}
#endregion
I then found the solution. Here is the code for it; it has been written to be put into your custom map.
Here, private static void OnCustomPinsPropertyChanged(BindableObject bindable, object oldValue, object newValue) is a method which is called when my CustomPins bindable property changes, but you can use a different method.
public static readonly BindableProperty CustomPinsProperty =
BindableProperty.Create(nameof(CustomPins), typeof(IList<CustomPin>), typeof(CustomMap), null,
propertyChanged: OnCustomPinsPropertyChanged);
Also, you could include the lat/long of the user; I didn't do it because my needs don't include the position of the user :).
Finally, you can add a multiplier for the zoom. If the zoom is too far out for you, do as I did and multiply the double distance value by something like 0.7 or 0.6 :)
#region Camera focus definition
public class CameraFocusData
{
public Position Position { get; set; }
public Distance Distance { get; set; }
}
private static void OnCustomPinsPropertyChanged(BindableObject bindable, object oldValue, object newValue)
{
CustomMap customMap = ((CustomMap)bindable);
if (customMap.CameraFocusParameter == CameraFocusReference.OnPins)
{
List<double> latitudes = new List<double>();
List<double> longitudes = new List<double>();
foreach (CustomPin pin in (newValue as List<CustomPin>))
{
latitudes.Add(pin.Position.Latitude);
longitudes.Add(pin.Position.Longitude);
}
double lowestLat = latitudes.Min();
double highestLat = latitudes.Max();
double lowestLong = longitudes.Min();
double highestLong = longitudes.Max();
double finalLat = (lowestLat + highestLat) / 2;
double finalLong = (lowestLong + highestLong) / 2;
double distance = DistanceCalculation.GeoCodeCalc.CalcDistance(lowestLat, lowestLong, highestLat, highestLong, DistanceCalculation.GeoCodeCalcMeasurement.Kilometers);
customMap.MoveToRegion(MapSpan.FromCenterAndRadius(new Position(finalLat, finalLong), Distance.FromKilometers(distance * 0.7)));
}
}
private class DistanceCalculation
{
public static class GeoCodeCalc
{
public const double EarthRadiusInMiles = 3956.0;
public const double EarthRadiusInKilometers = 6367.0;
public static double ToRadian(double val) { return val * (Math.PI / 180); }
public static double DiffRadian(double val1, double val2) { return ToRadian(val2) - ToRadian(val1); }
public static double CalcDistance(double lat1, double lng1, double lat2, double lng2)
{
return CalcDistance(lat1, lng1, lat2, lng2, GeoCodeCalcMeasurement.Miles);
}
public static double CalcDistance(double lat1, double lng1, double lat2, double lng2, GeoCodeCalcMeasurement m)
{
double radius = GeoCodeCalc.EarthRadiusInMiles;
if (m == GeoCodeCalcMeasurement.Kilometers) { radius = GeoCodeCalc.EarthRadiusInKilometers; }
return radius * 2 * Math.Asin(Math.Min(1, Math.Sqrt((Math.Pow(Math.Sin((DiffRadian(lat1, lat2)) / 2.0), 2.0) + Math.Cos(ToRadian(lat1)) * Math.Cos(ToRadian(lat2)) * Math.Pow(Math.Sin((DiffRadian(lng1, lng2)) / 2.0), 2.0)))));
}
}
public enum GeoCodeCalcMeasurement : int
{
Miles = 0,
Kilometers = 1
}
}
#endregion
Have fun !
UPDATE of DEC 2018
I worked on a solution which I hosted a long time ago in my repo, with the other POCs: https://github.com/Emixam23/XamarinByEmixam23/tree/master/Detailed%20Part/Controls/Map/MapPinsProject
I wanted a class that can convert one system to another.
I found source code in Python and tried to port it to C#.
This is the Python source, from here:
import math
class GlobalMercator(object):
def __init__(self, tileSize=256):
"Initialize the TMS Global Mercator pyramid"
self.tileSize = tileSize
self.initialResolution = 2 * math.pi * 6378137 / self.tileSize
# 156543.03392804062 for tileSize 256 pixels
self.originShift = 2 * math.pi * 6378137 / 2.0
# 20037508.342789244
def LatLonToMeters(self, lat, lon ):
"Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913"
mx = lon * self.originShift / 180.0
my = math.log( math.tan((90 + lat) * math.pi / 360.0 )) / (math.pi / 180.0)
my = my * self.originShift / 180.0
return mx, my
def MetersToLatLon(self, mx, my ):
"Converts XY point from Spherical Mercator EPSG:900913 to lat/lon in WGS84 Datum"
lon = (mx / self.originShift) * 180.0
lat = (my / self.originShift) * 180.0
lat = 180 / math.pi * (2 * math.atan( math.exp( lat * math.pi / 180.0)) - math.pi / 2.0)
return lat, lon
def PixelsToMeters(self, px, py, zoom):
"Converts pixel coordinates in given zoom level of pyramid to EPSG:900913"
res = self.Resolution( zoom )
mx = px * res - self.originShift
my = py * res - self.originShift
return mx, my
def MetersToPixels(self, mx, my, zoom):
"Converts EPSG:900913 to pyramid pixel coordinates in given zoom level"
res = self.Resolution( zoom )
px = (mx + self.originShift) / res
py = (my + self.originShift) / res
return px, py
def PixelsToTile(self, px, py):
"Returns a tile covering region in given pixel coordinates"
tx = int( math.ceil( px / float(self.tileSize) ) - 1 )
ty = int( math.ceil( py / float(self.tileSize) ) - 1 )
return tx, ty
def PixelsToRaster(self, px, py, zoom):
"Move the origin of pixel coordinates to top-left corner"
mapSize = self.tileSize << zoom
return px, mapSize - py
def MetersToTile(self, mx, my, zoom):
"Returns tile for given mercator coordinates"
px, py = self.MetersToPixels( mx, my, zoom)
return self.PixelsToTile( px, py)
def TileBounds(self, tx, ty, zoom):
"Returns bounds of the given tile in EPSG:900913 coordinates"
minx, miny = self.PixelsToMeters( tx*self.tileSize, ty*self.tileSize, zoom )
maxx, maxy = self.PixelsToMeters( (tx+1)*self.tileSize, (ty+1)*self.tileSize, zoom )
return ( minx, miny, maxx, maxy )
def TileLatLonBounds(self, tx, ty, zoom ):
"Returns bounds of the given tile in latutude/longitude using WGS84 datum"
bounds = self.TileBounds( tx, ty, zoom)
minLat, minLon = self.MetersToLatLon(bounds[0], bounds[1])
maxLat, maxLon = self.MetersToLatLon(bounds[2], bounds[3])
return ( minLat, minLon, maxLat, maxLon )
def Resolution(self, zoom ):
"Resolution (meters/pixel) for given zoom level (measured at Equator)"
# return (2 * math.pi * 6378137) / (self.tileSize * 2**zoom)
return self.initialResolution / (2**zoom)
def ZoomForPixelSize(self, pixelSize ):
"Maximal scaledown zoom of the pyramid closest to the pixelSize."
for i in range(30):
if pixelSize > self.Resolution(i):
return i-1 if i!=0 else 0 # We don't want to scale up
def GoogleTile(self, tx, ty, zoom):
"Converts TMS tile coordinates to Google Tile coordinates"
# coordinate origin is moved from bottom-left to top-left corner of the extent
return tx, (2**zoom - 1) - ty
def QuadTree(self, tx, ty, zoom ):
"Converts TMS tile coordinates to Microsoft QuadTree"
quadKey = ""
ty = (2**zoom - 1) - ty
for i in range(zoom, 0, -1):
digit = 0
mask = 1 << (i-1)
if (tx & mask) != 0:
digit += 1
if (ty & mask) != 0:
digit += 2
quadKey += str(digit)
return quadKey
Here is my C# port.
public class GlobalMercator {
private Int32 TileSize;
private Double InitialResolution;
private Double OriginShift;
private const Int32 EarthRadius = 6378137;
public GlobalMercator() {
TileSize = 256;
InitialResolution = 2 * Math.PI * EarthRadius / TileSize;
OriginShift = 2 * Math.PI * EarthRadius / 2;
}
public DPoint LatLonToMeters(Double lat, Double lon) {
var p = new DPoint();
p.X = lon * OriginShift / 180;
p.Y = Math.Log(Math.Tan((90 + lat) * Math.PI / 360)) / (Math.PI / 180);
p.Y = p.Y * OriginShift / 180;
return p;
}
public GeoPoint MetersToLatLon(DPoint m) {
var ll = new GeoPoint();
ll.Longitude = (m.X / OriginShift) * 180;
ll.Latitude = (m.Y / OriginShift) * 180;
ll.Latitude = 180 / Math.PI * (2 * Math.Atan(Math.Exp(ll.Latitude * Math.PI / 180)) - Math.PI / 2);
return ll;
}
public DPoint PixelsToMeters(DPoint p, Int32 zoom) {
var res = Resolution(zoom);
var met = new DPoint();
met.X = p.X * res - OriginShift;
met.Y = p.Y * res - OriginShift;
return met;
}
public DPoint MetersToPixels(DPoint m, Int32 zoom) {
var res = Resolution(zoom);
var pix = new DPoint();
pix.X = (m.X + OriginShift) / res;
pix.Y = (m.Y + OriginShift) / res;
return pix;
}
public Point PixelsToTile(DPoint p) {
var t = new Point();
t.X = (Int32)Math.Ceiling(p.X / (Double)TileSize) - 1;
t.Y = (Int32)Math.Ceiling(p.Y / (Double)TileSize) - 1;
return t;
}
public Point PixelsToRaster(Point p, Int32 zoom) {
var mapSize = TileSize << zoom;
return new Point(p.X, mapSize - p.Y);
}
public Point MetersToTile(Point m, Int32 zoom) {
var p = MetersToPixels(m, zoom);
return PixelsToTile(p);
}
public Pair<DPoint> TileBounds(Point t, Int32 zoom) {
var min = PixelsToMeters(new DPoint(t.X * TileSize, t.Y * TileSize), zoom);
var max = PixelsToMeters(new DPoint((t.X + 1) * TileSize, (t.Y + 1) * TileSize), zoom);
return new Pair<DPoint>(min, max);
}
public Pair<GeoPoint> TileLatLonBounds(Point t, Int32 zoom) {
var bound = TileBounds(t, zoom);
var min = MetersToLatLon(bound.Min);
var max = MetersToLatLon(bound.Max);
return new Pair<GeoPoint>(min, max);
}
public Double Resolution(Int32 zoom) {
return InitialResolution / (2 ^ zoom);
}
public Double ZoomForPixelSize(Double pixelSize) {
for (var i = 0; i < 30; i++)
if (pixelSize > Resolution(i))
return i != 0 ? i - 1 : 0;
throw new InvalidOperationException();
}
public Point ToGoogleTile(Point t, Int32 zoom) {
return new Point(t.X, ((Int32)Math.Pow(2, zoom) - 1) - t.Y);
}
public Point ToTmsTile(Point t, Int32 zoom) {
return new Point(t.X, ((Int32)Math.Pow(2, zoom) - 1) - t.Y);
}
}
public struct Point {
public Point(Int32 x, Int32 y)
: this() {
X = x;
Y = y;
}
public Int32 X { get; set; }
public Int32 Y { get; set; }
}
public struct DPoint {
public DPoint(Double x, Double y)
: this() {
this.X = x;
this.Y = y;
}
public Double X { get; set; }
public Double Y { get; set; }
public static implicit operator DPoint(Point p) {
return new DPoint(p.X, p.Y);
}
}
public class GeoPoint {
public Double Latitude { get; set; }
public Double Longitude { get; set; }
}
public class Pair<T> {
public Pair() {}
public Pair(T min, T max) {
Min = min;
Max = max;
}
public T Min { get; set; }
public T Max { get; set; }
}
I have two questions.
Did I port the code correctly? (I intentionally omitted one method, as I don't use it, and added one of my own.)
Here I have coordinates
WGS84 datum (longitude/latitude):
-123.75 36.59788913307022
-118.125 40.97989806962013
Spherical Mercator (meters):
-13775786.985667605 4383204.9499851465
-13149614.849955441 5009377.085697312
Pixels
2560 6144 2816 6400
Google
x:10, y:24, z:6
TMS
x:10, y:39, z:6
QuadTree
023010
How should I chain the methods so that I get the spherical Mercator meters from Google's tile coordinates (10, 24, 6)?
Update
Finding answer for my second question is more important for me.
There's at least one bug in your class:
public Double Resolution(Int32 zoom) {
return InitialResolution / (2 ^ zoom); // BAD CODE, USE Math.Pow, not ^
}
Where you've mistaken the binary XOR operator for the exponent operator.
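With the operator fixed, that method becomes (as in the rewritten class below):
public Double Resolution(Int32 zoom) {
    return InitialResolution / Math.Pow(2, zoom);
}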
I've rewritten the code, made most functions static, and added a few more relevant functions:
/// <summary>
/// Conversion routines for Google, TMS, and Microsoft Quadtree tile representations, derived from
/// http://www.maptiler.org/google-maps-coordinates-tile-bounds-projection/
/// </summary>
public class WebMercator
{
private const int TileSize = 256;
private const int EarthRadius = 6378137;
private const double InitialResolution = 2 * Math.PI * EarthRadius / TileSize;
private const double OriginShift = 2 * Math.PI * EarthRadius / 2;
//Converts given lat/lon in WGS84 Datum to XY in Spherical Mercator EPSG:900913
public static Point LatLonToMeters(double lat, double lon)
{
var p = new Point();
p.X = lon * OriginShift / 180;
p.Y = Math.Log(Math.Tan((90 + lat) * Math.PI / 360)) / (Math.PI / 180);
p.Y = p.Y * OriginShift / 180;
return p;
}
//Converts XY point from (Spherical) Web Mercator EPSG:3785 (unofficially EPSG:900913) to lat/lon in WGS84 Datum
public static Point MetersToLatLon(Point m)
{
var ll = new Point();
ll.X = (m.X / OriginShift) * 180;
ll.Y = (m.Y / OriginShift) * 180;
ll.Y = 180 / Math.PI * (2 * Math.Atan(Math.Exp(ll.Y * Math.PI / 180)) - Math.PI / 2);
return ll;
}
//Converts pixel coordinates in given zoom level of pyramid to EPSG:900913
public static Point PixelsToMeters(Point p, int zoom)
{
var res = Resolution(zoom);
var met = new Point();
met.X = p.X * res - OriginShift;
met.Y = p.Y * res - OriginShift;
return met;
}
//Converts EPSG:900913 to pyramid pixel coordinates in given zoom level
public static Point MetersToPixels(Point m, int zoom)
{
var res = Resolution(zoom);
var pix = new Point();
pix.X = (m.X + OriginShift) / res;
pix.Y = (m.Y + OriginShift) / res;
return pix;
}
//Returns a TMS (NOT Google!) tile covering region in given pixel coordinates
public static Tile PixelsToTile(Point p)
{
var t = new Tile();
t.X = (int)Math.Ceiling(p.X / (double)TileSize) - 1;
t.Y = (int)Math.Ceiling(p.Y / (double)TileSize) - 1;
return t;
}
public static Point PixelsToRaster(Point p, int zoom)
{
var mapSize = TileSize << zoom;
return new Point(p.X, mapSize - p.Y);
}
//Returns tile for given mercator coordinates
public static Tile MetersToTile(Point m, int zoom)
{
var p = MetersToPixels(m, zoom);
return PixelsToTile(p);
}
//Returns bounds of the given tile in EPSG:900913 coordinates
public static Rect TileBounds(Tile t, int zoom)
{
var min = PixelsToMeters(new Point(t.X * TileSize, t.Y * TileSize), zoom);
var max = PixelsToMeters(new Point((t.X + 1) * TileSize, (t.Y + 1) * TileSize), zoom);
return new Rect(min, max);
}
//Returns bounds of the given tile in latitude/longitude using WGS84 datum
public static Rect TileLatLonBounds(Tile t, int zoom)
{
var bound = TileBounds(t, zoom);
var min = MetersToLatLon(new Point(bound.Left, bound.Top));
var max = MetersToLatLon(new Point(bound.Right, bound.Bottom));
return new Rect(min, max);
}
//Resolution (meters/pixel) for given zoom level (measured at Equator)
public static double Resolution(int zoom)
{
return InitialResolution / (Math.Pow(2, zoom));
}
public static double ZoomForPixelSize(double pixelSize)
{
for (var i = 0; i < 30; i++)
if (pixelSize > Resolution(i))
return i != 0 ? i - 1 : 0;
throw new InvalidOperationException();
}
// Switch to Google Tile representation from TMS
public static Tile ToGoogleTile(Tile t, int zoom)
{
return new Tile(t.X, ((int)Math.Pow(2, zoom) - 1) - t.Y);
}
// Switch to TMS Tile representation from Google
public static Tile ToTmsTile(Tile t, int zoom)
{
return new Tile(t.X, ((int)Math.Pow(2, zoom) - 1) - t.Y);
}
//Converts TMS tile coordinates to Microsoft QuadTree
public static string QuadTree(int tx, int ty, int zoom)
{
var quadtree = "";
ty = ((1 << zoom) - 1) - ty;
for (var i = zoom; i >= 1; i--)
{
var digit = 0;
var mask = 1 << (i - 1);
if ((tx & mask) != 0)
digit += 1;
if ((ty & mask) != 0)
digit += 2;
quadtree += digit;
}
return quadtree;
}
//Converts a quadtree to tile coordinates
public static Tile QuadTreeToTile(string quadtree, int zoom)
{
int tx= 0, ty = 0;
for (var i = zoom; i >= 1; i--)
{
var ch = quadtree[zoom - i];
var mask = 1 << (i - 1);
var digit = ch - '0';
if ((digit & 1) != 0)
tx += mask;
if ((digit & 2) != 0)
ty += mask;
}
ty = ((1 << zoom) - 1) - ty;
return new Tile(tx, ty);
}
//Converts a latitude and longitude to quadtree at the specified zoom level
public static string LatLonToQuadTree(Point latLon, int zoom)
{
Point m = LatLonToMeters(latLon.Y, latLon.X);
Tile t = MetersToTile(m, zoom);
return QuadTree(t.X, t.Y, zoom);
}
//Converts a quadtree location into a latitude/longitude bounding rectangle
public static Rect QuadTreeToLatLon(string quadtree)
{
int zoom = quadtree.Length;
Tile t = QuadTreeToTile(quadtree, zoom);
return TileLatLonBounds(t, zoom);
}
//Returns a list of all of the quadtree locations at a given zoom level within a latitude/longude box
public static List<string> GetQuadTreeList(int zoom, Point latLonMin, Point latLonMax)
{
if (latLonMax.Y< latLonMin.Y|| latLonMax.X< latLonMin.X)
return null;
Point mMin = LatLonToMeters(latLonMin.Y, latLonMin.X);
Tile tmin = MetersToTile(mMin, zoom);
Point mMax = LatLonToMeters(latLonMax.Y, latLonMax.X);
Tile tmax = MetersToTile(mMax, zoom);
var arr = new List<string>();
for (var ty = tmin.Y; ty <= tmax.Y; ty++)
{
for (var tx = tmin.X; tx <= tmax.X; tx++)
{
var quadtree = QuadTree(tx, ty, zoom);
arr.Add(quadtree);
}
}
return arr;
}
}
/// <summary>
/// Reference to a Tile X, Y index
/// </summary>
public class Tile
{
public Tile() { }
public Tile(int x, int y)
{
X = x;
Y = y;
}
public int X { get; set; }
public int Y { get; set; }
}
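As a quick sanity check against the question's own data, the TMS tile (10, 39) at zoom 6 yields the quadkey listed in the question:
var quadKey = WebMercator.QuadTree(10, 39, 6); // "023010"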
To solve your second question, use the following sequence:
int zoom = 6;
Tile googleTile = new Tile(10,24);
Tile tmsTile = GlobalMercator.ToTmsTile(googleTile, zoom);
Rect r3 = GlobalMercator.TileLatLonBounds(tmsTile, zoom);
var tl = GlobalMercator.LatLonToMeters(r3.Top, r3.Left);
var br = GlobalMercator.LatLonToMeters(r3.Bottom, r3.Right);
Debug.WriteLine("{0:0.000} {1:0.000}", tl.X, tl.Y);
Debug.WriteLine("{0:0.000} {1:0.000}", br.X, br.Y);
// -13775787.000 4383205.000
// -13149615.000 5009376.500
The best open-source solution for converting coordinates from one projection to another is Proj4, originally written in C but ported to numerous programming languages. The port to C# that I have tried and used is DotSpatial Projections, found on CodePlex. It is easy to find out how to use it based on the examples. The only thing you need to know are the conversion parameters for your case.
CoordinateSharp is available on NuGet. It's lightweight and makes coordinate conversions really easy. It's not designed for mapping, but for straight-up conversions (and location-based celestial information), if that is a factor.
Example
Coordinate c = new Coordinate(myLat, myLong);
Console.WriteLine(c.UTM);         // outputs the UTM string
Console.WriteLine(c.UTM.Easting); // UTM easting property
A couple of pointers for anyone reading who wants to use Oybek's excellent code:
You need to add using System.Windows, and also add a reference to the WindowsBase assembly; otherwise VS won't find Point and Rect.
Note that you must not use System.Drawing.
And here's a new function that will convert a lat/lng plus zoom level to a Google tile:
public static Tile LatLonToGoogleTile(Point latLon, int zoom)
{
Point m = LatLonToMeters(latLon.Y, latLon.X);
Tile t = MetersToTile(m, zoom);
return ToGoogleTile(t, zoom);
}
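A hypothetical usage, assuming the function is added to the WebMercator class above and using a point inside the question's example tile (Point holds X = longitude, Y = latitude):
Point latLon = new Point(-121.0, 38.0); // lon, lat
Tile googleTile = WebMercator.LatLonToGoogleTile(latLon, 6);
// googleTile should come out as (10, 24), matching the Google tile in the question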