fit a line to 3D data with weighted regression - c#

I am trying to fit a 3D linear regression LINE using singular value decomposition (SVD).
This works fine. Now I'd like to generalise the method to weighted regression.
How do I calculate the optimal beta with SVD? Below is my C# code, which uses the CSML package. The LinearRegression() function works just fine; WeightedLinearRegression() does not, and I assume this is because I cannot simply define
weighted_mult_mat = M_tr * W * M;
CODE:
public static Vector3D WeightedLinearRegression(List<Point3D> pts, List<double> weights) {
    // normalise the weights so they sum to 1
    double sum = weights.Sum();
    if (sum != weights.Count) {
        for (int i = 0; i < weights.Count; i++) {
            weights[i] = weights[i] / sum;
        }
    }
    Point3D avg = pts.average();

    // populate matrix M with the centred points
    CSML.Matrix M = new CSML.Matrix(pts.Count, 3);
    for (int i = 1; i < pts.Count + 1; i++) {
        M[i, 1] = new Complex(pts[i - 1].X - avg.X);
        M[i, 2] = new Complex(pts[i - 1].Y - avg.Y);
        M[i, 3] = new Complex(pts[i - 1].Z - avg.Z);
    }
    CSML.Matrix M_tr = M.Transpose();

    // populate the diagonal weights matrix W
    CSML.Matrix W = new CSML.Matrix(pts.Count, pts.Count);
    for (int i = 1; i < pts.Count + 1; i++) {
        W[i, i] = new Complex(weights[i - 1]);
    }

    // compute M^T * W * M
    CSML.Matrix weighted_mult_mat = new CSML.Matrix();
    weighted_mult_mat = M_tr * W;
    weighted_mult_mat = weighted_mult_mat * M;

    // copy into a Math.NET dense matrix for the SVD
    var weighted_dense_mat = new DenseMatrix(3, 3);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            weighted_dense_mat[i, j] = weighted_mult_mat[i + 1, j + 1].Re;
        }
    }

    // the singular vector belonging to the largest singular value is the line direction
    var weighted_svd = weighted_dense_mat.Svd(true);
    var weighted_vt = weighted_svd.VT;
    Vector3D weighted_dirVect = new Vector3D(weighted_vt[0, 0], weighted_vt[0, 1], weighted_vt[0, 2]);
    weighted_dirVect.Normalize();
    return weighted_dirVect;
}
public static Vector3D LinearRegression(List<Point3D> pts) {
    Point3D avg = pts.average();

    // populate matrix M with the centred points
    CSML.Matrix M = new CSML.Matrix(pts.Count, 3);
    for (int i = 1; i < pts.Count + 1; i++) {
        M[i, 1] = new Complex(pts[i - 1].X - avg.X);
        M[i, 2] = new Complex(pts[i - 1].Y - avg.Y);
        M[i, 3] = new Complex(pts[i - 1].Z - avg.Z);
    }
    CSML.Matrix M_tr = M.Transpose();

    // compute M^T * M
    CSML.Matrix mult_mat = new CSML.Matrix();
    mult_mat = M_tr * M;

    // copy into a Math.NET dense matrix for the SVD
    var dense_mat = new DenseMatrix(3, 3);
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
            dense_mat[i, j] = mult_mat[i + 1, j + 1].Re;
        }
    }

    var svd = dense_mat.Svd(true);
    var vt = svd.VT;
    Vector3D dirVect = new Vector3D(vt[0, 0], vt[0, 1], vt[0, 2]);
    dirVect.Normalize();
    return dirVect;
}
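
One thing worth checking, offered as an assumption rather than a confirmed fix: M_tr * W * M is the usual weighted scatter matrix, but it only measures spread about the right point if the rows of M are centred on the weighted centroid, while the code above centres them on the plain average. A minimal sketch of such a helper, using the same Point3D type:

    // Hypothetical helper, not part of the original post: the weighted
    // centroid that the centred matrix M should arguably be built around.
    static Point3D WeightedAverage(List<Point3D> pts, List<double> weights)
    {
        double sum = weights.Sum();
        double x = 0, y = 0, z = 0;
        for (int i = 0; i < pts.Count; i++)
        {
            x += weights[i] * pts[i].X;
            y += weights[i] * pts[i].Y;
            z += weights[i] * pts[i].Z;
        }
        return new Point3D(x / sum, y / sum, z / sum);
    }

With that in place, avg in WeightedLinearRegression() would become WeightedAverage(pts, weights) and the rest of the function stays unchanged.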

Related

Is it possible to write a neural network for different sizes of training data inputs and outputs?

Is it possible to write a neural network that accepts training data of varying sizes for inputs and outputs?
For example:
inputs are 1. (1,2,3,4), 2. (2,3,1), 3. (1,2,3,4,5) and so on...
and the same for outputs: 1. (0,0,1,1), 2. (1,1,1), 3. (0,0,1,1,1)
So far I have only managed to write one that works when all the training data has the same length.
This is where I'm stuck:
NeuralNetwork net;
int[] layers = new int[3]
{
    3, /* always the same */
    1, /* always the same */
    3  /* always the same */
};
string[] activation = new string[2] { "leakyrelu", "leakyrelu" };
net = new NeuralNetwork(layers, activation);
What I need
NeuralNetwork net1;
int[] layers1 = new int[3]
{
    input.Length,  /* based on the input's length */
    1,             /* always the same */
    output.Length  /* based on the output's length */
};
string[] activation1 = new string[2] { "leakyrelu", "leakyrelu" };
net1 = new NeuralNetwork(layers1, activation1);
// BackPropagate
public void BackPropagate(float[] inputs, float[] expected)
{
    float[] output = FeedForward(inputs);

    // squared-error cost, for monitoring only
    cost = 0;
    for (int i = 0; i < output.Length; i++) cost += (float)Math.Pow(output[i] - expected[i], 2);
    cost = cost / 2;

    // gamma holds the error term for every neuron, layer by layer
    float[][] gamma;
    List<float[]> gammaList = new List<float[]>();
    for (int i = 0; i < layers.Length; i++)
    {
        gammaList.Add(new float[layers[i]]);
    }
    gamma = gammaList.ToArray();

    // output layer: error terms, then bias and weight updates
    int layer = layers.Length - 2;
    for (int i = 0; i < output.Length; i++)
        gamma[layers.Length - 1][i] = (output[i] - expected[i]) * activateDer(output[i], layer);
    for (int i = 0; i < neurons[layers.Length - 1].Length; i++)
    {
        biases[layers.Length - 1][i] -= gamma[layers.Length - 1][i] * learningRate;
        for (int j = 0; j < neurons[layers.Length - 2].Length; j++)
        {
            weights[layers.Length - 2][i][j] -= gamma[layers.Length - 1][i] * neurons[layers.Length - 2][j] * learningRate;
        }
    }

    // hidden layers: backpropagate the error terms
    for (int i = layers.Length - 2; i > 0; i--)
    {
        layer = i - 1;
        for (int j = 0; j < neurons[i].Length; j++)
        {
            gamma[i][j] = 0;
            for (int k = 0; k < gamma[i + 1].Length; k++)
            {
                gamma[i][j] += gamma[i + 1][k] * weights[i][k][j]; // accumulate; "=" here would drop all but the last term
            }
            gamma[i][j] *= activateDer(neurons[i][j], layer);
        }
        for (int j = 0; j < neurons[i].Length; j++)
        {
            biases[i][j] -= gamma[i][j] * learningRate;
            for (int k = 0; k < neurons[i - 1].Length; k++)
            {
                weights[i - 1][j][k] -= gamma[i][j] * neurons[i - 1][k] * learningRate;
            }
        }
    }
}
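
A plain dense network cannot resize its layers per sample, so the usual workaround, sketched here under the assumption that the samples have a known maximum length (PadToLength is a hypothetical helper, not part of the post), is to zero-pad every sample up to that maximum and size the network once:

    // Hypothetical helper: zero-pads a sample so one fixed-size network
    // can consume variable-length training data.
    static float[] PadToLength(float[] sample, int maxLength)
    {
        float[] padded = new float[maxLength]; // zero-initialised by default
        Array.Copy(sample, padded, sample.Length);
        return padded;
    }

The layers would then be sized once from the maximum, e.g. int[] layers = { maxLen, 1, maxLen };. Truly variable-length handling (recurrent architectures, for example) would be a much larger change than the fixed-size class shown here.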

How can I generate the blanking and sync signals based on my code?

I do not know how to generate the blanking and sync signals for my PAL signal. I am working directly with pixel values from an image and putting them into the formulas from the standard.
I took the pixel R, G and B values and computed the luminance Y, then tried to build the sync and blanking signal manually, but with no results.
// load values
float[,] rValues = new float[picture1.Width, picture1.Height];
float[,] gValues = new float[picture1.Width, picture1.Height];
float[,] bValues = new float[picture1.Width, picture1.Height];
using (Bitmap bmp = new Bitmap(picture1))
{
    for (int i = 0; i < bmp.Width; i++)
    {
        for (int j = 0; j < bmp.Height; j++)
        {
            Color clr = bmp.GetPixel(i, j);
            rValues[i, j] = clr.R;
            gValues[i, j] = clr.G;
            bValues[i, j] = clr.B;
        }
    }
}
// flatten the matrices into 1D arrays, row by row
double[] r = new double[picture1.Height * picture1.Width];
double[] g = new double[picture1.Height * picture1.Width];
double[] b = new double[picture1.Height * picture1.Width];
int k = 0;
for (int i = 0; i < picture1.Height; i++)
{
    for (int j = 0; j < picture1.Width; j++)
    {
        r[k] = rValues[j, i];
        g[k] = gValues[j, i];
        b[k] = bValues[j, i];
        k++;
    }
}
// calculate the luminance Y
double[] Y = new double[picture1.Height * picture1.Width];
for (int i = 0; i < Y.Length; i++)
{
    Y[i] = 0.3 * r[i] + 0.59 * g[i] + 0.11 * b[i];
}
// attempt at a manual sync pulse
double[] sync = new double[135];
for (int i = 0; i < 135; i++)
{
    if (i < 17)
    {
        sync[i] = 0;
    }
    if (i > 16 && i < 70)
    {
        sync[i] = -0.3;
    }
    if (i > 69)
    {
        sync[i] = 0;
    }
}
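
For what it's worth, a composite signal is usually assembled per scanline rather than for the whole image: blanking sits at 0 V, the sync tip at -0.3 V, and active video spans 0 to 0.7 V. Below is a rough sketch under assumed nominal PAL timings (64 us line: 4.7 us sync, about 1.65 us front porch, 5.7 us back porch) with an arbitrary sample rate; the sample counts are illustrative, not taken from the code above, and field (vertical) sync is a separate pattern on top of this.

    // Sketch only: assembles one PAL-style scanline from a row of luminance
    // samples. samplesPerUs is an arbitrary, assumed sample rate.
    static double[] BuildScanline(double[] activeLuma, int samplesPerUs)
    {
        var line = new System.Collections.Generic.List<double>();
        int frontPorch = (int)(1.65 * samplesPerUs); // blanking before sync
        int syncTip = (int)(4.7 * samplesPerUs);     // line sync pulse
        int backPorch = (int)(5.7 * samplesPerUs);   // blanking after sync

        for (int i = 0; i < frontPorch; i++) line.Add(0.0);  // blanking level
        for (int i = 0; i < syncTip; i++) line.Add(-0.3);    // sync tip level
        for (int i = 0; i < backPorch; i++) line.Add(0.0);   // blanking level
        foreach (double y in activeLuma)
            line.Add(0.7 * y / 255.0);                       // Y scaled to 0..0.7 V
        return line.ToArray();
    }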

How to make this more clean

I have a big chunk of code like this that iterates through an array:
void GetSpawnablePosition() {
    Vector2[] coordX = { Vector2.up, Vector2.down };
    Vector2[] coordY = { Vector2.left, Vector2.right };
    for (int i = 0; i < coordY.Length; i++)
    {
        Vector2[] newArray = new Vector2[enemyGrid.grid[0].Length - 2];
        if (coordY[i] == Vector2.left)
        {
            for (int j = 0; j < enemyGrid.grid[0].Length - 2; j++)
            {
                newArray[j] = new Vector2(0, j + 1);
            }
        }
        if (coordY[i] == Vector2.right)
        {
            for (int j = 0; j < enemyGrid.grid[0].Length - 2; j++)
            {
                newArray[j] = new Vector2(enemyGrid.grid[0].Length - 1, j + 1);
            }
        }
        spawnablePosition.Add(coordY[i], newArray);
    }
    for (int i = 0; i < coordX.Length; i++) // fixed: this loop indexed coordX but iterated coordY.Length
    {
        Vector2[] newArray = new Vector2[enemyGrid.grid.Length - 1];
        if (coordX[i] == Vector2.down)
        {
            for (int j = 0; j <= enemyGrid.grid.Length - 2; j++)
            {
                newArray[j] = new Vector2(j + 1, 0);
            }
        }
        if (coordX[i] == Vector2.up)
        {
            for (int j = 0; j <= enemyGrid.grid.Length - 2; j++)
            {
                newArray[j] = new Vector2(j + 1, enemyGrid.grid[0].Length - 1);
            }
        }
        spawnablePosition.Add(coordX[i], newArray);
    }
}
The snippet is supposed to take the x and y indices of a grid and put them in a dictionary like this:
Vector2.up => [[0][1],[0][2],[0][3],[0][4],[0][5]]
Vector2.left => [[1][0],[2][0],[3][0],[4][0],[5][0]]
Vector2.right => [[1][6],[2][6],[3][6],[4][6],[5][6]]
Vector2.down => [[6][1],[6][2],[6][3],[6][4],[6][5]]
I tried to refactor it to make it smaller or clearer, but I honestly can't find a good solution that makes that big thing smaller.
Can someone help me?
Something like:
var yLength = enemyGrid.grid[0].Length;
var xLength = enemyGrid.grid.Length;
spawnablePosition.Add(Vector2.left, Enumerable.Range(1, yLength - 2).Select(y => new Vector2(0, y)).ToArray());
spawnablePosition.Add(Vector2.right, Enumerable.Range(1, yLength - 2).Select(y => new Vector2(xLength - 1, y)).ToArray());
spawnablePosition.Add(Vector2.up, Enumerable.Range(1, xLength - 2).Select(x => new Vector2(x, 0)).ToArray());
spawnablePosition.Add(Vector2.down, Enumerable.Range(1, xLength - 2).Select(x => new Vector2(x, yLength - 1)).ToArray());
Double-check the Range counts against the array lengths your original loops produce; the ranges above exclude the border cells on both ends, matching the expected output you listed.
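
For the LINQ version to compile, something like the following declarations are assumed (hypothetical, since neither post shows them):

    using System.Linq;
    using System.Collections.Generic;
    using UnityEngine;

    // Maps a border direction to the spawnable cells along that border.
    Dictionary<Vector2, Vector2[]> spawnablePosition = new Dictionary<Vector2, Vector2[]>();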

Harris Corner Detection highlighting all edges, not corners

I need to implement Harris corner detection, which should give positive R values at corners. If I use a threshold of R > 0, it highlights nothing; if R < 0, however, anywhere from -0.00001 to -1000000, it outputs perfect edge detection but no corners.
Input and output:
http://imgur.com/a/TtQSD
Main Code:
Bitmap b = (Bitmap)pictureBox1.Image;
var i = IntensityMat(b);
var n = Mask(i,-0.00000001);
var ty = ToBit(n);
pictureBox1.Image = ty;
Functions:
public double Intensity(Color c)
{
    int x = c.R;
    int y = c.G;
    int z = c.B;
    return Math.Sqrt(x * x + y * y + z * z);
}

public double Trace(double[,] m)
{
    double sum = 0;
    for (int i = 0; i < m.GetLength(0); i++)
    {
        sum += m[i, i];
    }
    return sum;
}

public double[,] M(double ix, double iy)
{
    double[,] m = new double[2, 2];
    m[0, 0] = ix * ix;
    m[1, 1] = iy * iy;
    m[0, 1] = ix * iy;
    m[1, 0] = ix * iy;
    return m;
}

public double Det(double[,] m)
{
    return m[1, 1] * m[0, 0] - m[0, 1] * m[1, 0];
}

public double R(double[,] m, double k = 0.04)
{
    var t = Trace(m);
    return Det(m) - k * t * t;
}

int[,] IntensityMat(Bitmap b)
{
    int[,] n = new int[b.Width, b.Height];
    for (int i = 0; i < b.Width; i++)
    {
        for (int j = 0; j < b.Height; j++)
        {
            Color c = b.GetPixel(i, j);
            n[i, j] = (int)Intensity(c);
        }
    }
    return n;
}

Bitmap ToBit(int[,] bd)
{
    Bitmap b = new Bitmap(bd.GetLength(0), bd.GetLength(1));
    for (int i = 0; i < b.Width; i++)
    {
        for (int j = 0; j < b.Height; j++)
        {
            var t = bd[i, j];
            b.SetPixel(i, j, Color.FromArgb(t, t, t));
        }
    }
    return b;
}

int[,] Mask(int[,] m, double thresh) // m is the intensity matrix
{
    int[,] n = new int[m.GetLength(0), m.GetLength(1)];
    for (int i = 1; i < m.GetLength(0); i++)
    {
        for (int j = 1; j < m.GetLength(1); j++)
        {
            double ix = Math.Abs(m[i - 1, j] - m[i, j]);
            double iy = Math.Abs(m[i, j - 1] - m[i, j]);
            var lap = M(ix, iy);
            var r = R(lap);
            if (r > thresh)
            {
                n[i, j] = 255;
            }
        }
    }
    return n;
}
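
What the code above actually computes explains the symptom (an observation about the math, offered as analysis rather than a tested fix): M is built from a single pixel's gradients, so det(M) = ix^2 * iy^2 - (ix * iy)^2 is identically zero, and R = det(M) - k * trace(M)^2 collapses to -k * trace(M)^2, which can never be positive. Edges have a large trace, so thresholding on a negative R highlights exactly the edges. Harris needs the products ix^2, iy^2 and ix * iy summed (ideally Gaussian-weighted) over a window around each pixel before det and trace are taken. A rough sketch of a windowed Mask under those assumptions, reusing the intensity matrix from above and keeping the gradients signed (Math.Abs would distort the cross term):

    // Sketch, not tested: sums the gradient products over a (2*radius+1)^2
    // window before forming the structure tensor, so det(M) is no longer
    // identically zero and corners can yield R > 0.
    int[,] MaskWindowed(int[,] m, double thresh, int radius = 2)
    {
        int w = m.GetLength(0), h = m.GetLength(1);
        int[,] n = new int[w, h];
        for (int i = radius + 1; i < w - radius; i++)
        {
            for (int j = radius + 1; j < h - radius; j++)
            {
                double sxx = 0, syy = 0, sxy = 0;
                for (int u = -radius; u <= radius; u++)
                {
                    for (int v = -radius; v <= radius; v++)
                    {
                        double ix = m[i + u - 1, j + v] - m[i + u, j + v];
                        double iy = m[i + u, j + v - 1] - m[i + u, j + v];
                        sxx += ix * ix;
                        syy += iy * iy;
                        sxy += ix * iy;
                    }
                }
                double trace = sxx + syy;
                double r = (sxx * syy - sxy * sxy) - 0.04 * trace * trace;
                if (r > thresh) n[i, j] = 255;
            }
        }
        return n;
    }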

Unstable calculation error

I need to calculate the matrix ( X^(T) * X )^(-1).
Legend for the code and comments:
x is a double[,] array;
xT is the transposed matrix;
^(-1) denotes the inverted matrix.
Every time I run it I generate a new random matrix to work with, and I have found the program is very unstable: it doesn't work properly with arbitrary input data. I am sure about this because I should get the identity matrix at the end if everything is fine, but sometimes the inverted matrix is so bad that I get nothing like an identity matrix. This puzzles me because I always use the same type of data and never convert anything. The compiler is MVS 2010. I hope you can help me.
Here is my Program.cs:
static void Main(string[] args)
{
    Matrix x = new Matrix(5, 4);
    //Matrix temp = new Matrix(x.Row, x.Col);
    //double[] y = new double[x.Row];
    //double[] b = new double[x.Row];

    // this data isn't calculated correctly; used for debugging
    x.MatrixX[0, 0] = 7; x.MatrixX[0, 1] = 6; x.MatrixX[0, 2] = 5; x.MatrixX[0, 3] = 8;
    x.MatrixX[1, 0] = 7; x.MatrixX[1, 1] = 5; x.MatrixX[1, 2] = 8; x.MatrixX[1, 3] = 5;
    x.MatrixX[2, 0] = 6; x.MatrixX[2, 1] = 8; x.MatrixX[2, 2] = 6; x.MatrixX[2, 3] = 8;
    x.MatrixX[3, 0] = 8; x.MatrixX[3, 1] = 5; x.MatrixX[3, 2] = 8; x.MatrixX[3, 3] = 7;
    x.MatrixX[4, 0] = 8; x.MatrixX[4, 1] = 5; x.MatrixX[4, 2] = 6; x.MatrixX[4, 3] = 7;
    /*
        7,00000  6,00000  5,00000  8,00000
        7,00000  5,00000  8,00000  5,00000
        6,00000  8,00000  6,00000  8,00000
        8,00000  5,00000  8,00000  7,00000
        8,00000  5,00000  6,00000  7,00000
    */
    // random matrix generation
    /*
    Random rnd = new Random();
    for (int i = 0; i < x.Row; i++)
        for (int j = 0; j < x.Col; j++)
            x.MatrixX[i, j] = rnd.Next(5, 10);
    */
    /* I'm going to calculate ( X^(T) * X )^(-1):
     * 1. transpose X
     * 2. multiply X and (1)
     * 3. invert matrix (2)
     * 4. check the result: multiplying (2) by (3) should give the identity matrix
     */
    Matrix.Display(x);
    //1
    Matrix xt = Matrix.Transpose(x);
    Matrix.Display(xt);
    //2
    Matrix xxt = Matrix.Multiply(x, xt);
    Matrix.Display(xxt);
    //3
    Matrix xxtinv = Matrix.Invert(Matrix.Multiply(x, xt));
    Matrix.Display(xxtinv);
    //4
    Console.WriteLine("Invert(xxt) * xxt. IdentityMatrix:");
    Matrix IdentityMatrix = Matrix.Multiply(xxtinv, xxt);
    Matrix.Display(IdentityMatrix);
    Console.ReadKey();
}
And here is Matrix.cs with all functions:
public class Matrix
{
    private double[,] matrix;
    private int row;
    private int col;

    #region constructors
    public Matrix(int Row, int Col)
    {
        this.row = Row;
        this.col = Col;
        matrix = new double[Row, Col];
    }
    public Matrix()
    {
        Random rnd = new Random();
        Row = rnd.Next(3, 7);
        Col = rnd.Next(3, 7);
        matrix = new double[Row, Col];
        for (int i = 0; i < Row; i++)
            for (int j = 0; j < Col; j++)
                matrix[i, j] = rnd.Next(5, 10);
    }
    public Matrix(Matrix a)
    {
        this.Col = a.Col;
        this.Row = a.Row;
        this.matrix = a.matrix;
    }
    #endregion

    #region properties
    public int Col
    {
        get { return col; }
        set { col = value; }
    }
    public int Row
    {
        get { return row; }
        set { row = value; }
    }
    public double[,] MatrixX
    {
        get { return matrix; }
        set { matrix = value; }
    }
    #endregion

    static public Matrix Transpose(Matrix array)
    {
        Matrix temp = new Matrix(array.Col, array.Row);
        for (int i = 0; i < array.Row; i++)
            for (int j = 0; j < array.Col; j++)
                temp.matrix[j, i] = array.matrix[i, j];
        return temp;
    }

    static public void Display(Matrix array)
    {
        for (int i = 0; i < array.Row; i++)
        {
            for (int j = 0; j < array.Col; j++)
                Console.Write("{0,5:f2}\t", array.matrix[i, j]);
            Console.WriteLine();
        }
        Console.WriteLine();
    }

    static public Matrix Multiply(Matrix a, Matrix b)
    {
        if (a.Col != b.Row) throw new Exception("multiplication is impossible: a.Col != b.Row");
        Matrix r = new Matrix(a.Row, b.Col);
        for (int i = 0; i < a.Row; i++)
        {
            for (int j = 0; j < b.Col; j++)
            {
                double sum = 0;
                for (int k = 0; k < b.Row; k++)
                    sum += a.matrix[i, k] * b.matrix[k, j];
                r.matrix[i, j] = sum;
            }
        }
        return r;
    }

    // Gauss-Jordan elimination WITHOUT pivoting: if a pivot a.matrix[k, k]
    // is zero or very small, the divisions below blow up, which is one
    // source of the instability. Note this also overwrites the input matrix.
    static public Matrix Invert(Matrix a)
    {
        Matrix E = new Matrix(a.Row, a.Col);
        double temp = 0;
        int n = a.Row;
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
            {
                E.matrix[i, j] = 0.0;
                if (i == j)
                    E.matrix[i, j] = 1.0;
            }
        for (int k = 0; k < n; k++)
        {
            temp = a.matrix[k, k];
            for (int j = 0; j < n; j++)
            {
                a.matrix[k, j] /= temp;
                E.matrix[k, j] /= temp;
            }
            for (int i = k + 1; i < n; i++)
            {
                temp = a.matrix[i, k];
                for (int j = 0; j < n; j++)
                {
                    a.matrix[i, j] -= a.matrix[k, j] * temp;
                    E.matrix[i, j] -= E.matrix[k, j] * temp;
                }
            }
        }
        for (int k = n - 1; k > 0; k--)
        {
            for (int i = k - 1; i >= 0; i--)
            {
                temp = a.matrix[i, k];
                for (int j = 0; j < n; j++)
                {
                    a.matrix[i, j] -= a.matrix[k, j] * temp;
                    E.matrix[i, j] -= E.matrix[k, j] * temp;
                }
            }
        }
        for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
            {
                a.matrix[i, j] = E.matrix[i, j];
            }
        return a;
    }
}
In your example, the determinant of x * transpose(x) is zero: x is 5x4, so x * transpose(x) is a 5x5 matrix of rank at most 4. As a result, there is no inverse, which is probably why you're getting strange results.
I also note that your Invert function modifies the matrix passed to it. This should probably be changed to avoid that.
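
A minimal sketch of the fix implied by the stated goal (an assumption, since the post computes x * transpose(x) while the goal is ( X^(T) * X )^(-1)): multiply in the other order to get the 4x4 matrix, which is invertible whenever X has full column rank. Because Invert overwrites its argument, the product is recomputed for the identity check.

    // Compute (X^T * X)^(-1) rather than (X * X^T)^(-1); for a 5x4 X the
    // 4x4 product is generally non-singular, while the 5x5 one never is.
    Matrix xt = Matrix.Transpose(x);
    Matrix xtxInv = Matrix.Invert(Matrix.Multiply(xt, x));
    // Recompute X^T * X because Invert mutates the matrix it is given.
    Matrix check = Matrix.Multiply(xtxInv, Matrix.Multiply(xt, x));
    Matrix.Display(check); // should be (close to) the 4x4 identity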
