Microsoft Solver Foundation doesn't solve what Excel Solver solves - C#

I have a non-linear optimization problem with constraints. I solved it with Excel Solver (GRG), but when I translated it to C# with Microsoft Solver Foundation I get solution.Quality == SolverQuality.Unknown.
The problem is this: I have some decisions, each with a lower and an upper limit constraint, and two further constraints (these are the problematic ones):
the sum of the decisions needs to be 1 (100%)
(MMULT(TRANSPOSE(Decision-Array), MMULT(Table-with-values, Decision-Array)))^0.5 needs to equal another given value
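In compact form (writing x for the decision vector, M for the table of values, c for the per-decision values in the goal, and v for the given value; the symbols are mine):

    maximize     c^T x
    subject to   sum_i x_i = 1
                 sqrt( x^T M x ) = v
                 low_i <= x_i <= up_i   for every decision i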
The goal is to maximize sum(decision * value). I wrote it with Microsoft Solver Foundation like this:
SolverContext context = SolverContext.GetContext();
context.ClearModel();
Model model = context.CreateModel();
for (int i = 0; i < decisionCount; i++)
{
    var decision = new Decision(Domain.RealNonnegative, "decisions_" + i);
    model.AddDecision(decision);
    model.AddConstraint("limits_of_" + i, downConstraint(i) <= decision <= upConstraint(i));
}
// the real allocation decisions, captured before the auxiliary decisions are added
List<Decision> decisions = model.Decisions.ToList();
var fdecision = new Decision(Domain.RealNonnegative, "formula");
model.AddDecision(fdecision);
// mymatrix is double[,]; column(...) turns the decision list into an n-by-1 Term matrix (helper shown below)
Term[,] d = column(decisions);
model.AddConstraint("f_constraint_1",
    fdecision == matMult(transpose(d), matMult(matrix(mymatrix), d))[0, 0]);
// myValue is double = givenValue^2
model.AddConstraint("f_constraint_2", fdecision == myValue);
var udecision = new Decision(Domain.RealNonnegative, "upto100");
model.AddDecision(udecision);
model.AddConstraint("upto1_constraint_1", udecision == UpTo100Precent(decisions));
model.AddConstraint("upto1_constraint_2", udecision == 1);
model.AddGoal("goal", GoalKind.Maximize, CalcGoal(decisions));
Solution solution = context.Solve(new HybridLocalSearchDirective() { TimeLimit = 60000 });
// and here I get solution.Quality == SolverQuality.Unknown
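If it helps, this is how the solver's decision can be inspected (a small diagnostic sketch, assuming the standard MSF reporting call):

// Dump the solver report to see which solver actually ran and why it stopped.
Report report = solution.GetReport();
Console.WriteLine(report.ToString());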
These are the helper functions I use in the code above:
private Term[,] matrix(Double[,] m)
{
    int rows = m.GetLength(0);
    int cols = m.GetLength(1);
    Term[,] r = new Term[rows, cols];
    for (int row = 0; row < rows; row++)
        for (int col = 0; col < cols; col++)
            r[row, col] = m[row, col];
    return r;
}
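The f_constraint_1 line above also needs the decision list as an n-by-1 matrix of Terms; a minimal helper for that (reconstructed here for completeness, the name column is mine) would be:

// Hypothetical helper: wraps the decisions in an n-by-1 Term matrix
// so they can be fed to transpose()/matMult() above.
private Term[,] column(List<Decision> decisions)
{
    Term[,] r = new Term[decisions.Count, 1];
    for (int i = 0; i < decisions.Count; i++)
        r[i, 0] = decisions[i];
    return r;
}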
private Term[,] matMult(Term[,] a, Term[,] b)
{
    int rows = a.GetLength(0);
    int cols = b.GetLength(1);
    Term[,] r = new Term[rows, cols];
    for (int row = 0; row < rows; row++)
        for (int col = 0; col < cols; col++)
        {
            r[row, col] = 0;
            for (int k = 0; k < a.GetLength(1); k++)
            {
                r[row, col] += a[row, k] * b[k, col];
            }
        }
    return r;
}
private Term[,] transpose(Term[,] m)
{
    int rows = m.GetLength(0);
    int cols = m.GetLength(1);
    Term[,] r = new Term[cols, rows];
    for (int row = 0; row < rows; row++)
        for (int col = 0; col < cols; col++)
        {
            r[col, row] = m[row, col];
        }
    return r;
}
private Term UpTo100Precent(List<Decision> decisions)
{
    Term to100 = 0;
    foreach (var decision in decisions)
    {
        to100 += decision;
    }
    return to100;
}
private Term CalcGoal(List<Decision> decisions)
{
    Term x = 0;
    // A is double[] (the value of each decision)
    for (int i = 0; i < decisions.Count; i++)
    {
        x += decisions[i] * A[i];
    }
    return x;
}
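For what it's worth, the two problematic constraints can also be written without the matrix helpers, directly as Term arithmetic (a sketch using the same decisions, mymatrix and myValue as above; I have not verified that it changes the solver's behaviour):

// Sketch: x^T * M * x written as an explicit double sum over the decisions.
Term quad = 0;
for (int i = 0; i < decisions.Count; i++)
    for (int j = 0; j < decisions.Count; j++)
        quad += decisions[i] * mymatrix[i, j] * decisions[j];
model.AddConstraint("f_constraint_1b", quad == myValue);                         // (x^T M x) == givenValue^2
model.AddConstraint("upto1_constraint_1b", Model.Sum(decisions.ToArray()) == 1); // weights sum to 1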
Does someone have a solution?

Related

Converting this MATLAB iterating code to C#. Outputs from MATLAB and C# are different

I am trying to convert this code:
function [C] = cumulativeMaxV2(A)
    cols = size(A,2);
    bscans = size(A,3);
    C = zeros(size(A));
    for col = 1:cols
        for bscan = 1:bscans
            aline = A(:,col,bscan);
            for i = 1:length(aline)
                if i == 1
                    C(i,col,bscan) = 0;
                else
                    C(i,col,bscan) = max(A(1:i-1, col,bscan));
                end
            end
        end
    end
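In words (my reading of the MATLAB): for every (col, bscan) pair the function walks down the first dimension and stores, at each row, the maximum of all rows strictly above it (and 0 for the first row). A one-dimensional C# illustration of that rule:

// Illustration only: cumulative "max of everything above me" for a single A-line.
// Note: MATLAB's max ignores NaN values by default, while Math.Max propagates them.
static double[] CumulativeMax1D(double[] aline)
{
    var c = new double[aline.Length];
    double runningMax = double.NegativeInfinity;
    for (int i = 0; i < aline.Length; i++)
    {
        c[i] = (i == 0) ? 0.0 : runningMax;   // max of aline[0 .. i-1]
        runningMax = Math.Max(runningMax, aline[i]);
    }
    return c;
}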
My C# code is below:
static double[,,] CumulativeMax(double[,,] A)
{
    int cols = 304; //A.GetLength(1);
    int bscans = 304; //A.GetLength(2);
    double[,,] C = new double[160, 304, 304];
    Console.Write("Processing... ");
    using (var progress = new ProgressBar())
    {
        for (int col = 0; col < cols; col++)
        {
            for (int bscan = 0; bscan < bscans; bscan++)
            {
                double[] aline = new double[160];
                for (int i = 0; i < 160; i++)
                    aline[i] = A[i,col,bscan];
                for (int i = 0; i < aline.GetLength(0); i++)
                {
                    if (i == 0)
                        C[i,col,bscan] = 0d;
                    else if (i == 1)
                    {
                        double[] temp = new double[i];
                        for (int x = 0; x < i; x++)
                            temp[x] = A[x,col,bscan];
                        C[i,col,bscan] = temp.Max();
                    }
                    else
                    {
                        double[] temp = new double[i - 1];
                        for (int x = 0; x < i - 1; x++)
                            temp[x] = A[x,col,bscan];
                        C[i,col,bscan] = temp.Max();
                    }
                }
            }
            progress.Report((double)col/cols);
        }
    }
    Console.WriteLine("Done.");
    return C;
}
Outputs from MATLAB do not match those from the C# code.
Any pointers to where the bugs are in my C# code would be great; I'm not very good with MATLAB.
I think this may be due to how MATLAB's max function deals with infinity and NaNs.

Genetic algorithm - clustering points on screen

Below is the code I wrote for clustering using a genetic algorithm. The points come from a PictureBox, generated randomly as (X,Y) before this class is called. However, the result of this algorithm is much worse than the k-means and LBG implementations I'm comparing it to. Can someone take a look for any errors in the algorithm? Maybe I omitted something. Thanks.
I did this one using arrays; the other two I did using lists, but I don't think that should have any impact on the result.
public class geneticAlgorithm
{
static int pom = 0;
static PictureBox pb1;
public geneticAlgorithm(PictureBox pb)
{
pb1 = pb;
}
public static void doGA(PointCollection points, int clusterCounter)
//points is a list of points; each point has (X,Y) coordinates
//generated randomly from the pictureBox coordinates.
//clusterCounter is how many clusters I want to divide the points into
{
//this part converts the list of points into the array testtab,
//where each array element holds the X,Y of one point
Point[] test = new Point[points.Count];
test = points.ToArray();
double[][] testtab = new double[test.Length][];
for (int i = 0; i < testtab.GetLength(0); i++)
{
testtab[i] = new double[2];
testtab[i][0] = test[i].X;
testtab[i][1] = test[i].Y;
}
//end of converting
int n = testtab.GetLength(0);
int k = clusterCounter;
int chromosomeCount = 500;
int dimensions = 2;
double[][] testClusters = new double[k][];
for (int i = 0; i < k; i++)
{
testClusters[i] = new double[dimensions];
}
double[][] testChromosomes = new double[chromosomeCount][];
for (int i = 0; i < chromosomeCount; i++)
{
testChromosomes[i] = new double[2 * k];
}
int[][] testChromosomesInt = new int[chromosomeCount][];
for (int i = 0; i < chromosomeCount; i++)
{
testChromosomesInt[i] = new int[2 * k];
}
double[] partner = new double[chromosomeCount];
double[][] roulette = new double[chromosomeCount][];
for (int i = 0; i < chromosomeCount; i++)
{
roulette[i] = new double[1];
}
double[][] errors = new double[chromosomeCount][];
for (int i = 0; i < chromosomeCount; i++)
{
errors[i] = new double[1];
}
double crossingPossibility = 0.01;
double mutationPossibility = 0.0001;
int maxIterations = 10000;
//here I create chromosomes and initial clusters
for (int j = 0; j < chromosomeCount; j++)
{
for (int i = 0; i < k; i++)
{
Random rnd = new Random();
int r = rnd.Next(n);
for (int q = 0; q < dimensions; q++)
{
testClusters[i][q] = testtab[r][q];
}
}
int help = 0;
for (int i = 0; i < k; i++)
for (int l = 0; l < dimensions; l++) // here is creation of chromosome
{
testChromosomes[j][help] = testClusters[i][l];
help++;
}
//end
//here I call accomodation function to see which of them are good
errors[j][0] = accomodationFunction(testClusters, testtab, n, k);
//end
//cleaning of the cluster table
testClusters = new double[k][];
for (int i = 0; i < k; i++)
{
testClusters[i] = new double[dimensions];
}
}
//end
for (int counter = 0; counter < maxIterations; counter++)
{
//start of the roulette
double s = 0.0;
for (int i = 0; i < chromosomeCount; i++)
s += errors[i][0];
for (int i = 0; i < chromosomeCount; i++)
errors[i][0] = chromosomeCount * errors[i][0] / s;
int idx = 0;
for (int i = 0; i < chromosomeCount; i++)
for (int j = 0; i < errors[i][0] && idx < chromosomeCount; j++)
{
roulette[idx++][0] = i;
}
double[][] newTab = new double[chromosomeCount][];
for (int i = 0; i < chromosomeCount; i++)
{
newTab[i] = new double[2 * k];
}
Random rnd = new Random();
for (int i = 0; i < chromosomeCount; i++)
{
int q = rnd.Next(chromosomeCount);
newTab[i] = testChromosomes[(int)roulette[q][0]];
}
testChromosomes = newTab;
//end of roulette
//start of crossing chromosomes
for (int i = 0; i < chromosomeCount; i++)
partner[i] = (rnd.NextDouble() < crossingPossibility + 1) ? rnd.Next(chromosomeCount) : -1;
for (int i = 0; i < chromosomeCount; i++)
if (partner[i] != -1)
testChromosomes[i] = crossing(testChromosomes[i], testChromosomes[(int)partner[i]], rnd.Next(2 * k), k);
//end of crossing
//converting double to int
for (int i = 0; i < chromosomeCount; i++)
for (int j = 0; j < 2 * k; j++)
testChromosomes[i][j] = (int)Math.Round(testChromosomes[i][j]);
//end of conversion
//start of mutation
for (int i = 0; i < chromosomeCount; i++)
if (rnd.NextDouble() < mutationPossibility + 1)
testChromosomesInt[i] = mutation(testChromosomesInt[i], rnd.Next(k * 2), rnd.Next(10));
//end of mutation
}
//painting of the found centre on the picture box
int centrum = max(errors, chromosomeCount);
Graphics g = pb1.CreateGraphics();
SolidBrush brush = new SolidBrush(Color.Red);
for (int i = 0; i < 2 * k - 1; i += 2)
{
g.FillRectangle(brush, testChromosomesInt[centrum][i], testChromosomesInt[centrum][i + 1], 20, 20);
}
return;
}
//end of painting
public static int max(double[][] tab, int chromosomeCount)
{
double max = 0;
int k = 0;
for (int i = 0; i < chromosomeCount; i++)
{
if (max < tab[i][0])
{
max = tab[i][0];
k = i;
}
}
return k;
}
public static int[] mutation(int[] tab, int elem, int bit)
{
int mask = 1;
mask <<= bit;
tab[elem] = tab[elem] ^ mask;
return tab;
}
public static double[] crossing(double[] tab, double[] tab2, int p, int k)
{
double[] hold = new double[2 * k];
for (int i = 0; i < p; i++)
hold[i] = tab[i];
for (int i = p; i < 2 * k; i++)
hold[i] = tab2[i];
return hold;
}
//accomodation function, checks to which centre which point belongs based on distance
private static double accomodationFunction(double[][] klastry, double[][] testtab, int n, int k)
{
double Error = 0;
for (int i = 0; i < n; i++)
{
double min = 0;
int ktory = 0;
for (int j = 0; j < k; j++)
{
double dist = distance(klastry[j], testtab[i]);
if (j == 0)
{
min = dist;
}
if (min > dist)
{
min = dist;
ktory = j;
}
}
Error += min;
}
pom++;
return 1 / Error;
}
public static double distance(double[] tab, double[] tab2)
{
double dist = 0.0;
for (int i = 0; i < tab.GetLength(0); i++)
dist += Math.Pow(tab[i] - tab2[i], 2);
return dist;
}
}
The algorithm should work like this (excuse my English):
1. Get random points (let's say 100).
2. Decide how many clusters we want to split them into (the same thing I would do with k-means, for example).
3. Generate a starting population of chromosomes.
4. Through roulette selection, crossing and mutation, pick the best x centres, where x is the number of clusters we want (see the roulette sketch below).
5. Paint them.
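For reference, this is the textbook fitness-proportionate (roulette-wheel) selection that step 4 refers to, written directly over the fitness scores (a sketch for comparison with the index-table roulette in the code above; fitness would be the errors[i][0] values and rnd a shared Random):

// Sketch: pick one chromosome with probability proportional to its fitness.
static int RouletteSelect(double[] fitness, Random rnd)
{
    double total = 0.0;
    for (int i = 0; i < fitness.Length; i++)
        total += fitness[i];
    double spin = rnd.NextDouble() * total;   // a random point on the wheel
    double running = 0.0;
    for (int i = 0; i < fitness.Length; i++)
    {
        running += fitness[i];
        if (spin <= running)
            return i;
    }
    return fitness.Length - 1;                // guard against rounding error
}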
Here are some results, and why I think it's wrong: (it's using 100 points, 5 clusters)
k-means:
lbg:
genetic(without colors now):
I hope this clarifies a bit.

C# Parallel.For Gaussian elimination

I am trying to implement Gaussian elimination in C# using parallel processing (in this case Parallel.For). My code sometimes runs fine and sometimes not.
I used the code shown at this link as a starting point: http://www.codeproject.com/Tips/388179/Linear-Equation-Solver-Gaussian-Elimination-Csharp .
I didn't use Parallel.For for the back insertion just yet because there is a problem with the elimination.
Elimination Class:
public static bool Gauss(double[][] M)
{
int rowCount = (M.GetLength(0));
int temp = rowCount - 1;
//elimination
Parallel.For(0, temp, sourceRow =>
//for (int sourceRow = 0; sourceRow + 1 < rowCount; sourceRow++)//diagonal
{
for (int destRow = sourceRow + 1; destRow < rowCount; destRow++)//destination row
{
double df = M[sourceRow][sourceRow];
double sf = M[destRow][sourceRow];
for (int j = 0; j < rowCount + 1; j++) // line
{
M[destRow][j] = (M[destRow][j] * df - M[sourceRow][j] * sf) / df;
}
}
});
//back-insertion
for (int row = rowCount - 1; row >= 0; row--)
{
double f = M[row][row];
if (f == 0) return false;
for (int i = 0; i < rowCount + 1; i++) M[row][i] /= f;
for (int destRow = 0; destRow < row; destRow++)
{
M[destRow][rowCount] -= M[destRow][row] * M[row][rowCount];
M[destRow][row] = 0;
}
}
return true;
}
Generate Array:
public static double[][] GenerateArray2(int n)
{
Random rnd = new Random();
double[][] M = new double[n][];
for (int i = 0; i < n; i++)
{
M[i] = new double[n + 1]; // Create inner array
for (int j = 0; j < n + 1; j++)
M[i][j] = rnd.Next(-20, 20);
}
return M;
}
Print Array
public static string Print2(double[][] M)
{
string str = "";
int rowCount = M.GetUpperBound(0) + 1;
for (int row = 0; row < rowCount; row++)
{
for (int i = 0; i <= rowCount; i++)
{
str += M[row][i];
str += "\t";
}
str += Environment.NewLine;
}
return str;
}
Results are wrong for matrices of size 16x17 and up. At this point I don't know how to continue; I tried to lock M in the elimination, but it didn't help.
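For context, the data dependency as I understand it: elimination step k reads pivot row k, which was itself updated by step k-1, so the outer sourceRow loop cannot simply run in parallel. A variant that keeps that loop sequential and parallelizes only the inner destination-row loop would look roughly like this (a sketch, not benchmarked, using the same M and rowCount):

// Sketch: sequential pivot loop, parallel update of the rows below the pivot.
// Within one pivot step the destination rows are disjoint, so they can run in parallel.
for (int sourceRow = 0; sourceRow + 1 < rowCount; sourceRow++)
{
    int pivot = sourceRow; // capture for the lambda
    Parallel.For(pivot + 1, rowCount, destRow =>
    {
        double df = M[pivot][pivot];
        double sf = M[destRow][pivot];
        for (int j = 0; j < rowCount + 1; j++)
            M[destRow][j] = (M[destRow][j] * df - M[pivot][j] * sf) / df;
    });
}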
Thanks for any help.

How can I use a Hopfield network to learn more patterns?

Is there any relation between the number of neurons and the ability of a Hopfield network to recognize patterns?
I wrote a neural network program in C# to recognize patterns with a Hopfield network. My network has 64 neurons. When I train the network on 2 patterns, everything works nicely, but when I train it on more patterns, the network can't find the answer!
So, given my code, how can I use a Hopfield network to learn more patterns?
Should I make changes to this code?
Here is my Train() function:
public void Train(bool[,] pattern)
{
//N is number of rows in our square matrix
//Convert input pattern to bipolar
int[,] PatternBipolar = new int[N, N];
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
if (pattern[i, j] == true)
{
PatternBipolar[i, j] = 1;
}
else
{
PatternBipolar[i, j] = -1;
}
}
//convert to row matrix
int count1 = 0;
int[] RowMatrix = new int[(int)Math.Pow(N, 2)];
for (int j = 0; j < N; j++)
for (int i = 0; i < N; i++)
{
RowMatrix[count1] = PatternBipolar[i, j];
count1++;
}
//convert to column matrix
int count2 = 0;
int[] ColMatrix = new int[(int)Math.Pow(N, 2)];
for (int j = 0; j < N; j++)
for (int i = 0; i < N; i++)
{
ColMatrix[count2] = PatternBipolar[i, j];
count2++;
}
//multiplication
int[,] MultipliedMatrix = new int[(int)Math.Pow(N, 2), (int)Math.Pow(N, 2)];
for (int i = 0; i < (int)Math.Pow(N, 2); i++)
for (int j = 0; j < (int)Math.Pow(N, 2); j++)
{
MultipliedMatrix[i, j] = ColMatrix[i] * RowMatrix[j];
}
//cells in the northwest diagonal get set to zero
for (int i = 0; i < (int)Math.Pow(N, 2); i++)
MultipliedMatrix[i, i] = 0;
// WightMatrix + MultipliedMatrix
for (int i = 0; i < (int)Math.Pow(N, 2); i++)
for (int j = 0; j < (int)Math.Pow(N, 2); j++)
{
WeightMatrix[i, j] += MultipliedMatrix[i, j];
}
}
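(In matrix terms, the Train loop above is the standard Hebbian outer-product update: with p the pattern flattened to a bipolar column vector, it computes W <- W + p * p^T and then zeroes the diagonal.)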
And here is the Present() function (it returns the answer for a given pattern):
public void Present(bool[,] Pattern)
{
int[] output = new int[(int)(int)Math.Pow(N, 2)];
for (int j = 0; j < N; j++)
for (int i = 0; i < N; i++)
{
OutputShowMatrix[i, j] = 0;
}
//convert bool to binary
int[] PatternBinary = new int[(int)Math.Pow(N, 2)];
int count = 0;
for (int j = 0; j < N; j++)
for (int i = 0; i < N; i++)
{
if (Pattern[i, j] == true)
{
PatternBinary[count] = 1;
}
else
{
PatternBinary[count] = 0;
}
count++;
}
count = 0;
int activation = 0;
for (int j = 0; j < (int)Math.Pow(N, 2); j++)
{
for (int i = 0; i < (int)Math.Pow(N, 2); i++)
{
activation = activation + (PatternBinary[i] * WeightMatrix[i, j]);
}
if (activation > 0)
{
output[count] = 1;
}
else
{
output[count] = 0;
}
count++;
activation = 0;
}
count = 0;
for (int j = 0; j < N; j++)
for (int i = 0; i < N; i++)
{
OutputShowMatrix[i, j] = output[count++];
}
}
In the images below I trained the Hopfield network on the characters A and P, and when the input pattern looks like A or P the network recognizes it correctly.
Then I trained it on the character C:
This is where everything goes wrong!
Now if I enter a pattern like C, this happens:
And if I enter a pattern like A, look what happens:
And if I train more patterns, the whole grid becomes black!
I've spotted only one mistake in your code: you perform only one iteration of the node value calculation, without checking whether the values have converged. I've fixed the method like this:
public bool[,] Present(bool[,] pattern)
{
bool[,] result = new bool[N, N];
int[] activation = new int[N * N];
int count = 0;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
activation[count++] = pattern[i, j] ? 1 : 0;
}
bool convergence = false;
while (!convergence)
{
convergence = true;
var previousActivation = (int[])activation.Clone();
for (int i = 0; i < N * N; i++)
{
activation[i] = 0;
for (int j = 0; j < N * N; j++)
{
activation[i] += (previousActivation[j] * WeightMatrix[i, j]);
}
activation[i] = activation[i] > 0 ? 1 : 0;
if (activation[i] != previousActivation[i])
{
convergence = false;
}
}
}
count = 0;
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
{
result[i, j] = activation[count++] == 1;
}
return result;
}
This slightly improves the results; however, it should probably also be changed to update the values asynchronously (one unit at a time) to avoid cycles.
Unfortunately, this still shows the behaviour you've described. This results from the phenomenon called spurious patterns. For the network to learn more than one pattern, consider training it with a Hebb rule. You can read about spurious patterns, stability and learning of the Hopfield network here and here.
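As a rough capacity guide (standard results, not specific to this code): a Hopfield network with N units trained with the Hebb rule can reliably store only about

    P_max ≈ 0.138 * N   (with a small error rate), or roughly N / (2 * ln N) for near-perfect recall,

which for the 64-neuron network here is on the order of 8 patterns, and strongly correlated patterns (such as similar letter shapes) reduce that further.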

Unstable calculation error

I need to calculate the matrix ( X^(T) * X )^(-1).
Legend for the code and comments:
x is a double[,] array;
xT - the transposed matrix;
^(-1) - the inverted matrix.
Each time I generate a new random matrix to work with, and I found out that the program is very unstable because it doesn't work properly with arbitrary input data. I'm sure about that because I should get the identity matrix at the end if everything is fine, but sometimes I get a totally wrong inverted matrix, so I don't get the identity matrix. I'm disappointed because I always use the same type of data and do not convert anything. The compiler is MSVS 2010. I hope you can help me.
Here is my Program.cs:
static void Main(string[] args)
{
Matrix x = new Matrix(5, 4);
//Matrix temp = new Matrix(x.Row, x.Col);
//double[] y = new double[x.Row];
//double[] b = new double[x.Row];
//this data isn't calculated correctly. used for debugging
x.MatrixX[0, 0] = 7; x.MatrixX[0, 1] = 6; x.MatrixX[0, 2] = 5; x.MatrixX[0, 3] = 8;
x.MatrixX[1, 0] = 7; x.MatrixX[1, 1] = 5; x.MatrixX[1, 2] = 8; x.MatrixX[1, 3] = 5;
x.MatrixX[2, 0] = 6; x.MatrixX[2, 1] = 8; x.MatrixX[2, 2] = 6; x.MatrixX[2, 3] = 8;
x.MatrixX[3, 0] = 8; x.MatrixX[3, 1] = 5; x.MatrixX[3, 2] = 8; x.MatrixX[3, 3] = 7;
x.MatrixX[4, 0] = 8; x.MatrixX[4, 1] = 5; x.MatrixX[4, 2] = 6; x.MatrixX[4, 3] = 7;
/*
7,00000 6,00000 5,00000 8,00000
7,00000 5,00000 8,00000 5,00000
6,00000 8,00000 6,00000 8,00000
8,00000 5,00000 8,00000 7,00000
8,00000 5,00000 6,00000 7,00000
*/
//random matrix generation
/*
Random rnd = new Random();
for (int i = 0; i < x.Row; i++)
for (int j = 0; j < x.Col; j++)
x.MatrixX[i, j] = rnd.Next(5, 10);
*/
/* I'm going to calculate: ( X^(T) * X )^(-1)
 * 1. transpose X
 * 2. multiply X and (1)
 * 3. invert matrix (2)
 * 4. check the result: multiplying (2) and (3) should give the identity matrix.
 */
Matrix.Display(x);
//1
Matrix xt = Matrix.Transpose(x);
Matrix.Display(xt);
//2
Matrix xxt = Matrix.Multiply(x, xt);
Matrix.Display(xxt);
//3
Matrix xxtinv = Matrix.Invert(Matrix.Multiply(x, xt));
Matrix.Display(xxtinv);
//4
Console.WriteLine("Invert(xxt) * xxt. IdentityMatrix:");
Matrix IdentityMatrix = Matrix.Multiply(xxtinv, xxt);
Matrix.Display(IdentityMatrix);
Console.ReadKey();
}
And here is Matrix.cs with all functions:
public class Matrix
{
private double[,] matrix;
private int row;
private int col;
#region constructors
public Matrix(int Row, int Col)
{
this.row = Row;
this.col = Col;
matrix = new double[Row, Col];
}
public Matrix()
{
Random rnd = new Random();
Row = rnd.Next(3, 7);
Col = rnd.Next(3, 7);
matrix = new double[Row, Col];
for (int i = 0; i < Row; i++)
for (int j = 0; j < Col; j++)
matrix[i, j] = rnd.Next(5, 10);
}
public Matrix(Matrix a)
{
this.Col = a.Col;
this.Row = a.Row;
this.matrix = a.matrix;
}
#endregion
#region properties
public int Col
{
get { return col; }
set { col = value; }
}
public int Row
{
get { return row; }
set { row = value; }
}
public double[,] MatrixX
{
get { return matrix; }
set { matrix = value; }
}
#endregion
static public Matrix Transpose(Matrix array)
{
Matrix temp = new Matrix(array.Col, array.Row);
for (int i = 0; i < array.Row; i++)
for (int j = 0; j < array.Col; j++)
temp.matrix[j, i] = array.matrix[i, j];
return temp;
}
static public void Display(Matrix array)
{
for (int i = 0; i < array.Row; i++)
{
for (int j = 0; j < array.Col; j++)
Console.Write("{0,5:f2}\t", array.matrix[i, j]);
Console.WriteLine();
}
Console.WriteLine();
}
static public Matrix Multiply(Matrix a, Matrix b)
{
if (a.Col != b.Row) throw new Exception("multiplication is impossible: a.Col != b.Row");
Matrix r = new Matrix(a.Row, b.Col);
for (int i = 0; i < a.Row; i++)
{
for (int j = 0; j < b.Col; j++)
{
double sum = 0;
for (int k = 0; k < b.Row; k++)
sum += a.matrix[i, k] * b.matrix[k, j];
r.matrix[i, j] = sum;
}
}
return r;
}
static public Matrix Invert(Matrix a)
{
Matrix E = new Matrix(a.Row, a.Col);
double temp = 0;
int n = a.Row;
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
{
E.matrix[i, j] = 0.0;
if (i == j)
E.matrix[i, j] = 1.0;
}
for (int k = 0; k < n; k++)
{
temp = a.matrix[k, k];
for (int j = 0; j < n; j++)
{
a.matrix[k, j] /= temp;
E.matrix[k, j] /= temp;
}
for (int i = k + 1; i < n; i++)
{
temp = a.matrix[i, k];
for (int j = 0; j < n; j++)
{
a.matrix[i, j] -= a.matrix[k, j] * temp;
E.matrix[i, j] -= E.matrix[k, j] * temp;
}
}
}
for (int k = n - 1; k > 0; k--)
{
for (int i = k - 1; i >= 0; i--)
{
temp = a.matrix[i, k];
for (int j = 0; j < n; j++)
{
a.matrix[i, j] -= a.matrix[k, j] * temp;
E.matrix[i, j] -= E.matrix[k, j] * temp;
}
}
}
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
{
a.matrix[i, j] = E.matrix[i, j];
}
return a;
}
}
In your example, the determinant of x * transpose(x) is zero. As a result there is no inverse, which is probably why you're getting strange results.
I also note that your Invert function modifies the matrix passed to it. It should probably be changed to avoid that.
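A worked check of that point, reusing the question's Matrix class and the x from Main (a sketch; it assumes X has full column rank): X is 5x4, so X * X^T is 5x5 with rank at most 4 and therefore determinant 0, while the 4x4 product X^T * X from the problem statement is the one that can be inverted.

// X is 5x4: rank(X * X^T) <= 4, so the 5x5 product is singular and has no inverse.
// The 4x4 product X^T * X is what ( X^T X )^(-1) actually needs.
Matrix xt = Matrix.Transpose(x);
Matrix xtx = Matrix.Multiply(xt, x);                 // 4x4
Matrix xtxInv = Matrix.Invert(xtx);                  // note: Invert also overwrites its argument
Matrix check = Matrix.Multiply(xtxInv, Matrix.Multiply(Matrix.Transpose(x), x));
Matrix.Display(check);                               // should print (approximately) the 4x4 identity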
