This is Microsoft's implementation of Sinh for a Complex:
public static Complex Sinh(Complex value) /* Hyperbolic sin */
{
double a = value.m_real;
double b = value.m_imaginary;
return new Complex(Math.Sinh(a) * Math.Cos(b), Math.Cosh(a) * Math.Sin(b));
}
and the implementation for Cosh
public static Complex Cosh(Complex value) /* Hyperbolic cos */
{
double a = value.m_real;
double b = value.m_imaginary;
return new Complex(Math.Cosh(a) * Math.Cos(b), Math.Sinh(a) * Math.Sin(b));
}
and finally the implementation for Tanh
public static Complex Tanh(Complex value) /* Hyperbolic tan */
{
return (Sinh(value) / Cosh(value));
}
Source: https://referencesource.microsoft.com/System.Numerics/a.html#e62f37ac1d0c67da
I don't understand why Microsoft implemented the Tanh method that way.
It will fail for very large values, because Math.Sinh and Math.Cosh overflow to Infinity for arguments above roughly 710, so Sinh/Cosh evaluates to Infinity/Infinity, which is NaN. E.g.:
tanh(709 + 0i) --> 1, ok
tanh(711 + 0i) --> NaN, but it should be 1
Any ideas how to improve the Tanh method?
For double, the Math.Tanh method works for large values.
The complex Tanh method could be implemented like this:
public static Complex Tanh(Complex value)
{
double a = value.Real;
double b = value.Imaginary;
double tanh_a = Math.Tanh(a);
double tan_b = Math.Tan(b);
Complex num = new Complex(tanh_a, tan_b);
Complex den = new Complex(1, tanh_a * tan_b);
return num / den;
}
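This works because of the addition identity tanh(a + bi) = (tanh(a) + i*tan(b)) / (1 + i*tanh(a)*tan(b)); since |tanh(a)| <= 1, no intermediate value overflows for large real parts.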
This will work as well for large values, see https://dotnetfiddle.net/xGWdQt.
Update
The complex Tan method also needs to be re-implemented so that it works with large values (imaginary part):
public static Complex Tan(Complex value)
{
double a = value.Real;
double b = value.Imaginary;
double tan_a = Math.Tan(a);
double tanh_b = Math.Tanh(b);
Complex num = new Complex(tan_a, tanh_b);
Complex den = new Complex(1, -tan_a * tanh_b);
return num / den;
}
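This is the same identity with the circular and hyperbolic roles swapped: tan(a + bi) = (tan(a) + i*tanh(b)) / (1 - i*tan(a)*tanh(b)), so a large imaginary part can no longer overflow.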
See https://dotnetfiddle.net/dh6CSG.
Using the comment from Hans Passant, another way to implement the Tanh method would be:
public static Complex Tanh(Complex value)
{
if (Math.Abs(value.Real) > 20)
return new Complex(Math.Sign(value.Real), 0);
else
return Complex.Tanh(value);
}
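The cutoff of 20 is safe because tanh(20) differs from 1 by roughly 8e-18, far below double precision (machine epsilon is about 2.2e-16), so the saturated result is exact to the last bit; the same bound justifies the cutoff in the Tan variant below.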
See https://dotnetfiddle.net/QvUECX.
And the tan method:
public static Complex Tan(Complex value)
{
if (Math.Abs(value.Imaginary) > 20)
return new Complex(0, Math.Sign(value.Imaginary));
else
return Complex.Tan(value);
}
See https://dotnetfiddle.net/Xzclcu.
Related
I'm having trouble implementing the LM optimizer from the alglib library. I'm not sure why the parameters are hardly changing at all while still receiving an exit code of 4. I have been unable to determine what I am doing wrong from the alglib documentation. Below is the full source I am running:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.Threading.Tasks;
namespace FBkineticsFitter
{
class Program
{
public static int Main(string[] args)
{
/*
* This code finds the parameters ka, kd, and Bmax from the minimization of the residuals using "V" mode of the Levenberg-Marquardt optimizer (alglib library).
* This optimizer is used because the equation is non-linear and this particular version of the optimizer does not require the ab initio calculation of partial
* derivatives, a Jacobian matrix, or other parameter-space definitions, so its implementation is simple.
*
* The equations being solved represent a model of a protein-protein interaction where protein in solution is interacting with immobilized protein on a sensor
* in a 1:1 stoichiometry. Mass transport limit is not taken into account. The details of this equation are described in:
* R.B.M. Schasfoort and Anna J. Tudos, Handbook of Surface Plasmon Resonance, 2008, Chapter 5, ISBN: 978-0-85404-267-8
*
* Y=((ka*Cpro*Bmax)/(ka*Cpro+kd))*(1-exp(-1*X*(ka*Cpro+kd))) ; this equation describes the association phase
*
* Y=Req*exp(-1*X*kd) ; this equation describes the dissociation phase
*
* The data are fit globally such that Bmax and Req parameters are linked and kd parameters are linked during simultaneous optimization for the most robust fit
*
* Y= signal
* X= time
* ka= association constant
* kd= dissociation constant
* Bmax= maximum binding capacity at equilibrium
* Req=(Cpro/(Cpro+kobs))*Bmax :. in this case Req=Bmax because Cpro=0 during the dissociation step
* Cpro= concentration of protein in solution
*
* additional calculations:
* kobs=ka*Cpro
* kD=kd/ka
*/
GetRawDataXY(@"C:\Results.txt");
double epsg = .0000001;
double epsf = 0;
double epsx = 0;
int maxits = 0;
alglib.minlmstate state;
alglib.minlmreport rep;
alglib.minlmcreatev(2, GlobalVariables.param, 0.0001, out state);
alglib.minlmsetcond(state, epsg, epsf, epsx, maxits);
alglib.minlmoptimize(state, Calc_residuals, null, null);
alglib.minlmresults(state, out GlobalVariables.param, out rep);
System.Console.WriteLine("{0}", rep.terminationtype); ////1=relative function improvement is no more than EpsF. 2=relative step is no more than EpsX. 4=gradient norm is no more than EpsG. 5=MaxIts steps was taken. 7=stopping conditions are too stringent,further improvement is impossible, we return best X found so far. 8= terminated by user
System.Console.WriteLine("{0}", alglib.ap.format(GlobalVariables.param, 20));
System.Console.ReadLine();
return 0;
}
public static void Calc_residuals(double[] param, double[] fi, object obj)
{
/*calculate the difference of the model and the raw data at each X (I.E. residuals)
* the sum of the square of the residuals is returned to the optimized function to be minimized*/
fi[0] = 0;
fi[1] = 0;
for (int i = 0; i < GlobalVariables.rawXYdata[0].Count();i++ )
{
if (GlobalVariables.rawXYdata[1][i] <= GlobalVariables.breakpoint)
{
fi[0] += System.Math.Pow((kaEQN(GlobalVariables.rawXYdata[0][i]) - GlobalVariables.rawXYdata[1][i]), 2);
}
else
{
fi[1] += System.Math.Pow((kdEQN(GlobalVariables.rawXYdata[0][i]) - GlobalVariables.rawXYdata[1][i]), 2);
}
}
}
public static double kdEQN(double x)
{
/*Calculate kd Y value based on the incremented parameters*/
return GlobalVariables.param[2] * Math.Exp(-1 * x * GlobalVariables.param[1]);
}
public static double kaEQN(double x)
{
/*Calculate ka Y value based on the incremented parameters*/
return ((GlobalVariables.param[0] * GlobalVariables.Cpro * GlobalVariables.param[2]) / (GlobalVariables.param[0] * GlobalVariables.Cpro + GlobalVariables.param[1])) * (1 - Math.Exp(-1 * x * (GlobalVariables.param[0] * GlobalVariables.Cpro + GlobalVariables.param[1])));
}
public static void GetRawDataXY(string filename)
{
/*Read in Raw data From tab delim txt*/
string[] elements = { "x", "y" };
int count = 0;
GlobalVariables.rawXYdata[0] = new double[1798];
GlobalVariables.rawXYdata[1] = new double[1798];
using (StreamReader sr = new StreamReader(filename))
{
while (sr.Peek() >= 0)
{
elements = sr.ReadLine().Split('\t');
GlobalVariables.rawXYdata[0][count] = Convert.ToDouble(elements[0]);
GlobalVariables.rawXYdata[1][count] = Convert.ToDouble(elements[1]);
count++;
}
}
}
public class GlobalVariables
{
public static double[] param = new double[] { 1, .02, 0.13 }; ////ka,kd,Bmax these are initial guesses for the algorithm
public static double[][] rawXYdata = new double[2][];
public static double Cpro = 100E-9;
public static double kD = 0;
public static double breakpoint = 180;
}
}
}
According to Sergey Bochkanov, the issue is the following:
"You should use param[] array which is provided to you by optimizer. It creates its internal copy of your param, and updates this copy - not your param array.
From the optimizer point of view, it has function which never changes when it changes its internal copy of param. So, it terminates right after first iteration."
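In other words, the residual callback must read the parameter vector the optimizer passes in, not an external copy. A minimal sketch of the corrected pattern (the names are illustrative):
public static void Calc_residuals(double[] param, double[] fi, object obj)
{
    // param[] is the optimizer's trial vector - read ka, kd and Bmax from it,
    // never from a global array that the optimizer does not update
    double ka = param[0], kd = param[1], bmax = param[2];
    fi[0] = 0;
    fi[1] = 0;
    // ... accumulate the squared residuals into fi[] using ka, kd and bmax ...
}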
Here is the updated and working example code:
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.IO;
using System.Threading.Tasks;
namespace FBkineticsFitter
{
class Program
{
public static int Main(string[] args)
{
/*
* This code finds the parameters ka, kd, and Bmax from the minimization of the residuals using "V" mode of the Levenberg-Marquardt optimizer (alglib library).
* This optimizer is used because the equation is non-linear and this particular version of the optimizer does not require the ab initio calculation of partial
* derivatives, a Jacobian matrix, or other parameter-space definitions, so its implementation is simple.
*
* The equations being solved represent a model of a protein-protein interaction where protein in solution is interacting with immobilized protein on a sensor
* in a 1:1 stoichiometry. Mass transport limit is not taken into account. The details of this equation are described in:
* R.B.M. Schasfoort and Anna J. Tudos, Handbook of Surface Plasmon Resonance, 2008, Chapter 5, ISBN: 978-0-85404-267-8
*
* Y=((Cpro*Rmax)/(Cpro+kd))*(1-exp(-1*X*(ka*Cpro+kd))) ; this equation describes the association phase
*
* Y=Req*exp(-1*X*kd)+NS ; this equation describes the dissociation phase
*
* According to ForteBio's Application Notes #14 the amplitudes of the data can be correctly accounted for by modifying the above equations as follows:
*
* Y=(Rmax*(1/(1+(kd/(ka*Cpro))))*(1-exp(((-1*Cpro)+kd)*X)) ; this equation describes the association phase
*
* Y=Y0*(exp(-1*kd*(X-X0))) ; this equation describes the dissociation phase
*
*
*
* The data are fit simultaneously such that all fitting parameters are linked during optimization for the most robust fit
*
* Y= signal
* X= time
* ka= association constant [fitting parameter 0]
* kd= dissociation constant [fitting parameter 1]
* Rmax= maximum binding capacity at equilibrium [fitting parameter 2]
* KD=kd/ka
* kobs=ka*Cpro+kd
* Req=(Cpro/(Cpro+KD))*Rmax
* Cpro= concentration of protein in solution
* NS= non-specific binding at time=infinity (constant correction for end point of fit) [this is taken into account in the amplitude corrected formula: Y0=Ylast]
* Y0= the initial value of Y for the first point of the dissociation curve (I.E. the last point of the association phase)
* X0= the initial value of X for the first point of the dissociation phase
*
*/
GetRawDataXY(@"C:\Results.txt");
double epsg = .00001;
double epsf = 0;
double epsx = 0;
int maxits = 10000;
alglib.minlmstate state;
alglib.minlmreport rep;
double[] param = new double[] { 1000000, .0100, 0.20 }; ////ka,kd,Rmax - these are initial guesses for the algorithm and should be mid-range for the expected data. The last parameter, Rmax, should be guessed as the maximum Y-value of the ka curve.
double[] scaling= new double[] { 1E6,1,1};
alglib.minlmcreatev(2, param, 0.001, out state);
alglib.minlmsetcond(state, epsg, epsf, epsx, maxits);
alglib.minlmsetgradientcheck(state, 1);
alglib.minlmsetscale(state, scaling);
alglib.minlmoptimize(state, Calc_residuals, null, V.rawXYdata);
alglib.minlmresults(state, out param, out rep);
System.Console.WriteLine("{0}", rep.terminationtype); ////1=relative function improvement is no more than EpsF. 2=relative step is no more than EpsX. 4=gradient norm is no more than EpsG. 5=MaxIts steps was taken. 7=stopping conditions are too stringent,further improvement is impossible, we return best X found so far. 8= terminated by user
System.Console.WriteLine("{0}", alglib.ap.format(param, 25));
System.Console.ReadLine();
return 0;
}
public static void Calc_residuals(double[] param, double[] fi, object obj)
{
/*calculate the difference of the model and the raw data at each X (I.E. residuals)
* the sum of the square of the residuals is returned to the optimized function to be minimized*/
CalcVariables(param);
fi[0] = 0;
fi[1] = 0;
for (int i = 0; i < V.rawXYdata[0].Count(); i++)
{
if (V.rawXYdata[0][i] <= V.breakpoint)
{
fi[0] += System.Math.Pow((kaEQN(V.rawXYdata[0][i], param) - V.rawXYdata[1][i]), 2);
}
else
{
if (!V.breakpointreached)
{
V.breakpointreached = true;
V.X_0 = V.rawXYdata[0][i];
V.Y_0 = V.rawXYdata[1][i];
}
fi[1] += System.Math.Pow((kdEQN(V.rawXYdata[0][i], param) - V.rawXYdata[1][i]), 2);
}
}
if (param[0] <= 0 || param[1] <=0 || param[2] <= 0)////Exponentiates the error if the parameters go negative to favor positive non-zero values
{
fi[0] = Math.Pow(fi[0], 2);
fi[1] = Math.Pow(fi[1], 2);
}
System.Console.WriteLine("{0}"+" "+V.Cpro+" -->"+fi[0], alglib.ap.format(param, 5));
Console.WriteLine((kdEQN(V.rawXYdata[0][114], param)));
}
public static double kdEQN(double X, double[] param)
{
/*Calculate kd Y value based on the incremented parameters*/
return (V.Rmax * (1 / (1 + (V.kd / (V.ka * V.Cpro)))) * (1 - Math.Exp((-1 * V.ka * V.Cpro) * V.X_0))) * Math.Exp((-1 * V.kd) * (X - V.X_0));
}
public static double kaEQN(double X, double[] param)
{
/*Calculate ka Y value based on the incremented parameters*/
return ((V.Cpro * V.Rmax) / (V.Cpro + V.kd)) * (1 - Math.Exp(-1 * X * ((V.ka * V.Cpro) + V.kd)));
}
public static void GetRawDataXY(string filename)
{
/*Read in Raw data From tab delim txt*/
string[] elements = { "x", "y" };
int count = 0;
V.rawXYdata[0] = new double[226];
V.rawXYdata[1] = new double[226];
using (StreamReader sr = new StreamReader(filename))
{
while (sr.Peek() >= 0)
{
elements = sr.ReadLine().Split('\t');
V.rawXYdata[0][count] = Convert.ToDouble(elements[0]);
V.rawXYdata[1][count] = Convert.ToDouble(elements[1]);
count++;
}
}
}
public class V
{
/*Global Variables*/
public static double[][] rawXYdata = new double[2][];
public static double Cpro = 100E-9;
public static bool breakpointreached = false;
public static double X_0 = 0;
public static double Y_0 = 0;
public static double ka = 0;
public static double kd = 0;
public static double Rmax = 0;
public static double KD = 0;
public static double Kobs = 0;
public static double Req = 0;
public static double breakpoint = 180;
}
public static void CalcVariables(double[] param)
{
V.ka = param[0];
V.kd = param[1];
V.Rmax = param[2];
V.KD = param[1] / param[0];
V.Kobs = param[0] * V.Cpro + param[1];
V.Req = (V.Cpro / (V.Cpro + param[0] * V.Cpro + param[1])) * param[2];
}
}
}
Related
I don't like this code; it is overcomplicated and impractical, so I'm looking to simplify it.
I want it to change each variable by a random amount, and I need to put at least 150 variables into this code.
//Variable list
public double price1 = 100;
public double price2 = 100;
public double price3 = 100;
public void DaysEnd(){ //Simplified version of inefficient code
var = price1;
HVariation();
price1 = newvar;
var = price2;
HVariation();
price2 = newvar;
var = price2;
MVariation();
price2 = newvar;
var = price3;
LVariation();
price3 = newvar;
}
public void Hvariation(){
newvar = var + (var * (Random.NextDouble(0 - 0.5, 0.5)));
}
public void Mvariation(){
newvar = var + (var * (Random.NextDouble(0 - 0.25, 0.25)));
}
public void Lvariation(){
newvar = var + (var * (Random.NextDouble(0 - 0.1, 0.5)));
}
This should get you started
List<double> values = new List<double> { 100, 100, 200, 500, ... };
values = values.Select(val => Hvariation(val)).ToList();
// now all values have been altered by Hvariation
...
private readonly Random _rand = new Random();
public double Hvariation(double val) {
// NextDouble() returns a value in [0, 1); subtracting 0.5 maps it into [-0.5, 0.5)
return val + (val * (_rand.NextDouble() - 0.5));
}
The first thing to do is find repeated code. For example:
var = price3;
LVariation(); //Different variations
price3 = newvar;
This can be turned into a method (that takes the variation as a parameter).
To do this, you will also need to make a default variation that takes the min and max:
private static readonly Random _rand = new Random();
public double Variation(double value, double min, double max){
// NextDouble() returns [0, 1); scale and shift it into [min, max)
return value + (value * (min + ((max - min) * _rand.NextDouble())));
}
You can then put this together to reduce the code to look something like this:
public double UpdatePrice(double price, double min, double max)
{
return Variation(price, min, max);
}
In general, if I have to copy the code more than once (or even once if the amount copied is significant), I turn the code into a method.
You can simplify this by defining a variation level and passing it into a single method, instead of defining three variation methods. I'm not sure whether you need arrays or can use lists (in which case lists are preferable), but you can store your values in an array instead of defining a variable name for each one, and separate them into logical groupings as you need to. You can then apply the change/transformation to each array using LINQ. An example of this would be:
public enum VariationLevel
{
High,
Medium,
Low
};
public double[] HighVariancePrices =
{
100, 100, 100, 100, 100
};
public double[] MediumVariancePrices =
{
100, 100, 100, 100, 100
};
public double[] LowVariancePrices =
{
100, 100, 100, 100, 100
};
public void DaysEnd()
{
HighVariancePrices = HighVariancePrices.Select(price => GetVariation(price, VariationLevel.High)).ToArray();
MediumVariancePrices = MediumVariancePrices.Select(price => GetVariation(price, VariationLevel.Medium)).ToArray();
LowVariancePrices = LowVariancePrices.Select(price => GetVariation(price, VariationLevel.Low)).ToArray();
}
public double GetVariation(double value, VariationLevel variationLevel)
{
switch (variationLevel)
{
case VariationLevel.High:
return value + (value * (Random.NextDouble(0 - 0.5, 0.5)));
case VariationLevel.Medium:
return value + (value * (Random.NextDouble(0 - 0.25, 0.25)));
case VariationLevel.Low:
return value + (value * (Random.NextDouble(0 - 0.1, 0.5)));
}
}
However, the code around Random.NextDouble() doesn't compile (because NextDouble doesn't take arguments) so I'm not certain what you're trying to do there, but that's outside of the scope of "how can I simplify my code?" Hope this helps some.
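If the goal was a uniform random variation in a symmetric range, a compiling version of GetVariation might look like this (a sketch; the symmetric ranges are an assumption, since the original Low range of (-0.1, 0.5) was lopsided):
private static readonly Random _rand = new Random();
public double GetVariation(double value, VariationLevel variationLevel)
{
    double range;
    switch (variationLevel)
    {
        case VariationLevel.High: range = 0.5; break;
        case VariationLevel.Medium: range = 0.25; break;
        default: range = 0.1; break;
    }
    // NextDouble() returns [0, 1); shift and scale it into [-range, range)
    double factor = ((_rand.NextDouble() * 2.0) - 1.0) * range;
    return value + (value * factor);
}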
By "direct" I mean something like Size*2 (which doesn't work) as opposed to:
size1 = new Size(size1.Width * 2, size1.Height * 2);
You can technically write an extension method:
public static class Extensions {
public static Size Multiply(this Size size, double factor) {
return new Size((int)(size.Width * factor), (int)(size.Height * factor));
}
}
But just about nobody is going to use it correctly. They'll write
this.Size.Multiply(1.2);
instead of the required
this.Size = this.Size.Multiply(1.2);
An almost inevitable mistake because it looks like an instance method. So don't do it, just write a static helper method.
You can overload the * operator:
class Size
{
public int Width { get; set; }
public int Height { get; set; }
public Size(int w, int h)
{
this.Width = w;
this.Height = h;
}
public static Size operator *(Size s, int n)
{
return new Size(s.Width * n, s.Height * n);
}
}
Now you can do:
Size s1 = new Size(1, 2);
Size s = s1 * 2; // s.Width = 2, s.Height = 4
Since there is no * operator implemented for the Size struct, you would have to create one. But with extension methods it is not possible to create new operators. Instead you could create an extension method called Multiply, for instance.
Not without work, but it is quite easy to overload operators;
see the demo of operator overloading on CodeProject,
right at the bottom:
public static MySize operator +(MySize mySize, Int32 value)
{
return new MySize(
mySize.m_Width + value,
mySize.m_Height + value);
}
It shouldn't take you long to work out the * operator overload
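For completeness, a * overload in the same style might look like this (a sketch, reusing the MySize fields from the snippet above):
public static MySize operator *(MySize mySize, Int32 value)
{
    return new MySize(
        mySize.m_Width * value,
        mySize.m_Height * value);
}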
Related
I have C# code which works when the "optimize code" option is off, but fails otherwise. Is there any function or class attribute which can prevent the optimization of one function or class, but let the compiler optimize the others?
(I tried unsafe and MethodImpl, but without success.)
Thanks
Edit:
I have done some more test...
The code is like this :
double arg = (Math.PI / 2d - Math.Atan2(a, d));
With a = 1 and d = 0, arg should be 0.
This code is in a function which is called by Excel via ExcelDNA.
Calling an identical code from an optimized console app : OK
Calling this code from Excel without optimization : OK
Calling this code from Excel with optimization : Not OK, arg == 0 is false (instead arg is a very small value near 0, but not 0)
Same result with [MethodImpl(MethodImplOptions.NoOptimization)] before the called function.
This is very likely to do with the floating point mode which Excel has likely set, meaning that your program is calculating floating point slightly differently because of the program (Excel) hosting your assembly (DLL). This might impact how your results are calculated, or how/what values are automatically coerced to zero.
To be absolutely sure you are not going to run into issues with different floating point modes and/or errors, you should check for equality by checking whether the values are very close together. This is not really a hack.
public class AlmostDoubleComparer : IComparer<double>
{
public static readonly AlmostDoubleComparer Default = new AlmostDoubleComparer();
public const double Epsilon = double.Epsilon * 64d; // 0.{322 zeroes}316
public static bool IsZero(double x)
{
return Compare(x, 0) == 0;
}
public static int Compare(double x, double y)
{
// Very important that cmp(x, y) == cmp(y, x)
if (Double.IsNaN(x) || Double.IsNaN(y))
return 1;
if (Double.IsInfinity(x) || Double.IsInfinity(y))
return 1;
var absX = Math.Abs(x);
var absY = Math.Abs(y);
var diff = absX > absY ? absX - absY : absY - absX;
if (diff < Epsilon)
return 0;
if (x < y)
return -1;
else
return 1;
}
int IComparer<double>.Compare(double x, double y)
{
return Compare(x, y);
}
}
// E.g.
double arg = (Math.PI / 2d - Math.Atan2(a, d));
if (AlmostDoubleComparer.IsZero(arg))
// Regard it as zero.
I also ported the re-interpret integer comparison, in case you find that more suitable (it deals with larger values more consistently).
public class AlmostDoubleComparer : IComparer<double>
{
public static readonly AlmostDoubleComparer Default = new AlmostDoubleComparer();
public const double MaxUnitsInTheLastPlace = 3;
public static bool IsZero(double x)
{
return Compare(x, 0) == 0;
}
public static int Compare(double x, double y)
{
// Very important that cmp(x, y) == cmp(y, x)
if (Double.IsNaN(x) || Double.IsNaN(y))
return 1;
if (Double.IsInfinity(x) || Double.IsInfinity(y))
return 1;
var ix = DoubleInt64.Reinterpret(x);
var iy = DoubleInt64.Reinterpret(y);
var diff = Math.Abs(ix - iy);
if (diff < MaxUnitsInTheLastPlace)
return 0;
if (ix < iy)
return -1;
else
return 1;
}
int IComparer<double>.Compare(double x, double y)
{
return Compare(x, y);
}
}
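// Requires: using System.Runtime.InteropServices; (for StructLayout and FieldOffset)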
[StructLayout(LayoutKind.Explicit)]
public struct DoubleInt64
{
[FieldOffset(0)]
private double _double;
[FieldOffset(0)]
private long _int64;
private DoubleInt64(long value)
{
_double = 0d;
_int64 = value;
}
private DoubleInt64(double value)
{
_int64 = 0;
_double = value;
}
public static double Reinterpret(long value)
{
return new DoubleInt64(value)._double;
}
public static long Reinterpret(double value)
{
return new DoubleInt64(value)._int64;
}
}
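Usage is the same as before, e.g. AlmostDoubleComparer.IsZero(arg). (For what it's worth, BitConverter.DoubleToInt64Bits performs the same reinterpretation as this struct, if you prefer to avoid the explicit layout.)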
Alternatively you could try to NGen the assembly and see if you can work around either the mode Excel has set, or how it is hosting the CLR.
That is what you get when working with floating point datatypes. You don't get exactly 0, but a very close value, since a double has limited precision, not every value can be represented, and sometimes those tiny precision errors add up. You need to expect that and check that the value is close enough to 0.
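A minimal sketch of such a check (the tolerance 1e-12 is an assumption; pick one appropriate to the scale of your values):
// Treat anything within a small band around zero as zero
const double Tolerance = 1e-12;
double arg = Math.PI / 2d - Math.Atan2(1d, 0d);
bool isZero = Math.Abs(arg) < Tolerance;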
Related
I am writing an app that requires the calculation of the Gamma function.
A code (part of a class) snippet is below:
namespace PB.Utilities.Math
{
// class definition
public class SpecialFunctions
{
// Private Fields
// Instance Constructor
public SpecialFunctions() {}
// Public Method for Gamma Function
// x = input value; x MUST BE > 0
// GammaLn = secondary output value equal to natural log of Gamma Function
public double Gamma(double x, out double GammaLn)
{
try
{
if (x <= 0) throw new System.ArgumentException("arg <= 0 in GammaFunction", "x");
}
catch
{
System.Console.WriteLine("argument <= 0 in GammaFunction");
System.Console.ReadKey();
}
double gammaln;
double _gamma = gamma(x, out gammaln);
GammaLn = gammaln;
return _gamma;
}
// private method for Gamma Function
private double gamma(double xx, out double gammaln)
{
// working variables
int j;
double x, tmp, y, ser;
// private constants (Lanczos approximation)
const double k1 = 5.24218750000000000;
const double k2 = 0.999999999999997092;
const double k3 = 2.5066282746310005;
double[] cof = new double[14]
{
57.1562356658629235, -59.5979603554754912, 14.1360979747417471,
-0.491913816097620199, 0.339946499848118887e-4, 0.465236289270485756e-4,
-0.983744753048795646e-4, 0.158088703224912494e-3, -0.210264441724104883e-3,
0.217439618115212643e-3, -0.164318106536763890e-3, 0.844182239838527433e-4,
-0.261908384015814087e-4, 0.368991826595316234e-5
};
y = x = xx;
tmp = x + k1;
tmp = (x + 0.5) * System.Math.Log(tmp) - tmp;
ser = k2;
for (j = 0; j < 14; j++) ser += cof[j]/++y;
gammaln = tmp + System.Math.Log(k3*ser/x);
return System.Math.Exp(gammaln);
}
}
}
public class BSA
{
static void Main()
{
// Create an object of type PB.Utilities.Math.SpecialFunctions
PB.Utilities.Math.SpecialFunctions Function = new PB.Utilities.Math.SpecialFunctions();
// Call the public method GammaFunction.
double GammaLn1;
double GammaLn2;
double GammaLn3;
double g1 = Function.Gamma(3.5, out GammaLn1);
double g2 = Function.Gamma(1.5, out GammaLn2);
double g3 = Function.Gamma(1/7, out GammaLn3);
System.Console.WriteLine("g(7/2) = "+g1);
System.Console.WriteLine("g(3/2) = "+g2);
System.Console.WriteLine("g(1/7) = "+g3);
}
}
The issue is that at compilation, the parameter x in Gamma (even though x is being assigned the value 3.5 in the calling component) is assigned a value of 0, which triggers the exception. Can anyone please suggest how I can get around this? Thank you.
Seems to be 3.5 in my test cases. Are you sure you haven't excluded some information that might be the issue?
using System;
namespace Doubletesting
{
class Program
{
static void Main(string[] args)
{
double d = Doubletesting.TestDouble(3.5);
Console.WriteLine(d.ToString());
Console.ReadKey();
}
public static double TestDouble(double x)
{
double result;
result = x;
return result;
}
}
}
Result
3.5
UPDATED
The error is caused by your Function.Gamma(1 / 7, out GammaLn3). This is because both 1 and 7 are ints, and dividing (int)1 by (int)7 is zero. Try Function.Gamma(1.0 / 7.0, out GammaLn3) so the division is done in double precision.
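A quick way to see the difference (a minimal sketch):
Console.WriteLine(1 / 7);     // prints 0 - integer division truncates
Console.WriteLine(1.0 / 7.0); // prints 0.142857142857143 - double division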