C# LOESS/LOWESS regression [closed]

Closed. This question does not meet Stack Overflow guidelines and is not currently accepting answers: questions seeking recommendations for books, tools, software libraries, and more are off-topic. Closed last year.
Do you know of a .NET library to perform a LOESS/LOWESS regression? (preferably free/open source)

Port from Java to C#
public class LoessInterpolator
{
public static double DEFAULT_BANDWIDTH = 0.3;
public static int DEFAULT_ROBUSTNESS_ITERS = 2;
/**
* The bandwidth parameter: when computing the loess fit at
* a particular point, this fraction of source points closest
* to the current point is taken into account for computing
* a least-squares regression.
*
* A sensible value is usually 0.25 to 0.5.
*/
private double bandwidth;
/**
* The number of robustness iterations parameter: this many
* robustness iterations are done.
*
* A sensible value is usually 0 (just the initial fit without any
* robustness iterations) to 4.
*/
private int robustnessIters;
public LoessInterpolator()
{
this.bandwidth = DEFAULT_BANDWIDTH;
this.robustnessIters = DEFAULT_ROBUSTNESS_ITERS;
}
public LoessInterpolator(double bandwidth, int robustnessIters)
{
if (bandwidth < 0 || bandwidth > 1)
{
throw new ApplicationException(string.Format("bandwidth must be in the interval [0,1], but got {0}", bandwidth));
}
this.bandwidth = bandwidth;
if (robustnessIters < 0)
{
throw new ApplicationException(string.Format("the number of robustness iterations must be non-negative, but got {0}", robustnessIters));
}
this.robustnessIters = robustnessIters;
}
/**
* Compute a loess fit on the data at the original abscissae.
*
* #param xval the arguments for the interpolation points
* #param yval the values for the interpolation points
* #return values of the loess fit at corresponding original abscissae
* #throws MathException if some of the following conditions are false:
* <ul>
* <li> Arguments and values are of the same size that is greater than zero</li>
* <li> The arguments are in a strictly increasing order</li>
* <li> All arguments and values are finite real numbers</li>
* </ul>
*/
public double[] smooth(double[] xval, double[] yval)
{
if (xval.Length != yval.Length)
{
throw new ApplicationException(string.Format("Loess expects the abscissa and ordinate arrays to be of the same size, but got {0} abscissae and {1} ordinatae", xval.Length, yval.Length));
}
int n = xval.Length;
if (n == 0)
{
throw new ApplicationException("Loess expects at least 1 point");
}
checkAllFiniteReal(xval, true);
checkAllFiniteReal(yval, false);
checkStrictlyIncreasing(xval);
if (n == 1)
{
return new double[] { yval[0] };
}
if (n == 2)
{
return new double[] { yval[0], yval[1] };
}
int bandwidthInPoints = (int)(bandwidth * n);
if (bandwidthInPoints < 2)
{
throw new ApplicationException(string.Format("the bandwidth must be large enough to accommodate at least 2 points. There are {0} " +
" data points, and bandwidth must be at least {1} but it is only {2}",
n, 2.0 / n, bandwidth
));
}
double[] res = new double[n];
double[] residuals = new double[n];
double[] sortedResiduals = new double[n];
double[] robustnessWeights = new double[n];
// Do an initial fit and 'robustnessIters' robustness iterations.
// This is equivalent to doing 'robustnessIters+1' robustness iterations
// starting with all robustness weights set to 1.
for (int i = 0; i < robustnessWeights.Length; i++) robustnessWeights[i] = 1;
for (int iter = 0; iter <= robustnessIters; ++iter)
{
int[] bandwidthInterval = { 0, bandwidthInPoints - 1 };
// At each x, compute a local weighted linear regression
for (int i = 0; i < n; ++i)
{
double x = xval[i];
// Find out the interval of source points on which
// a regression is to be made.
if (i > 0)
{
updateBandwidthInterval(xval, i, bandwidthInterval);
}
int ileft = bandwidthInterval[0];
int iright = bandwidthInterval[1];
// Compute the point of the bandwidth interval that is
// farthest from x
int edge;
if (xval[i] - xval[ileft] > xval[iright] - xval[i])
{
edge = ileft;
}
else
{
edge = iright;
}
// Compute a least-squares linear fit weighted by
// the product of robustness weights and the tricube
// weight function.
// See http://en.wikipedia.org/wiki/Linear_regression
// (section "Univariate linear case")
// and http://en.wikipedia.org/wiki/Weighted_least_squares
// (section "Weighted least squares")
double sumWeights = 0;
double sumX = 0, sumXSquared = 0, sumY = 0, sumXY = 0;
double denom = Math.Abs(1.0 / (xval[edge] - x));
for (int k = ileft; k <= iright; ++k)
{
double xk = xval[k];
double yk = yval[k];
double dist;
if (k < i)
{
dist = (x - xk);
}
else
{
dist = (xk - x);
}
double w = tricube(dist * denom) * robustnessWeights[k];
double xkw = xk * w;
sumWeights += w;
sumX += xkw;
sumXSquared += xk * xkw;
sumY += yk * w;
sumXY += yk * xkw;
}
double meanX = sumX / sumWeights;
double meanY = sumY / sumWeights;
double meanXY = sumXY / sumWeights;
double meanXSquared = sumXSquared / sumWeights;
double beta;
if (meanXSquared == meanX * meanX)
{
beta = 0;
}
else
{
beta = (meanXY - meanX * meanY) / (meanXSquared - meanX * meanX);
}
double alpha = meanY - beta * meanX;
res[i] = beta * x + alpha;
residuals[i] = Math.Abs(yval[i] - res[i]);
}
// No need to recompute the robustness weights at the last
// iteration, they won't be needed anymore
if (iter == robustnessIters)
{
break;
}
// Recompute the robustness weights.
// Find the median residual.
// An arraycopy and a sort are completely tractable here,
// because the preceding loop is a lot more expensive
System.Array.Copy(residuals, sortedResiduals, n);
//System.arraycopy(residuals, 0, sortedResiduals, 0, n);
Array.Sort<double>(sortedResiduals);
double medianResidual = sortedResiduals[n / 2];
if (medianResidual == 0)
{
break;
}
for (int i = 0; i < n; ++i)
{
double arg = residuals[i] / (6 * medianResidual);
robustnessWeights[i] = (arg >= 1) ? 0 : Math.Pow(1 - arg * arg, 2);
}
}
return res;
}
/**
* Given an index interval into xval that embraces a certain number of
* points closest to xval[i-1], update the interval so that it embraces
* the same number of points closest to xval[i]
*
* #param xval arguments array
* #param i the index around which the new interval should be computed
* #param bandwidthInterval a two-element array {left, right} such that: <p/>
* <tt>(left==0 or xval[i] - xval[left-1] > xval[right] - xval[i])</tt>
* <p/> and also <p/>
* <tt>(right==xval.length-1 or xval[right+1] - xval[i] > xval[i] - xval[left])</tt>.
* The array will be updated.
*/
private static void updateBandwidthInterval(double[] xval, int i, int[] bandwidthInterval)
{
int left = bandwidthInterval[0];
int right = bandwidthInterval[1];
// The right edge should be adjusted if the next point to the right
// is closer to xval[i] than the leftmost point of the current interval
if (right < xval.Length - 1 &&
xval[right + 1] - xval[i] < xval[i] - xval[left])
{
bandwidthInterval[0]++;
bandwidthInterval[1]++;
}
}
/**
* Compute the
* tricube
* weight function
*
* #param x the argument
* #return (1-|x|^3)^3
*/
private static double tricube(double x)
{
double tmp = Math.Abs(x);
tmp = 1 - tmp * tmp * tmp;
return tmp * tmp * tmp;
}
/**
* Check that all elements of an array are finite real numbers.
*
* #param values the values array
* #param isAbscissae if true, elements are abscissae otherwise they are ordinatae
* #throws MathException if one of the values is not
* a finite real number
*/
private static void checkAllFiniteReal(double[] values, bool isAbscissae)
{
for (int i = 0; i < values.Length; i++)
{
double x = values[i];
if (Double.IsInfinity(x) || Double.IsNaN(x))
{
string pattern = isAbscissae ?
"all abscissae must be finite real numbers, but {0}-th is {1}" :
"all ordinatae must be finite real numbers, but {0}-th is {1}";
throw new ApplicationException(string.Format(pattern, i, x));
}
}
}
/**
* Check that elements of the abscissae array are in a strictly
* increasing order.
*
* #param xval the abscissae array
* #throws MathException if the abscissae array
* is not in a strictly increasing order
*/
private static void checkStrictlyIncreasing(double[] xval)
{
for (int i = 0; i < xval.Length; ++i)
{
if (i >= 1 && xval[i - 1] >= xval[i])
{
throw new ApplicationException(string.Format(
"the abscissae array must be sorted in a strictly " +
"increasing order, but the {0}-th element is {1} " +
"whereas {2}-th is {3}",
i - 1, xval[i - 1], i, xval[i]));
}
}
}
}
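For reference, a minimal usage sketch of the class above (the data points are made up; assumes using System):
double[] x = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
double[] y = { 1.1, 1.9, 3.2, 3.8, 5.3, 5.9, 7.2, 7.8, 9.1, 10.2 };

var loess = new LoessInterpolator(bandwidth: 0.5, robustnessIters: 2);
double[] smoothed = loess.smooth(x, y);   // x must be strictly increasing

for (int i = 0; i < x.Length; i++)
    Console.WriteLine(x[i] + ": " + y[i] + " -> " + smoothed[i]);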

Since I'm unable to comment on other people's posts (new user), and people seem to think I should do that instead of editing the above answer, I'm simply going to write this as an answer even though I know it would be better as a comment.
The updateBandwidthInterval method in the above answer forgets to check the left side, as described in the method comment. This can cause NaN issues for sumWeights. The version below should fix that. I ran into this while doing a C++ implementation based on the above.
/**
* Given an index interval into xval that embraces a certain number of
* points closest to xval[i-1], update the interval so that it embraces
* the same number of points closest to xval[i]
*
* #param xval arguments array
* #param i the index around which the new interval should be computed
* #param bandwidthInterval a two-element array {left, right} such that: <p/>
* <tt>(left==0 or xval[i] - xval[left-1] > xval[right] - xval[i])</tt>
* <p/> and also <p/>
* <tt>(right==xval.length-1 or xval[right+1] - xval[i] > xval[i] - xval[left])</tt>.
* The array will be updated.
*/
private static void updateBandwidthInterval(double[] xval, int i, int[] bandwidthInterval)
{
int left = bandwidthInterval[0];
int right = bandwidthInterval[1];
// The edges should be adjusted if the previous point to the
// left is closer to x than the current point to the right or
// if the next point to the right is closer
// to x than the leftmost point of the current interval
if (left != 0 &&
xval[i] - xval[left - 1] < xval[right] - xval[i])
{
bandwidthInterval[0]++;
bandwidthInterval[1]++;
}
else if (right < xval.Length - 1 &&
xval[right + 1] - xval[i] < xval[i] - xval[left])
{
bandwidthInterval[0]++;
bandwidthInterval[1]++;
}
}

Hope someone finds this useful after 5 years. This is the original code posted by Tutcugil, but with the missing methods added and updated.
using System;
using System.Linq;
namespace StockCorrelation
{
public class LoessInterpolator
{
public static double DEFAULT_BANDWIDTH = 0.3;
public static int DEFAULT_ROBUSTNESS_ITERS = 2;
/**
* The bandwidth parameter: when computing the loess fit at
* a particular point, this fraction of source points closest
* to the current point is taken into account for computing
* a least-squares regression.
*
* A sensible value is usually 0.25 to 0.5.
*/
private double bandwidth;
/**
* The number of robustness iterations parameter: this many
* robustness iterations are done.
*
* A sensible value is usually 0 (just the initial fit without any
* robustness iterations) to 4.
*/
private int robustnessIters;
public LoessInterpolator()
{
this.bandwidth = DEFAULT_BANDWIDTH;
this.robustnessIters = DEFAULT_ROBUSTNESS_ITERS;
}
public LoessInterpolator(double bandwidth, int robustnessIters)
{
if (bandwidth < 0 || bandwidth > 1)
{
throw new ApplicationException(string.Format("bandwidth must be in the interval [0,1], but got {0}", bandwidth));
}
this.bandwidth = bandwidth;
if (robustnessIters < 0)
{
throw new ApplicationException(string.Format("the number of robustness iterations must be non-negative, but got {0}", robustnessIters));
}
this.robustnessIters = robustnessIters;
}
/**
* Compute a loess fit on the data at the original abscissae.
*
* #param xval the arguments for the interpolation points
* #param yval the values for the interpolation points
* #return values of the loess fit at corresponding original abscissae
* #throws MathException if some of the following conditions are false:
* <ul>
* <li> Arguments and values are of the same size that is greater than zero</li>
* <li> The arguments are in a strictly increasing order</li>
* <li> All arguments and values are finite real numbers</li>
* </ul>
*/
public double[] smooth(double[] xval, double[] yval, double[] weights)
{
if (xval.Length != yval.Length)
{
throw new ApplicationException(string.Format("Loess expects the abscissa and ordinate arrays to be of the same size, but got {0} abscissae and {1} ordinatae", xval.Length, yval.Length));
}
int n = xval.Length;
if (n == 0)
{
throw new ApplicationException("Loess expects at least 1 point");
}
checkAllFiniteReal(xval, true);
checkAllFiniteReal(yval, false);
checkStrictlyIncreasing(xval);
if (n == 1)
{
return new double[] { yval[0] };
}
if (n == 2)
{
return new double[] { yval[0], yval[1] };
}
int bandwidthInPoints = (int)(bandwidth * n);
if (bandwidthInPoints < 2)
{
throw new ApplicationException(string.Format("the bandwidth must be large enough to accommodate at least 2 points. There are {0} " +
" data points, and bandwidth must be at least {1} but it is only {2}",
n, 2.0 / n, bandwidth
));
}
double[] res = new double[n];
double[] residuals = new double[n];
double[] sortedResiduals = new double[n];
double[] robustnessWeights = new double[n];
// Do an initial fit and 'robustnessIters' robustness iterations.
// This is equivalent to doing 'robustnessIters+1' robustness iterations
// starting with all robustness weights set to 1.
for (int i = 0; i < robustnessWeights.Length; i++) robustnessWeights[i] = 1;
for (int iter = 0; iter <= robustnessIters; ++iter)
{
int[] bandwidthInterval = { 0, bandwidthInPoints - 1 };
// At each x, compute a local weighted linear regression
for (int i = 0; i < n; ++i)
{
double x = xval[i];
// Find out the interval of source points on which
// a regression is to be made.
if (i > 0)
{
updateBandwidthInterval(xval, weights, i, bandwidthInterval);
}
int ileft = bandwidthInterval[0];
int iright = bandwidthInterval[1];
// Compute the point of the bandwidth interval that is
// farthest from x
int edge;
if (xval[i] - xval[ileft] > xval[iright] - xval[i])
{
edge = ileft;
}
else
{
edge = iright;
}
// Compute a least-squares linear fit weighted by
// the product of robustness weights and the tricube
// weight function.
// See http://en.wikipedia.org/wiki/Linear_regression
// (section "Univariate linear case")
// and http://en.wikipedia.org/wiki/Weighted_least_squares
// (section "Weighted least squares")
double sumWeights = 0;
double sumX = 0, sumXSquared = 0, sumY = 0, sumXY = 0;
double denom = Math.Abs(1.0 / (xval[edge] - x));
for (int k = ileft; k <= iright; ++k)
{
double xk = xval[k];
double yk = yval[k];
double dist;
if (k < i)
{
dist = (x - xk);
}
else
{
dist = (xk - x);
}
double w = tricube(dist * denom) * robustnessWeights[k];
double xkw = xk * w;
sumWeights += w;
sumX += xkw;
sumXSquared += xk * xkw;
sumY += yk * w;
sumXY += yk * xkw;
}
double meanX = sumX / sumWeights;
double meanY = sumY / sumWeights;
double meanXY = sumXY / sumWeights;
double meanXSquared = sumXSquared / sumWeights;
double beta;
if (meanXSquared == meanX * meanX)
{
beta = 0;
}
else
{
beta = (meanXY - meanX * meanY) / (meanXSquared - meanX * meanX);
}
double alpha = meanY - beta * meanX;
res[i] = beta * x + alpha;
residuals[i] = Math.Abs(yval[i] - res[i]);
}
// No need to recompute the robustness weights at the last
// iteration, they won't be needed anymore
if (iter == robustnessIters)
{
break;
}
// Recompute the robustness weights.
// Find the median residual.
// An arraycopy and a sort are completely tractable here,
// because the preceding loop is a lot more expensive
System.Array.Copy(residuals, sortedResiduals, n);
//System.arraycopy(residuals, 0, sortedResiduals, 0, n);
Array.Sort<double>(sortedResiduals);
double medianResidual = sortedResiduals[n / 2];
if (medianResidual == 0)
{
break;
}
for (int i = 0; i < n; ++i)
{
double arg = residuals[i] / (6 * medianResidual);
robustnessWeights[i] = (arg >= 1) ? 0 : Math.Pow(1 - arg * arg, 2);
}
}
return res;
}
public double[] smooth(double[] xval, double[] yval)
{
if (xval.Length != yval.Length)
{
throw new Exception("xval and yval lengths are different");
}
double[] unitWeights = Enumerable.Repeat(1.0, xval.Length).ToArray();
return smooth(xval, yval, unitWeights);
}
/**
* Given an index interval into xval that embraces a certain number of
* points closest to xval[i-1], update the interval so that it embraces
* the same number of points closest to xval[i]
*
* #param xval arguments array
* #param i the index around which the new interval should be computed
* #param bandwidthInterval a two-element array {left, right} such that: <p/>
* <tt>(left==0 or xval[i] - xval[left-1] > xval[right] - xval[i])</tt>
* <p/> and also <p/>
* <tt>(right==xval.length-1 or xval[right+1] - xval[i] > xval[i] - xval[left])</tt>.
* The array will be updated.
*/
private static void updateBandwidthInterval(double[] xval, double[] weights,
int i,
int[] bandwidthInterval)
{
int left = bandwidthInterval[0];
int right = bandwidthInterval[1];
// The right edge should be adjusted if the next point to the right
// is closer to xval[i] than the leftmost point of the current interval
int nextRight = nextNonzero(weights, right);
if (nextRight < xval.Length && xval[nextRight] - xval[i] < xval[i] - xval[left])
{
int nextLeft = nextNonzero(weights, bandwidthInterval[0]);
bandwidthInterval[0] = nextLeft;
bandwidthInterval[1] = nextRight;
}
}
private static int nextNonzero(double[] weights, int i)
{
int j = i + 1;
while (j < weights.Length && weights[j] == 0)
{
++j;
}
return j;
}
/**
* Compute the
* tricube
* weight function
*
* #param x the argument
* #return (1-|x|^3)^3
*/
private static double tricube(double x)
{
double tmp = Math.Abs(x);
tmp = 1 - tmp * tmp * tmp;
return tmp * tmp * tmp;
}
/**
* Check that all elements of an array are finite real numbers.
*
* #param values the values array
* #param isAbscissae if true, elements are abscissae otherwise they are ordinatae
* #throws MathException if one of the values is not
* a finite real number
*/
private static void checkAllFiniteReal(double[] values, bool isAbscissae)
{
for (int i = 0; i < values.Length; i++)
{
double x = values[i];
if (Double.IsInfinity(x) || Double.IsNaN(x))
{
string pattern = isAbscissae ?
"all abscissae must be finite real numbers, but {0}-th is {1}" :
"all ordinatae must be finite real numbers, but {0}-th is {1}";
throw new ApplicationException(string.Format(pattern, i, x));
}
}
}
/**
* Check that elements of the abscissae array are in a strictly
* increasing order.
*
* #param xval the abscissae array
* #throws MathException if the abscissae array
* is not in a strictly increasing order
*/
private static void checkStrictlyIncreasing(double[] xval)
{
for (int i = 0; i < xval.Length; ++i)
{
if (i >= 1 && xval[i - 1] >= xval[i])
{
throw new ApplicationException(string.Format(
"the abscissae array must be sorted in a strictly " +
"increasing order, but the {0}-th element is {1} " +
"whereas {2}-th is {3}",
i - 1, xval[i - 1], i, xval[i]));
}
}
}
}
}
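A short usage sketch of this updated class (values made up). Note that in this port the per-point weights are only consulted when sliding the bandwidth window (via nextNonzero); they are not multiplied into the regression weights:
var loess = new StockCorrelation.LoessInterpolator(0.4, 2);

double[] x = { 1, 2, 3, 4, 5, 6, 7, 8 };
double[] y = { 2.0, 2.4, 3.1, 3.4, 4.2, 4.4, 5.1, 5.3 };
double[] w = { 1, 1, 1, 0, 1, 1, 1, 1 };   // a zero weight makes the window skip that point

double[] unweighted = loess.smooth(x, y);      // uses unit weights internally
double[] weighted = loess.smooth(x, y, w);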

Related

Scale FFT result frequencies in log

I am programming a visualizer, with pretty good results so far. I have an array of size 1500 containing the magnitudes of the frequencies. Now I want to convert this array into an array with 100 values. For example, the 1st index of the 2nd array should hold the average of the first two values of the first array, the 2nd index the average of the values at indices 3-6, and so on. But I don't know how to calculate this properly. So how can I convert the first array into the second one?
I have found an answer in the Rainmeter source code; maybe it will now be clearer what I wanted to do. Here is the C# code.
To get an array of a specific length, log-scaled between min and max frequencies:
private float[] getFrequencies(int min, int max, int nBands)
{
float[] returnVal = new float[nBands];
double step = (Math.Log((double)max / min) / nBands) / Math.Log(2.0); // cast avoids integer division
returnVal[0] = (float)(min * Math.Pow(2.0, step / 2.0));
for (int iBand = 1; iBand < nBands; ++iBand)
{
returnVal[iBand] = (float)(returnVal[iBand - 1] * Math.Pow(2.0, step));
}
return returnVal;
}
And to fill the output array:
private double[] getLogArray(double[] data, int nBands, int minFreq, int maxFreq)
{
float[] bandFreq = getFrequencies(minFreq, maxFreq, nBands);
float df = (float)sampleRate / samples; // sampleRate and samples are fields of the enclosing class (not shown)
float scalar = 1.0f / sampleRate;
double[] bandOut = new double[nBands];
int iBin = 0;
int iBand = 0;
float f0 = 0.0f;
while (iBin <= (samples / 2) && iBand < nBands)
{
float fLin1 = ((float)iBin + 0.5f) * df;
float fLog1 = bandFreq[iBand];
float x = (float)data[iBin];
if (fLin1 <= fLog1)
{
bandOut[iBand] += (fLin1 - f0) * x * scalar;
f0 = fLin1;
iBin += 1;
}
else
{
bandOut[iBand] += (fLog1 - f0) * x * scalar;
f0 = fLog1;
iBand += 1;
}
}
return bandOut;
}
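A hypothetical call of the two methods above; fftData, sampleRate and samples are assumed to exist on the enclosing class (they are not shown in the snippet), and GetMagnitudes is a hypothetical helper:
double[] fftData = GetMagnitudes();                       // e.g. the 1500 FFT magnitudes mentioned above
double[] bands = getLogArray(fftData, 100, 20, 20000);    // 100 log-spaced bands from 20 Hz to 20 kHz
for (int i = 0; i < bands.Length; i++)
    Console.WriteLine("band " + i + ": " + bands[i]);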
Have a nice day and sorry for the late response.

RSI vs Wilder's RSI Calculation Problems

I am having trouble getting a smoothed RSI. The below picture is from freestockcharts.com. The calculation uses this code.
public static double CalculateRsi(IEnumerable<double> closePrices)
{
var prices = closePrices as double[] ?? closePrices.ToArray();
double sumGain = 0;
double sumLoss = 0;
for (int i = 1; i < prices.Length; i++)
{
var difference = prices[i] - prices[i - 1];
if (difference >= 0)
{
sumGain += difference;
}
else
{
sumLoss -= difference;
}
}
if (sumGain == 0) return 0;
if (Math.Abs(sumLoss) < Tolerance) return 100;
var relativeStrength = sumGain / sumLoss;
return 100.0 - (100.0 / (1 + relativeStrength));
}
https://stackoverflow.com/questions/...th-index-using-some-programming-language-js-c
This seems to be the plain RSI with no smoothing. How does a smoothed RSI get calculated? I have tried changing it to fit the definitions on these two sites, however the output was not correct. It was barely smoothed.
(I don't have enough rep to post links)
tc2000 -> Indicators -> RSI_and_Wilder_s_RSI (Wilder's smoothing = Previous MA value + (1/n periods * (Close - Previous MA)))
priceactionlab -> wilders-cutlers-and-harris-relative-strength-index (RS = EMA(Gain(n), n)/EMA(Loss(n), n))
Can someone actually do the calculation with some sample data?
Wilder's RSI vs RSI
In order to calculate the RSI, you need a period to calculate it with. As noted on Wikipedia, 14 is used quite often.
So the calculation steps would be as follows (a compact code sketch of these steps appears right after them):
Period 1 - 13, RSI = 0
Period 14:
AverageGain = TotalGain / PeriodCount;
AverageLoss = TotalLoss / PeriodCount;
RS = AverageGain / AverageLoss;
RSI = 100 - 100 / (1 + RS);
Period 15 to period N:
if (Period(N)Change > 0)
    AverageGain(N) = ((AverageGain(N - 1) * (PeriodCount - 1)) + Period(N)Change) / PeriodCount;
else
    AverageGain(N) = (AverageGain(N - 1) * (PeriodCount - 1)) / PeriodCount;
if (Period(N)Change < 0)
    AverageLoss(N) = ((AverageLoss(N - 1) * (PeriodCount - 1)) + Math.Abs(Period(N)Change)) / PeriodCount;
else
    AverageLoss(N) = (AverageLoss(N - 1) * (PeriodCount - 1)) / PeriodCount;
RS = AverageGain / AverageLoss;
RSI = 100 - (100 / (1 + RS));
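For concreteness, here is a compact sketch of the steps above as a single C# method (my own code; the name WilderRsi is illustrative, the seeding and smoothing follow the pseudocode literally, and it assumes using System):
static double[] WilderRsi(double[] close, int period = 14)
{
    var rsi = new double[close.Length];          // periods 1..(period-1) stay 0, as in the steps above
    double avgGain = 0, avgLoss = 0;
    for (int i = 1; i < close.Length; i++)
    {
        double change = close[i] - close[i - 1];
        double gain = Math.Max(change, 0);
        double loss = Math.Max(-change, 0);
        if (i <= period)
        {
            // Build the simple-average seed over the first 'period' changes.
            avgGain += gain / period;
            avgLoss += loss / period;
            if (i < period) continue;
        }
        else
        {
            // Wilder smoothing: carry the previous average with weight (period - 1).
            avgGain = (avgGain * (period - 1) + gain) / period;
            avgLoss = (avgLoss * (period - 1) + loss) / period;
        }
        rsi[i] = avgLoss == 0 ? 100 : 100 - 100 / (1 + avgGain / avgLoss);
    }
    return rsi;
}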
Thereafter, to smooth the values, you need to apply a moving average of a certain period to your RSI values. To do that, traverse your RSI values from the last index to the first and calculate your average for the current period based on the preceding x smoothing periods.
Once done, just reverse the list of values to get the expected order:
List<double> SmoothedRSI(IEnumerable<double> rsiValues, int smoothingPeriod)
{
if (rsiValues.Count() <= smoothingPeriod)
throw new Exception("Smoothing period too large or too few RSI values passed.");
List<double> results = new List<double>();
List<double> reversedRSIValues = rsiValues.Reverse().ToList();
for (int i = 1; i < reversedRSIValues.Count() - smoothingPeriod - 1; i++)
results.Add(reversedRSIValues.Subset(i, i + smoothingPeriod).Average());
results.Reverse(); // List<T>.Reverse() reverses in place (it returns void, so it can't be chained)
return results;
}
The Subset method is just a simple extension method as follows:
public static List<double> Subset(this List<double> values, int start, int end)
{
List<double> results = new List<double>();
for (int i = start; i <= end; i++)
results.Add(values[i]);
return results;
}
Disclaimer, I did not test the code, but it should give you an idea of how the smoothing is applied.
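A hypothetical call of SmoothedRSI (values made up; assumes using System.Collections.Generic and System.Linq):
var rsiSeries = new List<double> { 55.2, 56.1, 54.8, 53.9, 55.0, 57.3, 58.1, 56.4, 55.7, 54.2 };
List<double> smoothed = SmoothedRSI(rsiSeries, 3);   // 3-period smoothing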
You can't get accurate values without buffers/global variables to store previous data.
This is a smoothed indicator, meaning it doesn't only use the last 14 bars but ALL the bars.
Here's a step-by-step article with working, verified source code that generates exactly the same values, provided the prices and the number of available bars are the same, of course (you only need to load the price data from your source).
Tested and verified:
using System;
using System.Data;
using System.Globalization;
namespace YourNameSpace
{
class PriceEngine
{
public static DataTable data;
public static double[] positiveChanges;
public static double[] negativeChanges;
public static double[] averageGain;
public static double[] averageLoss;
public static double[] rsi;
public static double CalculateDifference(double current_price, double previous_price)
{
return current_price - previous_price;
}
public static double CalculatePositiveChange(double difference)
{
return difference > 0 ? difference : 0;
}
public static double CalculateNegativeChange(double difference)
{
return difference < 0 ? difference * -1 : 0;
}
public static void CalculateRSI(int rsi_period, int price_index = 5)
{
for(int i = 0; i < PriceEngine.data.Rows.Count; i++)
{
double current_difference = 0.0;
if (i > 0)
{
double previous_close = Convert.ToDouble(PriceEngine.data.Rows[i-1].Field<string>(price_index));
double current_close = Convert.ToDouble(PriceEngine.data.Rows[i].Field<string>(price_index));
current_difference = CalculateDifference(current_close, previous_close);
}
PriceEngine.positiveChanges[i] = CalculatePositiveChange(current_difference);
PriceEngine.negativeChanges[i] = CalculateNegativeChange(current_difference);
if(i == Math.Max(1,rsi_period))
{
double gain_sum = 0.0;
double loss_sum = 0.0;
for(int x = Math.Max(1,rsi_period); x > 0; x--)
{
gain_sum += PriceEngine.positiveChanges[x];
loss_sum += PriceEngine.negativeChanges[x];
}
PriceEngine.averageGain[i] = gain_sum / Math.Max(1,rsi_period);
PriceEngine.averageLoss[i] = loss_sum / Math.Max(1,rsi_period);
}else if (i > Math.Max(1,rsi_period))
{
PriceEngine.averageGain[i] = ( PriceEngine.averageGain[i-1]*(rsi_period-1) + PriceEngine.positiveChanges[i]) / Math.Max(1, rsi_period);
PriceEngine.averageLoss[i] = ( PriceEngine.averageLoss[i-1]*(rsi_period-1) + PriceEngine.negativeChanges[i]) / Math.Max(1, rsi_period);
PriceEngine.rsi[i] = PriceEngine.averageLoss[i] == 0 ? 100 : PriceEngine.averageGain[i] == 0 ? 0 : Math.Round(100 - (100 / (1 + PriceEngine.averageGain[i] / PriceEngine.averageLoss[i])), 5);
}
}
}
public static void Launch()
{
PriceEngine.data = new DataTable();
//load {date, time, open, high, low, close} values in PriceEngine.data (6th column (index #5) = close price) here
positiveChanges = new double[PriceEngine.data.Rows.Count];
negativeChanges = new double[PriceEngine.data.Rows.Count];
averageGain = new double[PriceEngine.data.Rows.Count];
averageLoss = new double[PriceEngine.data.Rows.Count];
rsi = new double[PriceEngine.data.Rows.Count];
CalculateRSI(14);
}
}
}
For detailed step-by-step instructions, I wrote a lengthy article, you can check it here: https://turmanauli.medium.com/a-step-by-step-guide-for-calculating-reliable-rsi-values-programmatically-a6a604a06b77
P.S. Plain functions only work for simple indicators (e.g., Simple Moving Average); even an Exponential Moving Average or Average True Range absolutely requires stored previous values (buffers/global variables).

C# Exp cannot get result

When I use Math.Exp() in C# I have some questions. This code is about kernel density estimation, and I don't have any knowledge of kernel density estimation, so I looked up some wiki pages and papers.
I tried to write it in C#. The problem is that when "distance" gets larger the result becomes 0. This confuses me and I cannot find any other way to get the right result.
disExp = Math.Pow(Math.E, -(distance / 2 * Math.Pow(h, 2)));
So, can anyone help me find a solution, or give me some idea about kernel density estimation in C#? Sorry for my poor English.
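As a side note (my own observation, not from the original thread): the exponent in the snippet above likely suffers from operator precedence, since distance / 2 * Math.Pow(h, 2) means (distance / 2) * h^2 rather than distance / (2 * h^2), so the magnitude of the exponent explodes and Math.Exp underflows to 0. A minimal sketch of the usual Gaussian kernel exponent, assuming distance already holds the squared difference and h is the bandwidth:
// Hypothetical fix for the snippet above: parenthesize the denominator.
// Assumes 'distance' is the squared distance (x - xi)^2 and 'h' is the bandwidth.
double disExp = Math.Exp(-distance / (2 * h * h));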
Try this
public static double[,] KernelDensityEstimation(double[] data, double sigma, int nsteps)
{
// Probability density function (PDF) estimation; works like ksdensity in MATLAB.
// Performs kernel density estimation (KDE) on one-dimensional data.
// http://en.wikipedia.org/wiki/Kernel_density_estimation
// Input:  -data:   input data, one-dimensional
//         -sigma:  bandwidth (sometimes called "h")
//         -nsteps: number of abscissa points (default 100)
// Output: -x: equispaced abscissa points
//         -y: estimates of p(x)
// This function is part of the Kernel Methods Toolbox (KMBOX) for MATLAB.
// http://sourceforge.net/p/kmbox
// Converted to C# code by ksandric
double[,] result = new double[nsteps, 2];
double[] x = new double[nsteps], y = new double[nsteps];
double MAX = Double.MinValue, MIN = Double.MaxValue;
int N = data.Length; // number of data points
// Find MIN MAX values in data
for (int i = 0; i < N; i++)
{
if (MAX < data[i])
{
MAX = data[i];
}
if (MIN > data[i])
{
MIN = data[i];
}
}
// Like MATLAB linspace(MIN, MAX, nsteps);
x[0] = MIN;
for (int i = 1; i < nsteps; i++)
{
x[i] = x[i - 1] + ((MAX - MIN) / nsteps);
}
// kernel density estimation
double c = 1.0 / (Math.Sqrt(2 * Math.PI * sigma * sigma));
for (int i = 0; i < N; i++)
{
for (int j = 0; j < nsteps; j++)
{
y[j] = y[j] + 1.0 / N * c * Math.Exp(-(data[i] - x[j]) * (data[i] - x[j]) / (2 * sigma * sigma));
}
}
// compilation of the X,Y to result. Good for creating plot(x, y)
for (int i = 0; i < nsteps; i++)
{
result[i, 0] = x[i];
result[i, 1] = y[i];
}
return result;
}
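A minimal usage sketch of the method above (the sample data and bandwidth are made up; assumes using System and that the method is accessible, e.g. from the same class):
double[] samples = { 1.0, 1.2, 0.9, 3.1, 3.0, 2.8, 5.5 };
double[,] pdf = KernelDensityEstimation(samples, sigma: 0.5, nsteps: 100);
for (int i = 0; i < pdf.GetLength(0); i++)
{
    Console.WriteLine("x = " + pdf[i, 0] + ", p(x) = " + pdf[i, 1]);  // suitable for plotting (x, y)
}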
(Plot: kernel density estimation in C#)

C# - Index was out of range

I am trying to convert a C++ class to C# and in the process learn something of C++. I had never run into a vector<> before, and my understanding is that it is like a List<> in C#. During the conversion I rewrote the code using List<double> futures_prices = new List<double>(Convert.ToInt32(no_steps) + 1);. As soon as I run the code, I get an "Index was out of range" error.
Having looked around on Stack Overflow, I believe the issue is that the index is out of range for the list, but I do not see a simple way to solve this with the code below.
In particular, this is the line that is triggering the error: futures_prices[0] = spot_price * Math.Pow(d, no_steps);
Below is the full code:
public double futures_option_price_call_american_binomial(double spot_price, double option_strike, double r, double sigma, double time, double no_steps)
{
//double spot_price, // price futures contract
//double option_strike, // exercise price
//double r, // interest rate
//double sigma, // volatility
//double time, // time to maturity
//int no_steps
List<double> futures_prices = new List<double>(Convert.ToInt32(no_steps) + 1);
//(no_steps+1);
//double call_values = (no_steps+1);
List<double> call_values = new List<double>(Convert.ToInt32(no_steps) + 1);
double t_delta = time/no_steps;
double Rinv = Math.Exp(-r*(t_delta));
double u = Math.Exp(sigma * Math.Sqrt(t_delta));
double d = 1.0/u;
double uu= u*u;
double pUp = (1-d)/(u-d); // note how probability is calculated
double pDown = 1.0 - pUp;
futures_prices[0] = spot_price * Math.Pow(d, no_steps);
int i;
for (i=1; i<=no_steps; ++i) futures_prices[i] = uu*futures_prices[i-1]; // terminal tree nodes
for (i=0; i<=no_steps; ++i) call_values[i] = Math.Max(0.0, (futures_prices[i]-option_strike));
for (int step = Convert.ToInt32(no_steps) - 1; step >= 0; --step)
{
for (i = 0; i <= step; ++i)
{
futures_prices[i] = d * futures_prices[i + 1];
call_values[i] = (pDown * call_values[i] + pUp * call_values[i + 1]) * Rinv;
call_values[i] = Math.Max(call_values[i], futures_prices[i] - option_strike); // check for exercise
};
};
return call_values[0];
}
Here is the original source in C++:
double futures_option_price_call_american_binomial(const double& F, // price futures contract
const double& K, // exercise price
const double& r, // interest rate
const double& sigma, // volatility
const double& time, // time to maturity
const int& no_steps) { // number of steps
vector<double> futures_prices(no_steps+1);
vector<double> call_values (no_steps+1);
double t_delta= time/no_steps;
double Rinv = exp(-r*(t_delta));
double u = exp(sigma*sqrt(t_delta));
double d = 1.0/u;
double uu= u*u;
double pUp = (1-d)/(u-d); // note how probability is calculated
double pDown = 1.0 - pUp;
futures_prices[0] = F*pow(d, no_steps);
int i;
for (i=1; i<=no_steps; ++i) futures_prices[i] = uu*futures_prices[i-1]; // terminal tree nodes
for (i=0; i<=no_steps; ++i) call_values[i] = max(0.0, (futures_prices[i]-K));
for (int step=no_steps-1; step>=0; --step) {
for (i=0; i<=step; ++i) {
futures_prices[i] = d*futures_prices[i+1];
call_values[i] = (pDown*call_values[i]+pUp*call_values[i+1])*Rinv;
call_values[i] = max(call_values[i], futures_prices[i]-K); // check for exercise
};
};
return call_values[0];
};
A List<double> starts out empty until you add items to it. (passing the constructor argument just sets the capacity, preventing costly resizes)
You can't access [0] until you Add() it.
To use it the way you are, use an array instead.
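To illustrate the difference between a list's capacity and its count, here is a small snippet of my own (assumes using System.Collections.Generic):
var list = new List<double>(10);   // capacity 10, but Count == 0
// double x = list[0];             // would throw ArgumentOutOfRangeException
list.Add(1.0);                     // now Count == 1 and list[0] is valid

var array = new double[10];        // length 10, array[0]..array[9] usable immediately
array[0] = 1.0;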
As SLaks says, it's better to use an array in this situation. C# lists are filled with the Add method and have values removed through the Remove method; using one here would be more complicated and more expensive in memory and performance, since you are also replacing values.
public Double FuturesOptionPriceCallAmericanBinomial(Double spotPrice, Double optionStrike, Double r, Double sigma, Double time, Double steps)
{
// Avoid calling Convert multiple times as it can be quite performance expensive.
Int32 stepsInteger = Convert.ToInt32(steps);
Double[] futurePrices = new Double[(stepsInteger + 1)];
Double[] callValues = new Double[(stepsInteger + 1)];
Double tDelta = time / steps;
Double rInv = Math.Exp(-r * (tDelta));
Double u = Math.Exp(sigma * Math.Sqrt(tDelta));
Double d = 1.0 / u;
Double uu = u * u;
Double pUp = (1 - d) / (u - d);
Double pDown = 1.0 - pUp;
futurePrices[0] = spotPrice * Math.Pow(d, steps);
for (Int32 i = 1; i <= steps; ++i)
futurePrices[i] = uu * futurePrices[(i - 1)];
for (Int32 i = 0; i <= steps; ++i)
callValues[i] = Math.Max(0.0, (futurePrices[i] - optionStrike));
for (Int32 step = stepsInteger - 1; step >= 0; --step)
{
for (Int32 i = 0; i <= step; ++i)
{
futurePrices[i] = d * futurePrices[(i + 1)];
callValues[i] = ((pDown * callValues[i]) + (pUp * callValues[i + 1])) * rInv;
callValues[i] = Math.Max(callValues[i], (futurePrices[i] - optionStrike)); // check for exercise
}
}
return callValues[0];
}
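A hypothetical call of the method above (the numbers are made up, and it is assumed the method is reachable, e.g. from within the same class):
double price = FuturesOptionPriceCallAmericanBinomial(
    spotPrice: 100.0, optionStrike: 95.0, r: 0.05, sigma: 0.2, time: 0.5, steps: 500.0);
Console.WriteLine("American futures call price: " + price);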

FFT algorithm getting wrong sound frequency value

I have run this FFT algorithm on a 440 Hz audio file, but I get an unexpected sound frequency: 510 Hz.
Is the byte array containing the .wav correctly converted into 2 double arrays (Re & Im parts)? The imaginary array contains only 0.
I assume that the dominant sound frequency is the maximum of the xRe array: please look at the very end of the run() function. Maybe that is my mistake: should it be an average or something like that?
What is the problem then?
UPDATE: The biggest sum Re+Im is at index = 0, so I get frequency = 0.
Whole project: contains .wav -> just open and run.
using System;
using System.Net;
using System.IO;
namespace FFT {
/**
* Performs an in-place complex FFT.
*
* Released under the MIT License
*
* Copyright (c) 2010 Gerald T. Beauregard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
public class FFT2 {
// Element for linked list in which we store the
// input/output data. We use a linked list because
// for sequential access it's faster than array index.
class FFTElement {
public double re = 0.0; // Real component
public double im = 0.0; // Imaginary component
public FFTElement next; // Next element in linked list
public uint revTgt; // Target position post bit-reversal
}
private static int sampleRate;
private uint m_logN = 0; // log2 of FFT size
private uint m_N = 0; // FFT size
private FFTElement[] m_X; // Vector of linked list elements
/**
*
*/
public FFT2() {
}
/**
* Initialize class to perform FFT of specified size.
*
* #param logN Log2 of FFT length. e.g. for 512 pt FFT, logN = 9.
*/
public void init(uint logN) {
m_logN = logN;
m_N = (uint)(1 << (int)m_logN);
// Allocate elements for linked list of complex numbers.
m_X = new FFTElement[m_N];
for (uint k = 0; k < m_N; k++)
m_X[k] = new FFTElement();
// Set up "next" pointers.
for (uint k = 0; k < m_N - 1; k++)
m_X[k].next = m_X[k + 1];
// Specify target for bit reversal re-ordering.
for (uint k = 0; k < m_N; k++)
m_X[k].revTgt = BitReverse(k, logN);
}
/**
* Performs in-place complex FFT.
*
* #param xRe Real part of input/output
* #param xIm Imaginary part of input/output
* #param inverse If true, do an inverse FFT
*/
public void run(double[] xRe, double[] xIm, bool inverse = false) {
uint numFlies = m_N >> 1; // Number of butterflies per sub-FFT
uint span = m_N >> 1; // Width of the butterfly
uint spacing = m_N; // Distance between start of sub-FFTs
uint wIndexStep = 1; // Increment for twiddle table index
// Copy data into linked complex number objects
// If it's an IFFT, we divide by N while we're at it
FFTElement x = m_X[0];
uint k = 0;
double scale = inverse ? 1.0 / m_N : 1.0;
while (x != null) {
x.re = scale * xRe[k];
x.im = scale * xIm[k];
x = x.next;
k++;
}
// For each stage of the FFT
for (uint stage = 0; stage < m_logN; stage++) {
// Compute a multiplier factor for the "twiddle factors".
// The twiddle factors are complex unit vectors spaced at
// regular angular intervals. The angle by which the twiddle
// factor advances depends on the FFT stage. In many FFT
// implementations the twiddle factors are cached, but because
// array lookup is relatively slow in C#, it's just
// as fast to compute them on the fly.
double wAngleInc = wIndexStep * 2.0 * Math.PI / m_N;
if (inverse == false)
wAngleInc *= -1;
double wMulRe = Math.Cos(wAngleInc);
double wMulIm = Math.Sin(wAngleInc);
for (uint start = 0; start < m_N; start += spacing) {
FFTElement xTop = m_X[start];
FFTElement xBot = m_X[start + span];
double wRe = 1.0;
double wIm = 0.0;
// For each butterfly in this stage
for (uint flyCount = 0; flyCount < numFlies; ++flyCount) {
// Get the top & bottom values
double xTopRe = xTop.re;
double xTopIm = xTop.im;
double xBotRe = xBot.re;
double xBotIm = xBot.im;
// Top branch of butterfly has addition
xTop.re = xTopRe + xBotRe;
xTop.im = xTopIm + xBotIm;
// Bottom branch of butterly has subtraction,
// followed by multiplication by twiddle factor
xBotRe = xTopRe - xBotRe;
xBotIm = xTopIm - xBotIm;
xBot.re = xBotRe * wRe - xBotIm * wIm;
xBot.im = xBotRe * wIm + xBotIm * wRe;
// Advance butterfly to next top & bottom positions
xTop = xTop.next;
xBot = xBot.next;
// Update the twiddle factor, via complex multiply
// by unit vector with the appropriate angle
// (wRe + j wIm) = (wRe + j wIm) x (wMulRe + j wMulIm)
double tRe = wRe;
wRe = wRe * wMulRe - wIm * wMulIm;
wIm = tRe * wMulIm + wIm * wMulRe;
}
}
numFlies >>= 1; // Divide by 2 by right shift
span >>= 1;
spacing >>= 1;
wIndexStep <<= 1; // Multiply by 2 by left shift
}
// The algorithm leaves the result in a scrambled order.
// Unscramble while copying values from the complex
// linked list elements back to the input/output vectors.
x = m_X[0];
while (x != null) {
uint target = x.revTgt;
xRe[target] = x.re;
xIm[target] = x.im;
x = x.next;
}
//looking for max IS THIS IS FREQUENCY
double max = 0, index = 0;
for (int i = 0; i < xRe.Length; i++) {
if (xRe[i] + xIm[i] > max) {
max = xRe[i]*xRe[i] + xIm[i]*xIm[i];
index = i;
}
}
max = Math.Sqrt(max);
/* if the peak is at bin index i then the corresponding
frequency will be i * Fs / N where Fs is the sample rate in Hz and N is the FFT size.*/
//DONT KNOW WHY THE BIGGEST VALUE IS IN THE BEGINNING
Console.WriteLine("max "+ max+" index " + index + " m_logN" + m_logN + " " + xRe[0]);
max = index * sampleRate / m_logN;
Console.WriteLine("max " + max);
}
/**
* Do bit reversal of specified number of places of an int
* For example, 1101 bit-reversed is 1011
*
* #param x Number to be bit-reverse.
* #param numBits Number of bits in the number.
*/
private uint BitReverse(
uint x,
uint numBits) {
uint y = 0;
for (uint i = 0; i < numBits; i++) {
y <<= 1;
y |= x & 0x0001;
x >>= 1;
}
return y;
}
public static void Main(String[] args) {
// BinaryReader reader = new BinaryReader(System.IO.File.OpenRead(@"C:\Users\Duke\Desktop\e.wav"));
BinaryReader reader = new BinaryReader(File.Open(@"440.wav", FileMode.Open));
//Read the wave file header from the buffer.
int chunkID = reader.ReadInt32();
int fileSize = reader.ReadInt32();
int riffType = reader.ReadInt32();
int fmtID = reader.ReadInt32();
int fmtSize = reader.ReadInt32();
int fmtCode = reader.ReadInt16();
int channels = reader.ReadInt16();
sampleRate = reader.ReadInt32();
int fmtAvgBPS = reader.ReadInt32();
int fmtBlockAlign = reader.ReadInt16();
int bitDepth = reader.ReadInt16();
if (fmtSize == 18) {
// Read any extra values
int fmtExtraSize = reader.ReadInt16();
reader.ReadBytes(fmtExtraSize);
}
int dataID = reader.ReadInt32();
int dataSize = reader.ReadInt32();
// Store the audio data of the wave file to a byte array.
byte[] byteArray = reader.ReadBytes(dataSize);
/* for (int i = 0; i < byteArray.Length; i++) {
Console.Write(byteArray[i] + " ");
}*/
byte[] data = byteArray;
double[] arrRe = new double[data.Length];
for (int i = 0; i < arrRe.Length; i++) {
arrRe[i] = data[i] / 32768.0;
}
double[] arrI = new double[data.Length];
for (int i = 0; i < arrRe.Length; i++) {
arrI[i] = 0;
}
/**
* Initialize class to perform FFT of specified size.
*
* #param logN Log2 of FFT length. e.g. for 512 pt FFT, logN = 9.
*/
Console.WriteLine();
FFT2 fft2 = new FFT2();
uint logN = (uint)Math.Log(data.Length, 2);
fft2.init(logN);
fft2.run(arrRe, arrI);
// After this you have to split that byte array for each channel (Left,Right)
// Wav supports many channels, so you have to read channel from header
Console.ReadLine();
}
}
}
There are a few things that you need to address (a short sketch of these fixes follows this list):
- You are not applying a window function prior to the FFT. This will result in spectral leakage in the general case, and you may get misleading results, particularly when looking for peaks, as there will be "smearing" of the spectrum.
- When looking for peaks, you should be looking at the magnitude of the FFT output bins, not the individual real and imaginary parts: magnitude = sqrt(re^2 + im^2) (although you don't need the sqrt if you're just looking for peaks).
- Having identified a peak, you need to convert the bin index into a frequency: if the peak is at bin index i then the corresponding frequency will be i * Fs / N, where Fs is the sample rate in Hz and N is the FFT size.
- For a real-to-complex FFT you can ignore the second N/2 output bins, as they are just the complex conjugate mirror image of the first N/2 bins.
(See also this answer for fuller explanations of the above.)
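A minimal sketch of those steps (my own code, not from the original answer), assuming samples is a double[] of power-of-two length, sampleRate is known, and fft is an FFT2 instance already initialized with fft.init(logN):
// Window the samples, run the FFT, then pick the peak magnitude in the first N/2 bins.
double[] re = samples;                    // real input, length N (power of two)
double[] im = new double[re.Length];      // imaginary part starts at zero
int n = re.Length;

// Hann window to reduce spectral leakage.
for (int i = 0; i < n; i++)
    re[i] *= 0.5 * (1.0 - Math.Cos(2.0 * Math.PI * i / (n - 1)));

fft.run(re, im);

// Compare squared magnitudes; skip bin 0 (DC) and the mirrored upper half.
int peakBin = 0;
double peakMag = 0.0;
for (int i = 1; i < n / 2; i++)
{
    double mag = re[i] * re[i] + im[i] * im[i];
    if (mag > peakMag) { peakMag = mag; peakBin = i; }
}

double peakFrequencyHz = (double)peakBin * sampleRate / n;   // bin index -> Hz
Console.WriteLine("Peak at " + peakFrequencyHz + " Hz");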
