Is there any documentation on what RNG algorithm PowerShell's Get-Random cmdlet officially uses in PowerShell 5.1?
I did some investigating (via decompiling), and it seems Get-Random is just a wrapper for the native .NET Random class. I can confirm this by getting the same values on PowerShell 2.0 (Windows 7) vs C# (targeting .NET 4.5.2). However, Powershell 5.1 (Windows 10) seems to output different numbers.
PowerShell 2.0:
Get-Random -SetSeed 0 -Minimum 0 -Maximum 2147483647
# Produces 1559595546
PowerShell 5.1:
Get-Random -SetSeed 0 -Minimum 0 -Maximum 2147483647
# Produces: 1866861594
C#:
new Random(0).Next(0, 2147483647);
# Produces 1559595546
I did read that after PowerShell 2.0, Get-Random is supposed to support 64-bit numbers, but I set the minimum and maximum above to the 32-bit range for proper testing. Even different seeds, or altering the ranges to something like [0, 100] still yields different results on PowerShell 5.1.
My end goal is basically trying to reproduce random numbers produced in PowerShell 5.1 in either C++ or C# for sake of performance. I already have the C# Random class translated to C++.
You can view PowerShell's Get-Random implementation on GitHub.
Comments in the source code show it is using its own generator, with some deviations from the .NET / CLR implementation.
In particular, it has its own PolymorphicRandomNumberGenerator class that provides a "re-implementation" of methods using the NextBytes() primitive based on the CLR implementation:
/// <summary>
/// Provides an adapter API for random numbers that may be either cryptographically random, or
/// generated with the regular pseudo-random number generator. Re-implementations of
/// methods using the NextBytes() primitive based on the CLR implementation:
/// http://referencesource.microsoft.com/#mscorlib/system/random.cs
/// </summary>
internal class PolymorphicRandomNumberGenerator
For example:
/// <summary>
/// Generates a non-negative random integer.
/// </summary>
/// <returns>A non-negative random integer.</returns>
internal int Next()
{
int result;
// The CLR implementation just fudges
// Int32.MaxValue down to (Int32.MaxValue - 1). This implementation
// errs on the side of correctness.
do
{
// InternalSample() is byte-based and can return any Int32, including
// negatives and Int32.MaxValue (rejected by the loop condition).
result = InternalSample();
}
while (result == Int32.MaxValue);
if (result < 0)
{
// Fold negative samples into the non-negative range [0, Int32.MaxValue).
result += Int32.MaxValue;
}
return result;
}
The PowerShell implementation, while using the same underlying System.Random, will use different methods to generate the random values depending on the input. With your issue, the PowerShell implementation does this:
// PowerShell 5.x draws 4 raw bytes from the seeded System.Random and
// reinterprets them as an Int32, which is why its output differs from
// Random.Next(min, max). (The original snippet used JavaScript's
// console.log, which is not valid C#.)
var rnd = new Random(0);
int result;
byte[] data = new byte[sizeof(int)];
rnd.NextBytes(data);
result = BitConverter.ToInt32(data, 0);
Console.WriteLine("result = {0}", result);
// result = 1866861594
This does not match the output of:
// The classic .NET Random.Next(min, max) path (what PowerShell 2.0 used)
// yields a different first value for the same seed. (The original snippet
// declared 'rresult' but printed an undefined 'result', and used
// JavaScript's console.log.)
var result = new Random(0).Next(0, int.MaxValue);
Console.WriteLine("result = {0}", result);
// result = 1559595546
Here's my ported C++ code for the PowerShell 5.0 PRNG, if it's of any use to anyone else searching. Confirmed it produces the same numbers as PowerShell 5.1 on Windows 10.
It utilizes my Random class that is a ported version of the .NET RNG, which I separated a bit to make both inherit from a common interface (Random.h) and renamed to RandomDotNet: https://stackoverflow.com/a/39338606/1301139
Random.h
#include <limits>
#include <Windows.h>
#pragma once
// Abstract interface shared by the .NET-style PRNG port (RandomDotNet) and
// the PowerShell 5 wrapper (RandomPS5), mirroring System.Random's surface.
class Random
{
public:
virtual ~Random() {}
// Returns a non-negative pseudo-random integer.
virtual int Next() = 0;
// Returns a pseudo-random integer in [minValue, maxValue).
virtual int Next(int minValue, int maxValue) = 0;
// Returns a pseudo-random integer in [0, maxValue).
virtual int Next(int maxValue) = 0;
// Fills buffer with bufferLen pseudo-random bytes.
// NOTE(review): non-pure with an empty default body, unlike the other
// members -- presumably so implementations without a byte primitive can
// omit it; confirm before relying on the base behavior.
virtual void NextBytes(BYTE *buffer, int bufferLen) {};
// Returns a pseudo-random double in [0.0, 1.0).
virtual double NextDouble() = 0;
};
RandomPS5.h
#include <limits>
#include <Windows.h>
#include "Random.h"
#pragma once
// Port of PowerShell 5.x's Get-Random generator (the pseudo-random mode of
// PolymorphicRandomNumberGenerator): every value is derived from the
// NextBytes() primitive of an underlying .NET-compatible Random rather
// than from Random.Next() directly, which is why PS 5.1 output differs
// from PS 2.0 / System.Random for the same seed.
class RandomPS5 : public Random
{
protected:
// Sample used when the requested range exceeds Int32.MaxValue.
double InternalSampleLargeRange();
// Draws 4 bytes and reinterprets them as a (possibly negative) int.
int InternalSample();
// Little-endian 4-byte reassembly (BitConverter.ToInt32 equivalent).
int BytesToInt(BYTE *dword);
// Owned, seeded generator supplying the raw byte stream.
Random *pseudoGenerator;
public:
RandomPS5(int seed);
~RandomPS5();
int Next();
int Next(int minValue, int maxValue);
int Next(int maxValue);
double NextDouble();
void NextBytes(BYTE *buffer, int bufferLen);
};
RandomPS5.cpp
#include "stdafx.h"
#include "RandomPS5.h"
#include "RandomDotNet.h"
#include <limits.h>
#include <math.h>
#include <stdexcept>
#include <string>
// Little-endian equivalent of BitConverter.ToInt32. The bytes are
// assembled in an unsigned type so shifting a high byte into bit 31 is
// well-defined; the original signed shift-and-add overflowed for
// b[3] >= 0x80, which is undefined behavior. The final unsigned->signed
// conversion is two's-complement on all supported compilers, preserving
// the original observable results.
int RandomPS5::BytesToInt(BYTE *b) {
unsigned int u = (unsigned int)b[0]
| ((unsigned int)b[1] << 8)
| ((unsigned int)b[2] << 16)
| ((unsigned int)b[3] << 24);
return (int)u;
}
// Seeds the underlying .NET-compatible PRNG; the same seed reproduces the
// same PowerShell 5.1 Get-Random sequence.
RandomPS5::RandomPS5(int seed) {
pseudoGenerator = new RandomDotNet(seed);
}
// Releases the owned byte-stream generator.
RandomPS5::~RandomPS5(){
delete pseudoGenerator;
}
// Returns a double in [0.0, 1.0) by scaling a non-negative sample.
// NOTE: the multiply-by-reciprocal form is kept verbatim -- replacing it
// with a division could change the last bit of the result and break exact
// reproduction of PowerShell's output.
double RandomPS5::NextDouble() {
return Next() * (1.0 / 0x7FFFFFFF);
}
// Produces a non-negative pseudo-random integer, mirroring PowerShell's
// re-implementation: resample while the raw draw is INT_MAX, then map
// negative samples into the non-negative range.
int RandomPS5::Next() {
int sample = InternalSample();
while (sample == 0x7FFFFFFF) {
sample = InternalSample();
}
return (sample < 0) ? sample + 0x7FFFFFFF : sample;
}
// Returns a pseudo-random integer in [0, maxValue); rejects negative bounds.
int RandomPS5::Next(int maxValue) {
if (maxValue >= 0) {
return Next(0, maxValue);
}
throw std::invalid_argument("maxValue must be positive");
}
// Returns a pseudo-random integer in [minValue, maxValue), mirroring
// PowerShell 5: small ranges scale NextDouble(); ranges wider than
// Int32.MaxValue scale a large-range sample instead.
int RandomPS5::Next(int minValue, int maxValue) {
if (minValue > maxValue)
{
throw std::invalid_argument("minValue is larger than maxValue");
}
// 'long' is 32-bit on Windows (LLP64), so the original 'long range'
// overflowed for spans wider than INT_MAX and the large-range branch
// could never be reached correctly; use a 64-bit type.
long long range = (long long)maxValue - (long long)minValue;
if (range <= 0x7FFFFFFF)
{
return ((int)(NextDouble() * range) + minValue);
}
else
{
// The original scale factor '2 * 0x7FFFFFFF' overflowed int (undefined
// behavior, wrapping to -2 in practice); compute it in double instead.
double largeSample = this->InternalSampleLargeRange() * (1.0 / (2.0 * 0x7FFFFFFF));
int result = (int)((long long)(largeSample * range) + minValue);
return result;
}
}
// Draws sizeof(int) raw bytes from the underlying generator and
// reinterprets them as a (possibly negative) little-endian int.
// Uses a fixed stack buffer: the original heap-allocated 4 bytes per
// sample with an unchecked malloc, paying an allocation on every draw
// and risking a NULL dereference on allocation failure.
int RandomPS5::InternalSample() {
BYTE data[sizeof(int)];
this->NextBytes(data, sizeof(int));
return BytesToInt(data);
}
// Produces a sample in [0, 2*INT_MAX) for ranges wider than Int32.MaxValue:
// rejects raw INT_MAX draws, then shifts the signed sample into the
// non-negative range. The double/int comparison is exact because any int
// converts to double without loss.
double RandomPS5::InternalSampleLargeRange() {
double result;
do{
result = this->InternalSample();
} while (result == 0x7FFFFFFF);
result += 0x7FFFFFFF;
return result;
}
// Delegates byte generation to the owned seeded generator.
void RandomPS5::NextBytes(BYTE *buffer, int bufferLen) {
pseudoGenerator->NextBytes(buffer, bufferLen);
}
Main.cpp
#include "RandomDotNet.h"
#include "RandomPS5.h"
#include <Windows.h>
// Length of charset string
#define CHARSETLEN 62
// Random charset
const char charset[CHARSETLEN + 1] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
// Function that processes a record like PowerShell does for -ObjectList:
// reservoir-style selection over the charset followed by an in-place
// shuffle. The exact order and count of r->Next() calls is what makes the
// output match Get-Random -SetSeed on PS 5.1 (verified by the author), so
// the control flow here must not be rearranged.
void processRecord(CHAR *string, int len, Random *r) {
// Processed characters
int processed = 0;
int i, indexToReplace;
CHAR temp;
// Iterate the charset
for (i = 0; i < CHARSETLEN; ++i) {
if (processed < len) {
// Fill phase: take the first 'len' items verbatim.
string[processed] = charset[i];
}
else if (r->Next(processed + 1) < len) {
// Replacement phase: item i survives with probability
// len/(processed+1); on acceptance a second draw picks the slot
// it overwrites.
string[r->Next(len)] = charset[i];
}
++processed;
}
// Iterate selected items to return them in "random" order
for (i = 0; i < len; ++i) {
// Get random index
indexToReplace = r->Next(i, len);
if (i != indexToReplace) {
// Swap
temp = string[i];
string[i] = string[indexToReplace];
string[indexToReplace] = temp;
}
}
// Terminate the string
string[len] = '\0';
}
// Demo driver: reproduces PowerShell 5.1's seeded Get-Random string.
int main(int argc, char* argv[]){
// Example usage with a given seed
Random *r = new RandomPS5(1000);
// Length of random string
int len = 49;
// Random string buffer (+1 for the terminator)
CHAR *buffer = (CHAR*)malloc(len + 1);
if (buffer == NULL) {
// Allocation failure: bail out instead of writing through NULL.
delete r;
return 1;
}
// ([char[]](Get-Random -Input $(48..57 + 65..90 + 97..122) -Count 49 -SetSeed 1000)) -Join ""
processRecord(buffer, len, r);
// Produces: y6FLfcKrpINqgP25GXS7Z0dVBmJOzntlQ3hjbHMAU1ExkewWY
printf("Random string: %s", buffer);
// Release the buffer (the original leaked it) and the generator.
free(buffer);
delete r;
return 0;
}
Related
I am using Hybridizer to total a FloatResidentArray and I am not able to return the calculated total to the device (or host) because of the need for a ref statement in the final AtomicExpr.apply statement.
Consider the following code which is based on the GenericReduce example provided by Altimesh.
The code takes a device resident array a, of float of length N and calculates the total – this value is placed in total[0].
// Block-wide tree reduction into shared memory, then one atomic add per
// block folds each partial sum into total[0].
[Kernel]
public static void Total(FloatResidentArray a, int N, float[] total)
{
// One float of shared memory per thread in the block.
var cache = new SharedMemoryAllocator<float>().allocate(blockDim.x);
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int cacheIndex = threadIdx.x;
float sum = 0f;
// Grid-stride accumulation: each thread sums every
// (gridDim.x * blockDim.x)-th element of a.
while (tid < N)
{
sum = sum + a[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = sum;
CUDAIntrinsics.__syncthreads();
// Pairwise halving reduction within the block; assumes blockDim.x is a
// power of two -- TODO confirm at the launch configuration.
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] = cache[cacheIndex] + cache[cacheIndex + i];
}
CUDAIntrinsics.__syncthreads();
i >>= 1;
}
if (cacheIndex == 0)
{
// Thread 0 publishes the block's partial sum atomically.
AtomicExpr.apply(ref total[0], cache[0], (x, y) => x + y);
}
}
The above code does not compile because you cannot pass a float[] and a FloatResidentArray in the same parameter list.
If total is defined as a FloatResidentArray itself, then the compiler will not allow the ref keyword to be used in the final line of code.
If I simply pass a float, then the returned variable is not updated with the total.
If I pass a ref float - then the program throws a runtime error at the point where the HybRunner wraps the above code to create the dynamic – the error message is
Value types by reference are not supported
How do I return the total? –either to Device or Host memory – both are acceptable.
Well, you need to understand how marshalling works
Objects and arrays (even resident arrays) are all host-side when created in .NET.
Then we marshal them (pin host memory, allocate device memory and copy host to device) right before kernel execution.
For a float[], that will be done automatically
For an IntPtr, we do nothing and the user has to ensure the IntPtr is a valid device pointer containing the data
For a resident array, we do nothing and the user has to manually call RefreshDevice() and RefreshHost when she wants to get the data back and forth.
Mixing ResidentArray and float[] is supported, as shown in this screenshot of the generated dll:
What is not supported is : mixing managed types and IntPtr.
Here is a complete version of your code working, and returning the correct result:
using Hybridizer.Runtime.CUDAImports;
using System;
using System.Runtime.InteropServices;
namespace SimpleMetadataDecorator
{
class Program
{
// Block reduction into shared memory, then one atomic add per block
// accumulates each block's partial sum into total[0].
[EntryPoint]
public static void Total(FloatResidentArray a, int N, float[] total)
{
var cache = new SharedMemoryAllocator<float>().allocate(blockDim.x);
int tid = threadIdx.x + blockDim.x * blockIdx.x;
int cacheIndex = threadIdx.x;
float sum = 0f;
// Grid-stride accumulation over a.
while (tid < N)
{
sum = sum + a[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = sum;
CUDAIntrinsics.__syncthreads();
// Pairwise halving reduction within the block.
int i = blockDim.x / 2;
while (i != 0)
{
if (cacheIndex < i)
{
cache[cacheIndex] = cache[cacheIndex] + cache[cacheIndex + i];
}
CUDAIntrinsics.__syncthreads();
i >>= 1;
}
if (cacheIndex == 0)
{
AtomicExpr.apply(ref total[0], cache[0], (x, y) => x + y);
}
}
static void Main(string[] args)
{
const int N = 1024 * 1024 * 32;
FloatResidentArray arr = new FloatResidentArray(N);
// Plain float[] output: Hybridizer marshals it automatically.
float[] res = new float[1];
for (int i = 0; i < N; ++i)
{
arr[i] = 1.0F;
}
// Resident arrays must be pushed to the device explicitly.
arr.RefreshDevice();
var runner = HybRunner.Cuda();
cudaDeviceProp prop;
cuda.GetDeviceProperties(out prop, 0);
// 128 threads/block, 128 floats of dynamic shared memory per block.
runner.SetDistrib(16 * prop.multiProcessorCount, 1, 128, 1, 1, 128 * sizeof(float));
var wrapped = runner.Wrap(new Program());
runner.saveAssembly();
cuda.ERROR_CHECK((cudaError_t)(int)wrapped.Total(arr, N, res));
cuda.ERROR_CHECK(cuda.DeviceSynchronize());
// Expected: 33554432 (N ones summed).
Console.WriteLine(res[0]);
}
}
}
Currently I am in the middle of writing a program to find even Fibonacci numbers in a Windows Form Application(WPA), with user input.
When I execute my program, I come with different data in contrast to the test data that I have.
For example, When I type 100,000 as input, the output I am getting is 5500034 but it should be 60696.
The code of my program is as follows:
// NOTE(review): this is the asker's buggy original, kept verbatim.
int val1 = 1;
int val2 = 2;
Int64 evenTerms = 2;
// BUG: overwrites the second Fibonacci term with the user's input, so the
// sequence no longer starts from a valid Fibonacci pair.
val2 = int.Parse(textBox3.Text);
// BUG: loops to the hard-coded 5000000, ignoring the user's limit.
while (val2 < 5000000)
{
int temp = val1;
val1 = val2;
val2 = temp + val2;
if (val2 % 2 == 0)
{
evenTerms += val2;
}
}
// BUG: displays the last Fibonacci value instead of evenTerms.
MessageBox.Show("" + val2);
Can anyone help me sort out the problem?
Thanks.
I suggest using generator to enumerate all the Fibonacci numbers:
// Lazily yields the Fibonacci sequence 0, 1, 1, 2, 3, ... without end;
// callers bound it with TakeWhile/Take.
public static IEnumerable<long> FiboGen() {
long prev = 0;
long curr = 1;
yield return prev;
yield return curr;
while (true) {
long next = prev + curr;
yield return next;
prev = curr;
curr = next;
}
}
and then Linq to sum up the required values only:
int limit = int.Parse(textBox3.Text);
// 60696 for the 100000 limit
// 4613732 for the 5000000 limit
// Even Fibonacci values are strictly increasing, so applying TakeWhile
// after the even filter still stops at the correct point.
var result = FiboGen() // take Fibonacci numbers
.Where(val => val % 2 == 0) // but only even ones
.TakeWhile(val => val < limit) // and less than limit
.Sum(); // finally sum them up.
MessageBox.Show(result.ToString());
Well, first of all, Fibonacci starts with 1, 1, 2, 3, ..., meaning that you are one step ahead of the list. You should start with val1 = 1 and val2 = 1.
https://en.wikipedia.org/wiki/Fibonacci_number
Then why do you use your input param as a part of your calculation?!!
As far as I have understood your problem (The question is unclear), Hope this solution works :)
int val1 = 0;
int val2 = 1;
Int64 evenTerms = 0;
int val3 = int.Parse(textBox3.Text), val4 = 0, temp;
// NOTE(review): this treats the input as the NUMBER of Fibonacci terms to
// examine and COUNTS the even ones -- it does not sum even values below a
// limit, so it answers a different question than the original post asked.
if (val3 < 5000000)
{
while (val4 < val3){
temp = val1 + val2;
val1 = val2;
val2 = temp;
if (temp % 2 == 0)
{
evenTerms += 1;
}
val4++;
}
}
MessageBox.Show("" + evenTerms);
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
int main() {
typedef unsigned long ulong;
// NOTE(review): a function defined inside main() is a GNU C extension
// (nested functions); this will not compile as standard C or as C++.
// Recursively walks the Fibonacci sequence below n. Because a%2 tests the
// term two positions back, the even-valued terms end up accumulated into
// *odd_z despite the variable's name -- confirm against expected output.
ulong fib(ulong a, ulong b, ulong * odd_z, ulong n) {
ulong c = a + b;
if((c+b) >= n) { return 0; }
if(a%2 == 0) { *odd_z+=(b+c); }
return fib(b,c,odd_z, n);
}
// Number of test cases, then one limit per case.
int T;
scanf("%d",&T);
ulong odd_z = 0;
ulong *sum = &odd_z;
while(T--) {
ulong N;
scanf("%lu",&N);
fib(0,1,&odd_z, N);
printf("%lu\n",*sum);
// Reset the accumulator for the next test case.
*sum=0;
}
return 0;
}
This algorithm is also more time and space efficient
I am trying to adapt this code that can perform conversions to and from Base 52, which I am using to store RGB color information from C# to C++:
// Encodes the 24-bit RGB portion of a colour as a base-52 string.
public static string ColourToBase52(Color colour)
{
int value = colour.ToArgb() & 0x00FFFFFF; // Mask off the alpha channel.
return ToBase52(value);
}
// Decodes a base-52 string back to a fully opaque (alpha = 0xFF) colour.
public static Color ColourFromBase52(string colour)
{
int value = FromBase52(colour);
return Color.FromArgb(unchecked((int)(0xFF000000 | value)));
}
// Converts a non-negative value to base 52 (digits 'a'-'z' then 'A'-'Z',
// with 'a' as zero), left-padded with 'a' to at least 5 characters.
public static string ToBase52(int value)
{
char[] baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".ToCharArray();
int targetBase = baseChars.Length;
// Emit digits least-significant first into the tail of a scratch buffer.
int i = 32;
char[] buffer = new char[i];
do
{
buffer[--i] = baseChars[value % targetBase];
value = value / targetBase;
}
while (value > 0);
// Keep only the digits actually produced (buffer[i..31]).
char[] result = new char[32 - i];
Array.Copy(buffer, i, result, 0, 32 - i);
return new string(result).PadLeft(5, 'a');
}
// Decodes a base-52 string produced by ToBase52 back to its integer value,
// accumulating from the least-significant (rightmost) character.
// NOTE(review): Array.IndexOf yields -1 for characters outside the
// alphabet, silently corrupting the result rather than failing.
public static int FromBase52(string value)
{
char[] baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ".ToCharArray();
int targetbase = baseChars.Length;
int multiplier = 1;
int result = 0;
for (int i = value.Length-1; i >= 0; --i)
{
int digit = Array.IndexOf(baseChars, value[i]);
result += digit*multiplier;
multiplier *= targetbase;
}
return result;
}
For my C++ code, I have opted to combine the functions that get and return the color value as an integer with the Base 52 conversion functions:
// Decodes a base-52 string (as produced by DIFColorToBase52) back into an
// RGB colour. Fixes from the original: the red channel used '* 255'
// instead of '& 255', and the digit lookup passed sizeof(baseChars) --
// the size of a pointer -- instead of the 52-character alphabet length.
struct DIFColor *DIFBase52ToColor(std::string c)
{
const char *baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
int targetBase = 52;
int multiplier = 1;
int result = 0;
// Accumulate digits from least significant (rightmost) to most.
for (int i = c.length() - 1; i >= 0; --i)
{
int digit = DIFGetPositionInArray(baseChars, 52, c[i]);
result += digit * multiplier;
multiplier = multiplier * targetBase;
}
uint8_t b = result & 255;
uint8_t g = (result >> 8) & 255;
uint8_t r = (result >> 16) & 255; // was '* 255', corrupting the red channel
return CreateDIFColor(r,g,b);
}
// Encodes a colour's 24-bit RGB value in base 52, left-padded with 'a' to
// 5 characters. Fixes from the original: the digit buffer is stack
// allocated (both new[] buffers leaked), the std::string is built with an
// explicit length instead of relying on a terminator that was never
// written (the cause of the run-to-run garbage output), and the padding
// subtraction is guarded against unsigned underflow.
std::string DIFColorToBase52(struct DIFColor *c)
{
int rgb = ((c->r&0x0ff)<<16)|((c->g&0x0ff)<<8)|(c->b&0x0ff);
const char *baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
int targetBase = 52;
int i = 32;
char buffer[32];
// Emit digits least-significant first into the tail of the buffer.
do
{
buffer[--i] = baseChars[rgb % targetBase];
rgb = rgb / targetBase;
}
while (rgb > 0);
// Construct with an explicit (pointer, length) -- no terminator needed.
std::string s(buffer + i, 32 - i);
if (s.size() < 5)
s.insert(s.begin(), 5 - s.size(), 'a');
return s;
}
I also had to create two functions for array manipulation:
// Linear scan for c within the first 'size' elements of 'array'.
// Returns the zero-based index of the first match, or -1 if absent.
int DIFGetPositionInArray(const char *array, size_t size, const char c)
{
size_t i = 0;
while (i < size)
{
if (array[i] == c)
return (int)i;
++i;
}
return -1;
}
// Copies 'numtocopy' characters from source[wheretostart...] into
// dest[wheretocopy...] and NUL-terminates dest. The original compared the
// source index against 'numtocopy' (a count, not an index), so it copied
// the wrong number of characters whenever wheretostart != 0, and it never
// wrote a terminator, leaving callers that build std::string from 'dest'
// reading uninitialized memory. dest must have room for numtocopy
// characters plus the terminator.
void DIFCopyCharArray(const char* source, int wheretostart, int wheretocopy, int numtocopy, char *dest)
{
int c = wheretocopy;
for(int i = wheretostart; i < wheretostart + numtocopy; i++)
{
dest[c] = source[i];
c++;
}
dest[c] = '\0';
}
However, when I tried to test it with a sanity check, it failed:
255,255,255 = 'aah1U' in Base52 RGB
aah1U = 1,245,59 in RGB
It also seems that every time I run the sanity check, a different value is produced:
255,255,255 = 'aah13' in Base52 RGB
aah13 = 1,245,59 in RGB
255,255,255 = 'aah1j' in Base52 RGB
aah1j = 1,245,59 in RGB
The expected output was:
255,255,255 = 'cpqEN' in Base52 RGB
cpqEN = 255,255,255 in RGB
Making me think that this is possibly a pointer problem.
The error is probably that you don't terminate the result string anywhere, which leads to undefined behavior in the following:
std::string s((const char*)result);
This is because the std::string constructor looks for the terminator when copying the C-style string you pass to it.
You can solve it two ways: Either add the terminator character '\0' to result, or tell the std::string constructor the length of result.
The problem lies in the fact that the array copy function is incorrect. It should be:
// Corrected copy helper: copies 'numtocopy' characters from
// source[wheretostart...] into dest[wheretocopy...] and NUL-terminates
// dest. The previous version's condition 'c <= numtocopy' copied one
// character too many, reading one byte past the intended source range.
// dest must have room for numtocopy characters plus the terminator.
void DIFCopyCharArray(const char* source, int wheretostart, int wheretocopy, int numtocopy, char *dest)
{
int c = wheretocopy;
for(int i = wheretostart; c < wheretocopy + numtocopy; i++)
{
dest[c] = source[i];
c++;
}
dest[c] = '\0';
}
Also, the array search function does not work because sizeof(baseChars) returns 4, which is not the number of elements.
Use a function like this:
// Scans the first 'arrayElements' entries of 'array' for character c.
// Returns the index of the first occurrence, or -1 when not found.
int DIFGetPositionInArray(const char *array, int arrayElements, const char c)
{
int idx = 0;
for (; idx < arrayElements; ++idx)
{
if (array[idx] == c)
return idx;
}
return -1;
}
And call it like this;
DIFGetPositionInArray(baseChars,52,d[i]);
Below is a Win32 Console App procedure that demonstrates the dependence of various pointers on an array. A change to the values in the original array (model) by for example uncommenting the lines marked '// uncomment ...' results in a change to the output. My question is how do I get or mimic this behaviour in a C# managed code environment (i.e. without using unsafe and pointers)?
#include "stdafx.h"
#include <iostream>
using namespace std;
// Demonstrates C-style pointer aliasing into one backing array: every
// pointer below is just model plus a fixed offset, so any mutation of
// model is visible through all of the derived pointers.
int _tmain(int argc, _TCHAR* argv[])
{
float model[100];
for(int i = 0; i < 100; i++) { model[i] = i; }
// uncomment these to alter the results
//model[5] = 5000;
//model[20] = 20000;
//model[38] = 38000;
static const int componentCount = 5;
float* coefs = model; // coefs points to model[0]
float* mean = coefs + componentCount; // mean points to model[0 + componentCount] == model[5]
float* cov = mean + 3*componentCount; // cov points to model[0 + componentCount + 3*componentCount] == model[20]
int ci = 2;
float* c = cov + 9*ci; // c points to model[0 + componentCount + 3*componentCount + 9*ci] == model[38]
int i = 0;
cout <<"model : "<< model[i] << endl; // 0
cout <<"coefs : "<< coefs[i] << endl; // 0
cout <<"mean : "<< mean[i] << endl; // 5 (or 5000)
cout <<"cov : "<< cov[i] << endl; // 20 (or 20000)
cout <<"ci : "<< ci << endl; // 2
cout <<"c : "<< c[i] << endl; // 38 (or 38000)
// Block on a keypress so the console window stays open.
cin.get(); }
You can do the same thing in C# without unsafe code:
struct ArrayPointer<T>
{
private T[] array;
private int offset;
public ArrayPointer(T[] array) : this(array, 0)
{
}
private ArrayPointer(T[] array, int offset)
{
Debug.Assert(array != null);
Debug.Assert(offset >= 0);
Debug.Assert(offset < array.Length);
this.array = array;
this.offset = offset;
}
public static ArrayPointer<T> operator+(ArrayPointer<T> p1, int p2)
{
return new ArrayPointer<T>(p1.array, p1.offset + p2);
}
And so on. Define operators for addition, subtraction, increment, decrement, comparison, indexing, conversion from arrays, and so on. Then you can say:
int[] arr = whatever;
ArrayPointer<int> pointer = arr;
pointer+=2;
pointer--;
int x = pointer[3];
and so on.
This approach has a lot of nice properties. For example, you can do a debug assert if you ever compare p1 > p2 when p1 and p2 are pointers to the interiors of different arrays. That is almost always a bug in C, but a hard one to catch.
You could write a class that represents an array with some offset, similar to the one below. Additionaly, you might want it to implement ICollection<T> or at least IEnumerable<T>.
// Wraps an array plus a fixed offset so element i maps to array[offset+i],
// mimicking a C pointer into the interior of an array.
// Fixes from the original: the indexer lacked a get accessor (invalid C#)
// and referenced 'offset' instead of the field 'm_offset'.
class ArrayWithOffset<T>
{
T[] m_array;
int m_offset;
public ArrayWithOffset(T[] array, int offset)
{
m_array = array;
m_offset = offset;
}
public T this[int i]
{
get { return m_array[m_offset + i]; }
}
}
Instead of one parameter, pointer to array item, use pair of parameters (array, offset).
I have C# code as below:
// .NET-side inputs that the OpenSSL port must reproduce:
// PasswordDeriveBytes(password, salt, "SHA1", 2).
private static string password = "Password";
private static string salt = "SALT";
private static string hashAlgorithm = "SHA1";
private static int iterations = 2;
var saltValueBytes = Encoding.UTF8.GetBytes(salt);
var passwordKey = new PasswordDeriveBytes(password, saltValueBytes, hashAlgorithm, iterations)
...
I need to implement the same on Mac. I came to know that OpenSSL implements related methods (i.e. libcrypto). What is the equivalent method in OpenSSL to the above code?
This shows how to implement PBKDF1 with OpenSSL, which according to the documentation is the algorithm used by PasswordDeriveBytes.
#include <string.h>
#include <stdlib.h>
#include <openssl/sha.h>
// PBKDF1(SHA-1): dk = SHA1^iter(password || salt), per RFC 2898 s5.1.
// The salt is taken as exactly 8 bytes (strncpy zero-pads a shorter one).
// NOTE(review): malloc is unchecked, and iter < 1 copies the unhashed
// (partly uninitialized) buffer into dk -- callers must pass iter >= 1.
void pbkdf1(const char *password, const char *salt, long iter, unsigned char dk[SHA_DIGEST_LENGTH])
{
size_t pwlen = strlen(password);
// Length of password || 8-byte salt.
size_t dlen = pwlen + 8;
unsigned char *buf;
// The buffer is hashed in place, so it must hold at least one digest.
if (dlen > SHA_DIGEST_LENGTH)
buf = malloc(dlen);
else
buf = malloc(SHA_DIGEST_LENGTH);
memcpy(buf, password, pwlen);
strncpy((char *)buf + pwlen, salt, 8);
while (iter-- > 0)
{
// After the first pass, only the digest-length prefix is rehashed.
SHA1(buf, dlen, buf);
dlen = SHA_DIGEST_LENGTH;
}
memcpy(dk, buf, SHA_DIGEST_LENGTH);
free(buf);
}
OpenSSL implements PBKDF2, which .NET exposes as Rfc2898DeriveBytes. PasswordDeriveBytes uses (according to the .NET 4 docs) "an extension of the PBKDF1 algorithm". PBKDF1 is not exposed by OpenSSL (and who knows what the 'extension' in question may be).
Using PBKDF2 (aka Rfc2898DeriveBytes) if possible will save you a lot of problems here.
This is a quick-and-dirty C++ translation of the Mono source code to perform GetBytes(X), where X can be greater than the size of the hash. As you can see, I've implemented only the SHA1 version...
#include <iostream>
#include <string.h>
#include <openssl/sha.h>
#define SHA1_BYTES_LEN 20
using namespace std;
namespace DeriveKeys
{
// C++ translation (from Mono's PasswordDeriveBytes) of .NET's extended
// PBKDF1 key derivation, SHA-1 only. GetBytes(cb) supports cb larger than
// one digest by prefixing a decimal block counter to the hash input.
// NOTE(review): buffers allocated with new[] here are never delete[]d --
// 'output2' in GetBytes leaks on every loop iteration -- acceptable for a
// quick port, not for production use.
class PasswordDeriveBytes
{
private:
unsigned char* password;
int pass_len;
unsigned char* salt;
int salt_len;
int IterationCount;
// 0 until the first GetBytes() call locks the parameters in.
int state;
// SHA1(password || salt), computed by Reset().
unsigned char* initial;
// Current key material; starts as 'initial' iterated (IterationCount-2) times.
unsigned char* output;
unsigned int output_len;
// Read position within the current derived block.
unsigned int position;
// Counter prefixed to the hash input once the first block is exhausted.
int hashnumber;
public:
PasswordDeriveBytes(unsigned char* password, unsigned char* salt, int iterations)
{
Prepare(password, salt, iterations);
}
private:
// Decimal int-to-string conversion (stand-in for .NET's ToString()).
string convertInt(int number)
{
if (number == 0)
return "0";
string temp="";
string returnvalue="";
while (number>0)
{
temp+=number%10+48;
number/=10;
}
// Digits were produced least-significant first; reverse them.
for (unsigned int i=0; i<temp.length(); i++)
returnvalue+=temp[temp.length()-i-1];
return returnvalue;
}
// Convenience overload: measures password/salt as NUL-terminated strings.
void Prepare(unsigned char* password, unsigned char* salt, int iterations)
{
if (password == NULL)
return;
Prepare(password, strlen((const char*)password), salt, strlen((const char*)salt), iterations);
}
// Copies password and salt into owned buffers and stores the iteration count.
void Prepare(unsigned char* password, int pass_len, unsigned char* salt, int salt_len, int iterations)
{
if (password == NULL)
return;
this->password = new unsigned char[pass_len];
memcpy(this->password,password,pass_len);
//memcpy((char *)this->password, (const char*)password, pass_len);
this->pass_len = pass_len;
//(unsigned char*)password.Clone();
this->salt = new unsigned char[salt_len];
//strncpy((char *)this->salt, (const char*)salt, salt_len);
memcpy(this->salt,salt,salt_len);
this->salt_len = salt_len;
this->IterationCount = iterations;
state = 0;
}
public:
// Returns cb derived bytes in a caller-owned new[] buffer, or NULL when
// cb < 1 or more than 999 counter-prefixed blocks would be required.
unsigned char* GetBytes(int cb)
{
if (cb < 1)
return NULL;
if (state == 0)
{
// it's now impossible to change the HashName, Salt
// and IterationCount
Reset();
state = 1;
}
unsigned char* result = new unsigned char[cb];
int cpos = 0;
// the initial hash (in reset) + at least one iteration
int iter = IterationCount-1;
if (iter < 1)
{
iter = 1;
}
// start with the PKCS5 key
if (this->output == NULL)
{
// calculate the PKCS5 key
this->output = initial;
this->output_len = SHA1_BYTES_LEN;
// generate new key material
for (int i = 0; i < iter - 1; i++)
{
SHA1((const unsigned char*)this->output,this->output_len,this->output);
this->output_len = SHA1_BYTES_LEN;
}
}
while (cpos < cb)
{
unsigned char* output2 = new unsigned char[SHA1_BYTES_LEN];
unsigned int output2_len = SHA1_BYTES_LEN;
if (hashnumber == 0)
{
// First block: hash the key material directly.
SHA1((const unsigned char*)this->output,this->output_len,output2);
output2_len = SHA1_BYTES_LEN;
}
else if (hashnumber < 1000)
{
// Subsequent blocks: hash(counter-as-decimal-text || key material).
// NOTE(review): the first new[] above is leaked when this branch
// reassigns output2.
string n = convertInt(hashnumber);
output2 = new unsigned char[this->output_len + n.length()];
output2_len = this->output_len + n.length();
for (unsigned int j = 0; j < n.length(); j++)
output2[j] = (unsigned char)(n[j]);
memcpy(output2 + n.length(),this->output,this->output_len);
SHA1((const unsigned char*)output2,output2_len,output2);
output2_len = SHA1_BYTES_LEN;
}
else
{
// Matches the .NET/Mono limit of 1000 derived blocks.
return NULL;
}
// Copy as much of the current block as the caller still needs.
int rem = this->output_len - this->position;
int l = cb - cpos;
if (l > rem)
{
l = rem;
}
memcpy(result + cpos, output2 + this->position, l);
cpos += l;
this->position += l;
// Advance to the next block once this one is fully consumed.
while (this->position >= output2_len)
{
this->position -= output2_len;
this->hashnumber++;
}
}
return result;
}
// Re-derives the initial hash SHA1(password || salt) and clears all
// streaming state.
void Reset()
{
this->state = 0;
this->position = 0;
this->hashnumber = 0;
this->initial = new unsigned char[SHA1_BYTES_LEN];
this->output = NULL;
this->output_len = 0;
if (this->salt != NULL)
{
unsigned char* rv = new unsigned char[this->pass_len + this->salt_len];
memcpy(rv,this->password, this->pass_len);
memcpy(rv + this->pass_len, this->salt, this->salt_len);
SHA1((const unsigned char*)rv,this->pass_len + this->salt_len, initial);
}
else
{
SHA1((const unsigned char*)this->password,this->pass_len,initial);
}
}
};
}