OpenSSL HMACSHA256 produces a different result compared to .NET - C#

I am using C# and C++ with OpenSSL to compute an HMAC-SHA256 hash with a key, and the two produce different results. What am I doing wrong?
C# code:
public static string CreateSignature(string signingString, string sharedKey)
{
var key = Encoding.ASCII.GetBytes(sharedKey);
var hmac = new HMACSHA256(key);
var data = Encoding.ASCII.GetBytes(signingString);
var hash = hmac.ComputeHash(data);
return Convert.ToBase64String(hash);
}
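For reference, a minimal call of the method above (the string to sign and the key here are made-up values):
// Both sides must hash exactly the same bytes; ASCII encoding is used above.
var signature = CreateSignature("GET&/api/orders&2016-06-01", "secret-key");
Console.WriteLine(signature); // prints the Base64-encoded HMAC-SHA256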
C++ code:
std::string SignatureProvider::getSignature(std::string stringToSign, std::string key)
{
const char* pKey = key.c_str();
const char* pData = stringToSign.c_str();
unsigned char* result = nullptr;
unsigned int len = 32;
result = (unsigned char*)malloc(sizeof(char) * len);
HMAC_CTX ctx;
HMAC_CTX_init(&ctx);
HMAC_Init_ex(&ctx, pKey, strlen(pKey), EVP_sha256(), NULL);
HMAC_Update(&ctx, (unsigned char*)&pData, strlen(pData));
HMAC_Final(&ctx, result, &len);
HMAC_CTX_cleanup(&ctx);
return base64_encode(result, len);
}
std::string base64_encode(unsigned char const* bytes_to_encode, unsigned int in_len)
{
std::string ret;
int i = 0;
int j = 0;
unsigned char char_array_3[3];
unsigned char char_array_4[4];
while (in_len--) {
char_array_3[i++] = *(bytes_to_encode++);
if (i == 3) {
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for (i = 0; (i <4); i++)
ret += base64_chars[char_array_4[i]];
i = 0;
}
}
if (i)
{
for (j = i; j < 3; j++)
char_array_3[j] = '\0';
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
for (j = 0; (j < i + 1); j++)
ret += base64_chars[char_array_4[j]];
while ((i++ < 3))
ret += '=';
}
return ret;
}
I included the base64 conversion just for completeness, but the results already differ before that step.

Why don't you use the HMAC function itself? (Incidentally, the likely cause of the difference in your incremental version is the cast in HMAC_Update: (unsigned char*)&pData passes the address of the pointer rather than the data it points to; it should be (unsigned char*)pData.) I have tried the following code, and both the C++ and C# versions produce the same HMAC:
std::string getSignature(std::string stringToSign, std::string key)
{
const char* pKey = key.c_str();
const char* pData = stringToSign.c_str();
// Let HMAC() write into a stack buffer instead of leaking a malloc'd one.
unsigned char result[EVP_MAX_MD_SIZE];
unsigned int len = 0;
HMAC(EVP_sha256(), pKey, strlen(pKey), (const unsigned char*)pData, strlen(pData), result, &len);
return base64_encode(result, len);
}

Related

Reversing Galois multiplication of two byte arrays in C#

I need help finding the reverse of Galois-field multiplication in GF(2^128) in C#. The code below is used in my AES-GCM functions; I found it on the web.
I have searched the web for Galois division, but I have had no luck finding it.
Pardon my limited knowledge of this field and my English.
This function derives the value of 2^x.
public byte BIT(byte x)
{
return (byte)(1 << x);
}
This function converts a byte array of 4 elements to an unsigned int.
public uint WPA_GET_BE32(byte[] a)
{
return (uint)((a[0] << 24 )|( a[1] <<16 )|( a[2] << 8 )| a[3]);
}
This function converts an unsigned int into a byte array of 4 elements.
public void WPA_PUT_BE32(out byte[] a, uint val)
{
a = new byte[4];
a[0] = (byte)((val >> 24) & 0xff);
a[1] = (byte)((val >> 16) & 0xff);
a[2] = (byte)((val >> 8) & 0xff);
a[3] = (byte)(val & 0xff);
}
public void shift_right_block(ref byte[] v)
{
uint val;
byte[] temp = new byte[4];
temp = v.Skip(12).Take(4).ToArray();
val = WPA_GET_BE32(temp);
val >>= 1;
if ((v[11] & 0x01) > 0) val |= 0x80000000;
WPA_PUT_BE32(out temp, val);
Array.Copy(temp, 0, v, 12, 4);
temp = v.Skip(8).Take(4).ToArray();
val = WPA_GET_BE32(temp);
val >>= 1;
if ((v[7] & 0x01) > 0) val |= 0x80000000;
WPA_PUT_BE32(out temp, val);
Array.Copy(temp, 0, v, 8, 4);
temp = v.Skip(4).Take(4).ToArray();
val = WPA_GET_BE32(temp);
val >>= 1;
if ((v[3] & 0x01) > 0) val |= 0x80000000;
WPA_PUT_BE32(out temp, val);
Array.Copy(temp, 0, v, 4, 4);
temp = v.Skip(0).Take(4).ToArray();
val = WPA_GET_BE32(temp);
val >>= 1;
WPA_PUT_BE32(out temp, val);
Array.Copy(temp, 0, v, 0, 4);
}
This function performs an exclusive-OR of two byte arrays.
public void c_xor_16(ref byte[] dest, byte[] src)
{
int ndx = 0;
for (ndx = 0; ndx < 16; ndx++) dest[ndx] ^= src[ndx];
}
This is the main function; byte array z is the output of the GF multiplication.
public void c_gf_mult(byte[] x, byte[] y, ref byte[] z)
{
int i, j;
byte[] v = new byte[16];
z = new byte[16];
Array.Clear(z, 0, 16);
Array.Copy(y, v, 16);
for (i = 0; i < 16; i++)
{
for (j = 0; j < 8; j++)
{
if ((byte)(x[i] & BIT((byte)(7 - j))) > 0)
{
c_xor_16(ref z, v);
}
if ((byte)(v[15] & 0x01) > 0)
{
shift_right_block(ref v);
v[0] ^= 0xe1;
}
else
{
shift_right_block(ref v);
}
}
}
return;
}
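For reference, a minimal sketch of how c_gf_mult can be exercised from inside the same class (the operand values are made up; the only expectation used is that the element with the MSB of byte 0 set acts as the multiplicative identity in this bit ordering):
// x = the multiplicative identity for this bit ordering (MSB of byte 0 set).
byte[] x = new byte[16];
x[0] = 0x80;
// y = an arbitrary 16-byte block: 01 02 ... 10.
byte[] y = Enumerable.Range(1, 16).Select(n => (byte)n).ToArray();
byte[] z = new byte[16];
c_gf_mult(x, y, ref z);
Console.WriteLine(BitConverter.ToString(z)); // expected to print y unchanged: 01-02-...-0F-10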

Calculating CRC16 in C#

I'm trying to port some old C code to C#; it basically receives a string and returns a CRC16 of it...
The C method is as follow:
#define CRC_MASK 0x1021 /* x^16 + x^12 + x^5 + x^0 */
UINT16 CRC_Calc (unsigned char *pbData, int iLength)
{
UINT16 wData, wCRC = 0;
int i;
for ( ;iLength > 0; iLength--, pbData++) {
wData = (UINT16) (((UINT16) *pbData) << 8);
for (i = 0; i < 8; i++, wData <<= 1) {
if ((wCRC ^ wData) & 0x8000)
wCRC = (UINT16) ((wCRC << 1) ^ CRC_MASK);
else
wCRC <<= 1;
}
}
return wCRC;
}
My ported C# code is this:
private static ushort Calc(byte[] data)
{
ushort wData, wCRC = 0;
for (int i = 0; i < data.Length; i++)
{
wData = Convert.ToUInt16(data[i] << 8);
for (int j = 0; j < 8; j++, wData <<= 1)
{
var a = (wCRC ^ wData) & 0x8000;
if ( a != 0)
{
var c = (wCRC << 1) ^ 0x1021;
wCRC = Convert.ToUInt16(c);
}
else
{
wCRC <<= 1;
}
}
}
return wCRC;
}
The test string is "OPN"... It must return a 16-bit value, which is of course 2 bytes, A8 A9, and CRC_MASK is the polynomial for that calculation. I did find several examples of CRC16 here and around the web, but none of them achieves this result, and the CRC calculation must match the one used by the device we are connecting to.
Where is the mistake? I really appreciate any help.
Thanks! Best regards,
Gutemberg
UPDATE
Following the answer from @rcgldr, I put together the following sample:
_serial = new SerialPort("COM6", 19200, Parity.None, 8, StopBits.One);
_serial.Open();
_serial.Encoding = Encoding.GetEncoding(1252);
_serial.DataReceived += Serial_DataReceived;
var msg = "OPN";
var data = Encoding.GetEncoding(1252).GetBytes(msg);
var crc = BitConverter.GetBytes(Calc(data));
var msb = crc[0].ToString("X");
var lsb = crc[1].ToString("X");
//The following line must be something like: \x16OPN\x17\xA8\xA9
var cmd = string.Format(@"{0}{1}{2}\x{3}\x{4}", SYN, msg, ETB, msb, lsb);
//var cmd = "\x16OPN\x17\xA8\xA9";
_serial.Write(cmd);
The value of the cmd variable is what I'm trying to send to the device. If you have a look at the commented cmd value, that is a working string. The 2 bytes of the CRC16 go in the last two parameters (msb and lsb). So, in the sample here, msb MUST be "\xA8" and lsb MUST be "\xA9" in order for the command to work (i.e., for the CRC16 to match on the device).
Any clues?
Thanks again.
UPDATE 2
For those who end up in the same situation, where you need to format the string with \x escapes, this is what I did to get it working:
protected string ToMessage(string data)
{
var msg = data + ETB;
var crc = CRC16.Compute(msg);
var fullMsg = string.Format(@"{0}{1}{2:X}{3:X}", SYN, msg, crc[0], crc[1]);
return fullMsg;
}
This returns the full message that I need, including the \x characters. The SYN variable is '\x16' and ETB is '\x17'.
Thank you all for the help!
Gutemberg
The problem here is that the message, including the ETB (\x17), is 4 bytes long (the leading sync byte isn't used for the CRC): "OPN\x17" == {'O', 'P', 'N', 0x17}, which results in a CRC of {0xA8, 0xA9} being appended to the message. So the CRC function is correct, but the original test data didn't include the 4th byte, 0x17.
This is a working example (at least with VS2015 express).
private static ushort Calc(byte[] data)
{
ushort wCRC = 0;
for (int i = 0; i < data.Length; i++)
{
wCRC ^= (ushort)(data[i] << 8);
for (int j = 0; j < 8; j++)
{
if ((wCRC & 0x8000) != 0)
wCRC = (ushort)((wCRC << 1) ^ 0x1021);
else
wCRC <<= 1;
}
}
return wCRC;
}
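A quick way to confirm the point about the ETB byte is to run the routine above over all four bytes; a minimal sketch (assuming Calc is accessible from the calling code):
// "OPN" followed by ETB (0x17) - the leading sync byte is not part of the CRC.
byte[] frame = { (byte)'O', (byte)'P', (byte)'N', 0x17 };
ushort crc = Calc(frame);
Console.WriteLine(crc.ToString("X4")); // A8A9, matching the bytes the device expects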

Conversion of CRC function from C to C# yields wrong values

I'm trying to convert a couple of simple CRC calculating functions from C to C#, but I seem to be getting incorrect results.
The C functions are:
#define CRC32_POLYNOMIAL 0xEDB88320
unsigned long CRC32Value(int i)
{
int j;
unsigned long ulCRC;
ulCRC = i;
for (j=8;j>0;j--)
{
if (ulCRC & 1)
ulCRC = (ulCRC >> 1)^CRC32_POLYNOMIAL;
else
ulCRC >>= 1;
}
return ulCRC;
}
unsigned long CalculateBlockCRC32(
unsigned long ulCount,
unsigned char *ucBuffer)
{
unsigned long ulTemp1;
unsigned long ulTemp2; unsigned long ulCRC = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
ulTemp2 = CRC32Value(((int)ulCRC^*ucBuffer++)&0xff);
ulCRC = ulTemp1^ulTemp2;
}
return(ulCRC);
}
These are well defined; they are taken from a user manual. My C# versions of these functions are:
private ulong CRC32POLYNOMIAL = 0xEDB88320L;
private ulong CRC32Value(int i)
{
int j;
ulong ulCRC = (ulong)i;
for (j = 8; j > 0; j--)
{
if (ulCRC % 2 == 1)
{
ulCRC = (ulCRC >> 1) ^ CRC32POLYNOMIAL;
}
else
{
ulCRC >>= 1;
}
}
return ulCRC;
}
private ulong CalculateBlockCRC32(ulong ulCount, byte[] ucBuffer)
{
ulong ulTemp1;
ulong ulTemp2;
ulong ulCRC=0;
int bufind=0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[bufind]) & 0xFF);
ulCRC = ulTemp1 ^ ulTemp2;
bufind++;
}
return ulCRC;
}
As I mentioned, there are discrepancies between the C version and the C# version. One possible source is my understanding of the C expression ulCRC & 1 which I believe will only be true for odd numbers.
I call the C# function like this:
string contents = "some data";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
ulong calculatedCRC = this.CalculateBlockCRC32((ulong)toBeHexed.Length, toBeHexed);
And the C function is called like this:
char *Buff="some data";
unsigned long iLen = strlen(Buff);
unsigned long CRC = CalculateBlockCRC32(iLen, (unsigned char*) Buff);
I believe that I am calling the functions with the same data in each language; is that correct? If anyone could shed some light on this, I would be very grateful.
As @Adriano Repetti has already pointed out, you should use the UInt32 type in place of ulong (which is the 64-bit unsigned UInt64, whereas in VC++ unsigned long is only a 32-bit unsigned type):
private UInt32 CRC32POLYNOMIAL = 0xEDB88320;
private UInt32 CRC32Value(int i)
{
int j;
UInt32 ulCRC = (UInt32)i;
for (j = 8; j > 0; j--)
{
if (ulCRC % 2 == 1)
{
ulCRC = (ulCRC >> 1) ^ CRC32POLYNOMIAL;
}
else
{
ulCRC >>= 1;
}
}
return ulCRC;
}
private UInt32 CalculateBlockCRC32(UInt32 ulCount, byte[] ucBuffer)
{
UInt32 ulTemp1;
UInt32 ulTemp2;
UInt32 ulCRC = 0;
int bufind = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFF;
ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[bufind]) & 0xFF);
ulCRC = ulTemp1 ^ ulTemp2;
bufind++;
}
return ulCRC;
}
string contents = "12";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
UInt32 calculatedCRC = CalculateBlockCRC32((UInt32)toBeHexed.Length, toBeHexed);
Usually in C# it doesn't matter whether you use the C# type name (recommended by Microsoft) or the ECMA/CLR type name. But in this and similar cases involving bit-level manipulation, the explicit names can greatly clarify the intent and prevent mistakes.
In C it is always a good idea to use the typedefs from stdint.h. They do the same job as the ECMA type names in C# - clarifying intent - and also guarantee the width and signedness of the types used (C compilers may use different widths for the same types, because the standard doesn't specify exact sizes):
#include <stdint.h>
#define CRC32_POLYNOMIAL ((uint32_t)0xEDB88320)
uint32_t CRC32Value(uint32_t i)
{
uint32_t j;
uint32_t ulCRC;
ulCRC = i;
for (j = 8; j > 0; j--)
{
if (ulCRC & 1)
ulCRC = (ulCRC >> 1) ^ CRC32_POLYNOMIAL;
else
ulCRC >>= 1;
}
return ulCRC;
}
uint32_t CalculateBlockCRC32(
size_t ulCount,
uint8_t *ucBuffer)
{
uint32_t ulTemp1;
uint32_t ulTemp2;
uint32_t ulCRC = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & ((uint32_t)0x00FFFFFF);
ulTemp2 = CRC32Value((ulCRC^*ucBuffer++)&0xff);
ulCRC = ulTemp1^ulTemp2;
}
return(ulCRC);
}
char *Buff = "12";
size_t iLen = strlen(Buff);
uint32_t CRC = CalculateBlockCRC32(iLen, (uint8_t *) Buff);
printf("%u", CRC);
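Since CRC32Value(i) depends only on the low 8 bits of its argument, the inner call can also be replaced by a 256-entry lookup table computed once up front. A sketch of that variant (the helper names crcTable, BuildCrcTable and CalculateBlockCRC32Fast are made up), building on the UInt32 methods above:
private UInt32[] crcTable;
private void BuildCrcTable()
{
    crcTable = new UInt32[256];
    for (int i = 0; i < 256; i++)
        crcTable[i] = CRC32Value(i); // same per-byte value as before, precomputed
}
private UInt32 CalculateBlockCRC32Fast(byte[] ucBuffer)
{
    UInt32 ulCRC = 0;
    foreach (byte b in ucBuffer)
        ulCRC = (ulCRC >> 8) ^ crcTable[(ulCRC ^ b) & 0xFF];
    return ulCRC;
}
Call BuildCrcTable() once before the first use; the results should match CalculateBlockCRC32 exactly.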

Improving upon bit masking and shifting function

Can this function be improved to make it more efficient?
private unsafe uint GetValue(uint value, int bitsToGrab, int bitsToMoveOver)
{
byte[] bytes = BitConverter.GetBytes(value);
uint myBitMask = 0x80; //MSB of 8 bits (byte)
int arrayIndex = 0;
for (int i = 0; i < bitsToMoveOver; i++)
{
if (myBitMask == 0)
{
arrayIndex++;
myBitMask = 0x80;
}
myBitMask >>= 1;
}
uint outputMask1 = (uint)(1 << (bitsToGrab - 1));
uint returnVal = 0;
for (int i = 0; i < bitsToGrab; i++)
{
if (myBitMask == 0)
{
arrayIndex++;
myBitMask = 0x80;
}
if ((bytes[arrayIndex] & myBitMask) > 0)
{
returnVal |= outputMask1;
}
outputMask1 >>= 1;
myBitMask >>= 1;
}
return returnVal;
}
I have an array of uints. Each uint contains multiple pieces of data. In order to get the information out, I pass in the number of bits and the offset of those bits. Using that information, I build an output value.
The offset is generally on a byte boundary, but I cannot guarantee that it will be.
I'm really looking to see whether I can simplify the code. Am I unnecessarily verbose, or could it be done a bit cleaner?
Updated function: How do you guys feel about this?
private unsafe uint GetValue(uint value, int bitsToGrab, int bitsToMoveOver)
{
if (bitsToGrab + bitsToMoveOver >= 32)
{
return 0;
}
byte[] bytes = BitConverter.GetBytes(value);
Array.Reverse(bytes);
uint newValue = BitConverter.ToUInt32(bytes, 0);
uint grabMask = (0xFFFFFFFF << (32 - bitsToGrab));
grabMask >>= bitsToMoveOver;
uint returnVal = (newValue & grabMask) >> (32 - bitsToMoveOver - bitsToGrab);
return returnVal;
}
This needs testing (and assumes that bitsToGrab + bitsToMoveOver <= 32), but I think you can do this:
uint grabMask = ~(0xFFFFFFFF << (bitsToGrab + bitsToMoveOver));
return (value & grabMask) >> bitsToMoveOver;
Since the OP has indicated that it should be sampling bits from an internal binary representation of the number (including endian encoding), with byte order swapping within each word, you can swap bytes first like this:
uint reorderedValue = ((value << 8) & 0xFF00FF00) | ((value >> 8) & 0x00FF00FF);
uint grabMask = ~(0xFFFFFFFF << (bitsToGrab + bitsToMoveOver));
return (reorderedValue & grabMask) >> bitsToMoveOver;
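Wrapped into a helper (ExtractBits is a hypothetical name; the offset is counted from the least-significant bit, and bitsToGrab + bitsToMoveOver is assumed to be less than 32), the simple two-line version above becomes:
static uint ExtractBits(uint value, int bitsToGrab, int bitsToMoveOver)
{
    uint grabMask = ~(0xFFFFFFFF << (bitsToGrab + bitsToMoveOver));
    return (value & grabMask) >> bitsToMoveOver;
}
// Example: 0xB6 = 1011 0110 in binary; grabbing 3 bits starting 2 bits above
// the LSB picks out bits 4..2 = 101, i.e. 5.
// Console.WriteLine(ExtractBits(0xB6, 3, 2)); // prints 5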

jBCrypt 0.3 C# Port (BCrypt.net)

After looking into a bug in the original jBCrypt v0.1 C# port, BCrypt.net (see the related question), I decided to compare the new jBCrypt code against the old C# port to look for discrepancies and potential issues like that bug.
Here is what I've found:
// original java (jBCrypt v0.3):
private static int streamtoword(byte data[], int offp[]) {
int i;
int word = 0;
int off = offp[0];
for (i = 0; i < 4; i++) {
word = (word << 8) | (data[off] & 0xff);
off = (off + 1) % data.length;
}
offp[0] = off;
return word;
}
// port to C# :
private static uint StreamToWord(byte[] data, ref int offset)
{
uint word = 0;
for (int i = 0; i < 4; i++)
{
// note the difference with the omission of "& 0xff"
word = (word << 8) | data[offset];
offset = (offset + 1) % data.Length;
}
return word;
}
If the prior is incorrect, would the following fix it?
private static uint StreamToWord(byte[] data, ref int[] offsetp)
{
uint word = 0;
int offset = offsetp[0];
for (int i = 0; i < 4; i++)
{
word = (word << 8) | (uint)(data[offset] & 0xff);
offset = (offset + 1) % data.Length;
}
offsetp[0] = offset;
return word;
}
The & 0xff is required in the Java version because in Java, bytes are signed. (Some argue that this is a bug.)
In C#, bytes are unsigned, so the & 0xff is unnecessary.
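A small C# snippet illustrating the point (the variable names are arbitrary):
byte b = 0x80;                            // C# byte is unsigned: value 128
uint word = 0;
word = (word << 8) | b;                   // b widens to 0x00000080, no mask needed
Console.WriteLine(word.ToString("X8"));   // 00000080
// What Java's signed byte would do without "& 0xff": (byte)0x80 is -128,
// and sign extension sets the high 24 bits before the OR.
sbyte signedB = unchecked((sbyte)0x80);
Console.WriteLine(unchecked((uint)signedB).ToString("X8")); // FFFFFF80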
