Correctness of multiplication in Galois field - C#

I'm writing code to do arithmetic in the Galois field GF(2^8), and I think I'm getting wrong results from the multiplication operation.
private static byte Multiply(byte a, byte b)
{
    byte result = 0;
    while (b != 0)
    {
        if ((b & 1) != 0)
        {
            result ^= a;
        }
        a <<= 1;
        b >>= 1;
    }
    return result;
}
The result for Multiply(1, 2) gives the correct value of 2 but Multiply(240, 249) gives me 112 instead of the expected 148.
I'm not sure whether this result is correct for Russian peasant multiplication.
Maybe there's another algorithm that gives correct results?

Example code:
#define POLY 0x11D

static BYTE GFMpy(BYTE b0, BYTE b1)         /* GF(2^8) multiply, most significant bit first */
{
    int i;
    int product;

    product = 0;
    for (i = 0; i < 8; i++) {
        product <<= 1;
        if (product & 0x100) {              /* reduce modulo the field polynomial */
            product ^= POLY;
        }
        if (b0 & 0x80u) {                   /* for each set bit of b0, add (xor) b1 */
            product ^= b1;
        }
        b0 <<= 1;
    }
    return ((BYTE)product);
}
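Since the question is in C#, here is the question's Multiply with the missing step added: whenever a is shifted left out of 8 bits, it has to be reduced by the field polynomial. This is only a sketch assuming the 0x11D polynomial from the example above (with it, Multiply(240, 249) gives the expected 148); other fields, such as the AES field with 0x11B, use a different polynomial.
private static byte Multiply(byte a, byte b)
{
    const int Poly = 0x11D;        // field polynomial; an assumption taken from the example above
    int aa = a;                    // widen so the bit shifted out of the byte is not lost
    byte result = 0;
    while (b != 0)
    {
        if ((b & 1) != 0)
            result ^= (byte)aa;
        aa <<= 1;
        if ((aa & 0x100) != 0)     // reduce modulo the field polynomial; this is the step the original code is missing
            aa ^= Poly;
        b >>= 1;
    }
    return result;
}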
Example using lookup tables:
#define POLY (0x11d)
/* all non-zero elements are powers of 2 for POLY == 0x11d */
typedef unsigned char BYTE;
/* ... */
static BYTE exp2[512];
static BYTE log2[256];
/* ... */
static void Tbli()
{
    int i;
    int b;

    b = 0x01;                       /* init exp2 table */
    for (i = 0; i < 512; i++) {
        exp2[i] = (BYTE)b;
        b = (b << 1);               /* powers of 2 */
        if (b & 0x100)
            b ^= POLY;
    }
    log2[0] = 0xff;                 /* init log2 table */
    for (i = 0; i < 255; i++)
        log2[exp2[i]] = (BYTE)i;
}
/* ... */
static BYTE GFMpy(BYTE m0, BYTE m1)         /* multiply */
{
    if (0 == m0 || 0 == m1)
        return (0);
    return (exp2[log2[m0] + log2[m1]]);
}
/* ... */
static BYTE GFDiv(BYTE m0, BYTE m1)         /* divide */
{
    if (0 == m0)
        return (0);
    return (exp2[log2[m0] + 255 - log2[m1]]);
}
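For completeness, a rough C# port of the table-driven approach might look like the sketch below (an assumption-laden translation: it keeps POLY 0x11D, the 512-entry exp2 table so two logs can be added without a mod 255, and 0xff as the marker for the undefined log of zero).
static class GF256
{
    const int Poly = 0x11D;                        // field polynomial, as above
    static readonly byte[] Exp2 = new byte[512];   // 512 entries so exponents can be added directly
    static readonly byte[] Log2 = new byte[256];

    static GF256()
    {
        int b = 0x01;                              // build Exp2 as successive powers of 2
        for (int i = 0; i < 512; i++)
        {
            Exp2[i] = (byte)b;
            b <<= 1;
            if ((b & 0x100) != 0)
                b ^= Poly;                         // reduce modulo the field polynomial
        }
        Log2[0] = 0xff;                            // log of 0 is undefined; marker value
        for (int i = 0; i < 255; i++)
            Log2[Exp2[i]] = (byte)i;
    }

    public static byte Multiply(byte m0, byte m1)
    {
        if (m0 == 0 || m1 == 0)
            return 0;
        return Exp2[Log2[m0] + Log2[m1]];          // add logs, look up the antilog
    }

    public static byte Divide(byte m0, byte m1)
    {
        if (m0 == 0)
            return 0;
        return Exp2[Log2[m0] + 255 - Log2[m1]];    // subtract logs via + 255 to stay non-negative
    }
}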

Related

How to edit this code to calculate CRC-32/MPEG in C#

I have this code which calculates CRC-32. I need to edit it to use polynomial 0x04C11DB7, initial value 0xFFFFFFFF, and final XOR 0.
So the CRC-32 for the string "123456789" should be "0376E6E7". I found some code; it's very slow, but it works anyway.
internal static class Crc32
{
internal static uint[] MakeCrcTable()
{
uint c;
uint[] crcTable = new uint[256];
for (uint n = 0; n < 256; n++)
{
c = n;
for (int k = 0; k < 8; k++)
{
var res = c & 1;
c = (res == 1) ? (0xEDB88320 ^ (c >> 1)) : (c >> 1);
}
crcTable[n] = c;
}
return crcTable;
}
internal static uint CalculateCrc32(byte[] str)
{
uint[] crcTable = Crc32.MakeCrcTable();
uint crc = 0xffffffff;
for (int i = 0; i < str.Length; i++)
{
byte c = str[i];
crc = (crc >> 8) ^ crcTable[(crc ^ c) & 0xFF];
}
return ~crc; //(crc ^ (-1)) >> 0;
}
}
Based on the added comments, what you are looking for is CRC-32/MPEG-2, which reverses the direction of the CRC, and eliminates the final exclusive-or, compared to the implementation you have, which is a CRC-32/ISO-HDLC.
To get there, you need to flip the CRC from reflected to forward. You bit-flip the polynomial to get 0x04c11db7, check the high bit instead of the low bit, reverse the shifts, both in the table generation and use of the table, and exclusive-or with the high byte of the CRC instead of the low byte.
To remove the final exclusive-or, remove the tilde at the end.
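Putting those changes together, a minimal C# sketch of CRC-32/MPEG-2 (forward polynomial 0x04C11DB7, initial value 0xFFFFFFFF, no reflection, no final exclusive-or) might look like this; the class and method names are just placeholders:
internal static class Crc32Mpeg2
{
    private static readonly uint[] Table = MakeTable();

    private static uint[] MakeTable()
    {
        var table = new uint[256];
        for (uint n = 0; n < 256; n++)
        {
            uint c = n << 24;                        // the byte enters at the top of the register
            for (int k = 0; k < 8; k++)
                c = (c & 0x80000000) != 0            // test the high bit instead of the low bit
                    ? (c << 1) ^ 0x04C11DB7          // shift left and apply the forward polynomial
                    : c << 1;
            table[n] = c;
        }
        return table;
    }

    internal static uint Calculate(byte[] data)
    {
        uint crc = 0xFFFFFFFF;
        foreach (byte b in data)
            crc = (crc << 8) ^ Table[((crc >> 24) ^ b) & 0xFF];   // exclusive-or with the high byte of the CRC
        return crc;                                  // no final complement
    }
}
// Calculate(Encoding.ASCII.GetBytes("123456789")) should give 0x0376E6E7.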

Difference in C# and C++ Byte array conversion for object

I am trying to convert some C++ code to C# for an application. The function I am trying to convert calculates the checksum of an object which contains a MAC address among other details. The checksum function in C++ is defined as:
unsigned short CalculateCheckSum(unsigned char* p, int n)
{
unsigned short x, checksum = 0;
for (unsigned long i = 0; i < n; ++i)
{
x = p[i];
x <<= i % 8;
checksum += x;
}
return checksum != 0 ? checksum : 51;
}
The same function written in C# is:
public static ushort CalculateCheckSum(byte[] p, int n)
{
ushort x, checksum = 0;
for (int i = 0; i < n; ++i)
{
x = p[i];
x <<= i % 8;
checksum += x;
}
return (ushort)(checksum != 0 ? checksum : 51);
}
Here is the code that calculates the checksum in C++:
PCInfoClass pcInfo;
char nicIDStr[1024];
strcpy_s(nicIDStr, "34-29-8f-93-16-61");
NICAddressStrToBinary(nicIDStr, pcInfo.nicID);
char outbuf[1000];
pcInfo.timeStamp = 1234;
pcInfo.expDate = 0;
I32 pcInfoSz = 20;
pcInfo.checksum = 0;
unsigned char* byteStr;
byteStr = (unsigned char*)&pcInfo;
pcInfo.checksum = CalculateCheckSum(byteStr, pcInfoSz);
Since the CalculateCheckSum method takes a byte array as an argument, I have used the BinaryFormatter class which comes with System.Runtime. I have tried to replicate the same functionality in C# with the following lines:
PCInfoClass pcInfo = new PCInfoClass();
char[] nicIDStr = new char[1024];
string str = "34-29-8f-93-16-61";
for (int i = 0; i < str.Length; i++)
{
nicIDStr[i] = str[i];
}
NICAddressStrToBinary(nicIDStr, pcInfo.nicID);
pcInfo.timeStamp = 1234;
pcInfo.expDate = 0;
int pcInfoSz = 20;
pcInfo.checksum = 0;
pcInfo.checksum = CalculateCheckSum(ObjectToByteArray1(pcInfo), pcInfoSz);
public static byte[] ObjectToByteArray1(Object obj)
{
if (obj == null)
return null;
BinaryFormatter bf = new BinaryFormatter();
MemoryStream ms = new MemoryStream();
bf.Serialize(ms, obj);
return ms.ToArray();
}
Unfortunately, the checksum value comes out different for the two approaches, so the conversion is stuck at this point.
The other method used in this code is NICAddressStrToBinary; in C++ it's defined as:
bool NICAddressStrToBinary(const char* nicIDStr, unsigned char* outbuf)
{
int c, i, dgt;
if (nicIDStr == NULL) return false;
//converted char to integer as ascii number.
for (dgt = 0, i = 0; (c = nicIDStr[i]) != '\0'; ++i)
{
//if it is 45 '-' then the loop will continue;
if (c == '-') continue;
//if the ascii value is between 48 and 57 then we decrease the given integer by 48
if ('0' <= c && c <= '9')
{
c -= '0';
}
else
if ('a' <= c && c <= 'f')
{
c -= 'a' - 10;
}
else
if ('A' <= c && c <= 'F')
{
c -= 'A' - 10;
}
else
{
return false;
}
if (dgt >= 6 * 2)
{
return false;
}
if (outbuf != NULL)
{
if ((dgt & 1) == 0)
{
//// it means c<<4 is c*2power4
outbuf[dgt / 2] = c << 4;
}
else
{
outbuf[dgt / 2] |= c;
}
}
dgt++;
}
if (dgt < 6 * 2)
{
return false;
}
return true;
}
In C# it's been rewritten as:
public static void NICAddressStrToBinary(char[] nicIDStr, byte[] outbuf)
{
int c, i, dgt;
if (nicIDStr == null) return ;
for (dgt = 0, i = 0; i<=nicIDStr.Length-1; ++i)
{
c = nicIDStr[i];
if (c == '-') continue;
if ('0' <= c && c <= '9')
{
c -= '0';
}
else if ('a' <= c && c <= 'f')
{
c -= 'a' - 10;
}
else if ('A' <= c && c <= 'F')
{
c -= 'A' - 10;
}
else
{
return;
}
/* make sure there aren't too many digits
*/
if (dgt >= 6 * 2)
{
return ;
}
/* accumulate the binary NIC ID
* remembering that we're starting
* with the most significant digits first
*/
if (outbuf != null)
{
if ((dgt & 1) == 0)
{
//// it means c<<4 is c*2power4
outbuf[dgt / 2] = (byte)(c << 4);
}
else
{
outbuf[dgt / 2] |= (byte)c;
}
}
/* advance the digit index
*/
dgt++;
}
/* make sure I have enough digits
*/
if (dgt < 6 * 2)
{
return ;
}
return ;
}
Can someone please tell me what could be the cause of different values being calculated in C++ and C#?
Since the CalculateCheckSum method, takes a Byte array as an argument, I have used the BinaryFormatter class which comes with System.Runtime.
That would have been a reasonable choice in isolation, but BinaryFormatter uses a complicated format that isn't "just the bytes of the object". Those bytes are probably in there somewhere, but a lot of other stuff is too. So in this case it doesn't work out.
Even in C# there are ways to get the raw bytes of a given object, but you would have to specifically design PCInfoClass for that purpose: make it a struct, use a fixed-size array for nicID (references are a no-go). Then you can use some tricks (which trick you can use depends on the version of .NET you're targeting) to get the raw bytes of that struct.
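For illustration only, such a struct and trick could look like the following on a recent .NET (the field names and layout are made up, not the actual PCInfoClass, and the fixed-size buffer requires compiling with /unsafe):
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
unsafe struct PcInfoRaw
{
    public fixed byte NicId[6];   // MAC address bytes
    public int TimeStamp;
    public int ExpDate;
    public ushort Checksum;
}

static class RawBytes
{
    public static byte[] ToBytes(ref PcInfoRaw info)
    {
        // Reinterpret the struct's memory as bytes and copy them out (MemoryMarshal needs .NET Core 2.1 or later).
        return MemoryMarshal.AsBytes(MemoryMarshal.CreateSpan(ref info, 1)).ToArray();
    }
}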
My recommendation would be to use a BinaryWriter to manually write each field to a MemoryStream, then use ToArray as you did. Be very careful to call the right overloads of Write, and explicitly write padding bytes as well. I cannot write that code for you without knowing what the class definition looked like in C++.
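As an illustration of that recommendation only (the real layout of PCInfoClass is unknown, so the field order, sizes, and padding below are assumptions chosen to add up to the 20 bytes used above):
using System.IO;

static byte[] PcInfoToBytes(byte[] nicId, int timeStamp, int expDate, ushort checksum)
{
    using var ms = new MemoryStream();
    using var bw = new BinaryWriter(ms);
    bw.Write(nicId, 0, 6);       // 6 MAC address bytes
    bw.Write((byte)0);           // assumed alignment padding
    bw.Write((byte)0);
    bw.Write(timeStamp);         // 4 bytes, little-endian
    bw.Write(expDate);           // 4 bytes
    bw.Write(checksum);          // 2 bytes
    bw.Write((ushort)0);         // assumed trailing padding, for 20 bytes total
    bw.Flush();
    return ms.ToArray();
}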

CRC_82_Darc function in C#

I need to calculate the CRC_82_Darc hash in C#. Is there any preexisting library for this, or has someone already written such a function? I could not find anything on Google.
Here is a simple bit-wise implementation in C:
// CRC-82/DARC Calculation
// Placed into the public domain by Mark Adler, 17 June 2017.
// CRC definition:
// width=82 poly=0x0308c0111011401440411 init=0 refin=true refout=true xorout=0
// check=0x09ea83f625023801fd612 name="CRC-82/DARC"
#include <stddef.h>
#include <stdint.h>
#define POLYHIGH 0x22080
#define POLYLOW 0x8a00a2022200c430
// Update crc[0..1] with the CRC-82/DARC of the len bytes at buf. If buf is
// NULL, then initialize crc[0..1] with the CRC-82/DARC of an empty message.
// The low 64 bits of the CRC are in crc[0], and the high 18 bits of the CRC
// are in the low 18 bits of crc[1]. The remaining bits of crc[1] are always
// zero.
void crc82darc(uint64_t *crc, void const *buf, size_t len) {
if (buf == NULL) {
crc[0] = crc[1] = 0;
return;
}
uint64_t cl = crc[0], ch = crc[1] & 0x3ffff;
for (size_t i = 0; i < len; i++) {
cl ^= ((unsigned char const *)buf)[i];
for (int k = 0; k < 8; k++) {
uint64_t low = cl & 1;
cl = (cl >> 1) | (ch << 63);
ch >>= 1;
if (low) {
cl ^= POLYLOW;
ch ^= POLYHIGH;
}
}
}
crc[0] = cl;
crc[1] = ch;
}
#ifdef TEST
#include <stdio.h>
int main(void)
{
uint64_t crc[2];
crc82darc(crc, NULL, 0); // initialize crc
crc82darc(crc, "123456789", 9);
printf("0x%05llx%016llx\n", crc[1], crc[0]);
return 0;
}
#endif
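Since the question asks for C#, a fairly direct translation of the bit-wise routine above might look like this sketch; the 82-bit CRC is kept in two ulongs exactly as in the C code (crc[0] holds the low 64 bits, crc[1] the high 18 bits):
static void Crc82Darc(ulong[] crc, byte[] buf)
{
    const ulong PolyHigh = 0x22080UL;
    const ulong PolyLow = 0x8a00a2022200c430UL;
    if (buf == null)
    {
        crc[0] = crc[1] = 0;               // initialize with the CRC of an empty message
        return;
    }
    ulong cl = crc[0], ch = crc[1] & 0x3ffff;
    foreach (byte b in buf)
    {
        cl ^= b;
        for (int k = 0; k < 8; k++)
        {
            ulong low = cl & 1;
            cl = (cl >> 1) | (ch << 63);   // shift the 82-bit register right by one
            ch >>= 1;
            if (low != 0)
            {
                cl ^= PolyLow;             // apply the reflected polynomial
                ch ^= PolyHigh;
            }
        }
    }
    crc[0] = cl;
    crc[1] = ch;
}
// Usage sketch:
//   ulong[] crc = new ulong[2];
//   Crc82Darc(crc, null);                                    // initialize
//   Crc82Darc(crc, Encoding.ASCII.GetBytes("123456789"));
//   Console.WriteLine($"0x{crc[1]:x5}{crc[0]:x16}");         // expect 0x09ea83f625023801fd612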

How to Implement CRC-16-DNP using C#?

I'm trying to implement a CRC-16 [DNP] using C#; the generator polynomial is given as
I found a standard solution for CRC-16: [ Source ]
public class Crc16
{
const ushort polynomial = 0xA001;
ushort[] table = new ushort[256];
public ushort ComputeChecksum ( byte[] bytes )
{
ushort crc = 0;
for ( int i = 0; i < bytes.Length; ++i )
{
byte index = ( byte ) ( crc ^ bytes[i] );
crc = ( ushort ) ( ( crc >> 8 ) ^ table[index] );
}
return crc;
}
public byte[] ComputeChecksumBytes ( byte[] bytes )
{
ushort crc = ComputeChecksum ( bytes );
return BitConverter.GetBytes ( crc );
}
public Crc16 ()
{
ushort value;
ushort temp;
for ( ushort i = 0; i < table.Length; ++i )
{
value = 0;
temp = i;
for ( byte j = 0; j < 8; ++j )
{
if ( ( ( value ^ temp ) & 0x0001 ) != 0 )
{
value = ( ushort ) ( ( value >> 1 ) ^ polynomial );
}
else
{
value >>= 1;
}
temp >>= 1;
}
table[i] = value;
}
}
}
Now, if I convert my polynomial I get 1 0011 1101 0110 0101 => (3D65)h, and my question is: what do I need to change to make the above solution work for the given polynomial?
Edit: I also need to consider two things:
1) The initial value will be 0.
2) The final CRC has to be complemented.
This was actually very helpful for me. However, I did not use the solution SanVEE posted; I modified the code from his original post as described by Mark Adler, and it works great. At least, so far the result matches the DNP3 checksum calculator found here: http://www.lammertbies.nl/comm/info/crc-calculation.html
The code posted as SanVEE's answer looks like it might be very inefficient (e.g. using bools to store each bit), though I have not tested the two to compare. Anyone facing the same question may want to examine both answers to see which works better for them.
public class Crc16DNP3
{
const ushort polynomial = 0xA6BC; //0xA001;
ushort[] table = new ushort[256];
public ushort ComputeChecksum(byte[] bytes)
{
ushort crc = 0;
for (int i = 0; i < bytes.Length; ++i)
{
byte index = (byte)(crc ^ bytes[i]);
crc = (ushort)((crc >> 8) ^ table[index]);
}
crc = SwapBytes((ushort)(crc ^ 0xffff));
return crc;
}
public byte[] ComputeChecksumBytes(byte[] bytes)
{
ushort crc = ComputeChecksum(bytes);
return BitConverter.GetBytes(crc);
}
// SwapBytes taken from http://stackoverflow.com/questions/19560436/bitwise-endian-swap-for-various-types
private ushort SwapBytes(ushort x)
{
return (ushort)((ushort)((x & 0xff) << 8) | ((x >> 8) & 0xff));
}
public Crc16DNP3()
{
ushort value;
ushort temp;
for (ushort i = 0; i < table.Length; ++i)
{
value = 0;
temp = i;
for (byte j = 0; j < 8; ++j)
{
if (((value ^ temp) & 0x0001) != 0)
{
value = (ushort)((value >> 1) ^ polynomial);
}
else
{
value >>= 1;
}
temp >>= 1;
}
table[i] = value;
}
}
}
What's wrong with the code at your first link? That also specifies how the CRC bytes are ordered in the message.
You need to reverse the polynomial below x16. The polynomial in bit form is 10011110101100101. Drop the leading 1 (x16), and you have in groups of four: 0011 1101 0110 0101. Reversed that is: 1010 0110 1011 1100. So you should set polynomial = 0xA6BC.
The initial value is already zero. Complementing the final CRC can be done simply with ^ 0xffff.
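As a quick sanity check of that reversal, a small helper (illustration only, not part of the CRC class) confirms that reversing the 16 coefficient bits of 0x3D65 gives 0xA6BC:
static ushort Reverse16(ushort x)
{
    ushort r = 0;
    for (int i = 0; i < 16; i++)
    {
        r = (ushort)((r << 1) | (x & 1));   // move the lowest bit of x to the other end of r
        x >>= 1;
    }
    return r;
}
// Reverse16(0x3D65) == 0xA6BC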
Finally, I ended up using the following solution and thought it was worth sharing; it may be useful for someone.
private static int GetCrc ( string BitString )
{
bool[] Res = new bool[17];
bool[] CRC = new bool[16];
int i;
bool DoInvert = false;
string crcBits = string.Empty;
for ( i = 0; i < 16; ++i ) // Init before calculation
CRC[i] = false;
for ( i = 0; i < BitString.Length; ++i )
{
DoInvert = ('1' == BitString[i]) ^ CRC[15]; // XOR required?
CRC[15] = CRC[14];
CRC[14] = CRC[13];
CRC[13] = CRC[12] ^ DoInvert;
CRC[12] = CRC[11] ^ DoInvert;
CRC[11] = CRC[10] ^ DoInvert;
CRC[10] = CRC[9] ^ DoInvert;
CRC[9] = CRC[8];
CRC[8] = CRC[7] ^ DoInvert;
CRC[7] = CRC[6];
CRC[6] = CRC[5] ^ DoInvert;
CRC[5] = CRC[4] ^ DoInvert;
CRC[4] = CRC[3];
CRC[3] = CRC[2];
CRC[2] = CRC[1] ^ DoInvert;
CRC[1] = CRC[0];
CRC[0] = DoInvert;
}
for ( i = 0; i < 16; ++i )
Res[15 - i] = CRC[i] ? true : false;
Res[16] = false;
// The final result must be Complemented
for ( i = 0; i < 16; i++ )
{
if ( Res[i] )
crcBits += "0";
else
crcBits += "1";
}
return Convert.ToInt32 ( crcBits, 2 );
}
The above C# solution is converted from the C-based auto-generated code from here.

Conversion of CRC function from C to C# yields wrong values

I'm trying to convert a couple of simple CRC calculating functions from C to C#, but I seem to be getting incorrect results.
The C functions are:
#define CRC32_POLYNOMIAL 0xEDB88320
unsigned long CRC32Value(int i)
{
int j;
unsigned long ulCRC;
ulCRC = i;
for (j=8;j>0;j--)
{
if (ulCRC & 1)
ulCRC = (ulCRC >> 1)^CRC32_POLYNOMIAL;
else
ulCRC >>= 1;
}
return ulCRC;
}
unsigned long CalculateBlockCRC32(
unsigned long ulCount,
unsigned char *ucBuffer)
{
unsigned long ulTemp1;
unsigned long ulTemp2; unsigned long ulCRC = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
ulTemp2 = CRC32Value(((int)ulCRC^*ucBuffer++)&0xff);
ulCRC = ulTemp1^ulTemp2;
}
return(ulCRC);
}
These are well defined; they are taken from a user manual. My C# versions of these functions are:
private ulong CRC32POLYNOMIAL = 0xEDB88320L;
private ulong CRC32Value(int i)
{
int j;
ulong ulCRC = (ulong)i;
for (j = 8; j > 0; j--)
{
if (ulCRC % 2 == 1)
{
ulCRC = (ulCRC >> 1) ^ CRC32POLYNOMIAL;
}
else
{
ulCRC >>= 1;
}
}
return ulCRC;
}
private ulong CalculateBlockCRC32(ulong ulCount, byte[] ucBuffer)
{
ulong ulTemp1;
ulong ulTemp2;
ulong ulCRC=0;
int bufind=0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[bufind]) & 0xFF);
ulCRC = ulTemp1 ^ ulTemp2;
bufind++;
}
return ulCRC;
}
As I mentioned, there are discrepancies between the C version and the C# version. One possible source is my understanding of the C expression ulCRC & 1, which I believe will only be true for odd numbers.
I call the C# function like this:
string contents = "some data";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
ulong calculatedCRC = this.CalculateBlockCRC32((ulong)toBeHexed.Length, toBeHexed);
And the C function is called like this:
char *Buff="some data";
unsigned long iLen = strlen(Buff);
unsigned long CRC = CalculateBlockCRC32(iLen, (unsigned char*) Buff);
I believe that I am calling the functions with the same data in each language; is that correct? If anyone could shed some light on this, I would be very grateful.
As has already been pointed out by @Adriano Repetti, you should use the UInt32 datatype in place of ulong (C#'s ulong is the 64-bit unsigned UInt64, whereas in VC++ unsigned long is only a 32-bit unsigned type).
private UInt32 CRC32POLYNOMIAL = 0xEDB88320;
private UInt32 CRC32Value(int i)
{
int j;
UInt32 ulCRC = (UInt32)i;
for (j = 8; j > 0; j--)
{
if (ulCRC % 2 == 1)
{
ulCRC = (ulCRC >> 1) ^ CRC32POLYNOMIAL;
}
else
{
ulCRC >>= 1;
}
}
return ulCRC;
}
private UInt32 CalculateBlockCRC32(UInt32 ulCount, byte[] ucBuffer)
{
UInt32 ulTemp1;
UInt32 ulTemp2;
UInt32 ulCRC = 0;
int bufind = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFF;
ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[bufind]) & 0xFF);
ulCRC = ulTemp1 ^ ulTemp2;
bufind++;
}
return ulCRC;
}
string contents = "12";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
UInt32 calculatedCRC = CalculateBlockCRC32((UInt32)toBeHexed.Length, toBeHexed);
Usually in C# it doesn't matter whether you use the C# data type name (recommended by Microsoft) or the ECMA type name. But in this and similar cases of bit-level manipulation it can greatly clarify the intent and prevent mistakes.
In C it is always a good idea to use the typedefs from stdint.h. They do the same job as the ECMA types in C#: they clarify the intent and also guarantee the length and sign of the data types used (C compilers may use different lengths for the same types, because the standard doesn't specify exact sizes):
#include <stdint.h>
#define CRC32_POLYNOMIAL ((uint32_t)0xEDB88320)
uint32_t CRC32Value(uint32_t i)
{
uint32_t j;
uint32_t ulCRC;
ulCRC = i;
for (j = 8; j > 0; j--)
{
if (ulCRC & 1)
ulCRC = (ulCRC >> 1) ^ CRC32_POLYNOMIAL;
else
ulCRC >>= 1;
}
return ulCRC;
}
uint32_t CalculateBlockCRC32(
size_t ulCount,
uint8_t *ucBuffer)
{
uint32_t ulTemp1;
uint32_t ulTemp2;
uint32_t ulCRC = 0;
while (ulCount-- != 0)
{
ulTemp1 = (ulCRC >> 8) & ((uint32_t)0x00FFFFFF);
ulTemp2 = CRC32Value((ulCRC^*ucBuffer++)&0xff);
ulCRC = ulTemp1^ulTemp2;
}
return(ulCRC);
}
char *Buff = "12";
size_t iLen = strlen(Buff);
uint32_t CRC = CalculateBlockCRC32(iLen, (uint8_t *) Buff);
printf("%u", CRC);
