How to calculate CRC_B in C#

How to calculate CRC_B encoding in C# as described in ISO 14443?
Here is some background info:
CRC_B encoding
This annex is provided for explanatory purposes and indicates the bit patterns that will
exist in the physical layer. It is included for the purpose of checking an ISO/IEC
14443-3 Type B implementation of CRC_B encoding. Refer to ISO/IEC 3309 and CCITT X.25
2.2.7 and V.42 8.1.1.6.1 for further details. Initial Value = 'FFFF'
Example 1: for 0x00 0x00 0x00 you should end up with CRC_B of 0xCC 0xC6
Example 2: for 0x0F 0xAA 0xFF you should end up with CRC_B of 0xFC 0xD1
I tried some random CRC16 libraries, but they don't give me the same results, and neither did the online CRC calculators I checked.

I reversed this from the C code in ISO/IEC JTC1/SC17 N 3497, so it's not pretty, but it does what you need:
public class CrcB
{
    const ushort __crcBDefault = 0xffff;

    // Shift one input byte through the 16-bit CRC register.
    private static ushort UpdateCrc(byte b, ushort crc)
    {
        unchecked
        {
            byte ch = (byte)(b ^ (byte)(crc & 0x00ff));
            ch = (byte)(ch ^ (ch << 4));
            return (ushort)((crc >> 8) ^ (ch << 8) ^ (ch << 3) ^ (ch >> 4));
        }
    }

    public static ushort ComputeCrc(byte[] bytes)
    {
        var res = __crcBDefault;
        foreach (var b in bytes)
            res = UpdateCrc(b, res);
        return (ushort)~res; // CRC_B complements the final register value
    }
}
As a test, try the code below:
public static void Main(string[] args)
{
    // Example 2 from the question: 0x0F 0xAA 0xFF should give CRC_B 0xFC 0xD1
    var bytes = new byte[] { 0x0F, 0xAA, 0xFF };
    var crc = CrcB.ComputeCrc(bytes);
    var cbytes = BitConverter.GetBytes(crc);
    Console.WriteLine("First (0xFC): {0:X}\tSecond (0xD1): {1:X}", cbytes[0], cbytes[1]);

    // Example 1 from the question: 0x00 0x00 0x00 should give CRC_B 0xCC 0xC6
    bytes = new byte[] { 0x00, 0x00, 0x00 };
    crc = CrcB.ComputeCrc(bytes);
    cbytes = BitConverter.GetBytes(crc);
    Console.WriteLine("First (0xCC): {0:X}\tSecond (0xC6): {1:X}", cbytes[0], cbytes[1]);
    Console.ReadLine();
}
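If you want one more sanity check beyond the two ISO examples: CRC_B uses the same parameters as the standard CRC-16/X.25, whose well-known check value for the ASCII string "123456789" is 0x906E. That value is my addition from the public CRC catalog, not from the ISO text:

// Assumed check value from the CRC-16/X.25 catalog entry, not from ISO 14443 itself
var check = CrcB.ComputeCrc(System.Text.Encoding.ASCII.GetBytes("123456789"));
Console.WriteLine(check.ToString("X4")); // expected: 906E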

Related

Decreasing volume of .wav file creates heavy distortion

I have a problem that just baffles me. I import a .wav file and read it as bytes. Then I convert the bytes into integers, each of which I divide by 2 (or some other number) in order to decrease the volume. Then I write the new data into a new .wav file. The result is loud and heavy distortion over the original track.
Scroll to the Main() method for the relevant (C#-)code:
using System;
using System.IO;

namespace ConsoleApp2 {
    class basic {
        public static byte[] bit32(int num) { //turns int into byte array of length 4
            byte[] numbyt = new byte[4] { 0x00, 0x00, 0x00, 0x00 };
            int pow;
            for (int k = 3; k >= 0; k--) {
                pow = (int)Math.Pow(16, 2*k + 1);
                numbyt[k] += (byte)(16*(num/pow));
                num -= numbyt[k]*(pow/16);
                numbyt[k] += (byte)(num/(pow/16));
                num -= (num/(pow/16))*pow/16;
            }
            return numbyt;
        }
        public static byte[] bit16(int num) { //turns int into byte array of length 2
            if (num < 0) {
                num += 65535;
            }
            byte[] numbyt = new byte[2] { 0x00, 0x00 };
            int pow;
            for (int k = 1; k >= 0; k--) {
                pow = (int)Math.Pow(16, 2*k + 1);
                numbyt[k] += (byte)(16*(num/pow));
                num -= numbyt[k]*(pow/16);
                numbyt[k] += (byte)(num/(pow/16));
                num -= (num/(pow/16))*pow/16;
            }
            return numbyt;
        }
        public static int bitint16(byte[] numbyt) { //turns byte array of length 2 into int
            int num = 0;
            num += (int)Math.Pow(16, 2)*numbyt[1];
            num += numbyt[0];
            return num;
        }
    }
    class wavfile: FileStream {
        public wavfile(string name, int len) : base(name, FileMode.Create) {
            int samplerate = 44100;
            byte[] riff = new byte[] { 0x52, 0x49, 0x46, 0x46 };
            this.Write(riff, 0, 4);
            byte[] chunksize;
            chunksize = basic.bit32(36 + len*4);
            this.Write(chunksize, 0, 4);
            byte[] wavebyte = new byte[4] { 0x57, 0x41, 0x56, 0x45 };
            this.Write(wavebyte, 0, 4);
            byte[] fmt = new byte[] { 0x66, 0x6d, 0x74, 0x20 };
            this.Write(fmt, 0, 4);
            byte[] subchunk1size = new byte[] { 0x10, 0x00, 0x00, 0x00 };
            this.Write(subchunk1size, 0, 4);
            byte[] formchann = new byte[] { 0x01, 0x00, 0x02, 0x00 };
            this.Write(formchann, 0, 4);
            byte[] sampleratebyte = basic.bit32(samplerate);
            this.Write(sampleratebyte, 0, 4);
            byte[] byterate = basic.bit32(samplerate*4);
            this.Write(byterate, 0, 4);
            byte[] blockalign = new byte[] { 0x04, 0x00 };
            this.Write(blockalign, 0, 2);
            byte[] bits = new byte[] { 0x10, 0x00 };
            this.Write(bits, 0, 2);
            byte[] data = new byte[] { 0x64, 0x61, 0x74, 0x61 };
            this.Write(data, 0, 4);
            byte[] samplesbyte = basic.bit32(len*4);
            this.Write(samplesbyte, 0, 4);
        }
        public void sound(int[] w, int len, wavfile wavorigin = null) {
            byte[] wavbyt = new byte[len*4];
            for (int t = 0; t < len*2; t++) {
                byte[] wavbit16 = basic.bit16(w[t]);
                wavbyt[2*t] = wavbit16[0];
                wavbyt[2*t + 1] = wavbit16[1];
            }
            this.Write(wavbyt, 0, len*4);
            System.Media.SoundPlayer player = new System.Media.SoundPlayer();
            player.SoundLocation = this.Name;
            while (true) {
                player.Play();
                Console.WriteLine("repeat?");
                if (Console.ReadLine() == "no") {
                    break;
                }
            }
        }
    }
    class Program {
        static void Main() {
            int[] song = new int[45000*2];
            byte[] songbyt = File.ReadAllBytes("name.wav"); //use your stereo, 16bits per sample wav-file
            for (int t = 0; t < 45000*2; t++) {
                byte[] songbytsamp = new byte[2] { songbyt[44 + 2*t], songbyt[44 + 2*t + 1] }; //I skip the header
                song[t] = basic.bitint16(songbytsamp)/2; //I divide by 2 here, remove the "/2" to hear the normal sound again
                //song[t] *= 2;
            }
            wavfile wav = new wavfile("test.wav", 45000); //constructor class that writes the header of a .wav file
            wav.sound(song, 45000); //method that writes the data from "song" into the .wav file
        }
    }
}
The problem is not the rounding down that happens when you divide an odd number by 2; you can uncomment the line that says song[t] *= 2; and hear for yourself that all of the distortion has completely disappeared again.
I must be making a small stupid mistake somewhere, but I cannot find it. I just want to make the sound data quieter to avoid distortion when I add more sounds to it.
Well, I knew it would be something stupid, and I was right. I forgot to account for the fact that negative samples are stored in signed 16-bit format as values above 2^15, and when you divide by 2, you push them into (very large) positive values. I altered my code to subtract 2^16 from any number above 2^15 before dividing by 2. I have to thank this person though: How to reduce volume of wav stream?
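Spelled out, the fix described above looks roughly like this in the read loop (a sketch using the same songbyt/song arrays from the question):

for (int t = 0; t < 45000*2; t++) {
    byte[] songbytsamp = new byte[2] { songbyt[44 + 2*t], songbyt[44 + 2*t + 1] };
    int sample = basic.bitint16(songbytsamp);
    if (sample > 32767) {  // values above 2^15 - 1 are really negative samples
        sample -= 65536;   // map back into the signed range first
    }
    song[t] = sample / 2;  // halving now keeps negative samples negative
}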
If this means that my question was a duplicate, then go ahead and delete it, but I'm letting it stay for now, because someone else might find it helpful.
Using Math.Pow to do bit and byte operations is a really bad idea. That function takes double values as inputs and returns a double. It also does exponentiation (not a trivial operation). Using traditional bit shift and mask operations is clearer, much faster and less likely to introduce noise (because of the inaccuracy of doubles).
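To make the difference concrete, here is a small side-by-side (my illustration, using the same powers of 16 that the question computes):

int pow1 = (int)Math.Pow(16, 3);  // 4096, computed via doubles and exponentiation
int pow2 = 1 << (4 * 3);          // 4096, one integer shift (16^k == 1 << 4*k)

int value = 0xF1E2;
byte low  = (byte)(value & 0xFF);         // 0xE2: mask out the low byte
byte high = (byte)((value >> 8) & 0xFF);  // 0xF1: shift, then mask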
As you noticed, you really want to work with unsigned quantities (like uint/UInt32 and ushort/UInt16). Sign extension trips up everyone when doing this sort of work.
This is not a full answer to your question, but it does present a way to do the byte operations that is arguably better.
First, create a small struct to hold a combination of a bit-mask and a shift quantity:
public struct MaskAndShift {
    public uint Mask {get; set;}
    public int Shift {get; set;}
}
Then I create two arrays of these structs for describing what should be done to extract individual bytes from a uint or a ushort. I put them both in a static class named Worker:
public static class Worker {
    public static MaskAndShift[] Mask32 = new MaskAndShift[] {
        new MaskAndShift {Mask = 0xFF000000, Shift = 24},
        new MaskAndShift {Mask = 0x00FF0000, Shift = 16},
        new MaskAndShift {Mask = 0x0000FF00, Shift = 8},
        new MaskAndShift {Mask = 0x000000FF, Shift = 0},
    };
    public static MaskAndShift[] Mask16 = new MaskAndShift[] {
        new MaskAndShift {Mask = 0x0000FF00, Shift = 8},
        new MaskAndShift {Mask = 0x000000FF, Shift = 0},
    };
}
Looking at the first entry in the first array, it says "to extract the first byte from a uint, mask that uint with 0xFF000000 and shift the result 24 bits to the right". If you have endian-ness issues, you can simply re-order the entries in the array.
Then I created this static function (in the Worker class) to convert a uint / UInt32 to an array of four bytes:
public static byte[] UintToByteArray (uint input) {
    var bytes = new byte[4];
    int i = 0;
    foreach (var maskPair in Mask32) {
        var masked = input & maskPair.Mask;
        if (maskPair.Shift != 0) {
            masked >>= maskPair.Shift;
        }
        bytes[i++] = (byte) masked;
    }
    return bytes;
}
The code to do the same operation for a 16 bit ushort (aka UInt16) looks nearly the same (there's probably an opportunity for some refactoring here):
public static byte[] UShortToByteArray (ushort input) {
    var bytes = new byte[2];
    int i = 0;
    foreach (var maskPair in Mask16) {
        var masked = input & maskPair.Mask;
        if (maskPair.Shift != 0) {
            masked >>= maskPair.Shift;
        }
        bytes[i++] = (byte) masked;
    }
    return bytes;
}
The reverse operation is much simpler (however, if you have endian-ness issues, you'll need to write the code). Here I just take the entries of the array, add them into a value and shift the result:
public static uint ByteArrayToUint (byte[] bytes) {
    uint result = 0;
    //note that the first time through, result is zero, so shifting is a noop
    foreach (var b in bytes){
        result <<= 8;
        result += b;
    }
    return result;
}
Doing this for the 16 bit version ends up being effectively the same code, so...
public static ushort ByteArrayToUshort (byte[] bytes) {
    return (ushort) ByteArrayToUint(bytes);
}
Bit-twiddling never works the first time. So I wrote some test code:
public static void Main(){
    //pick a nice obvious pattern
    uint bit32Test = (((0xF1u * 0x100u) + 0xE2u) * 0x100u + 0xD3u) * 0x100u + 0xC4u;
    Console.WriteLine("Start");
    Console.WriteLine("Input 32 Value: " + bit32Test.ToString("X"));
    var bytes32 = Worker.UintToByteArray(bit32Test);
    foreach (var b in bytes32){
        Console.WriteLine(b.ToString("X"));
    }
    Console.WriteLine();

    ushort bit16Test = (ushort)((0xB5u * 0x100u) + 0xA6u);
    Console.WriteLine("Input 16 Value: " + bit16Test.ToString("X"));
    var bytes16 = Worker.UShortToByteArray(bit16Test);
    foreach (var b in bytes16){
        Console.WriteLine(b.ToString("X"));
    }

    Console.WriteLine("\r\nNow the reverse");
    uint reconstituted32 = Worker.ByteArrayToUint(bytes32);
    Console.WriteLine("Reconstituted 32: " + reconstituted32.ToString("X"));
    ushort reconstituted16 = Worker.ByteArrayToUshort(bytes16);
    Console.WriteLine("Reconstituted 16: " + reconstituted16.ToString("X"));
}
The output from that test code looks like:
Start
Input 32 Value: F1E2D3C4
F1
E2
D3
C4
Input 16 Value: B5A6
B5
A6
Now the reverse
Reconstituted 32: F1E2D3C4
Reconstituted 16: B5A6
Also note that I do everything in hexadecimal - it makes everything so much easier to read and to understand.

Converting C openssl TripleDes encryption to .NET

I have been trying to replicate an encryption process for a 3rd-party integration. The 3rd party uses OpenSSL and has given me the C code they use to perform the process. I have been trying to port this process to C# for weeks, but I appear to be missing something I cannot work out. Most likely I am mistranslating something from the C code and the OpenSSL library, but I cannot figure it out for the life of me.
The main port of OpenSSL to .NET (https://github.com/openssl-net/openssl-net) unfortunately does not support TripleDES, so it cannot be used.
Here is the example C code
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <ctype.h>
#include <openssl/dh.h>
#include <openssl/pem.h>
#include <openssl/engine.h>
#include <openssl/bn.h>
#include <openssl/des.h>
#include <openssl/rand.h>

static void encrypt_ean(int argc, char **argv)
{
    size_t i;
    if (argc != 3)
        usage();
    char *mwkstr = argv[1];
    char *ean = argv[2];
    unsigned char mwk[24];
    hex2bin(mwkstr, 48, mwk);
    DES_key_schedule keysched[3];
    set_key_checked(mwk, keysched);
    unsigned char idata[16];
    unsigned char odata[16];
    DES_cblock zero_iv;
    memset(&zero_iv, 0, sizeof(zero_iv));
    if (RAND_bytes(idata+0, 8) != 1) {
        fprintf(stderr, "RAND_bytes failed.\n");
        exit(1);
    }
    for (i=0; i<8; i++)
        idata[8+i] = (i >= eanlen) ? ' ' : ean[i];
    idata[7] = chksum((char *)idata+8, 8);
    if (g_verbose) {
        printf("ean = %s\n", mean);
        printf("idata = ");
        for (i=0; i<sizeof(idata); i++)
            printf("%d %02X\n", (int)i, idata[i]);
        printf("\n");
    }
    DES_ede3_cbc_encrypt(idata, odata, sizeof(odata),
        &keysched[0], &keysched[1], &keysched[2], &zero_iv, DES_ENCRYPT);
    for (i=0; i<sizeof(odata); i++)
        printf("%02X", odata[i]);
    printf("\n");
}

static unsigned char chksum(char *data, size_t datalen)
{
    size_t i;
    unsigned char sum=0;
    for (i=0; i<datalen; i++)
        sum += data[i];
    return sum;
}

static void hex2bin(const char *str, int len, unsigned char *bin)
{
    int i, j, x;
    for (i=0, j=0; i<len; i+=2) {
        char tmpstr[3];
        tmpstr[0] = str[i+0];
        tmpstr[1] = str[i+1];
        tmpstr[2] = '\0';
        sscanf(tmpstr, "%02X", &x);
        bin[j++] = x;
    }
}

static int set_key_checked(unsigned char *key, DES_key_schedule *keysched)
{
    if (DES_set_key_checked((const_DES_cblock *)(key+0), &keysched[0]) < 0) {
set_key_err:
        fprintf(stderr, "DES_set_key_checked failed.\n");
        exit(1);
    }
    if (DES_set_key_checked((const_DES_cblock *)(key+8), &keysched[1]) < 0)
        goto set_key_err;
    if (DES_set_key_checked((const_DES_cblock *)(key+16), &keysched[2]) < 0)
        goto set_key_err;
    return 0;
}
And here is my C# Code (Consider ean = pin for easier transposing)
internal static class PINEncoding
{
    internal static string EncodePIN(string unencodedPIN, string decryptedWorkingKey)
    {
        var bytes = GenerateRandomBytes();
        var asciiPin = ConvertPINToASCIIBytes(unencodedPIN);
        var checksum = new byte[1];
        checksum[0] = ComputeChecksum(asciiPin);
        var pinBlock = ObtainPinBlock(bytes, checksum, asciiPin);
        return EncryptPIN(pinBlock, decryptedWorkingKey);
    }

    private static byte[] GenerateRandomBytes()
    {
        Random rnd = new Random();
        byte[] b = new byte[7];
        rnd.NextBytes(b);
        return b;
    }

    private static byte[] ConvertPINToASCIIBytes(string pin)
    {
        return ASCIIEncoding.ASCII.GetBytes(pin);
    }

    private static byte ComputeChecksum(byte[] data)
    {
        long longSum = data.Sum(x => (long)x);
        return unchecked((byte)longSum);
    }

    private static byte[] ObtainPinBlock(byte[] random, byte[] checksum, byte[] asciiPin)
    {
        var result = new byte[random.Length + checksum.Length + asciiPin.Length];
        Buffer.BlockCopy(random, 0, result, 0, random.Length);
        Buffer.BlockCopy(checksum, 0, result, random.Length, checksum.Length);
        Buffer.BlockCopy(asciiPin, 0, result, random.Length + checksum.Length, asciiPin.Length);
        return result;
    }

    private static string EncryptPIN(byte[] eanBlock, string decryptedWorkingKey)
    {
        var keyAsBytes = HexStringBytesConverter.ConvertHexStringToByteArray(decryptedWorkingKey);
        var byteResult = TripleDESEncryption.Encrypt(eanBlock, keyAsBytes);
        return BitConverter.ToString(byteResult).Replace("-", "");
    }
}

public static class TripleDESEncryption
{
    public static byte[] Encrypt(byte[] toEncrypt, byte[] key)
    {
        using (var tdes = new TripleDESCryptoServiceProvider
        {
            Key = key,
            IV = new byte[8] { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
            Mode = CipherMode.CBC,
            Padding = PaddingMode.None
        })
        {
            var cTransform = tdes.CreateEncryptor();
            return cTransform.TransformFinalBlock(toEncrypt, 0, toEncrypt.Length);
        }
    }
}
One of my example inputs with its expected output is:
Unencoded Pin: 71548715
Decrypted Working Key: A7E5A86DB6F41FBA0DE99DE5BC3246ABA7E5A86DB6F41FBA
Expected Encryption Result: C097280EC13B486AE5DA57DB8F779184
Result Obtained by Above : C909165718FCE9A432AD432E7A104DCD
The C/C++ code performs TripleDES encryption in CBC mode without padding. The input parameters are the hex-encoded key (argv[1]) and the EAN/PIN (argv[2]). Before encryption, the EAN/PIN is preceded by an 8-byte value whose first 7 bytes are randomly generated with RAND_bytes() and whose 8th byte is a checksum generated with chksum(). A zero IV is used.
The C# code does the same! Of course, because of the first random 7 bytes, this cannot be verified by just comparing the ciphertexts as you did, but by comparing the ciphertexts using the identical leading 7 bytes in both codes.
The leading 7 bytes for this test can be determined beforehand by decrypting the posted expected ciphertext using the posted key and a zero IV with a tool or other code. This decryption returns hex encoded the value 51174b043d6274a63731353438373135 (performed e.g. with http://tripledes.online-domain-tools.com/), of which the last 8 bytes are ASCII decoded 71548715, thus corresponding to the posted EAN/PIN. The first 7 bytes are hex encoded 51174b043d6274.
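If you prefer to reproduce that decryption in C# instead of an online tool, a few lines suffice (a sketch; FromHex is a hypothetical helper, and the cipher settings mirror the Encrypt method above):

static byte[] FromHex(string hex)
{
    var bytes = new byte[hex.Length / 2];
    for (int i = 0; i < bytes.Length; i++)
        bytes[i] = Convert.ToByte(hex.Substring(2 * i, 2), 16);
    return bytes;
}

static void ShowPlainBlock()
{
    byte[] key = FromHex("A7E5A86DB6F41FBA0DE99DE5BC3246ABA7E5A86DB6F41FBA");
    byte[] ct = FromHex("C097280EC13B486AE5DA57DB8F779184");
    using (var tdes = new TripleDESCryptoServiceProvider
    {
        Key = key,
        IV = new byte[8],
        Mode = CipherMode.CBC,
        Padding = PaddingMode.None
    })
    {
        byte[] pt = tdes.CreateDecryptor().TransformFinalBlock(ct, 0, ct.Length);
        // prints 51174B043D6274A63731353438373135; the last 8 bytes are ASCII "71548715"
        Console.WriteLine(BitConverter.ToString(pt).Replace("-", ""));
    }
}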
If for the test of the C# code in EncodePIN() the line
var bytes = GenerateRandomBytes();
is replaced by
var bytes = HexStringBytesConverter.ConvertHexStringToByteArray("51174b043d6274");
the call
Console.WriteLine(PINEncoding.EncodePIN("71548715", "A7E5A86DB6F41FBA0DE99DE5BC3246ABA7E5A86DB6F41FBA"));
returns
C097280EC13B486AE5DA57DB8F779184
in accordance with the expected ciphertext, proving that the C/C++ and C# code are functionally identical.
Note that the C/C++ code actually has a bit more functionality under the hood, e.g. checking the key (see DES_set_key_checked), which however has no effect on the result if the key is valid (odd parity, neither weak nor semi-weak).
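For reference, the key check that DES_set_key_checked performs is just odd parity on every key byte (plus a weak-key table lookup); a minimal sketch of the parity half in C# (the name HasOddParity is mine):

static bool HasOddParity(byte[] key)
{
    foreach (byte b in key)
    {
        int bits = 0;
        for (int i = 0; i < 8; i++)
            bits += (b >> i) & 1;  // count the set bits of this key byte
        if (bits % 2 == 0)
            return false;          // DES requires an odd bit count per byte
    }
    return true;
}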

Set Bits of a Byte by a ByteValue

I have a Byte scaleValue with the value 3 (0000.0011 binary)
Now I want to set bits 3 and 4 (Scale) of the byte Config (see image) using my byte scaleValue, but it does not work.
Before: 0000.0000 (if configByte has the init value 0)
After: 0001.1000
Here is my code:
configByte = (byte) (configByte | (scaleValue << 3));
Byte Config: [image of the Config byte layout not reproduced here]
If configByte is the entire 8-bit chunk, and scaleValue is the value (currently in bits 0/1) that you want to inject into bits 3/4, then fundamentally you need:
configByte = (byte)(configByte | (scaleValue << 3));
However, this assumes that:
bits 3/4 in configByte are currently zero
only bits 0/1 in scaleValue are set (or not)
If those two assumptions aren't true, then you need to mask out the issue with ... masks:
configByte = (byte)((configByte & 231) | ((scaleValue & 3) << 3));
The & 231 (0xE7) clears bits 3/4 in the old value; the & 3 keeps just bits 0/1 of the new value (before the shift).
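A quick worked check with the values from the question (scaleValue = 3, configByte starting at 0):

byte configByte = 0b0000_0000;
byte scaleValue = 0b0000_0011;  // the value 3
configByte = (byte)((configByte & 231) | ((scaleValue & 3) << 3));
Console.WriteLine(Convert.ToString(configByte, 2).PadLeft(8, '0'));  // 00011000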
You can use these extension methods:
public static class ByteExtensions // extension methods must live in a static class
{
    public static bool GetBit(this byte data, byte position)
    {
        byte mask = (byte)(1 << position);
        return (data & mask) != 0;
    }

    public static byte SetBit(this byte data, byte position, bool value)
    {
        byte mask = (byte)(1 << position);
        if (value)
        {
            return (byte)(data | mask);
        }
        else
        {
            return (byte)(data & (~mask));
        }
    }
}
static void Main(string[] args)
{
    byte data = 0b0000100;  // bit 2 set
    if (data.GetBit(2))
    {
        // true: bit 2 is set in 0b0000100
    }
    data = data.SetBit(4, true);   // now 0b0010100
    data = data.SetBit(2, false);  // now 0b0010000
}

Python XOR on hex and string (parity check function)

I am running into problems with hex values and Python. I am trying to write a function which performs a bytewise XOR and returns a hex value.
Basically I am trying to convert this C# code to Python:
private byte[] AddParity(string _in)
{
    byte parity = 0x7f;
    List<byte> _out = new List<byte>();
    ASCIIEncoding asc = new ASCIIEncoding();
    byte[] bytes = asc.GetBytes(_in + '\r');
    foreach (byte bt in bytes)
    {
        parity ^= bt;
        _out.Add(bt);
    }
    _out.Add(parity);
    return _out.ToArray();
}
Can someone point me in the right direction?
parity = 0x7f
parities = [int(item, 16) ^ parity for item in "4e 7f 2b".split()]
# or maybe
parities = [ord(item) ^ parity for item in "somestring"]
I guess you are using this as some sort of checksum
parity = 0x7f
bits = []
for bit in "somestring":
    parity ^= ord(bit)
    parity &= 0xFF  # ensure width
    bits.append(bit)
bits.append(parity)
To do the checksum more pythonically (this is the answer you want):
from functools import reduce  # reduce lives in functools on Python 3

bytestring = "vTest\r"
bits = chr(0x7f) + bytestring
checksum = reduce(lambda x, y: chr((ord(x) ^ ord(y)) & 0xff), bits)
message = bytestring + checksum
print([hex(ord(x)) for x in message])
# Result: ['0x76', '0x54', '0x65', '0x73', '0x74', '0xd', '0x32']
# ser.write(message)
If you want to see the hex values:
print([hex(p) for p in parities])
or the binary:
print([bin(p) for p in parities])

CRC16 ISO 13239 Implementation

I'm trying to implement CRC16 in C#. I have already tried many different implementations, but most of them give me different values. Here is some of the code I have already used.
private static int POLYNOMIAL = 0x8408;
private static int PRESET_VALUE = 0xFFFF;

public static int crc16(byte[] data)
{
    int current_crc_value = PRESET_VALUE;
    for (int i = 0; i < data.Length; i++)
    {
        current_crc_value ^= data[i] & 0xFF;
        for (int j = 0; j < 8; j++)
        {
            if ((current_crc_value & 1) != 0)
            {
                current_crc_value = (current_crc_value >> 1) ^ POLYNOMIAL;
            }
            else
            {
                current_crc_value = current_crc_value >> 1;
            }
        }
    }
    current_crc_value = ~current_crc_value;
    return current_crc_value & 0xFFFF;
}
This is another implementation I used, but both give different values:
public class Crc16 // the members below belong to one class
{
    const ushort polynomial = 0xA001;
    ushort[] table = new ushort[256];

    public ushort ComputeChecksum(byte[] bytes)
    {
        ushort crc = 0;
        for (int i = 0; i < bytes.Length; ++i)
        {
            byte index = (byte)(crc ^ bytes[i]);
            crc = (ushort)((crc >> 8) ^ table[index]);
        }
        return crc;
    }

    public byte[] ComputeChecksumBytes(byte[] bytes)
    {
        ushort crc = ComputeChecksum(bytes);
        return BitConverter.GetBytes(crc);
    }

    public Crc16()
    {
        ushort value;
        ushort temp;
        for (ushort i = 0; i < table.Length; ++i)
        {
            value = 0;
            temp = i;
            for (byte j = 0; j < 8; ++j)
            {
                if (((value ^ temp) & 0x0001) != 0)
                {
                    value = (ushort)((value >> 1) ^ polynomial);
                }
                else
                {
                    value >>= 1;
                }
                temp >>= 1;
            }
            table[i] = value;
        }
    }
}
The value I'm using is an octet string, "[jp3]TEST [fl]Flashing[/fl]", and its expected CRC is 95F9 in hex. This is an example from the guide to the NTCIP protocol.
Thanks
This:
static readonly ushort[] fcstab = new ushort[] {
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
};
static ushort compute_fcs(byte[] data)
{
    return compute_fcs(data, 0, data.Length);
}

static ushort compute_fcs(byte[] data, int start, int length)
{
    ushort fcs = 0xFFFF;
    int end = start + length;
    for (int i = start; i < end; i++)
    {
        fcs = (ushort)(((ushort)(fcs >> 8)) ^ fcstab[(fcs ^ data[i]) & 0xFF]);
    }
    return (ushort)(~fcs);
}

static void Main(string[] args)
{
    byte[] pattern = new byte[] { 0x02, 0x07, 0x01, 0x03, 0x01, 0x02, 0x00, 0x34, 0x07, 0x07, 0x1C, 0x59, 0x34, 0x6F, 0xE1, 0x83, 0x00, 0x00, 0x41, 0x06, 0x06, 0x7B, 0x3C, 0xFF, 0xCF, 0x3C, 0xC0 };
    // http://www.ite.org/standards/1203v03-04%20Part%201%20dms2011.pdf
    // Page 158, CRC = 0x52ED
    ushort fcs = compute_fcs(pattern); // 0x52ED
}
will work for the only test given here http://www.ite.org/standards/1203v03-04%20Part%201%20dms2011.pdf (around page 158, CRC = 0x52ED).
For the string example of the PDF, as written some pages later:
" Indicates the CRC-16 (polynomial defined in ISO/IEC 3309) value
created using the values of the dmsMessageMultiString (MULTI-Message), the
dmsMessageBeacon, and the dmsMessagePixelService objects in the order listed,
not including the OER type or length fields. Note that the calculation shall
assume a value of zero (0) for the dmsMessageBeacon object and/or for the
dmsMessagePixelService object if they are not supported
(emphasis added)
so:
string str = "[jp3]TEST [fl]Flashing[/fl]";
var bytes = Encoding.ASCII.GetBytes(str);
Array.Resize(ref bytes, bytes.Length + 2);
// Note that these two rows are useless, because the Array.Resize will have already filled with 0
bytes[bytes.Length - 2] = 0; // dmsMessageBeacon
bytes[bytes.Length - 1] = 0; // dmsMessagePixelService
ushort fcs2 = compute_fcs(bytes); // 0xF995
var bytes2 = BitConverter.GetBytes(fcs2); // 0x95 0xF9
This shows that the protocol is little-endian (like my Intel PC). In fact, the CRC-16 of the string is 0xF995, but those 16 bits appear in memory as 0x95 0xF9 (matching the example, which lists the individual bytes).
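You can observe that byte order in isolation, too (BitConverter follows the machine's endianness, which BitConverter.IsLittleEndian reports):

ushort crc = 0xF995;
byte[] raw = BitConverter.GetBytes(crc);
Console.WriteLine(BitConverter.IsLittleEndian);      // True on x86/x64
Console.WriteLine("{0:X2} {1:X2}", raw[0], raw[1]);  // 95 F9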
There are many CRC-16 algorithms.
For instance:
CRC-16-IBM, which is used in the Modbus protocol, USB, etc., is the most popular.
CRC-16-CCITT is used in Bluetooth.
I use CRC-16-IBM in my applications, which work with industrial controllers.
public static UInt16 FastCRC16(byte[] Buffer, UInt16 ui_length)
{
UInt16[] crc_table = {
0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040};
    UInt16 Crc = 65535;
    UInt16 x;
    for (UInt16 i = 0; i < ui_length; i++)
    {
        x = (UInt16)(Crc ^ Buffer[i]);
        Crc = (UInt16)((Crc >> 8) ^ crc_table[x & 0x00FF]);
    }
    return Crc;
}
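As a quick self-test (my addition, assuming the standard CRC-16/Modbus check string, whose catalog value for ASCII "123456789" is 0x4B37):

byte[] check = Encoding.ASCII.GetBytes("123456789");
UInt16 crc = FastCRC16(check, (UInt16)check.Length);
Console.WriteLine(crc.ToString("X4"));  // expected: 4B37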
