I am creating C# code for a server program that receives data from a Concox TR06 GPS tracker via TCP:
http://www.iconcox.com/uploads/soft/140920/1-140920023130.pdf
When first starting up, the tracker sends a login message, which needs to be acknowledged before it will send any position data. My first problem is that, according to the documentation, the acknowledge message is 18 bytes long, yet the example they provide is only 10 bytes long.
P.S. In the table above, I'm pretty sure the "bits" column should be labelled "bytes" instead...
Now, my main problem is in calculating the Error Check. According to the documentation:
The check code is generated by the CRC-ITU checking method. The check codes of data in the structure of the protocol, from the Packet Length to the Information Serial Number (including "Packet Length" and "Information Serial Number"), are values of CRC-ITU.
OK, so in the above example, I need to calculate the CRC over 0x05 0x01 0x00 0x01.
Now, I'm guessing it's a 16-bit CRC, since the diagram above shows the error check as 2 bytes. I've tried two different CRC implementations I found online, at http://www.sanity-free.org/134/standard_crc_16_in_csharp.html and http://www.sanity-free.org/133/crc_16_ccitt_in_csharp.html, but neither gives me the answer the diagram says I should get: 0xD9 0xDC. I've even used this site - https://www.lammertbies.nl/comm/info/crc-calculation.html - to enter the 4 bytes manually, but nothing produces the expected result...
Any ideas where I might be going wrong? Any pointers/hints would be greatly appreciated. Thank you
I have implemented the same logic in Node.js (JavaScript). I hope this helps someone.
const crc16itu = hexString => {
if (!hexString) return 0x00;
const table = [
0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF,
0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7,
0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E,
0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876,
0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD,
0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5,
0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C,
0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974,
0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB,
0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3,
0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A,
0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72,
0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9,
0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1,
0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738,
0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70,
0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7,
0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF,
0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036,
0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E,
0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5,
0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD,
0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134,
0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 0x6E6E, 0x5CF5, 0x4D7C,
0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3,
0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB,
0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232,
0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A,
0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1,
0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9,
0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330,
0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78
];
let fcs = parseInt("FFFF", 16);
let i = 0;
while (i < hexString.length) {
let strHexNumber = hexString.substring(i, i + 2);
let intNumber = parseInt(strHexNumber, 16);
let crc16tabIndex = (fcs ^ intNumber) & parseInt("FF", 16);
fcs = (fcs >> 8) ^ table[crc16tabIndex];
i = i + 2;
}
return fcs ^ 0xFFFF;
};
module.exports = crc16itu;
The ITU CRC-16 is also called the X-25 CRC. You can find its specification here, which is:
width=16 poly=0x1021 init=0xffff refin=true refout=true xorout=0xffff check=0x906e name="X-25"
My crcany code will take that specification and generate C code to compute the CRC.
Here is the bit-wise (slow) code thusly generated:
#include <stddef.h>
unsigned crc16x_25_bit(unsigned crc, void const *mem, size_t len) {
    unsigned char const *data = mem;
    if (data == NULL)
        return 0;
    crc = ~crc;
    crc &= 0xffff;
    while (len--) {
        crc ^= *data++;
        for (unsigned k = 0; k < 8; k++)
            crc = crc & 1 ? (crc >> 1) ^ 0x8408 : crc >> 1;
    }
    crc ^= 0xffff;
    return crc;
}
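For example, feeding the four bytes from the login reply in the question through that routine reproduces the check code from the manual (a minimal sketch, assuming the function above is in the same file):

#include <stdio.h>

int main(void) {
    /* Packet Length, Protocol Number, Information Serial Number taken
       from the TR06 login acknowledge example in the question. */
    unsigned char reply[] = {0x05, 0x01, 0x00, 0x01};
    unsigned crc = crc16x_25_bit(0, reply, sizeof(reply));
    printf("%04X\n", crc);   /* prints D9DC, the expected error check */
    return 0;
}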
Code (written in C):
unsigned long chksum_crc32 (unsigned char *block, unsigned int length)
{
    register unsigned long crc;
    unsigned long i;

    crc = 0xFFFFFFFF;
    for (i = 0; i < length; i++)
    {
        crc = ((crc >> 8) & 0x00FFFFFF) ^ crc_tab[(crc ^ *block++) & 0xFF];
    }
    return (crc ^ 0xFFFFFFFF);
}

/* chksum_crc32gentab() -- to a global crc_tab[256], this one will
 * calculate the crcTable for crc32-checksums.
 * it is generated to the polynom [..]
 */
void chksum_crc32gentab ()
{
    unsigned long crc, poly;
    int i, j;

    poly = 0xEDB88320L;
    for (i = 0; i < 256; i++)
    {
        crc = i;
        for (j = 8; j > 0; j--)
        {
            if (crc & 1)
            {
                crc = (crc >> 1) ^ poly;
            }
            else
            {
                crc >>= 1;
            }
        }
        crc_tab[i] = crc;
    }
}
For starters: I know how CRC works. First the remainder is calculated by dividing the data by a specified polynomial, then this FCS (frame check sequence) is appended to the data set and sent to the receiving system. Once the transfer is finished, the FCS is checked with the same polynomial used to calculate it, and if the remainder of the data with that divisor is zero, then you know the data is correct.
I do not understand the implementation of these two functions. From what I have learned, the function chksum_crc32gentab() generates all the possible values the checksum table can take for the 32-bit CRC polynomial. One thing I don't get is how poly = 0xEDB88320L; is equivalent to a polynomial. I don't understand the logic at the bottom of that function either. For example, does the conditional if (crc & 1) mean that whenever the low bit of crc is 1 we apply the polynomial, and otherwise just shift right one bit?
I also do not understand chksum_crc32(unsigned char *block, unsigned int length);. Does this function just take in a string of bytes and convert them to the proper CRC value computed with the table? I guess I am confused about the logic it uses within the for loop.
If anyone understands this code, an explanation would be great; it does match the CRC32 produced by the .NET class. An example of how data is converted and then used would be something like:
(C# source)
MemoryStream ms = new MemoryStream(System.Text.Encoding.Default.GetBytes(input));
foreach (byte b in crc32.ComputeHash(ms))
    hash += b.ToString("x2").ToLower();
Here is the original site and project the C code was taken from. http://www.codeproject.com/Articles/35134/How-to-calculate-CRC-in-C
Any explanation would help
Or just google it... Second hit is: http://www.opensource.apple.com/source/xnu/xnu-1456.1.26/bsd/libkern/crc32.c
Backporting it from C# is the hard way to do it; most of these algorithms already exist in C.
In CRC calculations, binary polynomials, which are sums of x^n with either a 0 or 1 coefficient, are represented simply as binary words where the position of the 0 or 1 indicates which power of x it is a coefficient of.
0xEDB88320L represents the coefficients of the CRC32 polynomial as 1's where there is an x^n term (except for the x^32 term, which is left out). The CRC32 polynomial (why oh why doesn't stackoverflow have TeX equations like math.stackexchange -- I can't write decent equations here! sigh, sorry for the rant ...) is:
x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
Because of how this CRC is defined with respect to bit-ordering, the lowest coefficients are in the highest bits. So the first E in the hex constant above is 1110 representing (in order from left to right in the bits), 1 + x + x^2.
You can find the construction in the crc32.c source file of zlib, from which a snippet is shown here:
static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26};

/* make exclusive-or pattern from polynomial (0xedb88320UL) */
poly = 0;
for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++)
    poly |= (z_crc_t)1 << (31 - p[n]);

/* generate a crc for every 8-bit value */
for (n = 0; n < 256; n++) {
    c = (z_crc_t)n;
    for (k = 0; k < 8; k++)
        c = c & 1 ? poly ^ (c >> 1) : c >> 1;
    crc_table[0][n] = c;
}
The if (crc & 1) or c & 1 ? above looks at the low bit of the CRC at each step before it is shifted away. That is effectively a carry bit for the polynomial subtraction operation, so if it is a one, the polynomial is subtracted (exclusive-ored) from the shifted down polynomial in the CRC (multiplied by x). The CRC is shifted down whether the low bit is 1 or not.
The chksum_crc32() function that you show indeed computes the CRC on the provided block of data. It is the standard table-based approach for CRC calculations on strings of bytes, which indexes the table by the exclusive-or of the data byte and the low byte of the CRC. This does the same thing as shifting in a bit at a time and applying the polynomial for 1 bits, but does it in one step instead of eight. The CRC is effectively multiplied by x^8 (the >> 8), and is exclusive-ored with the effect of exclusive-oring with the polynomial 0 to 8 times at various shifted locations depending on the index value. It is simply a speed trick using a pre-computed table.
You can find even more extreme speed tricks used in zlib's crc32.c, which uses larger tables and processes more data at a time.
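To see the two functions from the question in action, here is a minimal sanity check. It assumes both functions are defined above it in the same file, with the global table they use declared before them, as in the original project (unsigned long crc_tab[256];). The standard CRC-32 of the ASCII string "123456789" is 0xCBF43926, which is what this should print:

#include <stdio.h>
#include <string.h>

int main(void)
{
    chksum_crc32gentab();      /* build the lookup table once */

    unsigned char msg[] = "123456789";
    unsigned long crc = chksum_crc32(msg, (unsigned int)strlen((char *)msg));
    printf("%08lX\n", crc);    /* prints CBF43926 */
    return 0;
}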
Assuming the 64-bit integer 0x000000000000FFFF, which would be represented as
00000000 00000000 00000000 00000000
00000000 00000000 >11111111 11111111
How do I find the number of unset bits to the left of the most significant set bit (the one marked with >)?
In straight C (long long is 64 bits on my setup), adapted from similar Java implementations (updated after a little more reading on Hamming weight):
A little more explanation: the top part just sets all bits to the right of the most significant 1, and then negates the result (i.e. all the 0's to the 'left' of the most significant 1 are now 1's and everything else is 0).
Then I used a Hamming Weight implementation to count the bits.
unsigned long long i = 0x000000000000FFFFLLU; // example value from the question
i |= i >> 1;
i |= i >> 2;
i |= i >> 4;
i |= i >> 8;
i |= i >> 16;
i |= i >> 32;
// Highest bit in input and all lower bits are now set. Invert to set the bits to count.
i=~i;
i -= (i >> 1) & 0x5555555555555555LLU; // each 2 bits now contains a count
i = (i & 0x3333333333333333LLU) + ((i >> 2) & 0x3333333333333333LLU); // each 4 bits now contains a count
i = (i + (i >> 4)) & 0x0f0f0f0f0f0f0f0fLLU; // each 8 bits now contains a count
i *= 0x0101010101010101LLU; // add each byte to all the bytes above it
i >>= 56; // the number of bits
printf("Leading 0's = %lld\n", i);
I'd be curious to see how this compares efficiency-wise. I've tested it with several values, though, and it seems to work.
Based on: http://www.hackersdelight.org/HDcode/nlz.c.txt
template<typename T> int clz(T v) {int n=sizeof(T)*8;int c=n;while (n){n>>=1;if (v>>n) c-=n,v>>=n;}return c-v;}
If you'd like a version that allows you to keep your lunch down, here you go:
int clz(uint64_t v) {
int n=64,c=64;
while (n) {
n>>=1;
if (v>>n) c-=n,v>>=n;
}
return c-v;
}
As you'll see, you can save cycles on this by careful analysis of the assembler, but the strategy here is not a terrible one. The while loop will operate Lg[64]=6 times; each time it will convert the problem into one of counting the number of leading bits on an integer of half the size.
The if statement inside the while loop asks the question: "can I represent this integer in half as many bits?", or analogously, "if I cut this in half, have I lost it?". After the if() payload completes, our number will always be in the lowest n bits.
At the final stage, v is either 0 or 1, and this completes the calculation correctly.
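For example (a quick sketch, assuming the uint64_t version of clz above is in scope), using the value from the question:

#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint64_t v = 0x000000000000FFFFULL;   /* example value from the question */
    printf("%d\n", clz(v));               /* prints 48 */
    return 0;
}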
If you are dealing with unsigned integers, you could do this:
#include <math.h>
#include <stdint.h>

int numunset(uint64_t number)
{
    int nbits = sizeof(uint64_t)*8;
    if(number == 0)
        return nbits;
    int first_set = floor(log2(number));
    return nbits - first_set - 1;
}
I don't know how it will compare in performance to the loop and count methods that have already been offered because log2() could be expensive.
Edit:
This could cause some problems with high-valued integers since the log2() function is casting to double and some numerical issues may arise. You could use the log2l() function that works with long double. A better solution would be to use an integer log2() function as in this question.
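For example (a small usage sketch, assuming the numunset() above; remember to link with -lm):

#include <stdio.h>

int main(void) {
    printf("%d\n", numunset(0x000000000000FFFFULL));   /* prints 48 */
    printf("%d\n", numunset(0));                       /* prints 64 */
    return 0;
}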
// Propagate the highest set bit into every lower position, so the word
// becomes a solid run of 1s from the most significant set bit down to bit 0
// (and stays 0 if x was 0).
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
x |= x >> 32;
// Every bit still unset lies to the left of the original highest set bit.
return 64 - count_bits_set(x);
Where count_bits_set is the fastest version of counting bits you can find. See https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel for various bit counting techniques.
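For completeness, here is one possible count_bits_set, a sketch based on the parallel bit-counting technique from the page linked above (on GCC or Clang you could just use __builtin_popcountll instead):

#include <stdint.h>

static int count_bits_set(uint64_t v)
{
    v = v - ((v >> 1) & 0x5555555555555555ULL);                           /* sums of bit pairs */
    v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); /* sums per nibble */
    v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL;                           /* sums per byte */
    return (int)((v * 0x0101010101010101ULL) >> 56);                      /* add all the bytes */
}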
I'm not sure I understood the problem correctly. I think you have a 64-bit value and want to find the number of leading zeros in it.
One way would be to find the most significant bit and simply subtract its position from 63 (assuming lowest bit is bit 0). You can find out the most significant bit by testing whether a bit is set from within a loop over all 64 bits.
Another way might be to use the (non-standard) __builtin_clz in gcc.
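A rough sketch of both ideas (this assumes GCC or Clang for the builtin; note that __builtin_clzll is undefined for an argument of 0, so zero is handled separately):

#include <stdint.h>

/* Scan downward from bit 63 until the most significant set bit is found. */
int leading_zeros_loop(uint64_t v)
{
    for (int bit = 63; bit >= 0; bit--)
        if (v & ((uint64_t)1 << bit))
            return 63 - bit;
    return 64;   /* no bit set at all */
}

/* Same result via the compiler builtin. */
int leading_zeros_builtin(uint64_t v)
{
    return v ? __builtin_clzll(v) : 64;
}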
I agree with the binary search idea. However, two points are important here:
The range of valid answers to your question is from 0 to 64 inclusive; in other words, there are 65 possible answers. I think (I'm almost sure) everyone who posted a "binary search" solution missed this point, so they'll get a wrong answer for either zero or a number with the MSB set.
If speed is critical, you may want to avoid the loop. There's an elegant way to achieve this using templates.
The following template stuff finds the MSB correctly of any unsigned type variable.
// helper
template <int bits, typename T>
bool IsBitReached(T x)
{
    const T cmp = T(1) << (bits ? (bits-1) : 0);
    return (x >= cmp);
}

template <int bits, typename T>
int FindMsbInternal(T x)
{
    if (!bits)
        return 0;

    int ret;
    if (IsBitReached<bits>(x))
    {
        ret = bits;
        x >>= bits;
    }
    else
        ret = 0;

    return ret + FindMsbInternal<bits/2, T>(x);
}

// Main routine
template <typename T>
int FindMsb(T x)
{
    const int bits = sizeof(T) * 8;
    if (IsBitReached<bits>(x))
        return bits;

    return FindMsbInternal<bits/2>(x);
}
Here you go, pretty trivial to update as you need for other sizes...
int bits_left(unsigned long long value)
{
    static unsigned long long mask = 0x8000000000000000;
    int c = 64;

    // doh
    if (value == 0)
        return c;

    // check byte by byte to see what has been set
    if (value & 0xFF00000000000000)
        c = 0;
    else if (value & 0x00FF000000000000)
        c = 8;
    else if (value & 0x0000FF0000000000)
        c = 16;
    else if (value & 0x000000FF00000000)
        c = 24;
    else if (value & 0x00000000FF000000)
        c = 32;
    else if (value & 0x0000000000FF0000)
        c = 40;
    else if (value & 0x000000000000FF00)
        c = 48;
    else if (value & 0x00000000000000FF)
        c = 56;

    // skip
    value <<= c;
    while (!(value & mask))
    {
        value <<= 1;
        c++;
    }

    return c;
}
Same idea as user470379's, but counting down ...
Assume all 64 bits are unset. While the value is larger than 0, keep shifting it right and decrementing the number of unset bits:
#include <stdint.h>

/* untested */
int countunsetbits(uint64_t val) {
    int x = 64;
    while (val) { x--; val >>= 1; }
    return x;
}
Try
#include <limits.h>

int countBits(unsigned long long value)
{
    int result = sizeof(value) * CHAR_BIT; // 64 for a 64-bit value
    while (value != 0)
    {
        --result;
        value = value >> 1; // Remove bottom bits until all 1s are gone.
    }
    return result;
}
Use log base 2 to get the position of the most significant digit that is 1.
log(2) = 1, meaning 0b10 -> 1
log(4) = 2, 5-7 => 2.xx, or 0b100 -> 2
log(8) = 3, 9-15 => 3.xx, 0b1000 -> 3
log(16) = 4 you get the idea
and so on...
The numbers in between become fractions of the log result, so truncating the result to an int gives you the 0-based position of the most significant 1.
Once you have that position, say p, then 63 - p is the answer.

#include <math.h>

int get_pos_msd(unsigned long long n){
    return (int)log2(n);           /* 0-based position of the most significant 1; n must be nonzero */
}

last_zero = 63 - get_pos_msd(n);   /* e.g. 48 for n = 0x000000000000FFFF */