Pack 5 integers into 1 in C#

I'm trying to pack, and later unpack, 5 integers (max 999 each) into a single unique integer using bit-shift operations:
static UInt64 Combine(uint a, uint b, uint c, uint d, uint e)
{
    return (a << 48) | (b << 32) | (c << 16) | (d << 8) | e;
}
However, I am unable to unpack the number back. Can anyone guide me as to what I'm doing wrong?
Thanks.

In order to pack values in the range 0..999 you need ten bits, not eight: ten bits give you 0..1023, whereas eight only give you 0..255. (Note that your original code also shifts uint operands, so a << 48 actually shifts by 48 & 31 = 16 and the intermediate results never widen to 64 bits.)
So the function you need is something like:
static UInt64 Combine(uint a, uint b, uint c, uint d, uint e)
{
    UInt64 retval = a;
    retval = (retval << 10) | b;
    retval = (retval << 10) | c;
    retval = (retval << 10) | d;
    retval = (retval << 10) | e;
    return retval;
}
Then, to unpack them, just extract each group of ten bits, one at a time, such as:
static void Extract(UInt64 val, out uint a, out uint b,
                    out uint c, out uint d, out uint e)
{
    e = Convert.ToUInt32(val & 0x3ff); val = val >> 10;
    d = Convert.ToUInt32(val & 0x3ff); val = val >> 10;
    c = Convert.ToUInt32(val & 0x3ff); val = val >> 10;
    b = Convert.ToUInt32(val & 0x3ff); val = val >> 10;
    a = Convert.ToUInt32(val & 0x3ff);
}
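For completeness, a quick round-trip check (my own addition, not part of the original answer) might look like this:

UInt64 packed = Combine(999, 0, 123, 42, 7);
Extract(packed, out uint a, out uint b, out uint c, out uint d, out uint e);
// a == 999, b == 0, c == 123, d == 42, e == 7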

Here's another, slightly different way of storing the numbers that I thought I'd present. Basically, we're just emulating C-style "unions":
using System.Runtime.InteropServices;
using System.Windows.Forms;

namespace Unions
{
    public partial class Form1 : Form
    {
        [StructLayout(LayoutKind.Explicit)]
        struct uShortArray
        {
            [FieldOffset(0)]
            public ushort Bytes01;
            [FieldOffset(2)]
            public ushort Bytes23;
            [FieldOffset(4)]
            public ushort Bytes45;
            [FieldOffset(6)]
            public ushort Bytes67;
            [FieldOffset(0)]
            public long long1;
        }

        public Form1()
        {
            InitializeComponent();
        }

        private void Form1_Load(object sender, System.EventArgs e)
        {
            uShortArray ua = default(uShortArray);
            ua.Bytes01 = 999;
            ua.Bytes23 = 164;
            ua.Bytes45 = 581;
            ua.Bytes67 = 43;
            MessageBox.Show($"ua = [Bytes 0 - 1 : {ua.Bytes01}] ... [Byte 2 - 3 : {ua.Bytes23}] ... [Bytes 4 - 5 : {ua.Bytes45}] ... [Bytes 6 - 7 : {ua.Bytes67}] ... [long1 : {ua.long1}]");

            uShortArray ua2 = default(uShortArray);
            Combine(out ua2, 543, 657, 23, 999);
            MessageBox.Show($"ua2 = [Bytes 0 - 1 : {ua2.Bytes01}] ... [Byte 2 - 3 : {ua2.Bytes23}] ... [Bytes 4 - 5 : {ua2.Bytes45}] ... [Bytes 6 - 7 : {ua2.Bytes67}] ... [long1 : {ua2.long1}]");

            uShortArray ua3 = default(uShortArray);
            ua3.long1 = ua.long1; // As you can see, you don't need an extract. You just assign the "extract" value to long1.
            MessageBox.Show($"ua3 = [Bytes 0 - 1 : {ua3.Bytes01}] ... [Byte 2 - 3 : {ua3.Bytes23}] ... [Bytes 4 - 5 : {ua3.Bytes45}] ... [Bytes 6 - 7 : {ua3.Bytes67}] ... [long1 : {ua3.long1}]");
        }

        private void Combine(out uShortArray inUA, ushort in1, ushort in2, ushort in3, ushort in4)
        {
            inUA = default(uShortArray);
            inUA.Bytes01 = in1;
            inUA.Bytes23 = in2;
            inUA.Bytes45 = in3;
            inUA.Bytes67 = in4;
        }
    }
}
This struct only stores 4 values, but you can overlay more fields (or a larger backing type than long) to hold more numbers.
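For instance, a minimal sketch (my addition, not part of the original answer) that would hold the five 0..999 values from the question by overlaying five ushorts onto two ulongs with the same LayoutKind.Explicit trick:

[StructLayout(LayoutKind.Explicit)]
struct FiveUShorts
{
    [FieldOffset(0)] public ushort V0;
    [FieldOffset(2)] public ushort V1;
    [FieldOffset(4)] public ushort V2;
    [FieldOffset(6)] public ushort V3;
    [FieldOffset(8)] public ushort V4;
    [FieldOffset(0)] public ulong Lo; // V0..V3 share these 8 bytes
    [FieldOffset(8)] public ulong Hi; // V4 sits in the low 16 bits; the upper 48 bits are unused
}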

Related

Same structs have different HashCode

I have this code:
public class Point
{
public int x;
public int y;
public Point() { x = 0; y = 0; }
public Point(int a, int b) { x = a; y = b; }
}
public struct Coefficients{
public double a;
public double b;
public double c;
public Coefficients(double a, double b, double c)
{
this.a = a;
this.b = b;
this.c = c;
}
public static Coefficients GetFromPoints(Point point1, Point point2)
{
int x1 = point1.x;
int x2 = point2.x;
int y1 = point1.y;
int y2 = point2.y;
double a = y1 - y2;
double b = x2 - x1;
double c = x1 * y2 - y1 * x2;
double max = Math.Max(Math.Max(a, b), c);
double min = Math.Min(Math.Min(a, b), c);
double divider = Math.Abs(max) > Math.Abs(min) ? max : min;
divider = Math.Abs(divider) > 1 ? divider : 1;
return new Coefficients(a / divider, b / divider, c / divider);
}
}
public class Solution
{
public int MaxPoints(Point[] points)
{
var coef_list = new List<Coefficients>();
for (var x = 0; x < points.Length - 1; x++)
{
for (var y = x + 1; y < points.Length; y++)
{
var coef = Coefficients.GetFromPoints(points[x], points[y]);
coef_list.Add(coef);
}
}
foreach (var item in coef_list) {
Debug.WriteLine(item.a);
Debug.WriteLine(item.b);
Debug.WriteLine(item.c);
Debug.WriteLine(item.GetHashCode());
Debug.WriteLine("---------------");
}
return 0;
}
}
As you can see, I used a struct, and I noticed some weird behavior.
If I have input data like this:
prg.MaxPoints(new Point[] { new Point(4, -1), new Point(4, 0), new Point(4, 5) });
Debug output is:
-0,25
0
1
-450335288
---------------
-0,25
0
1
-450335288
---------------
-0,25
0
1
-450335288
---------------
But if I change the argument order to:
prg.MaxPoints(new Point[] { new Point(4, 0),new Point(4, -1) , new Point(4, 5) });
Debug output is:
-0,25
0
1
1697148360
---------------
-0,25
0
1
-450335288
---------------
-0,25
0
1
-450335288
---------------
One thing that may be important: in the first case all of the "dividers" (in the GetFromPoints method) are positive (4, 24, 20), while in the second case one of them is negative and the other two are positive (-4, 20, 24).
Can anybody explain this?
UPD.
When I changed
return new Coefficients(a/divider, b/divider, c/divider);
to
return new Coefficients(a/divider, 0, c/divider); // in all of these cases the 2nd argument is 0 anyway
the hash codes came out equal. Does that mean that 0 divided by a negative number isn't 0?
Basically, you are getting a negative-zero double. However, the runtime's default GetHashCode for structs appears to blindly combine the underlying bytes rather than call each field's GetHashCode. Here is a simplified version of what you are seeing:
public struct S
{
public double value;
public S(double d)
{
value = d;
}
}
public static void Main(string[] args)
{
double d1 = 0;
double d2 = d1 / -1;
Console.WriteLine("using double");
Console.WriteLine("{0} {1}", d1, d1.GetHashCode());
Console.WriteLine(GetComponentParts(d1));
Console.WriteLine("{0} {1}", d2, d2.GetHashCode());
Console.WriteLine(GetComponentParts(d2));
Console.WriteLine("Equals: {0}, Hashcode:{1}, {2}", d1.Equals(d2), d1.GetHashCode(), d2.GetHashCode());
Console.WriteLine();
Console.WriteLine("using a custom struct");
var s1 = new S(d1);
var s2 = new S(d2);
Console.WriteLine(s1.Equals(s2));
Console.WriteLine(new S(d1).GetHashCode());
Console.WriteLine(new S(d2).GetHashCode());
}
// from: https://msdn.microsoft.com/en-us/library/system.double.epsilon(v=vs.110).aspx
private static string GetComponentParts(double value)
{
string result = String.Format("{0:R}: ", value);
int indent = result.Length;
// Convert the double to an 8-byte array.
byte[] bytes = BitConverter.GetBytes(value);
// Get the sign bit (byte 7, bit 7).
result += String.Format("Sign: {0}\n",
(bytes[7] & 0x80) == 0x80 ? "1 (-)" : "0 (+)");
// Get the exponent (byte 6 bits 4-7 to byte 7, bits 0-6)
int exponent = (bytes[7] & 0x07F) << 4;
exponent = exponent | ((bytes[6] & 0xF0) >> 4);
int adjustment = exponent != 0 ? 1023 : 1022;
result += String.Format("{0}Exponent: 0x{1:X4} ({1})\n", new String(' ', indent), exponent - adjustment);
// Get the significand (bits 0-51)
long significand = (long)(bytes[6] & 0x0F) << 48;
significand = significand | ((long) bytes[5] << 40);
significand = significand | ((long) bytes[4] << 32);
significand = significand | ((long) bytes[3] << 24);
significand = significand | ((long) bytes[2] << 16);
significand = significand | ((long) bytes[1] << 8);
significand = significand | bytes[0];
result += String.Format("{0}Mantissa: 0x{1:X13}\n", new String(' ', indent), significand);
return result;
}
The output:
using double
0 0
0: Sign: 0 (+)
Exponent: 0xFFFFFC02 (-1022)
Mantissa: 0x0000000000000
0 0
0: Sign: 1 (-)
Exponent: 0xFFFFFC02 (-1022)
Mantissa: 0x0000000000000
Equals: True, Hashcode:0, 0
using a custom struct
False
346948956
-1800534692
I've defined two doubles, one of which is the "normal" zero and the other of which is "negative" zero. The difference between the two is the double's sign bit. The two values are equal in all apparent ways (Equals comparison, GetHashCode, ToString representation) except at the byte level. However, when they are put into a custom struct, the runtime's GetHashCode method just combines the raw bits, which gives a different hash code for each struct even though they contain equal values. Equals does the same thing and returns False.
I admit this is a big gotcha. The solution is to override Equals and GetHashCode yourself so you get the equality semantics you want.
A similar issue has been mentioned before; apparently the runtime only takes this bitwise shortcut when all of the struct's fields are 8 bytes wide.
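For example, a minimal sketch (my addition, not from the answer above) of overriding both members on the S struct so that +0.0 and -0.0 compare and hash the same:

public struct S : IEquatable<S>
{
    public double value;

    public S(double d) { value = d; }

    // double.Equals and double.GetHashCode treat +0.0 and -0.0 as equal, so delegate to them
    public bool Equals(S other) => value.Equals(other.value);
    public override bool Equals(object obj) => obj is S other && Equals(other);
    public override int GetHashCode() => value.GetHashCode();
}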

Comparing bits efficiently ( overlap set of x )

I want to compare a stream of bits of arbitrary length to a mask in C# and return a ratio of how many bits were the same.
The mask to check against is anywhere between 2 bits and 8k bits long (with 90% of the masks being 5 bits long); the input can be anywhere from 2 bits up to ~500k bits, with an average input length of 12k bits (but yes, most of the time it will be comparing 5 bits against the first 5 bits of that 12k).
Now my naive implementation would be something like this:
bool[] mask = new[] { true, true, false, true };
float dendrite(bool[] input) {
    int correct = 0;
    for (int i = 0; i < mask.Length; i++) {
        if (input[i] == mask[i])
            correct++;
    }
    return (float)correct / (float)mask.Length;
}
but I expect this is better handled (more efficiently) with some kind of binary-operator magic?
Anyone got any pointers?
EDIT: the data type is not fixed at this point in my design, so if ints or byte arrays work better I'd also be a happy camper. I'm trying to optimize for efficiency here; the faster the computation, the better.
E.g., if you can make it work like this:
int[] mask = new[] { 1, 1, 0, 1 };
float dendrite(int[] input) {
    int correct = 0;
    for (int i = 0; i < mask.Length; i++) {
        if (input[i] == mask[i])
            correct++;
    }
    return (float)correct / (float)mask.Length;
}
or this:
int mask = 13; // 1101
float dendrite(int input) {
    return // your magic here;
}
// would return 0.75 for an input of 101 (1100101 in binary),
// which matches 3 bits of the 4-bit mask == .75
ANSWER:
I ran each proposed answer against the others; Fredou's and Marten's solutions ran neck and neck, but Fredou submitted the fastest, leanest implementation in the end. Of course, since the average result varies quite wildly between implementations, I might have to revisit this post later on. :) But that's probably just me messing up my test script. (I hope; too late now, going to bed.)
sparse1.Cyclone
1317ms 3467107ticks 10000iterations
result: 0,7851563
sparse1.Marten
288ms 759362ticks 10000iterations
result: 0,05066964
sparse1.Fredou
216ms 568747ticks 10000iterations
result: 0,8925781
sparse1.Marten
296ms 778862ticks 10000iterations
result: 0,05066964
sparse1.Fredou
216ms 568601ticks 10000iterations
result: 0,8925781
sparse1.Marten
300ms 789901ticks 10000iterations
result: 0,05066964
sparse1.Cyclone
1314ms 3457988ticks 10000iterations
result: 0,7851563
sparse1.Fredou
207ms 546606ticks 10000iterations
result: 0,8925781
sparse1.Marten
298ms 786352ticks 10000iterations
result: 0,05066964
sparse1.Cyclone
1301ms 3422611ticks 10000iterations
result: 0,7851563
sparse1.Marten
292ms 769850ticks 10000iterations
result: 0,05066964
sparse1.Cyclone
1305ms 3433320ticks 10000iterations
result: 0,7851563
sparse1.Fredou
209ms 551178ticks 10000iterations
result: 0,8925781
(Test script copied here; if I destroyed yours while modifying it, let me know: https://dotnetfiddle.net/h9nFSa )
How about this one - dotnetfiddle example
using System;

namespace ConsoleApplication1
{
    public class Program
    {
        public static void Main(string[] args)
        {
            int a = Convert.ToInt32("0001101", 2);
            int b = Convert.ToInt32("1100101", 2);
            Console.WriteLine(dendrite(a, 4, b));
        }

        private static float dendrite(int mask, int len, int input)
        {
            // mask the input down to its lowest len bits before comparing
            return 1 - getBitCount(mask ^ (input & (int)(uint.MaxValue >> (32 - len)))) / (float)len;
        }

        private static int getBitCount(int bits)
        {
            bits = bits - ((bits >> 1) & 0x55555555);
            bits = (bits & 0x33333333) + ((bits >> 2) & 0x33333333);
            return (((bits + (bits >> 4)) & 0xf0f0f0f) * 0x1010101) >> 24;
        }
    }
}
A 64-bit version here - dotnetfiddle
using System;

namespace ConsoleApplication1
{
    public class Program
    {
        public static void Main(string[] args)
        {
            ulong a = Convert.ToUInt64("0000000000000000000000000000000000000000000000000000000000001101", 2);
            ulong b = Convert.ToUInt64("1110010101100101011001010110110101100101011001010110010101100101", 2);
            Console.WriteLine(dendrite(a, 4, b));
        }

        private static float dendrite(ulong mask, int len, ulong input)
        {
            return 1 - getBitCount(mask ^ (input & (ulong.MaxValue >> (64 - len)))) / (float)len;
        }

        private static ulong getBitCount(ulong bits)
        {
            bits = bits - ((bits >> 1) & 0x5555555555555555UL);
            bits = (bits & 0x3333333333333333UL) + ((bits >> 2) & 0x3333333333333333UL);
            return unchecked(((bits + (bits >> 4)) & 0xF0F0F0F0F0F0F0FUL) * 0x101010101010101UL) >> 56;
        }
    }
}
I came up with this code:
static float dendrite(ulong input, ulong mask)
{
    // bits where the input matches a 1-bit of the mask
    ulong samebits = mask & ~(input ^ mask);
    // count number of matching bits
    int correct = cardinality(samebits);
    // count number of bits set in the mask
    int inmask = cardinality(mask);
    // compute fraction (0.0 to 1.0)
    return inmask == 0 ? 0f : correct / (float)inmask;
}

// this is a little hack to count the number of bits set to one in a 64-bit word
static int cardinality(ulong word)
{
    const ulong mult = 0x0101010101010101;
    const ulong mask1h = (~0UL) / 3 << 1;
    const ulong mask2l = (~0UL) / 5;
    const ulong mask4l = (~0UL) / 17;
    word -= (mask1h & word) >> 1;
    word = (word & mask2l) + ((word >> 2) & mask2l);
    word += word >> 4;
    word &= mask4l;
    return (int)((word * mult) >> 56);
}
This will check 64 bits at a time. If you need more than that, you can split the input data into 64-bit words, compare them one by one, and compute the combined result (see the sketch after the fiddle link below).
Here's a .NET fiddle with the code and a working test case:
https://dotnetfiddle.net/5hYFtE
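As a rough sketch of that multi-word idea (my addition, not part of the answer), one option is to accumulate the two counts across words and divide once at the end, rather than averaging per-word fractions; it reuses the cardinality() helper above and assumes input has at least as many words as mask:

static float dendriteWords(ulong[] input, ulong[] mask)
{
    int correct = 0;
    int inmask = 0;
    for (int i = 0; i < mask.Length; i++)
    {
        // matching one-bits of the mask in this word
        correct += cardinality(mask[i] & ~(input[i] ^ mask[i]));
        // one-bits of the mask in this word
        inmask += cardinality(mask[i]);
    }
    return inmask == 0 ? 0f : correct / (float)inmask;
}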
I would change the code to something along these lines:
// hardcoded bitmask
byte mask = 255;
float dendrite(byte input) {
    int correct = 0;
    // store the xor:ed result
    byte xored = (byte)(input ^ mask);
    // loop through each bit
    for (int i = 0; i < 8; i++) {
        // if the bit is 0 then it was correct
        if ((xored & (1 << i)) == 0)
            correct++;
    }
    // 8 bits in the mask
    return (float)correct / 8f;
}
The above uses a mask and input of 8 bits, but of course you could modify this to use a 4-byte integer and so on.
Not sure if this will work as expected, but it might give you some clues on how to proceed.
For example, if you only want to check the first 4 bits, you could change the code to something like:
float dendrite(byte input) {
    // hardcoded bitmask, i.e. 1101
    byte mask = 13;
    // number of bits to check
    byte bits = 4;
    int correct = 0;
    // store the xor:ed result
    byte xored = (byte)(input ^ mask);
    // loop through each bit; notice that we only check the first 4 bits
    for (int i = 0; i < bits; i++) {
        // if the bit is 0 then it was correct
        if ((xored & (1 << i)) == 0)
            correct++;
    }
    return (float)correct / (float)bits;
}
Of course, it might be faster to use an int instead of a byte.
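A rough int-based variant of that loop (my sketch, keeping the bit-by-bit approach rather than switching to a popcount):

static float dendrite(int input, int mask, int bits)
{
    int xored = input ^ mask;
    int correct = 0;
    for (int i = 0; i < bits; i++)
    {
        // a 0 in the xor:ed value means that bit matched
        if ((xored & (1 << i)) == 0)
            correct++;
    }
    return (float)correct / (float)bits;
}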

Converting two bytes to an IEEE-11073 16-bit SFLOAT in C#

I need to convert a two-byte array to the SFLOAT format defined by IEEE 11073.
How can I do that?
I'll answer my own question here.
public float ToSFloat(byte[] value)
{
    if (value.Length != 2)
        throw new ArgumentException();

    byte b0 = value[0];
    byte b1 = value[1];

    var mantissa = unsignedToSigned(ToInt(b0) + ((ToInt(b1) & 0x0F) << 8), 12);
    var exponent = unsignedToSigned(ToInt(b1) >> 4, 4);

    return (float)(mantissa * Math.Pow(10, exponent));
}

public int ToInt(byte value)
{
    return value & 0xFF;
}

private int unsignedToSigned(int unsigned, int size)
{
    if ((unsigned & (1 << (size - 1))) != 0)
    {
        unsigned = -1 * ((1 << (size - 1)) - (unsigned & ((1 << (size - 1)) - 1)));
    }
    return unsigned;
}
Loosely based on the C implementation by Signove on GitHub, I have created this function in C#:
Dictionary<Int32, Single> reservedValues = new Dictionary<Int32, Single> {
    { 0x07FE, Single.PositiveInfinity },
    { 0x07FF, Single.NaN },
    { 0x0800, Single.NaN },
    { 0x0801, Single.NaN },
    { 0x0802, Single.NegativeInfinity }
};

Single Ieee11073ToSingle(Byte[] bytes) {
    var ieee11073 = (UInt16)(bytes[0] + 0x100*bytes[1]);
    var mantissa = ieee11073 & 0x0FFF;
    if (reservedValues.ContainsKey(mantissa))
        return reservedValues[mantissa];
    if (mantissa >= 0x0800)
        mantissa = -(0x1000 - mantissa);
    var exponent = ieee11073 >> 12;
    if (exponent >= 0x08)
        exponent = -(0x10 - exponent);
    var magnitude = Math.Pow(10d, exponent);
    return (Single)(mantissa*magnitude);
}
This function assumes that the bytes are in little-endian order. If not, you will have to swap bytes[0] and bytes[1] in the first line of the function. Or, perhaps better, remove the first line, change the function argument to accept a UInt16 (the IEEE 11073 value), and let the caller decide how to extract that value from the input.
I highly advise you to test this code, because I do not have any test values to verify the correctness of the conversion.
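As a quick sanity check of my own (the expected value follows directly from the mantissa * 10^exponent definition above):

// little-endian bytes 0x48, 0xF0 -> 0xF048: mantissa = 0x048 = 72, exponent = 0xF -> -1
var result = Ieee11073ToSingle(new Byte[] { 0x48, 0xF0 }); // 72 * 10^-1 = 7.2f (approximately)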

Sorting 3 numbers without branching

In C# or C++ how can I implement a branch-free sort of three (integer) numbers?
Is this possible?
No conditionals. Only a cast to uint. Perfect solution.
int abs (int a)
{
    // the cast to unsigned makes the shift well-defined: b is 1 for negative a, 0 otherwise
    int b = (int)((unsigned int)a >> (sizeof(int)*CHAR_BIT - 1));
    return a - 2 * b * a;
}

int max (int a, int b) { return (a + b + abs(a - b)) / 2; }
int min (int a, int b) { return (a + b - abs(a - b)) / 2; }

void sort (int & a, int & b, int & c)
{
    int maxnum = max(max(a,b), c);
    int minnum = min(min(a,b), c);
    int middlenum = a + b + c - maxnum - minnum;
    a = maxnum;
    b = middlenum;
    c = minnum;
}
You can write branch-free max, min and swap functions. Once you have these functions, you can use them to write the sort function as:
void sort(int &a, int &b, int &c)
{
int m1 = max(a,b,c);
int m2 = min(a,b,c);
b = a + b + c - m1 - m2;
swap(m1, a);
swap(m2, c);
}
And here are the helper functions:
void swap(int &a, int &b)
{
int tmp = a; a = b; b = tmp;
}
int max( int a, int b, int c ) {
int l1[] = { a, b };
int l2[] = { l1[ a<b ], c };
return l2[ l2[0] < c ];
}
int min( int a, int b, int c ) {
int l1[] = { a, b };
int l2[] = { l1[ a>b ], c };
return l2[ l2[0] > c ];
}
Test code:
int main() {
int a,b,c;
std::cin >> a >> b >> c;
sort(a,b,c);
std::cout << a <<"," << b << "," << c << std::endl;
return 0;
}
Input:
21 242 434
Output (descending order):
434, 242, 21
Demo : http://ideone.com/3ZOzc
I have taken the implementation of max from @David's answer here, and implemented min with a little twist.
You can do this in C++ with:
#include <iostream>
void sort(int *in) {
const int sum = in[0]+in[1];
const int diff = abs(in[1]-in[0]);
in[0] = (sum + diff) / 2;
in[1] = (sum - diff) / 2;
}
int main() {
int a[] = {3,4,1};
sort(a);
sort(a+1);
sort(a);
std::cout << a[0] << "," << a[1] << "," << a[2] << std::endl;
int b[] = {1,2,3};
sort(b);
sort(b+1);
sort(b);
std::cout << b[0] << "," << b[1] << "," << b[2] << std::endl;
}
The trick is expressing the min/max of each pair as arithmetic operations rather than branches, and then calling sort on pairs enough times to "bubble sort" them.
I've made a totally generic version, using template meta-programming to call sort the right number of times. It all gets inlined exactly as you'd hope with gcc 4.7.0 on my x86 box (although a call is unconditional on x86 anyway). I've also implemented an abs function that avoids branches on x86 (it makes a few assumptions about integers that make it less portable; it's based on gcc's __builtin_abs implementation for x86, though):
#include <iostream>
#include <limits.h>
void myabs(int& in) {
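// note: this actually leaves -|in| in place; the pair update in sorter<> below relies on that to put the larger value at the lower index (descending order)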
const int tmp = in >> ((sizeof(int) * CHAR_BIT) - 1);
in ^= tmp;
in = tmp - in;
}
template <int N, int I=1, bool C=false>
struct sorter {
static void sort(int *in) {
const int sum = in[I-0]+in[I-1];
int diff = in[I-1]-in[I-0];
myabs(diff);
in[I-0] = (sum + diff) / 2;
in[I-1] = (sum - diff) / 2;
sorter<N, I+1, (I+1 >= N)>::sort(in);
}
};
template <int N,int I>
struct sorter<N,I,true> {
static void sort(int *in) {
sorter<N-1>::sort(in);
}
};
template <int I, bool C>
struct sorter<0,I,C> {
static void sort(int *) {
}
};
int main() {
int a[] = {3,4,1};
sorter<3>::sort(a);
std::cout << a[0] << "," << a[1] << "," << a[2] << std::endl;
}
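Since the question also asks about C#, here is a rough C# adaptation of the same arithmetic min/max idea (my own sketch, not one of the answers above; like the C++ versions it can overflow for values near int.MinValue or int.MaxValue):

static int Abs(int a)
{
    int mask = a >> 31;       // -1 if a is negative, 0 otherwise
    return (a ^ mask) - mask; // flips the bits and adds 1 only for negatives
}

static int Max(int a, int b) { return (a + b + Abs(a - b)) / 2; }
static int Min(int a, int b) { return (a + b - Abs(a - b)) / 2; }

static void Sort3(ref int a, ref int b, ref int c)
{
    int max = Max(Max(a, b), c);
    int min = Min(Min(a, b), c);
    int mid = a + b + c - max - min;
    a = max; b = mid; c = min;  // descending order, matching the answers above
}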

Perform signed arithmetic on numbers defined as bit ranges of unsigned bytes

I have two bytes. I need to turn them into two integers, where the first 12 bits make one int and the last 4 make the other. I figure I can & the 2nd byte with 0x0f to get the 4 bits, but I'm not sure how to make that into a value with the correct sign.
Update:
Just to clarify, I have 2 bytes
byte1 = 0xab
byte2 = 0xcd
and I need to do something like this with them:
var value = 0xabc * 10 ^ 0xd;
Sorry for the confusion, and thanks for all of the help.
Try this code:
int a = 10;
int a1 = a & 0x000F;
int a2 = a & 0xFFF0;
For kicks:
public static partial class Levitate
{
public static Tuple<int, int> UnPack(this int value)
{
uint sign = (uint)value & 0x80000000;
int small = ((int)sign >> 28) | (value & 0x0F);
int big = value & 0xFFF0;
return new Tuple<int, int>(small, big);
}
}
int a = 10;
a.UnPack();
Ok, let's try this again, knowing what we're shooting for. I tried the following out in VS2008 and it seems to work fine; that is, both outOne and outTwo are -1 at the end. Is that what you're looking for?
byte b1 = 0xff;
byte b2 = 0xff;
ushort total = (ushort)((b1 << 8) + b2);
short outOne = (short)((short)(total & 0xFFF0) >> 4);
sbyte outTwo = (sbyte)((sbyte)((total & 0xF) << 4) >> 4);
Assuming you have the following two bytes:
byte a = 0xab;
byte b = 0xcd;
and consider 0xab the first 8 bits and 0xcd the second 8 bits, or 0xabc the first 12 bits and 0xd the last four bits. Then you can get these bits as follows:
int x = (a << 4) | (b >> 4); // x == 0x0abc
int y = b & 0x0f; // y == 0x000d
Edited to take into account the clarification of the "signing" rules:
public void unpack( byte[] octets , out int hiNibbles , out int loNibble )
{
if ( octets == null ) throw new ArgumentNullException("octets");
if ( octets.Length != 2 ) throw new ArgumentException("octets") ;
int value = (int) BitConverter.ToInt16( octets , 0 ) ;
// since the value is signed, right shifts sign-extend
hiNibbles = value >> 4 ;
loNibble = ( value << 28 ) >> 28 ;
return ;
}
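For instance, a usage sketch (my addition) for the 0xab/0xcd example from the question; on a little-endian platform BitConverter.ToInt16 treats octets[0] as the low byte:

unpack(new byte[] { 0xcd, 0xab }, out int hiNibbles, out int loNibble);
// hiNibbles == -1348 (0xabc sign-extended from 12 bits)
// loNibble  == -3    (0xd sign-extended from 4 bits)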
