Convert C# method to Java - c#

I am looking to convert the following bit of C# code to Java. I am having a hard time coming up with an equivalent.
Working C# Code:
// Combine two bytes into an unsigned 16-bit value. BitConverter reads
// buffer[0] as the LOW byte on little-endian (x86) hardware, so the
// result is (b << 8) | a. When `flip` is set, the value is subtracted
// from the constant 0x3720, returning 1 if the difference would be
// negative.
private ushort ConvertBytes(byte a, byte b, bool flip)
{
byte[] buffer = new byte[] { a, b };
if (!flip)
{
// Raw little-endian 16-bit value.
return BitConverter.ToUInt16(buffer, 0);
}
ushort num = BitConverter.ToUInt16(buffer, 0);
//this.Weight = num;
int xy = 0x3720; // NOTE(review): unused local, kept as posted.
int num2 = 0x3720 - num;
if (num2 > -1)
{
return Convert.ToUInt16(num2);
}
return 1;
}
Here is the Java code that does not work. The big challenge is the BitConverter.ToUInt16(buffer, 0) call. How do I get the Java equivalent of the working C# method?
Java Code that is Wrong:
// Java equivalent of the C# method. BitConverter.ToUInt16 on x86 is
// LITTLE-endian: buffer[0] = a is the LOW byte and buffer[1] = b the
// HIGH byte, so the composition is (b << 8) | a — the posted version
// had the two bytes swapped ((a << 8) | b), which is why it "did not
// work".
private short ConvertBytes(byte a, byte b, boolean flip){
// Mask both bytes so they are treated as unsigned; hold the full
// 16-bit unsigned value in an int (Java has no ushort).
int num = ((b & 0xFF) << 8) | (a & 0xFF);
if (!flip){
// Same bit pattern as the C# ushort, reinterpreted as short.
return (short) num;
}
//this.Weight = num;
int num2 = 0x3720 - num;
if (num2 > -1){
return (short) num2;
}
return 1;
}

// ByteBuffer-based equivalent of the C# method; LITTLE_ENDIAN matches
// BitConverter.ToUInt16 on x86.
// Fixes vs. the posted answer:
//  * the !flip early return from the C# original was missing;
//  * getShort() is signed, so mask with 0xFFFF to mimic C# ushort —
//    otherwise values above 0x7FFF go negative and 0x3720 - num is
//    computed against the wrong magnitude.
private short ConvertBytes(byte a, byte b, boolean flip){
ByteBuffer byteBuffer = ByteBuffer.allocate(2);
byteBuffer.order(ByteOrder.LITTLE_ENDIAN);
byteBuffer.put(a);
byteBuffer.put(b);
int num = byteBuffer.getShort(0) & 0xFFFF; // unsigned 16-bit value
if (!flip){
return (short) num;
}
//this.Weight = num;
int num2 = 0x3720 - num;
if (num2 > -1){
return (short) num2;
}
return 1;
}

Related

Reversing galois multiplication of two byte arrays in C#

I need help in finding the reverse of galois multiplication GF (2^128) in C#. The code below is being used in my AES-GCM functions. I found this code through the web though.
I tried to search the web for galois division but I have no luck in finding it.
Pardon me for my knowledge in this field and my English.
This function derives the value of 2^x.
// 2^x as a byte — a single set bit; meaningful for x in [0, 7].
public byte BIT(byte x)
{
byte mask = (byte)(1u << x);
return mask;
}
This function converts byte array of 4 elements to unsigned int.
// Read a 32-bit big-endian value from the first four bytes of a
// (a[0] is the most significant byte).
public uint WPA_GET_BE32(byte[] a)
{
uint value = 0;
for (int i = 0; i < 4; i++)
{
value = (value << 8) | a[i];
}
return value;
}
This function converts unsigned int into byte array of 4 elements.
// Write val as four big-endian bytes into a freshly allocated array
// (a[0] receives the most significant byte).
public void WPA_PUT_BE32(out byte[] a, uint val)
{
a = new byte[4];
for (int i = 0; i < 4; i++)
{
a[i] = (byte)(val >> (8 * (3 - i)));
}
}
// Shift the 128-bit value in v (16 bytes, big-endian) right by one
// bit. The four 32-bit words are handled from the last (offset 12) to
// the first (offset 0); each word inherits its new top bit from the
// lowest bit of the byte just before it. The original repeated this
// word logic four times verbatim; the loop below is equivalent —
// processing high offsets first guarantees every carry byte is read
// before its own word is overwritten.
public void shift_right_block(ref byte[] v)
{
for (int offset = 12; offset >= 0; offset -= 4)
{
byte[] temp = v.Skip(offset).Take(4).ToArray();
uint val = WPA_GET_BE32(temp) >> 1;
// Carry in the bit that falls off the preceding word (none for
// the first word).
if (offset > 0 && (v[offset - 1] & 0x01) > 0)
val |= 0x80000000;
WPA_PUT_BE32(out temp, val);
Array.Copy(temp, 0, v, offset, 4);
}
}
This function does a exclusive-OR function on two byte arrays.
// XOR the first 16 bytes of src into dest, in place.
public void c_xor_16(ref byte[] dest, byte[] src)
{
for (int i = 0; i < 16; i++)
{
dest[i] ^= src[i];
}
}
This is the main function and byte array z is the output of the GF multiplication.
// Multiply x and y in GF(2^128) using the GCM field polynomial
// (reflected reduction constant 0xe1 in the top byte) and store the
// 16-byte product in z. Classic shift-and-xor multiplication: walk the
// bits of x MSB-first, accumulating a shifted copy of y.
public void c_gf_mult(byte[] x, byte[] y, ref byte[] z)
{
int i, j;
byte[] v = new byte[16];
z = new byte[16];
Array.Clear(z, 0, 16);
Array.Copy(y, v, 16);
for (i = 0; i < 16; i++)
{
for (j = 0; j < 8; j++)
{
// If the current bit of x (bit 7-j of byte i) is set, add
// (XOR) the current multiple v into the accumulator z.
if ((byte)(x[i] & BIT((byte)(7 - j))) > 0)
{
c_xor_16(ref z, v);
}
// Shift v right one bit; when a 1 falls off the end, reduce
// modulo the field polynomial by xoring 0xe1 into byte 0.
if ((byte)(v[15] & 0x01) > 0)
{
shift_right_block(ref v);
v[0] ^= 0xe1;
}
else
{
shift_right_block(ref v);
}
}
}
return;
}

Reversing a long in c#

I know in Java that you can simply reverse a long (101010100000001) by using Long.reverse (100000001010101). However, is there anything like this that exists in C#?
The answer to your question is no. However it is achievable by code.
How about this...
// Reverse the decimal digits of l, preserving its sign
// (e.g. -1230 -> -321; trailing zeros vanish).
public static long RevLong(long l)
{
long remaining = l;
if (remaining < 0)
remaining = -remaining;
long reversed = 0L;
while (remaining > 0)
{
reversed = reversed * 10 + remaining % 10;
remaining /= 10;
}
return l < 0 ? -reversed : reversed;
}
How about...
// Extract bit n of x as 0 or 1.
// Fix: the original shifted the int literal 1 (a 32-bit shift, whose
// count C# masks mod 32), so the mask was wrong for n >= 31; the
// 64-bit literal 1UL builds the correct mask for n in [0, 63].
public ulong Bit(ulong x, int n)
{
return (x & (1UL << n)) >> n;
}
// Mirror the 64 bits of x: bit i of the result is bit (63 - i) of x.
// Fixes: the original indexed bit (64 - i), which is out of range at
// i = 0 and skips bit 0 entirely, and it called Bit as an instance
// method on ulong (x.Bit(...)), which does not compile because Bit is
// not declared as an extension method.
public ulong ReverseBits(ulong x)
{
ulong result = 0;
for (int i = 0; i < 64; i++)
result |= Bit(x, 63 - i) << i;
return result;
}
Another approach for reversing a long is:
// Reverse the decimal digits of num (123456789 -> 987654321).
long num = 123456789;
long reversed = 0;
while (num > 0)
{
reversed = (reversed * 10) + (num % 10);
num /= 10;
}
or
// NOTE(review): this variant mixes bases — (reversed << 1) +
// (reversed << 3) is reversed * 10, but (num & 1) / (num >>= 1)
// consume num's BITS. The result is therefore the binary digits of
// num written out, reversed, as a decimal number — not a decimal
// digit reversal. Presumably intentional given the bit-reversal
// question; verify before reuse.
long num = 123456789;
long reversed = 0;
while (num > 0)
{
reversed = (reversed << 1) + (reversed << 3) + (num & 1);
num >>= 1;
}
There are some interesting examples here. You could adapt one of these into an extension method, like so:
// Extension method that reverses the BYTE order of a 64-bit value
// (an 8-byte endian swap). Note this is not a bit-level reversal —
// bits within each byte keep their order.
public static class LongExtension
{
public static ulong Reverse(this ulong value)
{
return (value & 0x00000000000000FFUL) << 56 | (value & 0x000000000000FF00UL) << 40 |
(value & 0x0000000000FF0000UL) << 24 | (value & 0x00000000FF000000UL) << 8 |
(value & 0x000000FF00000000UL) >> 8 | (value & 0x0000FF0000000000UL) >> 24 |
(value & 0x00FF000000000000UL) >> 40 | (value & 0xFF00000000000000UL) >> 56;
}
}
Then you can call it like this:
ulong myLong = 3L;
ulong reversed = myLong.Reverse();
Hope this will work
string s = 101010100000001.tostring();
char[] charArray = s.ToCharArray();
Array.Reverse( charArray );
return new string( charArray );

Conversion of CRC function from C to C# yields wrong values

I'm trying to convert a couple of simple CRC calculating functions from C to C#, but I seem to be getting incorrect results.
The C functions are:
#define CRC32_POLYNOMIAL 0xEDB88320
/* One entry of the reflected CRC-32 (IEEE 802.3) lookup table:
 * feed the 8 bits of i through the polynomial. */
unsigned long CRC32Value(int i)
{
    unsigned long crc = (unsigned long)i;
    int bit;
    for (bit = 0; bit < 8; bit++)
    {
        if (crc & 1)
            crc = (crc >> 1) ^ CRC32_POLYNOMIAL;
        else
            crc >>= 1;
    }
    return crc;
}
/* CRC-32 over ulCount bytes of ucBuffer (initial value 0, no final
 * XOR), one byte at a time via the table-entry function above. */
unsigned long CalculateBlockCRC32(
    unsigned long ulCount,
    unsigned char *ucBuffer)
{
    unsigned long ulCRC = 0;
    unsigned long idx;
    for (idx = 0; idx < ulCount; idx++)
    {
        unsigned long ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
        unsigned long ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[idx]) & 0xff);
        ulCRC = ulTemp1 ^ ulTemp2;
    }
    return ulCRC;
}
These are well defined, they are taken from a user manual. My C# versions of these functions are:
// 64-bit port of the C CRC32Value (the question's code).
// NOTE(review): C `unsigned long` on the original VC++ platform is
// 32 bits, so the faithful C# type is UInt32, not ulong — the
// corrected UInt32 version appears later in this post.
private ulong CRC32POLYNOMIAL = 0xEDB88320L;
private ulong CRC32Value(int i)
{
int j;
ulong ulCRC = (ulong)i;
for (j = 8; j > 0; j--)
{
// Equivalent of the C test `ulCRC & 1` — true only for odd values.
if (ulCRC % 2 == 1)
{
ulCRC = (ulCRC >> 1) ^ CRC32POLYNOMIAL;
}
else
{
ulCRC >>= 1;
}
}
return ulCRC;
}
// 64-bit port of the C CalculateBlockCRC32 (the question's code):
// CRC over ucBuffer[0 .. ulCount-1], initial value 0, no final XOR.
// NOTE(review): as with CRC32Value above, ulong should be UInt32 to
// match the C code's 32-bit unsigned long.
private ulong CalculateBlockCRC32(ulong ulCount, byte[] ucBuffer)
{
ulong ulTemp1;
ulong ulTemp2;
ulong ulCRC=0;
int bufind=0;
while (ulCount-- != 0)
{
// High 24 bits shifted down, xored with the table entry for the
// next byte (bufind++ replaces the C pointer walk *ucBuffer++).
ulTemp1 = (ulCRC >> 8) & 0x00FFFFFFL;
ulTemp2 = CRC32Value(((int)ulCRC ^ ucBuffer[bufind]) & 0xFF);
ulCRC = ulTemp1 ^ ulTemp2;
bufind++;
}
return ulCRC;
}
As I mentioned, there are discrepancies between the C version and the C# version. One possible source is my understanding of the C expression ulCRC & 1 which I believe will only be true for odd numbers.
I call the C# function like this:
// Example call: CRC over the ASCII bytes of the string.
string contents = "some data";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
ulong calculatedCRC = this.CalculateBlockCRC32((ulong)toBeHexed.Length, toBeHexed);
And the C function is called like this:
/* Example call: the C version over the same bytes, for comparison. */
char *Buff="some data";
unsigned long iLen = strlen(Buff);
unsigned long CRC = CalculateBlockCRC32(iLen, (unsigned char*) Buff);
I believe that I am calling the functions with the same data in each language, is that correct? If anyone could shed some light on this I would be very grateful.
As has already been pointed out by @Adriano Repetti, you should use the UInt32 datatype in place of the ulong type (ulong is the 64-bit unsigned UInt64, whereas in VC++ unsigned long is only a 32-bit unsigned type).
// Polynomial for the reflected CRC-32 algorithm (IEEE 802.3).
private UInt32 CRC32POLYNOMIAL = 0xEDB88320;
// One CRC-32 table entry: run the value i through 8 polynomial steps.
private UInt32 CRC32Value(int i)
{
UInt32 crc = (UInt32)i;
for (int step = 0; step < 8; step++)
{
bool lowBitSet = (crc & 1) != 0;
crc >>= 1;
if (lowBitSet)
{
crc ^= CRC32POLYNOMIAL;
}
}
return crc;
}
// CRC-32 over ucBuffer[0 .. ulCount-1], starting at 0, no final XOR.
private UInt32 CalculateBlockCRC32(UInt32 ulCount, byte[] ucBuffer)
{
UInt32 crc = 0;
for (UInt32 i = 0; i < ulCount; i++)
{
UInt32 upper = (crc >> 8) & 0x00FFFFFF;
UInt32 entry = CRC32Value(((int)crc ^ ucBuffer[i]) & 0xFF);
crc = upper ^ entry;
}
return crc;
}
// Example usage: CRC of the ASCII bytes of "12".
string contents = "12";
byte[] toBeHexed = Encoding.ASCII.GetBytes(contents);
UInt32 calculatedCRC = CalculateBlockCRC32((UInt32)toBeHexed.Length, toBeHexed);
Usually in C# it doesn't matter whether you use C# data type name(recommended by Microsoft) or ECMA type name. But in this and similar cases with bit level manipulation it can greatly clarify the intent and prevent mistakes.
In C it is always a good idea to use typedefs from stdint.h. They make the same job, as ECMA types in C# - clarify the intent, and also guarantee the length and sign of used datatypes(C compilers may use different lengths for the same types, because standard doesn't specify exact sizes):
#include <stdint.h>
#define CRC32_POLYNOMIAL ((uint32_t)0xEDB88320)
/* One reflected CRC-32 (IEEE 802.3) table entry for value i. */
uint32_t CRC32Value(uint32_t i)
{
    uint32_t crc = i;
    uint32_t round;
    for (round = 0; round < 8; round++)
    {
        crc = (crc & 1) ? ((crc >> 1) ^ CRC32_POLYNOMIAL) : (crc >> 1);
    }
    return crc;
}
/* CRC-32 (initial value 0, no final XOR) over ulCount bytes. */
uint32_t CalculateBlockCRC32(
    size_t ulCount,
    uint8_t *ucBuffer)
{
    uint32_t crc = 0;
    size_t pos;
    for (pos = 0; pos < ulCount; pos++)
    {
        const uint32_t high = (crc >> 8) & ((uint32_t)0x00FFFFFF);
        const uint32_t entry = CRC32Value((crc ^ ucBuffer[pos]) & 0xff);
        crc = high ^ entry;
    }
    return crc;
}
/* Example usage: CRC of the ASCII bytes of "12". */
char *Buff = "12";
size_t iLen = strlen(Buff);
uint32_t CRC = CalculateBlockCRC32(iLen, (uint8_t *) Buff);
printf("%u", CRC); /* NOTE(review): strictly, PRIu32 from <inttypes.h> is the portable format for uint32_t */

Base 52 to decimal And vice versa conversion

I am trying to adapt this code that can perform conversions to and from Base 52, which I am using to store RGB color information from C# to C++:
// Encode a Color's 24-bit RGB value (alpha stripped) as base-52 text.
public static string ColourToBase52(Color colour)
{
int value = colour.ToArgb() & 0x00FFFFFF; // Mask off the alpha channel.
return ToBase52(value);
}
// Decode a base-52 string back to a Color, forcing alpha to 0xFF
// (fully opaque) since only 24 RGB bits were stored.
public static Color ColourFromBase52(string colour)
{
int value = FromBase52(colour);
return Color.FromArgb(unchecked((int)(0xFF000000 | value)));
}
// Encode a non-negative int in base 52 using digits a-z then A-Z,
// left-padded with 'a' (the zero digit) to at least 5 characters.
public static string ToBase52(int value)
{
const string digits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
string encoded = "";
do
{
encoded = digits[value % digits.Length] + encoded;
value /= digits.Length;
}
while (value > 0);
return encoded.PadLeft(5, 'a');
}
// Decode a base-52 string (digits a-z then A-Z, most significant
// first) back to an int by summing digit * place value right-to-left.
public static int FromBase52(string value)
{
const string digits = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
int result = 0;
int placeValue = 1;
for (int pos = value.Length - 1; pos >= 0; pos--)
{
result += digits.IndexOf(value[pos]) * placeValue;
placeValue *= digits.Length;
}
return result;
}
For my C++ code, I have opted to combine the functions that get and return the color value as an integer with the Base 52 conversion functions:
// Decode a base-52 string (digits 'a'-'z' then 'A'-'Z', most
// significant digit first) into a 24-bit RGB colour.
// Fixes vs. the question's code:
//  * red used `* 255` instead of `& 255` — a typo, not a mask — which
//    garbled the channel (the observed r == 1 for white);
//  * DIFGetPositionInArray was given sizeof(baseChars), the size of a
//    pointer, instead of the 52-character alphabet length;
//  * removed the unused `const char *d` local.
struct DIFColor *DIFBase52ToColor(std::string c)
{
    const char *baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
    const int targetBase = 52;
    int multiplier = 1;
    int result = 0;
    // Accumulate digits from least significant (rightmost) to most.
    for (int i = (int)c.length() - 1; i >= 0; --i)
    {
        int digit = DIFGetPositionInArray(baseChars, targetBase, c[i]);
        result += digit * multiplier;
        multiplier *= targetBase;
    }
    uint8_t b = result & 255;
    uint8_t g = (result >> 8) & 255;
    uint8_t r = (result >> 16) & 255;
    return CreateDIFColor(r, g, b);
}
// Encode a colour's 24-bit RGB value (0xRRGGBB) as a 5-character
// base-52 string, left-padded with 'a' (the zero digit).
// Fixes vs. the question's code: the two `new char[]` buffers were
// never freed (leaks) and the result was never NUL-terminated, so
// constructing std::string from it read past the end (undefined
// behaviour — the varying output the asker observed). Building the
// std::string directly removes both problems.
std::string DIFColorToBase52(struct DIFColor *c)
{
    // Pack RGB into a 24-bit integer: 0xRRGGBB.
    int rgb = ((c->r & 0x0ff) << 16) | ((c->g & 0x0ff) << 8) | (c->b & 0x0ff);
    const char *baseChars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
    const int targetBase = 52;
    // Emit digits least-significant first, prepending each one.
    std::string s;
    do
    {
        s.insert(s.begin(), baseChars[rgb % targetBase]);
        rgb /= targetBase;
    } while (rgb > 0);
    // 24-bit values need at most 5 base-52 digits; pad to exactly 5.
    if (s.size() < 5)
        s.insert(s.begin(), 5 - s.size(), 'a');
    return s;
}
I also had to create two functions for array manipulation:
// Return the index of the first occurrence of c within the first
// `size` characters of array, or -1 when absent. Note the caller must
// pass the element count — sizeof on a pointer gives the pointer size.
int DIFGetPositionInArray(const char *array, size_t size, const char c)
{
    for (size_t pos = 0; pos != size; ++pos)
    {
        if (array[pos] == c)
            return (int)pos;
    }
    return -1;
}
// Copy characters from source[wheretostart...] into dest[wheretocopy...].
// NOTE(review): this is the broken version from the question — the loop
// condition compares i (the SOURCE index) against numtocopy, so the
// amount copied shifts with wheretostart, and dest is never
// NUL-terminated, which is what makes std::string(result) read
// uninitialized memory. The answer's corrected version appears below.
void DIFCopyCharArray(const char* source, int wheretostart, int wheretocopy, int numtocopy, char *dest)
{
int c = wheretocopy;
for(int i = wheretostart; i <= numtocopy; i++)
{
dest[c] = source[i];
c++;
}
}
However, when I tried to test it with a sanity check, it failed:
255,255,255 = 'aah1U' in Base52 RGB
aah1U = 1,245,59 in RGB
It also seems that every time I run the sanity check, a different value is produced:
255,255,255 = 'aah13' in Base52 RGB
aah13 = 1,245,59 in RGB
255,255,255 = 'aah1j' in Base52 RGB
aah1j = 1,245,59 in RGB
The expected output was:
255,255,255 = 'cpqEN' in Base52 RGB
cpqEN = 255,255,255 in RGB
Making me think that this is possibly a pointer problem.
The error is probably that you don't terminate the result string anywhere, which leads to undefined behavior in the following:
std::string s((const char*)result);
This is because the std::string constructor looks for the terminator when copying the C-style string you pass to it.
You can solve it two ways: Either add the terminator character '\0' to result, or tell the std::string constructor the length of result.
The problem lies in the fact that the array copy function is incorrect. It should be:
// Copy characters from source[wheretostart...] into
// dest[wheretocopy...] until the destination index passes numtocopy,
// then NUL-terminate dest (at index numtocopy + 1).
void DIFCopyCharArray(const char* source, int wheretostart, int wheretocopy, int numtocopy, char *dest)
{
    int dst = wheretocopy;
    int src = wheretostart;
    while (dst <= numtocopy)
    {
        dest[dst++] = source[src++];
    }
    dest[dst] = '\0';
}
Also, the array search function does not work because sizeof(baseChars) returns the size of a pointer (for example, 8 on a 64-bit build), which is not the number of elements.
Use a function like this:
// Return the index of the first occurrence of c in the first
// arrayElements entries of array, or -1 when c is not present.
int DIFGetPositionInArray(const char *array, int arrayElements, const char c)
{
    int pos = 0;
    while (pos < arrayElements)
    {
        if (array[pos] == c)
            return pos;
        ++pos;
    }
    return -1;
}
And call it like this;
DIFGetPositionInArray(baseChars,52,d[i]);

Sorting 3 numbers without branching

In C# or C++ how can I implement a branch-free sort of three (integer) numbers?
Is this possible?
No conditionals. Only a cast to uint. Perfect solution.
// Branch-free absolute value: b becomes 1 when a is negative, 0
// otherwise, so a - 2*b*a flips the sign exactly when a < 0.
// Fix: the original returned 2 * b * (a) + a, i.e. 3a for negative
// input (abs(-5) was -15), which broke the sort below for any negative
// operand. Undefined for INT_MIN, whose magnitude is unrepresentable.
int abs (int a)
{
int b = a;
b = (b >> (sizeof(int)*CHAR_BIT-1) & 1);
return a - 2 * b * a;
}
// Branch-free two-way max/min via the identity max = (a+b+|a-b|)/2.
// (Can overflow when a+b or a-b exceeds the int range.)
int max (int a, int b) { return (a + b + abs(a - b)) / 2; }
int min (int a, int b) { return (a + b - abs(a - b)) / 2; }
// Sort three ints descending: a = largest, b = middle, c = smallest.
// The middle value is recovered as the sum minus the max and min.
void sort (int & a, int & b, int & c)
{
int maxnum = max(max(a,b), c);
int minnum = min(min(a,b), c);
int middlenum = a + b + c - maxnum - minnum;
a = maxnum;
b = middlenum;
c = minnum;
}
You can write max, min and swap branch-free functions. Once you have these functions, you can use them to write sort function as:
// Helpers declared first so the snippet compiles as a single
// translation unit.
void swap(int &a, int &b)
{
    int tmp = a; a = b; b = tmp;
}
// Branch-free max of three: comparison results index small arrays.
int max( int a, int b, int c ) {
    int pair[] = { a, b };
    int best[] = { pair[ a<b ], c };
    return best[ best[0] < c ];
}
// Branch-free min of three, mirroring max with flipped comparisons.
int min( int a, int b, int c ) {
    int pair[] = { a, b };
    int least[] = { pair[ a>b ], c };
    return least[ least[0] > c ];
}
// Sort three ints descending: a = largest, b = middle, c = smallest.
void sort(int &a, int &b, int &c)
{
    int largest  = max(a,b,c);
    int smallest = min(a,b,c);
    // The middle value is the total minus the two extremes; compute it
    // before a and c are overwritten by the swaps below.
    b = a + b + c - largest - smallest;
    swap(largest, a);
    swap(smallest, c);
}
Test code:
// Read three ints from stdin and print them sorted in descending order.
int main() {
int a,b,c;
std::cin >> a >> b >> c;
sort(a,b,c);
std::cout << a <<"," << b << "," << c << std::endl;
return 0;
}
Input:
21 242 434
Output (descending order):
434, 242, 21
Demo : http://ideone.com/3ZOzc
I have taken the implementation of max from @David's answer here, and implemented min with a little twist.
You can do this in C++ with:
#include <iostream>
// Branch-free sort of an adjacent pair: afterwards in[0] holds the
// larger and in[1] the smaller value, via max = (sum + |diff|) / 2
// and min = (sum - |diff|) / 2.
void sort(int *in) {
    const int total = in[0] + in[1];
    const int gap = abs(in[1] - in[0]);
    in[0] = (total + gap) / 2;
    in[1] = (total - gap) / 2;
}
// Three pairwise sort() calls form an unrolled bubble sort of three
// elements (positions 0-1, then 1-2, then 0-1 again), leaving each
// array in descending order before printing it.
int main() {
int a[] = {3,4,1};
sort(a);
sort(a+1);
sort(a);
std::cout << a[0] << "," << a[1] << "," << a[2] << std::endl;
int b[] = {1,2,3};
sort(b);
sort(b+1);
sort(b);
std::cout << b[0] << "," << b[1] << "," << b[2] << std::endl;
}
The trick is in expressing the min/max elements as arithmetic operations, not branching and then calling sort on pairs enough times to "bubble sort" them.
I've made a totally generic version, using template meta-programming to call sort the right number of times. It all gets inlined exactly as you'd hope with gcc 4.7.0 on my x86 box (although call is unconditional on x86 anyway). I've also implemented an abs function that avoids branches on x86 (it makes a few assumptions about integers that make it less portable, it's based on gcc's __builtin_abs implementation for x86 though):
#include <iostream>
#include <limits.h>
// NOTE: despite the name, this sets in to -|in| (the NEGATED absolute
// value): mask is 0 for non-negative input (giving -in) and -1 for
// negative input (giving -1 - ~in = in). The template sorter below
// relies on that sign to order elements in descending order.
void myabs(int& in) {
    const int mask = in >> ((sizeof(int) * CHAR_BIT) - 1);
    in = mask - (in ^ mask);
}
// Compile-time-unrolled bubble sort over N ints. Each sorter<N,I> step
// orders the adjacent pair (in[I-1], in[I]) branch-free with the
// sum/difference trick; because myabs() yields -|diff|, the larger
// value lands at the LOWER index, so the final order is descending.
template <int N, int I=1, bool C=false>
struct sorter {
static void sort(int *in) {
const int sum = in[I-0]+in[I-1];
int diff = in[I-1]-in[I-0];
myabs(diff); // diff is now -|diff| (see myabs)
in[I-0] = (sum + diff) / 2; // min of the pair
in[I-1] = (sum - diff) / 2; // max of the pair
sorter<N, I+1, I+1>=N>::sort(in); // advance; C flips true at pass end
}
};
// One full pass done: restart with one fewer element to place.
template <int N,int I>
struct sorter<N,I,true> {
static void sort(int *in) {
sorter<N-1>::sort(in);
}
};
// Base case: nothing left to sort.
template <int I, bool C>
struct sorter<0,I,C> {
static void sort(int *) {
}
};
// Demo: sorts the three-element array descending (prints 4,3,1).
int main() {
int a[] = {3,4,1};
sorter<3>::sort(a);
std::cout << a[0] << "," << a[1] << "," << a[2] << std::endl;
}

Categories

Resources