PHP encrypt string using blowfish - c#

I have an application running on php 7.2 and I need to encrypt a string using the following criteria:
Cipher: NCFB
Output encoding: Base64
Initialization Vector (IV) = 8
I already know the output I should get, but my script returns a different string every time — I think because of the IV (openssl_random_pseudo_bytes) — and I can't really understand the logic of it. I am not very experienced with encryption, so I can't figure this out.
// Encrypt $string with Blowfish in CFB mode via PHP's OpenSSL extension.
$string = 'my-string';
$cipher = 'BF-CFB'; // Blowfish, CFB mode
$key = 'my-secret-key';
// IV length reported by OpenSSL for BF-CFB (Blowfish block size = 8 bytes).
$ivlen = openssl_cipher_iv_length($cipher);
// A fresh random IV on every run -- this is why the output differs each time.
$iv = openssl_random_pseudo_bytes($ivlen);
// OPENSSL_RAW_DATA returns raw ciphertext bytes, which are then Base64-encoded.
// NOTE(review): the IV is not stored or transmitted here -- the receiver
// cannot decrypt without it; verify how the API expects the IV to be supplied.
$encrypted = base64_encode(openssl_encrypt($string, $cipher, $key, OPENSSL_RAW_DATA, $iv));
Example
The goal of this encryption is API access, and there is a provided example written in C# for the encryption method. The thing is that that script generates the same string every time, unlike mine. I must build my script so I get the same results as the official example provided (here is a code snippet):
/// <summary>
/// Blowfish CFB-mode encryption: encrypts <paramref name="count"/> bytes from
/// <paramref name="dataIn"/> (starting at <paramref name="posIn"/>) into
/// <paramref name="dataOut"/> (starting at <paramref name="posOut"/>),
/// updating the internal 8-byte IV state so a later call continues the stream.
/// </summary>
/// <param name="dataIn">source plaintext buffer</param>
/// <param name="posIn">read offset into dataIn</param>
/// <param name="dataOut">destination ciphertext buffer</param>
/// <param name="posOut">write offset into dataOut</param>
/// <param name="count">number of bytes to encrypt</param>
/// <returns>the number of bytes processed (always count)</returns>
public new int Encrypt(
byte[] dataIn,
int posIn,
byte[] dataOut,
int posOut,
int count)
{
int end = posIn + count;

byte[] iv = this.iv;

// ivBytesLeft = keystream bytes of the current encrypted-IV block not yet
// consumed; ivPos = where those bytes start inside iv[].
int ivBytesLeft = this.ivBytesLeft;
int ivPos = iv.Length - ivBytesLeft;

// consume what's left in the IV buffer, but make sure to keep the new
// ciphertext in a round-robin fashion (since it represents the new IV)
if (ivBytesLeft >= count)
{
// what we have is enough to deal with the request
for (; posIn < end; posIn++, posOut++, ivPos++)
{
// CFB: ciphertext = plaintext XOR keystream; the ciphertext byte is
// written back into iv[] because it becomes part of the next IV.
iv[ivPos] = dataOut[posOut] = (byte)(dataIn[posIn] ^ iv[ivPos]);
}
this.ivBytesLeft = iv.Length - ivPos;
return count;
}
// drain the remaining keystream bytes up to the 8-byte block boundary
for (; ivPos < BLOCK_SIZE; posIn++, posOut++, ivPos++)
{
iv[ivPos] = dataOut[posOut] = (byte)(dataIn[posIn] ^ iv[ivPos]);
}
count -= ivBytesLeft;

// hoist the S-boxes and P-box into locals for speed inside the hot loop
uint[] sbox1 = this.sbox1;
uint[] sbox2 = this.sbox2;
uint[] sbox3 = this.sbox3;
uint[] sbox4 = this.sbox4;

uint[] pbox = this.pbox;

uint pbox00 = pbox[0];
uint pbox01 = pbox[1];
uint pbox02 = pbox[2];
uint pbox03 = pbox[3];
uint pbox04 = pbox[4];
uint pbox05 = pbox[5];
uint pbox06 = pbox[6];
uint pbox07 = pbox[7];
uint pbox08 = pbox[8];
uint pbox09 = pbox[9];
uint pbox10 = pbox[10];
uint pbox11 = pbox[11];
uint pbox12 = pbox[12];
uint pbox13 = pbox[13];
uint pbox14 = pbox[14];
uint pbox15 = pbox[15];
uint pbox16 = pbox[16];
uint pbox17 = pbox[17];

// now load the current IV into 32bit integers for speed (big-endian packing)
uint hi = (((uint)iv[0]) << 24) |
(((uint)iv[1]) << 16) |
(((uint)iv[2]) << 8) |
iv[3];

uint lo = (((uint)iv[4]) << 24) |
(((uint)iv[5]) << 16) |
(((uint)iv[6]) << 8) |
iv[7];

// we deal with the even part first
int rest = count % BLOCK_SIZE;
end -= rest;

for (; ; )
{
// need to create new IV material no matter what
// (the 16 Blowfish Feistel rounds below are fully unrolled: each line is
// one round of F(x) = ((S1[a] + S2[b]) ^ S3[c]) + S4[d] mixed with a P-box word)
hi ^= pbox00;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox01;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox02;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox03;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox04;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox05;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox06;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox07;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox08;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox09;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox10;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox11;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox12;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox13;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox14;
lo ^= (((sbox1[(int)(hi >> 24)] + sbox2[(int)((hi >> 16) & 0x0ff)]) ^ sbox3[(int)((hi >> 8) & 0x0ff)]) + sbox4[(int)(hi & 0x0ff)]) ^ pbox15;
hi ^= (((sbox1[(int)(lo >> 24)] + sbox2[(int)((lo >> 16) & 0x0ff)]) ^ sbox3[(int)((lo >> 8) & 0x0ff)]) + sbox4[(int)(lo & 0x0ff)]) ^ pbox16;

// final output whitening: cross hi/lo through the last P-box word
uint swap = lo ^ pbox17;
lo = hi;
hi = swap;

if (posIn >= end)
{
// exit right in the middle so we always have new IV material for the rest below
break;
}

// XOR a full 8-byte plaintext block into the fresh keystream (big-endian)
hi ^= (((uint)dataIn[posIn]) << 24) |
(((uint)dataIn[posIn + 1]) << 16) |
(((uint)dataIn[posIn + 2]) << 8) |
dataIn[posIn + 3];

lo ^= (((uint)dataIn[posIn + 4]) << 24) |
(((uint)dataIn[posIn + 5]) << 16) |
(((uint)dataIn[posIn + 6]) << 8) |
dataIn[posIn + 7];

posIn += 8;

// now stream out the whole block
dataOut[posOut] = (byte)(hi >> 24);
dataOut[posOut + 1] = (byte)(hi >> 16);
dataOut[posOut + 2] = (byte)(hi >> 8);
dataOut[posOut + 3] = (byte)hi;

dataOut[posOut + 4] = (byte)(lo >> 24);
dataOut[posOut + 5] = (byte)(lo >> 16);
dataOut[posOut + 6] = (byte)(lo >> 8);
dataOut[posOut + 7] = (byte)lo;

posOut += 8;
}

// store back the new IV (hi/lo hold the last encrypted block = next keystream)
iv[0] = (byte)(hi >> 24);
iv[1] = (byte)(hi >> 16);
iv[2] = (byte)(hi >> 8);
iv[3] = (byte)hi;
iv[4] = (byte)(lo >> 24);
iv[5] = (byte)(lo >> 16);
iv[6] = (byte)(lo >> 8);
iv[7] = (byte)lo;

// emit the rest (the final partial block, consuming keystream from iv[])
for (int i = 0; i < rest; i++)
{
iv[i] = dataOut[posOut + i] = (byte)(dataIn[posIn + i] ^ iv[i]);
}

this.ivBytesLeft = iv.Length - rest;

return count;
}

That is what is expected with your PHP code. CFB mode turns a block cipher into a stream cipher. Due to semantic security (i.e. randomized encryption), you need a different IV for each encryption under the same key. Otherwise, an attacker can mount a two-time-pad attack — as with a One-Time-Pad — once they notice that the IV is re-used.
You should always generate the IV freshly.
$iv = openssl_random_pseudo_bytes($ivlen);
Note: There is still a problem: you may generate the same IV twice for the same key if the key is used too often. The easiest mitigation against IV re-use is using an incremental IV or generating the IVs with an LFSR; this is common practice. If you change the key for each encryption then IV re-use is not a problem; however, changing the IV is easier than changing the key.
Update: I've found your C# source code by just looking the comment
// consume what's left in the IV buffer, but make sure to keep the new
The author of this code says that
/// Useful if you don't want to deal with padding of blocks (in comparsion to CBC), however
/// a safe initialization vector (IV) is still needed.
This code is currently insecure to use.
You can use
SetIV(value, 0);
function to init the IV with the value coming from the PHP encryption.

Related

Separate bits from short[] in order of most to least significant C#

I want to separate the bits of a short[] contained in a byte[] so the most significant bit of each short are arranged in one contiguous block(array? line?) followed by the next bit from each short and so on.
This is a condensed sample of how the layout of the bits would change:
0101010101010101 0101010101010101
would become
0011001100110011 0011001100110011
or with 3 it would look like
0101010101010101 0101010101010101 0101010101010101
which would become
0001110001110001 1100011100011100 0111000111000111
I put that in a code block to preserve the line breaks.
This would be easy If I could address each bit individually but I have to use bitwise operators which makes it extremely difficult.
Ignoring the possibility that the number of elements in the array wouldn't be a multiple of the base type bit length which in this case is 16 I came up with this:
// Bit-plane transpose: gather the most significant bit of every 16-bit
// sample into the first output words, then the next bit, and so on.
// 'mask' selects the current bit plane; 'j' walks the samples in strides
// of 16 and wraps (dropping the mask one bit) when it runs off the end.
// NOTE(review): assumes numsamples is a multiple of 16 -- TODO confirm
// (the author states 480 samples, which is).
fixed(byte* inptr = sourcearray){ //the shorts in a byte[]
fixed(byte* outptr = destination){//the output byte[]
var insamples = (short*)inptr;
var outsamples = (ushort*)outptr;
var mask = (ushort)0b1000000000000000;
for(int i = 0, j = 0; i < numsamples; ++i, j += 16){
if(j >= numsamples){
j = 0;
mask >>= 1;
}
// pack the selected bit of 16 consecutive samples into one output word,
// most significant sample bit landing in the output's high bit
outsamples[i] = (ushort)((insamples[j] & mask) | ((insamples[j + 1] & mask) >> 1) | ((insamples[j + 2] & mask) >> 2) | ((insamples[j + 3] & mask) >> 3) |
((insamples[j + 4] & mask) >> 4) | ((insamples[j + 5] & mask) >> 5) | ((insamples[j + 6] & mask) >> 6) | ((insamples[j + 7] & mask) >> 7) |
((insamples[j + 8] & mask) >> 8) | ((insamples[j + 9] & mask) >> 9) | ((insamples[j + 10] & mask) >> 10) | ((insamples[j + 11] & mask) >> 11) |
((insamples[j + 12] & mask) >> 12) | ((insamples[j + 13] & mask) >> 13) | ((insamples[j + 14] & mask) >> 14) | ((insamples[j + 15] & mask) >> 15));
}
}
}
The array I'm working with is 480 shorts (960 bytes) long. I'm pretty sure it does what I want, but I'm having trouble writing the function that does the opposite, to restore the array to its original state; so far I have nothing that makes sense. I need it to be reasonably optimal to minimize the processing required, but it's hurting my brain.
I would probably be better off doing this in C++ but I want to keep the program entirely managed.
I hate to answer my own question but I have just discovered the System.Collections.BitArray class which allows me to address bits individually and within minutes I replaced the code in the op with this:
// Same transpose via BitArray: read the source bits with stride 16 so each
// bit position of every sample is emitted as one contiguous run.
for(int i = 0, j = 0, k = 0; i < inbits.Length; ++i, j += 16){
// wrapped past the end: restart one bit position later
if(j >= inbits.Length) j = ++k;
_outbitsout[i] = inbits[j];
}
and to reverse that operation:
// Inverse transpose: the stride becomes length/16, scattering each run of
// bits back to its original per-sample position.
var stride = inbits.Length/16;
for(int i = 0, j = 0, k = 0; i < inbits.Length; ++i, j += stride){
if(j >= inbits.Length) j = ++k;
_outbitsin[i] = inbits[j];
}

C# Convert long to bytes gives error: & cannot be applied to long and ulong

I want to convert a long to 8 bytes in C# with 100% reliability.
/// <summary>
/// Serializes a 64-bit signed integer to 8 bytes, least significant byte
/// first (little-endian), independent of the platform's endianness.
/// </summary>
/// <param name="thisLong">Value to serialize.</param>
/// <returns>The 8-byte little-endian representation.</returns>
public static byte[] ToBytes(this long thisLong)
{
    byte[] bytes = new byte[8];
    // Shift first, then mask with a plain int literal. This fixes CS0019:
    // the original masked with 0xFF00000000000000, which is a ulong literal,
    // and '&' cannot combine a long with a ulong.
    for (int i = 0; i < 8; i++)
    {
        bytes[i] = (byte)((thisLong >> (8 * i)) & 0xFF);
    }
    return bytes;
}
but I get an error on the last line at "thisLong & 0xFF00000000000000":
CS0019: Operator '&' cannot be applied to operands of type 'long' and 'ulong' (CS0019)
It works OK for int, so why not for long?:
/// <summary>
/// Serializes a 32-bit signed integer to 4 bytes, least significant byte
/// first (little-endian), independent of the platform's endianness.
/// </summary>
/// <param name="thisInt">Value to serialize.</param>
/// <returns>The 4-byte little-endian representation.</returns>
public static byte[] ToBytes(this int thisInt)
{
    var result = new byte[4];
    // Byte i carries bits (8*i)..(8*i+7); the cast to byte keeps the low 8 bits.
    for (int i = 0; i < 4; i++)
    {
        result[i] = (byte)(thisInt >> (8 * i));
    }
    return result;
}
I don't want to use BitConverter, to avoid issues with endianness.
I don't want to use ulong as converting may fail.

How to get an unsigned long from a byte array

I have an incoming byte array from a piece of test equipment. The byte array can either be two or four bytes long. I wrote the following code to convert these byte array's into unsigned longs:
/// <summary>Combines two bytes into an unsigned value, big-endian (MSB first).</summary>
private ulong GetUlongFrom2Bytes(byte MSB, byte LSB)
{
    ulong result = MSB;
    result = (result << 8) | LSB;
    return result;
}
/// <summary>Combines four bytes into an unsigned value, big-endian (MSB first).</summary>
private ulong GetUlongFrom4Bytes(byte MSB, byte msb, byte lsb, byte LSB)
{
    // BUG FIX: the original computed (MSB << 24) as a signed int, so for
    // MSB >= 0x80 the sum was negative and the (ulong) cast sign-extended it
    // (e.g. 0xFF,0,0,0 produced 0xFFFFFFFFFF000000 instead of 0xFF000000).
    // Shifting an unsigned operand avoids the sign extension.
    return ((ulong)MSB << 24) | ((ulong)msb << 16) | ((ulong)lsb << 8) | LSB;
}
Conversely, for going the opposite direction, I do the following code:
/// <summary>Serializes the low 32 bits of <paramref name="parm1"/> big-endian (MSB first).</summary>
private byte[] Get4Bytes(ulong parm1)
{
    var result = new byte[4];
    // result[0] carries bits 31..24, result[3] carries bits 7..0
    for (int i = 0; i < 4; i++)
    {
        result[i] = (byte)(parm1 >> (24 - 8 * i));
    }
    return result;
}
/// <summary>
/// Serializes two 32-bit values into 8 bytes: <paramref name="parm1"/>
/// occupies the first four bytes, <paramref name="parm2"/> the last four.
/// </summary>
private byte[] Get8Bytes(ulong parm1, ulong parm2)
{
    var result = new byte[8];
    Get4Bytes(parm1).CopyTo(result, 0);
    Get4Bytes(parm2).CopyTo(result, 4);
    return result;
}
I'm trying to debug my code for controlling this piece of equipment and I'd just like a sanity check from you guys here on SO to confirm that this code is written correctly for what I'm trying to do.
Assuming you want big-endian encoding, then yes: that'll be fine. You can also use BitConverter, but I think you are right not to - it involves extra array allocations, and forces the system's endianness on you (often little-endian).
Generally, I would recommend such code works with a buffer/offset API, though, for simplicity and efficiency - i.e.
/// <summary>
/// Writes the low 32 bits of <paramref name="value"/> into
/// <paramref name="buffer"/> at <paramref name="offset"/>, big-endian.
/// </summary>
private void Write32(ulong value, byte[] buffer, int offset)
{
    // Emit from the highest byte (bits 31..24) down to the lowest.
    for (int shift = 24; shift >= 0; shift -= 8)
    {
        buffer[offset++] = (byte)((value >> shift) & 0xFF);
    }
}
This would do it:
/// <summary>
/// Reads <paramref name="length"/> bytes from <paramref name="bytes"/>,
/// starting at <paramref name="start"/>, as one big-endian unsigned value.
/// </summary>
static ulong SliceValue(byte[] bytes, int start, int length)
{
    // BUG FIX: the original declared "var bytes = bytes.Skip(start).Take(length)",
    // which shadows the parameter in the same scope and does not compile
    // (CS0136). A plain index loop also drops the LINQ dependency and the
    // iterator allocation. (acc << 8) is equivalent to the original acc * 0x100.
    ulong acc = 0;
    for (int i = start; i < start + length; i++)
    {
        acc = (acc << 8) | bytes[i];
    }
    return acc;
}

How would I convert this crypto from C# to C

This is the C# code I use:
/// <summary>
/// Conquer-style stream transform: processes <paramref name="size"/> bytes
/// from <paramref name="in"/> into <paramref name="out"/> using the two key
/// tables and the running 16-bit counter; the _server flag selects the
/// direction (the two branches apply the same steps in reverse order).
/// </summary>
// FIX: the scraped listing used '#in'/'#out', which is not legal C#. The
// verbatim-identifier prefix is '@' -- required because 'in' and 'out' are
// C# keywords.
public void Decrypt(byte[] @in, byte[] @out, int size)
{
    // NOTE(review): lock(this) is an anti-pattern (external code can lock the
    // same instance and deadlock); a private readonly gate object would be
    // safer, but the original synchronization target is preserved here.
    lock (this)
    {
        for (ushort i = 0; i < size; i++)
        {
            if (_server)
            {
                @out[i] = (byte)(@in[i] ^ 0xAB);
                @out[i] = (byte)((@out[i] << 4) | (@out[i] >> 4)); // swap nibbles
                @out[i] = (byte)(ConquerKeys.Key2[_inCounter >> 8] ^ @out[i]);
                @out[i] = (byte)(ConquerKeys.Key1[_inCounter & 0xFF] ^ @out[i]);
            }
            else
            {
                @out[i] = (byte)(ConquerKeys.Key1[_inCounter & 0xFF] ^ @in[i]);
                @out[i] = (byte)(ConquerKeys.Key2[_inCounter >> 8] ^ @out[i]);
                @out[i] = (byte)((@out[i] << 4) | (@out[i] >> 4)); // swap nibbles
                @out[i] = (byte)(@out[i] ^ 0xAB);
            }
            _inCounter = (ushort)(_inCounter + 1);
        }
    }
}
and this is how I converted it to work in C.
/*
 * C port of the C# Decrypt above. Transforms `size` bytes of `in` using the
 * Key1/Key2 tables and the global `incounter`; `server` selects direction.
 * Returns a heap-allocated buffer of `size` bytes (caller must free()),
 * or NULL on allocation failure.
 *
 * BUG FIX 1: the original returned a stack-allocated array, which is invalid
 * once the function returns -- the buffer is now heap-allocated.
 * BUG FIX 2: in the server branch the last step XORed in[i] instead of
 * chaining out[i], diverging from the C# original.
 * Also uses unsigned char internally: with a signed char, `c >> 4` would
 * sign-extend and break the nibble swap for bytes >= 0x80.
 */
char* decrypt(char* in, int size, int server)
{
    unsigned char* out = (unsigned char*)calloc((size_t)size, sizeof(unsigned char));
    if (out == NULL)
        return NULL;
    const unsigned char* src = (const unsigned char*)in;
    for (int i = 0; i < size; i++)
    {
        if (server == 1)
        {
            out[i] = (unsigned char)(src[i] ^ 0xAB);
            out[i] = (unsigned char)(out[i] << 4 | out[i] >> 4); /* swap nibbles */
            out[i] = (unsigned char)(Key2[incounter >> 8] ^ out[i]);
            out[i] = (unsigned char)(Key1[incounter & 0xFF] ^ out[i]); /* was ^ in[i] */
        }
        else if (server == 0)
        {
            out[i] = (unsigned char)(Key1[incounter & 0xFF] ^ src[i]);
            out[i] = (unsigned char)(Key2[incounter >> 8] ^ out[i]);
            out[i] = (unsigned char)(out[i] << 4 | out[i] >> 4); /* swap nibbles */
            out[i] = (unsigned char)(out[i] ^ 0xAB);
        }
        incounter++;
    }
    return (char*)out;
}
However for some reason the C one does not work.
Link for the full C# file
Link for the full C file
Link for the C implementation
There was a translation error.
The C# line:
#out[i] = (byte)(ConquerKeys.Key1[_inCounter & 0xFF] ^ #out[i]);
Became:
out[i] = Key1[incounter & 0xFF] ^ in[i];
The value on the right of the xor (^) is from the wrong array.
Additionally, you are returning a stack-allocated variable, which will cause all sorts of problem.
Change:
char out[size];
memset(out, 0, size);
to:
char *out = (char*)calloc(size, sizeof(char));
The most glaring error I see is that you are returning a pointer to a stack-allocated array, which is going to get stomped by the next function call after decrypt() returns. You need to malloc() that buffer or pass in a pointer to a writable buffer.
You are returning a reference to a local variable which is illegal. Either let the caller pass in an array or use malloc() to create an array inside the method.
I also suggest turning char into unsigned char since it is more portable. If your platform assumes char is the same as signed char, the arithmetic (bit shifts, etc) will not work right.
So just specify unsigned char explicitly (use a typedef or include <stdint.h> if unsigned char seems too long-winded for you).

How do I convert byte values into decimals?

I'm trying to load some decimal values from a file but I can't work out the correct way to take the raw values and convert them into decimals.
I've read the file out into a byte array, and each chunk of four bytes is supposed to represent one decimal value. To help figure it out, I've constructed a table of how the decimal values 1 through to 46 are represented as four byte chunks.
For instance, the number 1 appears as 0,0,128,63 the number 2 as 0,0,0,64 and so on up to 46, which is 0,0,56,66. The full table is available here.
There is also another series of numbers which go to three decimal places and include negatives, which is here.
The only documentation I have states
They are stored least significant byte first: 1's, 256's, 65536's, 16777216's. This makes the hex sequence 01 01 00 00 into the number 257 (decimal). In C/C++, to read e.g. a float, do: float x; fread(&x, sizeof(float), 1, fileptr);
However I'm using .NET's File.ReadAllBytes method so this isn't much help. If anyone can spare a few minutes to look at the examples files and see if they can spot a way to convert the values to decimals I'd be most grateful.
You can use BitConverter.ToSingle to read a float value from a byte array, so to get a sequence of floats, you could do something like this:
byte[] data = File.ReadAllBytes(fileName);
int count = data.Length / 4;
Debug.Assert(data.Length % 4 == 0);
IEnumerable<float> values = Enumerable.Range(0, count)
.Select(i => BitConverter.ToSingle(data, i*4));
Have you looked into using the BitConverter class? It converts between byte arrays and various types.
Edit:
MSDN has a helpful comment on the documentation for BitConverter at http://msdn.microsoft.com/en-us/library/system.bitconverter_methods(v=vs.85).aspx:
/// <summary>
/// Rebuilds a decimal from its 16-byte representation: four little-endian
/// 32-bit words in the order lo, mid, hi, flags (the layout matching
/// decimal.GetBits).
/// </summary>
public static decimal ToDecimal(byte[] bytes)
{
    int[] bits = new int[4];
    for (int word = 0; word < 4; word++)
    {
        int baseIndex = word * 4;
        int value = 0;
        // assemble the word from its four bytes, highest byte first
        for (int b = 3; b >= 0; b--)
        {
            value = (value << 8) | bytes[baseIndex + b];
        }
        bits[word] = value; // 0 = lo, 1 = mid, 2 = hi, 3 = flags
    }
    return new decimal(bits);
}
/// <summary>
/// Serializes a decimal into 16 bytes: the four 32-bit words returned by
/// decimal.GetBits (lo, mid, hi, flags), each stored little-endian.
/// </summary>
public static byte[] GetBytes(decimal d)
{
    byte[] bytes = new byte[16];
    int[] bits = decimal.GetBits(d); // [lo, mid, hi, flags]
    for (int word = 0; word < 4; word++)
    {
        int value = bits[word];
        for (int b = 0; b < 4; b++)
        {
            // least significant byte of each word goes first
            bytes[word * 4 + b] = (byte)(value >> (8 * b));
        }
    }
    return bytes;
}
The .NET library implemented Decimal.GetBytes() method internally.
I've used the decompiled .NET library to create a simple conversion methods between decimal and byte arrary - you can find it here:
https://gist.github.com/eranbetzalel/5384006#file-decimalbytesconvertor-cs
EDIT : Here is the full source code from my link.
/// <summary>
/// Reads a decimal from 16 bytes at <paramref name="offset"/> in
/// <paramref name="buffer"/>: four little-endian 32-bit words in the order
/// lo, mid, hi, flags.
/// </summary>
public decimal BytesToDecimal(byte[] buffer, int offset = 0)
{
    var decimalBits = new int[4];
    for (int word = 0; word < 4; word++)
    {
        int p = offset + word * 4;
        decimalBits[word] = buffer[p]
            | (buffer[p + 1] << 8)
            | (buffer[p + 2] << 16)
            | (buffer[p + 3] << 24);
    }
    return new Decimal(decimalBits);
}
/// <summary>
/// Serializes a decimal into 16 bytes: the four 32-bit words from
/// Decimal.GetBits (lo, mid, hi, flags), each stored little-endian.
/// </summary>
public byte[] DecimalToBytes(decimal number)
{
    var decimalBuffer = new byte[16];
    // BUG FIX: Decimal.GetBits returns int[] directly; the original indexed
    // "decimalBits.Value[0]", which does not compile (int[] has no Value).
    var decimalBits = Decimal.GetBits(number);
    for (int word = 0; word < 4; word++)
    {
        int bits = decimalBits[word];
        decimalBuffer[word * 4 + 0] = (byte)bits;
        decimalBuffer[word * 4 + 1] = (byte)(bits >> 8);
        decimalBuffer[word * 4 + 2] = (byte)(bits >> 16);
        decimalBuffer[word * 4 + 3] = (byte)(bits >> 24);
    }
    return decimalBuffer;
}
As others have mentioned, use the BitConverter class, see the example below:
// Worked examples: little-endian IEEE-754 single-precision layouts.
// 0x3F800000 (stored LSB-first) is 1.0f; 0x40000000 is 2.0f.
byte[] bytez = new byte[] { 0x00, 0x00, 0x80, 0x3F };
float flt = BitConverter.ToSingle(bytez, 0); // 1.0
bytez = new byte[] { 0x00, 0x00, 0x00, 0x40 };
flt = BitConverter.ToSingle(bytez, 0); // 2.0
// Sign bit set in the top byte (0xBE...) gives a negative value.
bytez = new byte[] { 0, 0, 192, 190 };
flt = BitConverter.ToSingle(bytez, 0); // -0.375

Categories

Resources