Matching a Java Triple DES result to the C# one

I have the following C# implementation of triple DES
byte[] bKey = HexToBytes("C67DDB0CE47D27FAF6F32ECA5C99E8AF");
byte[] bMsg = HexToBytes("ff00");
byte[] iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
DESCryptoServiceProvider des = new DESCryptoServiceProvider();
des.Padding = PaddingMode.Zeros;
des.Mode = CipherMode.CBC;
byte[] bK1 = new byte[8];
for (int i = 0; i < 8; i++) bK1[i] = bKey[i];
byte[] bK2 = new byte[8];
for (int i = 0; i < 8; i++) bK2[i] = bKey[i + 8];
ICryptoTransform ict1 = des.CreateEncryptor(bK1, iv);
byte[] bFt = ict1.TransformFinalBlock(bMsg, 0, bMsg.Length);
byte[] bLCb = new byte[8];
for (int i = 0; i < 8; i++) bLCb[i] = bFt[i + bFt.Length - 8];
des.Mode = CipherMode.ECB;
ICryptoTransform ict1_5 = des.CreateDecryptor(bK2, iv);
bLCb = ict1_5.TransformFinalBlock(bLCb, 0, bLCb.Length);
ICryptoTransform ict2 = des.CreateEncryptor(bK1, iv);
byte[] bMac = ict2.TransformFinalBlock(bLCb, 0, bLCb.Length);
ToHex(bMac); // outputs: 4BC0479D7889CF8E
I need to produce the same result in Java/Groovy, which is where I'm apparently stuck.
The code I have so far is as follows:
byte[] bKey = Hex.decode("C67DDB0CE47D27FAF6F32ECA5C99E8AF")
byte[] bMsg = Hex.decode("ff00")
byte[] keyBytes = Arrays.copyOf(bKey, 24) // build a 24-byte K1||K2||K1 key
int j = 0, k = 16
while (j < 8) {
keyBytes[k++] = keyBytes[j++]
}
SecretKey key3 = new SecretKeySpec(keyBytes, "DESede")
IvParameterSpec iv3 = new IvParameterSpec(new byte[8])
Cipher cipher3 = Cipher.getInstance("DESede/CBC/PKCS5Padding")
cipher3.init(Cipher.ENCRYPT_MODE, key3, iv3)
byte[] bMac = cipher3.doFinal(bMsg)
println new String(Hex.encode(bMac))
This one outputs: ef2c57c3fa18d0a5
Hex.decode() here is from Bouncy Castle.
I have also tried to reproduce the same C# code in Java by using DES/CBC twice and ECB in the final round, which gave me yet another result: 48f63c809c38e1eb
It'd be great if someone could give me a hint about what I may be doing wrong.
Update:
Thanks everyone for your help! Final code that works as needed without much tweaking:
Security.addProvider(new BouncyCastleProvider())
byte[] bKey = Hex.decode("C67DDB0CE47D27FAF6F32ECA5C99E8AF")
byte[] bMsg = Hex.decode("ff00")
byte[] keyBytes = Arrays.copyOf(bKey, 24) // build a 24-byte K1||K2||K1 key
int j = 0, k = 16
while (j < 8) {
keyBytes[k++] = keyBytes[j++]
}
SecretKey key3 = new SecretKeySpec(keyBytes, "DESede")
IvParameterSpec iv3 = new IvParameterSpec(new byte[8])
Cipher cipher3 = Cipher.getInstance("DESede/CBC/ZeroBytePadding")
cipher3.init(Cipher.ENCRYPT_MODE, key3, iv3)
byte[] bMac = cipher3.doFinal(bMsg)
println new String(Hex.encode(bMac))

You're using some non-standard padding and block chaining. You won't be able to use DESede. Try DES instead:
import javax.crypto.*
import javax.crypto.spec.*
def key1 = new SecretKeySpec("C67DDB0CE47D27FA".decodeHex(), "DES")
def key2 = new SecretKeySpec("F6F32ECA5C99E8AF".decodeHex(), "DES")
def plaintext = ("ff00" + "000000000000").decodeHex() // manually zero pad
def c1 = Cipher.getInstance("DES/CBC/NoPadding")
c1.init(Cipher.ENCRYPT_MODE, key1, new IvParameterSpec(new byte[8]))
def cipherText1 = c1.doFinal(plaintext)
def c2 = Cipher.getInstance("DES/CBC/NoPadding")
c2.init(Cipher.DECRYPT_MODE, key2, new IvParameterSpec(new byte[8]))
def cipherText2 = c2.doFinal(cipherText1)
def c3 = Cipher.getInstance("DES/ECB/NoPadding")
c3.init(Cipher.ENCRYPT_MODE, key1)
def cipherText3 = c3.doFinal(cipherText2)
assert cipherText3.encodeHex().toString() == "4bc0479d7889cf8e"

Related

C# encrypt code and C++ encrypt code are not matching

I'm implementing encrypt/decrypt code in C++ and C#.
I referred to this post and its answers.
But the C++ and C# encrypted output did not match.
Here are my codes.
C++
// base64 encode part
static const std::string base64_chars =
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz"
"0123456789+/";
static inline bool is_base64(BYTE c) {
return (isalnum(c) || (c == '+') || (c == '/'));
}
std::string base64_encode(BYTE const* buf, unsigned int bufLen) {
std::string ret;
int i = 0;
int j = 0;
BYTE char_array_3[3];
BYTE char_array_4[4];
while (bufLen--) {
char_array_3[i++] = *(buf++);
if (i == 3) {
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for (i = 0; (i < 4); i++)
ret += base64_chars[char_array_4[i]];
i = 0;
}
}
if (i)
{
for (j = i; j < 3; j++)
char_array_3[j] = '\0';
char_array_4[0] = (char_array_3[0] & 0xfc) >> 2;
char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4);
char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6);
char_array_4[3] = char_array_3[2] & 0x3f;
for (j = 0; (j < i + 1); j++)
ret += base64_chars[char_array_4[j]];
while ((i++ < 3))
ret += '=';
}
return ret;
}
//start encrypt
std::string key = "01286567891233460123456789a12345";
std::string iv = "0123456789123456";
std::string encrypt(const std::string& str_in)
{
std::string str_out;
std::string str_out2;
byte* keybyte = (byte*)key.c_str();
CryptoPP::AES::Encryption aesEncryption((byte*)key.c_str(), CryptoPP::AES::MAX_KEYLENGTH);
CryptoPP::CBC_Mode_ExternalCipher::Encryption cbcEncryption(aesEncryption, (byte*)iv.c_str());
StreamTransformationFilter stfEncryptor(cbcEncryption, new CryptoPP::StringSink(str_out));
stfEncryptor.Put(reinterpret_cast<const unsigned char*>(str_in.c_str()), str_in.length() + 1);
stfEncryptor.MessageEnd();
str_out2 = base64_encode(reinterpret_cast<const unsigned char*>(str_out.c_str()), strlen(str_out.c_str()));
return str_out2;
}
std::string decrypt(const std::string& cipher_text)
{
std::string str_out;
//need to insert code of decrypt base64
CryptoPP::AES::Decryption aesDecryption((byte*)key.c_str(), CryptoPP::AES::MAX_KEYLENGTH);
CryptoPP::CBC_Mode_ExternalCipher::Decryption cbcDecryption(aesDecryption, (byte*)iv.c_str());
CryptoPP::StreamTransformationFilter stfDecryptor(cbcDecryption, new CryptoPP::StringSink(str_out));
stfDecryptor.Put(reinterpret_cast<const unsigned char*>(cipher_text.c_str()), cipher_text.size());
stfDecryptor.MessageEnd();
return str_out;
}
C#
public string Encrypt(string testCode)
{
string clearText = testCode;
byte[] clearBytes = Encoding.Default.GetBytes(clearText);
using (Aes encryptor = Aes.Create("AES"))
{
//encryptor.BlockSize = 128;
encryptor.Padding = PaddingMode.Zeros;
encryptor.KeySize = 128;
encryptor.Mode = CipherMode.CBC;
encryptor.Key = Encoding.Default.GetBytes("01286567891233460123456789a12345");
encryptor.IV = Encoding.Default.GetBytes("0123456789123456");
using (MemoryStream ms = new MemoryStream())
{
using (CryptoStream cs = new CryptoStream(ms, encryptor.CreateEncryptor(), CryptoStreamMode.Write))
{
cs.Write(clearBytes, 0, clearBytes.Length);
cs.Close();
}
byte[] bt = ms.ToArray();
clearText = Convert.ToBase64String(bt); //clearText = Encoding.Default.GetString(bt);
}
}
return clearText; //Return the encrypted command
}
public string Decrypt(string cipherText)
{
byte[] clearBytes = Convert.FromBase64String(cipherText);
using (Aes decryptor = Aes.Create("AES"))
{
// decryptor.BlockSize = 128;
decryptor.Padding = PaddingMode.Zeros;
decryptor.KeySize = 128;
decryptor.Mode = CipherMode.CBC;
decryptor.Key = Encoding.Default.GetBytes("01286567891233460123456789a12345");
decryptor.IV = Encoding.Default.GetBytes("0123456789123456");
using (MemoryStream ms = new MemoryStream())
{
using (CryptoStream cs = new CryptoStream(ms, decryptor.CreateDecryptor(), CryptoStreamMode.Write))
{
cs.Write(clearBytes, 0, clearBytes.Length);
cs.Close();
}
byte[] bt = ms.ToArray();
cipherText = Encoding.Default.GetString(bt);
}
}
return cipherText; //Return the decrypted text
}
C++ result
encrypted code : ks8zzu20w6zURkuZMgbx8g==
decrypted code : test
C# result
encrypted code : nsWRYBylyjVaJ5Yckk+SRw==
decrypted code : test
I tested with the word test in both C++ and C#, but the encrypted output did not match.
I also tested after removing the Base64 encoding, but the raw encrypted bytes did not match either.
Could anyone please share your knowledge?
EDIT1
I copied the encrypted output from C++ and pasted it into the C# decrypt code, like below:
string testcode = "ks8zzu20w6zURkuZMgbx8g==";
Decrypt(testcode);
//result - test↗↗↗↗↗
As you can see, the results look very similar, but there is something weird.
The ↗ symbol is appended after the word test.
I could not figure out why the result looks like this. Is there something I missed?
Solved
std::string encrypt(const std::string& str_in)
{
std::string str_out;
std::string str_out2;
byte* keybyte = (byte*)key.c_str();
CryptoPP::AES::Encryption aesEncryption((byte*)key.c_str(), CryptoPP::AES::MAX_KEYLENGTH);
CryptoPP::CBC_Mode_ExternalCipher::Encryption cbcEncryption(aesEncryption, (byte*)iv.c_str());
StreamTransformationFilter stfEncryptor(cbcEncryption, new CryptoPP::StringSink(str_out));
// 'str_in.length() + 1' in the line below made the encrypted output differ from the C# code.
/*stfEncryptor.Put(reinterpret_cast<const unsigned char*>(str_in.c_str()), str_in.length() + 1);*/
stfEncryptor.Put(reinterpret_cast<const unsigned char*>(str_in.c_str()), str_in.length());
stfEncryptor.MessageEnd();
str_out2 = cryptobase64_encode(str_out);
return str_out2;
}
As I commented, str_in.length() + 1 made the encrypted output different from the C# code.
I also changed the C# padding option from Zeros to PKCS7 to match the C++ encrypted output.
But I don't know why I need to set this option; I think I need to study this.
Anyway, it works well. Special thanks to @jdweng.
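For reference, Crypto++'s StreamTransformationFilter applies PKCS#7 padding by default in CBC mode, which is why the C# side only matches once PaddingMode.PKCS7 is selected. Below is a minimal C# sketch (not from the original post) of what the two padding modes append to the 4-byte message "test":
byte[] msg = Encoding.ASCII.GetBytes("test");      // 4 bytes
int blockSize = 16;
int padLen = blockSize - (msg.Length % blockSize); // 12
byte[] pkcs7 = new byte[blockSize];
byte[] zeros = new byte[blockSize];
Buffer.BlockCopy(msg, 0, pkcs7, 0, msg.Length);
Buffer.BlockCopy(msg, 0, zeros, 0, msg.Length);
for (int i = msg.Length; i < blockSize; i++)
{
    pkcs7[i] = (byte)padLen; // PKCS#7: twelve 0x0C bytes
    zeros[i] = 0x00;         // Zeros: twelve 0x00 bytes
}
// The padded blocks differ, so the CBC ciphertexts differ as well.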

How can I decrypt a byte[] of more than 16 bytes with AES

So I have a byte[] with a length of 1622 bytes and I want to decrypt all of it, but AES only has a block size of 128 bits.
If I try to split it up into blocks of 16 bytes I get this exception:
System.Security.Cryptography.CryptographicException
Additional information: Input buffer contains insufficient data. Could it be that rawDataArea.Length % 16 != 0?
Encryptor:
aes256Alg = new AesManaged
{
Key = new byte[] {112,90,16,164,90,221,73,154,246,32,13,102,145,7,57,115,37,5,3,102,205,39,202,231,195,148,202,229,53,138,102,242},
Mode = CipherMode.CBC,
KeySize = 256,
BlockSize = 128,
Padding = PaddingMode.PKCS7,
IV = new byte[] {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
};
ICryptoTransform aes256Encryptor = aes256Alg.CreateEncryptor(aes256Alg.Key,aes256Alg.IV);
Decryptor
AesManaged aes256AlgCBC;
aes256AlgCBC = new AesManaged
{
Key = new byte[] {190,151,28,108,241,101,254,174,16,11,87,84,239,140,239,85,195,25,78,192,105,109,95,128,160,146,123,31,190,188,181,216},
KeySize = 256,
Mode = CipherMode.CBC,
BlockSize = 128,
Padding = PaddingMode.PKCS7,
IV = new byte[] {199,114,91,241,148,90,133,166,13,52,142,187,101,125,81,73}
};
ICryptoTransform aes256CbcDecryptor = aes256AlgCBC.CreateDecryptor(aes256AlgCBC.Key, aes256AlgCBC.IV);
//byte[] rawDataArea = {0x00 .......} // Length of 1622 copied from hexeditor
List<Byte[]> dataAreaByteList = new List<byte[]>();
//Split the rawDataArea up to blocks of 16 bytes and then adding them to a list
//which later can be converted back to a big array
for (int i = 0; i < rawDataArea.Length; i += 16)
{
byte[] transformedBlock = new byte[] { };
aes128CbcDecryptor.TransformBlock(rawDataArea, i, (i += 16),transformedBlock,i);
dataAreaByteList.Add(transformedBlock);
}
You're executing i += 16 twice per loop iteration. You don't need to transform in 16-byte chunks anyway: use CryptoStream and write any amount you like, such as 4 KB. Most tutorials do this; maybe you found a bad one.
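A minimal sketch of that approach, reusing aes256CbcDecryptor and rawDataArea from the question (assuming the buffer holds the complete ciphertext, i.e. a multiple of 16 bytes):
byte[] plaintext;
using (MemoryStream ms = new MemoryStream())
using (CryptoStream cs = new CryptoStream(ms, aes256CbcDecryptor, CryptoStreamMode.Write))
{
    cs.Write(rawDataArea, 0, rawDataArea.Length); // write the whole buffer in one go
    cs.FlushFinalBlock();                         // processes the last block and strips the PKCS7 padding
    plaintext = ms.ToArray();
}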

C# store int in byte array

I'm working on a small project and I need to store 4 int values in a byte array (which will later be sent over a socket).
This is the code:
int a = 566;
int b = 1106;
int c = 649;
int d = 299;
byte[] bytes = new byte[16];
bytes[0] = (byte)(a >> 24);
bytes[1] = (byte)(a >> 16);
bytes[2] = (byte)(a >> 8);
bytes[3] = (byte)a;
I shifted the bits of the first value, but now I'm not sure how to retrieve it back by doing the reverse process.
I hope my question is clear; if I missed something I'll be glad to explain it again.
Thanks.
To extract the Int32 back out from the byte array, use this expression:
int b = bytes[0] << 24
| bytes[1] << 16
| bytes[2] << 8
| bytes[3]; // << 0
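A compact way to apply the same idea to all four values from the question (a small sketch using the shift-based packing above):
int[] values = { 566, 1106, 649, 299 };
byte[] bytes = new byte[16];
for (int i = 0; i < values.Length; i++)
{
    bytes[i * 4]     = (byte)(values[i] >> 24);
    bytes[i * 4 + 1] = (byte)(values[i] >> 16);
    bytes[i * 4 + 2] = (byte)(values[i] >> 8);
    bytes[i * 4 + 3] = (byte)values[i];
}
int[] back = new int[4];
for (int i = 0; i < back.Length; i++)
    back[i] = bytes[i * 4] << 24 | bytes[i * 4 + 1] << 16 | bytes[i * 4 + 2] << 8 | bytes[i * 4 + 3];
// back now equals { 566, 1106, 649, 299 }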
Depending on your comment reply, you can do it like this:
int a = 10;
byte[] aByte = BitConverter.GetBytes(a);
int b = 20;
byte[] bByte = BitConverter.GetBytes(b);
List<byte> listOfBytes = new List<byte>(aByte);
listOfBytes.AddRange(bByte);
byte[] newByte = listOfBytes.ToArray();
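To read the values back out of that combined array, BitConverter works in the other direction; note that it uses the machine's native byte order, so both ends of the socket must agree on endianness:
int a2 = BitConverter.ToInt32(newByte, 0); // first 4 bytes (a)
int b2 = BitConverter.ToInt32(newByte, 4); // next 4 bytes (b)
Console.WriteLine(a2 == a && b2 == b);     // True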
You can use a MemoryStream to wrap an array of bytes, and then use BinaryWriter to write items to the array, and BinaryReader to read items from the array.
Sample code:
int a = 566;
int b = 1106;
int c = 649;
int d = 299;
// Writing.
byte[] data = new byte[sizeof(int) * 4];
using (MemoryStream stream = new MemoryStream(data))
using (BinaryWriter writer = new BinaryWriter(stream))
{
writer.Write(a);
writer.Write(b);
writer.Write(c);
writer.Write(d);
}
// Reading.
using (MemoryStream stream = new MemoryStream(data))
using (BinaryReader reader = new BinaryReader(stream))
{
a = reader.ReadInt32();
b = reader.ReadInt32();
c = reader.ReadInt32();
d = reader.ReadInt32();
}
// Check results.
Trace.Assert(a == 566);
Trace.Assert(b == 1106);
Trace.Assert(c == 649);
Trace.Assert(d == 299);

Benchmark inconsistent results

I'm trying to profile a function. The function is meant to convert a structure to an array. I have two different approaches, using marshaling or BitConverter. The marshaling approach gives a single function that works with nearly all structures under certain conditions, whereas the BitConverter approach requires a custom function for each structure. My initial thought was that BitConverter would be faster, but my test results are not consistent.
Here is a cut and paste of the benchmark.
I've tried the below benchmark in a number of different forms.
When I run the BitConverter benchmark first, it tends to be faster.
When I run the marshaling benchmark first, it tends to be faster.
What am I missing?
Summary to show the flow. This is not the actual code, just how the benchmark flows.
main()
{
Stopwatch watch = new Stopwatch();
// To take care of JIT
bitConverterFunction();
marshalingFunction();
//Thread.Sleep(0); // I've tried this thinking it had to do with context switching issues but the results were basically the same.
watch.Start();
for (int i = 0; i < iterations; i++)
{
bitConverterFunction();
}
watch.Stop();
TimeSpan bitConverterTime = watch.Elapsed;
//Thread.Sleep(0); // I've tried this thinking it had to do with context switching issues
watch.Restart();
for (int i = 0; i < iterations; i++)
{
marshalingFunction();
}
watch.Stop();
TimeSpan marshalingTime = watch.Elapsed;
// it seems that whichever function is run first, tends to be the quickest.
}
Real Code if you want to test
using System;
using BenchmarkTool;
namespace BenchmarkConsole
{
class Program
{
static void Main(string[] args)
{
Benchmarks.StructToArrayConversion(100);
Benchmarks.StructToArrayConversion(1000);
Benchmarks.StructToArrayConversion(10000);
Benchmarks.StructToArrayConversion(100000);
Console.WriteLine("Press any key to continue.");
Console.ReadKey();
}
}
}
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;
using NUnit.Framework;
namespace BenchmarkTool
{
[TestFixture]
public static class Benchmarks
{
[TestCase(100)]
[TestCase(1000)]
[TestCase(10000)]
[TestCase(100000)]
[TestCase(1000000)]
public static void StructToArrayConversion(int iteration = 100)
{
Stopwatch watch = new Stopwatch();
EntityStatePDU6 state = new EntityStatePDU6()
{
Version = 0,
ExerciseID = 0x01,
PDUType = 0x02,
Family = 0x03,
Timestamp = 0x07060504,
Length = 0x0908,
Site = 0x0D0C,
Application = 0X0F0E,
Entity = 0X1110,
NumArticulationParams = 0X13,
VelocityX = BitConverter.ToSingle(new byte[] {0x14, 0x15, 0x16, 0x17}, 0),
VelocityY = BitConverter.ToSingle(new byte[] {0x18, 0x19, 0x1A, 0x1B}, 0),
VelocityZ = BitConverter.ToSingle(new byte[] {0x1C, 0x1D, 0x1E, 0x1F}, 0),
LocationX = BitConverter.ToSingle(new byte[] {0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27}, 0),
LocationY = BitConverter.ToSingle(new byte[] {0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F}, 0),
LocationZ = BitConverter.ToSingle(new byte[] {0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37}, 0),
Roll = BitConverter.ToSingle(new byte[] {0x38, 0x39, 0x3A, 0x3B}, 0),
Pitch = BitConverter.ToSingle(new byte[] {0x3C, 0x3D, 0x3E, 0x3F}, 0),
Heading = BitConverter.ToSingle(new byte[] {0x40, 0x41, 0x42, 0x43}, 0),
Appearance = 0X47464544
};
// To take care of JIT
ToArrayBitConverter(state);
state.ToByteArray();
Console.WriteLine("*** Benchmark Start ***");
Console.WriteLine("BitConverter Benchmark");
byte[] bitconverterArray = ToArrayBitConverter(state);
//Thread.Sleep(0);
watch.Start();
for(int i = 0; i < iteration; i++)
{
bitconverterArray = ToArrayBitConverter(state);
}
watch.Stop();
TimeSpan bitConverterTime = watch.Elapsed;
Console.WriteLine("{0} Iterations: {1}", iteration, watch.Elapsed.TotalSeconds.ToString("0.0000000"));
Console.WriteLine();
Console.WriteLine("Marshal StructToPtr Benchmark");
byte[] marshalArray = null;
//Thread.Sleep(0);
watch.Restart();
for (int i = 0; i < iteration; i++)
{
marshalArray = state.ToByteArray();
}
watch.Stop();
TimeSpan marshalTime = watch.Elapsed;
Console.WriteLine("{0} Iterations: {1}", iteration, watch.Elapsed.TotalSeconds.ToString("0.0000000"));
Console.WriteLine();
Console.WriteLine("Results");
Console.WriteLine("{0} Faster", marshalTime < bitConverterTime ? "Marshaling" : "BitConverter");
Console.WriteLine("Speed Ratio: {0}", marshalTime < bitConverterTime ? bitConverterTime.TotalSeconds / marshalTime.TotalSeconds : marshalTime.TotalSeconds / bitConverterTime.TotalSeconds);
Console.WriteLine("**********************************");
Console.WriteLine();
Assert.AreEqual(bitconverterArray.Length, marshalArray.Length);
for(int i = 0; i < bitconverterArray.Length; i++)
{
Assert.AreEqual(marshalArray[i],bitconverterArray[i], "#index " + i);
}
}
public static byte[] ToArrayBitConverter(EntityStatePDU6 entity)
{
int size = Marshal.SizeOf(typeof (EntityStatePDU6));
byte[] array = new byte[size];
array[0] = entity.Version;
array[1] = entity.ExerciseID;
array[2] = entity.PDUType;
array[3] = entity.Family;
array[4] = (byte)((0xFF & entity.Timestamp));
array[5] = (byte)((0xFF00 & entity.Timestamp) >> 8);
array[6] = (byte)((0xFF0000 & entity.Timestamp) >> 16);
array[7] = (byte)((0xFF000000 & entity.Timestamp) >> 24);
array[8] = (byte)((0xFF & entity.Length));
array[9] = (byte)((0xFF00 & entity.Length) >> 8);
// Padding1: array[10], array[11]
array[12] = (byte)((0xFF & entity.Site));
array[13] = (byte)((0xFF00 & entity.Site) >> 8);
array[14] = (byte)((0xFF & entity.Application));
array[15] = (byte)((0xFF00 & entity.Application) >> 8);
array[16] = (byte)((0xFF & entity.Entity));
array[17] = (byte)((0xFF00 & entity.Entity) >> 8);
//padding2 array[18]
array[19] = entity.NumArticulationParams;
byte[] bytes = BitConverter.GetBytes(entity.VelocityX);
array[20] = bytes[0];
array[21] = bytes[1];
array[22] = bytes[2];
array[23] = bytes[3];
bytes = BitConverter.GetBytes(entity.VelocityY);
array[24] = bytes[0];
array[25] = bytes[1];
array[26] = bytes[2];
array[27] = bytes[3];
bytes = BitConverter.GetBytes(entity.VelocityZ);
array[28] = bytes[0];
array[29] = bytes[1];
array[30] = bytes[2];
array[31] = bytes[3];
bytes = BitConverter.GetBytes(entity.LocationX);
array[32] = bytes[0];
array[33] = bytes[1];
array[34] = bytes[2];
array[35] = bytes[3];
array[36] = bytes[4];
array[37] = bytes[5];
array[38] = bytes[6];
array[39] = bytes[7];
bytes = BitConverter.GetBytes(entity.LocationY);
array[40] = bytes[0];
array[41] = bytes[1];
array[42] = bytes[2];
array[43] = bytes[3];
array[44] = bytes[4];
array[45] = bytes[5];
array[46] = bytes[6];
array[47] = bytes[7];
bytes = BitConverter.GetBytes(entity.LocationZ);
array[48] = bytes[0];
array[49] = bytes[1];
array[50] = bytes[2];
array[51] = bytes[3];
array[52] = bytes[4];
array[53] = bytes[5];
array[54] = bytes[6];
array[55] = bytes[7];
bytes = BitConverter.GetBytes(entity.Roll);
array[56] = bytes[0];
array[57] = bytes[1];
array[58] = bytes[2];
array[59] = bytes[3];
bytes = BitConverter.GetBytes(entity.Pitch);
array[60] = bytes[0];
array[61] = bytes[1];
array[62] = bytes[2];
array[63] = bytes[3];
bytes = BitConverter.GetBytes(entity.Heading);
array[64] = bytes[0];
array[65] = bytes[1];
array[66] = bytes[2];
array[67] = bytes[3];
array[68] = (byte)((0xFF & entity.Appearance));
array[69] = (byte)((0xFF00 & entity.Appearance) >> 8);
array[70] = (byte)((0xFF0000 & entity.Appearance) >> 16);
array[71] = (byte)((0xFF000000 & entity.Appearance) >> 24);
return array;
}
public static Byte[] ToByteArray<T>(this T obj) where T : struct
{
int size = Marshal.SizeOf(obj);
var arr = new byte[size];
IntPtr ptr = Marshal.AllocHGlobal(size);
Marshal.StructureToPtr(obj, ptr, false);
Marshal.Copy(ptr, arr, 0, size);
Marshal.FreeHGlobal(ptr);
return arr;
}
}
public struct EntityStatePDU6
{
// PDU Header 12 Bytes
public byte Version;
public byte ExerciseID;
public byte PDUType;
public byte Family;
public uint Timestamp;
public ushort Length;
public ushort Padding1;
// Entity ID 6 bytes
public ushort Site;
public ushort Application;
public ushort Entity;
public byte Padding2;
public byte NumArticulationParams;
public float VelocityX;
public float VelocityY;
public float VelocityZ;
public double LocationX;
public double LocationY;
public double LocationZ;
public float Roll;
public float Pitch;
public float Heading;
public uint Appearance;
}
}
Any of the cases below 100000 is too small to get consistent results.
The results are very inconsistent even between runs of the same code (> 2x timing differences). It makes me think there is a significant amount of garbage being generated and the outcome is dominated by when garbage collection kicks in and the performance of the garbage collector.
I added some GC.Collect calls after stopping the stopwatch and this made the results somewhat more consistent (variation between runs was +/- 10%). Marshaling was faster for 100000 and 1000000 iterations typically by 1.5 - 2 times. This was on Mono 2.10.8.1 compiled for Release|x86, so your mileage may vary.
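A sketch of that kind of stabilization (a hypothetical helper, not the poster's exact code): force a full collection before each timed loop so garbage left over from the previous run isn't charged to the next one.
static TimeSpan Time(Action action, int iterations)
{
    GC.Collect();                  // clear garbage from earlier runs
    GC.WaitForPendingFinalizers();
    GC.Collect();
    Stopwatch watch = Stopwatch.StartNew();
    for (int i = 0; i < iterations; i++)
        action();
    watch.Stop();
    return watch.Elapsed;
}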

"Padding is invalid and cannot be removed" when decrypting with Rijndael

I'm trying to decrypt a string encrypted with the Rijndael algorithm. This scheme pads the Key and IV on the right with "#" if they are less than 16 characters long. The string to decrypt is received from a web service, which sends it, along with the Key, to me in XML SOAP format. The IV is the MAC address of my machine (which the server uses as the IV to encrypt the string). When I try to decrypt the received string, my program crashes at this instruction:
while ((num5 = stream3.ReadByte()) != -1)
and it gives me this error: "Padding is not valid and it cannot be removed".
I've searched for this error on MSDN, and it says it happens when the IV used to encrypt is different from the IV used to decrypt, but, I repeat, the IV is the MAC address and it is the same every time.
This is the source code of the Encrypt and Decrypt functions:
public static string Decrypt(string strInputString, string strKeyString, string myIV)
{
if ((strInputString == null) || (strInputString.Length == 0))
{
return strInputString;
}
try
{
int num5;
int keySize = 0x100;
int blockSize = 0x100;
int length = keySize / 0x10;
if (strKeyString.Length > length)
{
strKeyString = strKeyString.Substring(0, length);
}
if (strKeyString.Length < length)
{
strKeyString = strKeyString.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(strKeyString);
if (myIV.Length > length)
{
myIV = myIV.Substring(0, length);
}
if (myIV.Length < length)
{
myIV = myIV.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(myIV);
byte[] bytes = Encoding.Unicode.GetBytes(strKeyString);
byte[] rgbIV = Encoding.Unicode.GetBytes(myIV);
RijndaelManaged managed = new RijndaelManaged {
BlockSize = blockSize,
KeySize = keySize
};
MemoryStream stream = new MemoryStream();
for (int i = 0; i < strInputString.Length; i += 2)
{
stream.WriteByte(byte.Parse(strInputString.Substring(i, 2), NumberStyles.AllowHexSpecifier));
}
stream.Position = 0L;
MemoryStream stream2 = new MemoryStream();
CryptoStream stream3 = new CryptoStream(stream, managed.CreateDecryptor(bytes, rgbIV), CryptoStreamMode.Read);
while ((num5 = stream3.ReadByte()) != -1)
{
stream2.WriteByte((byte) num5);
}
stream3.Close();
stream2.Close();
stream.Close();
byte[] buffer3 = stream2.ToArray();
return Encoding.Unicode.GetString(buffer3);
}
catch (Exception exception)
{
Log.Error(exception.Message);
}
return null; // nothing sensible to return if decryption failed
}
public static string Encrypt(string strInputString, string strKeyString, string myIV)
{
if ((strInputString == null) || (strInputString.Length == 0))
{
return strInputString;
}
try
{
int num4;
int keySize = 0x100;
int blockSize = 0x100;
int length = keySize / 0x10;
if (strKeyString.Length > length)
{
strKeyString = strKeyString.Substring(0, length);
}
if (strKeyString.Length < length)
{
strKeyString = strKeyString.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(strKeyString);
if (myIV.Length > length)
{
myIV = myIV.Substring(0, length);
}
if (myIV.Length < length)
{
myIV = myIV.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(myIV);
byte[] bytes = Encoding.Unicode.GetBytes(strKeyString);
byte[] rgbIV = Encoding.Unicode.GetBytes(myIV);
string str = "";
RijndaelManaged managed = new RijndaelManaged {
BlockSize = blockSize,
KeySize = keySize
};
MemoryStream stream = new MemoryStream(Encoding.Unicode.GetBytes(strInputString));
MemoryStream stream2 = new MemoryStream();
CryptoStream stream3 = new CryptoStream(stream2, managed.CreateEncryptor(bytes, rgbIV), CryptoStreamMode.Write);
while ((num4 = stream.ReadByte()) != -1)
{
stream3.WriteByte((byte) num4);
}
stream3.Close();
stream2.Close();
stream.Close();
foreach (byte num5 in stream2.ToArray())
{
str = str + num5.ToString("X2");
}
return str;
}
catch (Exception exception)
{
Log.Error(exception.Message);
}
return null; // nothing sensible to return if encryption failed
}
}
It works fine for me with the test code below. Are you sure you're passing in the encrypted string for decryption?
static void Main(string[] args)
{
string strInputString = "test";
string strKeyString = "test123";
string myIV = GetMacAddress();
string encryptedString = Encrypt(strInputString, strKeyString, myIV);
string decryptedString = Decrypt(encryptedString, strKeyString, myIV);
}
public static string Decrypt(string strInputString, string strKeyString, string myIV)
{
if ((strInputString == null) || (strInputString.Length == 0))
{
return strInputString;
}
int num5;
int keySize = 0x100;
int blockSize = 0x100;
int length = keySize / 0x10;
if (strKeyString.Length > length)
{
strKeyString = strKeyString.Substring(0, length);
}
if (strKeyString.Length < length)
{
strKeyString = strKeyString.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(strKeyString);
if (myIV.Length > length)
{
myIV = myIV.Substring(0, length);
}
if (myIV.Length < length)
{
myIV = myIV.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(myIV);
byte[] bytes = Encoding.Unicode.GetBytes(strKeyString);
byte[] rgbIV = Encoding.Unicode.GetBytes(myIV);
RijndaelManaged managed = new RijndaelManaged
{
BlockSize = blockSize,
KeySize = keySize
};
MemoryStream stream = new MemoryStream();
for (int i = 0; i < strInputString.Length; i += 2)
{
stream.WriteByte(byte.Parse(strInputString.Substring(i, 2), NumberStyles.AllowHexSpecifier));
}
stream.Position = 0L;
MemoryStream stream2 = new MemoryStream();
CryptoStream stream3 = new CryptoStream(stream, managed.CreateDecryptor(bytes, rgbIV), CryptoStreamMode.Read);
while ((num5 = stream3.ReadByte()) != -1)
{
stream2.WriteByte((byte)num5);
}
stream3.Close();
stream2.Close();
stream.Close();
byte[] buffer3 = stream2.ToArray();
return Encoding.Unicode.GetString(buffer3);
}
public static string Encrypt(string strInputString, string strKeyString, string myIV)
{
if ((strInputString == null) || (strInputString.Length == 0))
{
return strInputString;
}
int num4;
int keySize = 0x100;
int blockSize = 0x100;
int length = keySize / 0x10;
if (strKeyString.Length > length)
{
strKeyString = strKeyString.Substring(0, length);
}
if (strKeyString.Length < length)
{
strKeyString = strKeyString.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(strKeyString);
if (myIV.Length > length)
{
myIV = myIV.Substring(0, length);
}
if (myIV.Length < length)
{
myIV = myIV.PadRight(length, '#');
}
Encoding.Unicode.GetBytes(myIV);
byte[] bytes = Encoding.Unicode.GetBytes(strKeyString);
byte[] rgbIV = Encoding.Unicode.GetBytes(myIV);
string str = "";
RijndaelManaged managed = new RijndaelManaged
{
BlockSize = blockSize,
KeySize = keySize
};
MemoryStream stream = new MemoryStream(Encoding.Unicode.GetBytes(strInputString));
MemoryStream stream2 = new MemoryStream();
CryptoStream stream3 = new CryptoStream(stream2, managed.CreateEncryptor(bytes, rgbIV), CryptoStreamMode.Write);
while ((num4 = stream.ReadByte()) != -1)
{
stream3.WriteByte((byte)num4);
}
stream3.Close();
stream2.Close();
stream.Close();
foreach (byte num5 in stream2.ToArray())
{
str = str + num5.ToString("X2");
}
return str;
}
private static string GetMacAddress()
{
string macAddresses = "";
foreach (NetworkInterface nic in NetworkInterface.GetAllNetworkInterfaces())
{
if (nic.OperationalStatus == OperationalStatus.Up)
{
macAddresses += nic.GetPhysicalAddress().ToString();
break;
}
}
return macAddresses;
}
The padding is probably PKCS#5. Instead of #, pad with a byte value equal to the number of bytes to pad.
So if you have 5 bytes to pad, the padding bytes will be 0505050505; if you need to pad 2 bytes, the padding will be 0202.
I don't think that the IV is the issue. It's the password itself. I suspect that the password used by the server to encrypt is not the password being used by the client to decrypt.
The only way I could reproduce the crash the OP was reporting was by passing in an incorrect password to the Decrypt() method. Passing in an IV that was just slightly incorrect wouldn't throw an exception. For example, I encrypted with the IV as a MAC address in caps and using colons, and then decrypted with the IV as the same MAC address in lower case and using dashes -- the first few bytes were scrambled, but by about byte 16 everything started matching up with the plaintext original (expected in CBC, where the IV only directly affects the first decrypted block).
Are you using the same character encoding as the original string at the time of encryption?
I had a similar issue... the difference, in the end, was how I was passing the data (string) to be encrypted. If I copied/pasted it into a textbox, the encryption was different than if I hardcoded it into the program. So in short... the encoding of the original data makes a big difference. While the characters may look the same, in reality they could be represented quite differently (8 bytes, 16 bytes, etc.).
Find out how the original string was encoded prior to the encryption algorithm (and maybe check the IV parameter encoding as well).
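As a small illustration (not from the original answer), the same four characters produce different byte sequences depending on the encoding, so any key or IV derived from them differs too:
string s = "test";
Console.WriteLine(Encoding.ASCII.GetBytes(s).Length);   // 4
Console.WriteLine(Encoding.UTF8.GetBytes(s).Length);    // 4
Console.WriteLine(Encoding.Unicode.GetBytes(s).Length); // 8 (UTF-16, which the code above uses)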
