How do I read a Unicode char like "ä"?
public static string Read(int length, string absolutePath)
{
StringBuilder resultAsString = new StringBuilder();
using (MemoryMappedFile memoryMappedFile = MemoryMappedFile.CreateFromFile(absolutePath))
using (MemoryMappedViewStream memoryMappedViewStream = memoryMappedFile.CreateViewStream(0, length))
{
for (int i = 0; i < length; i++)
{
int result = memoryMappedViewStream.ReadByte();
if (result == -1)
{
break;
}
char letter = (char)result;
resultAsString.Append(letter);
}
}
return resultAsString.ToString();
}
The read int (result) is 195, and the char cast does not give me the expected result.
Not sure if that's what you're asking for, but you can use a StreamReader:
StreamReader sr = new StreamReader(stream, Encoding.Unicode);
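If the file is actually UTF-8 (the byte 195 you see is the first byte of "ä" in UTF-8), pass Encoding.UTF8 instead. A minimal sketch built on your original method, assuming UTF-8 content (needs System.IO, System.IO.MemoryMappedFiles and System.Text):
public static string Read(int length, string absolutePath)
{
    using (var memoryMappedFile = MemoryMappedFile.CreateFromFile(absolutePath))
    using (var viewStream = memoryMappedFile.CreateViewStream(0, length))
    using (var reader = new StreamReader(viewStream, Encoding.UTF8))
    {
        // Let the StreamReader do the multi-byte decoding.
        // Note: the view may be padded with trailing '\0' bytes up to a page boundary.
        return reader.ReadToEnd().TrimEnd('\0');
    }
}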
If you just want to load and read a UTF-8 file into a string variable the code can be as simple as
var text = File.ReadAllText(filePath, Encoding.UTF8);
If you insist on processing the UTF-8 data byte by byte, though, the parsing is a bit more involved.
Here's a rough (but working) sketch, to go with your original code:
StringBuilder resultAsString = new StringBuilder();
using (MemoryMappedFile memoryMappedFile = MemoryMappedFile.CreateFromFile(filePath))
using (MemoryMappedViewStream viewStream = memoryMappedFile.CreateViewStream(0, new FileInfo(filePath).Length))
{
int b;
while((b = viewStream.ReadByte()) != -1)
{
int acc = b;
bool readUtfDataBytes(int bytesToRead)
{
while (bytesToRead-- > 0)
{
var nextB = viewStream.ReadByte();
if (nextB == -1) return false; // EOS reached
if ((nextB & 0xC0) != 0x80) return false; // invalid UTF-8 data byte
acc <<= 6;
acc |= nextB & 0x3F;
}
return true;
}
if (b >= 0xF0) // 1111 0000
{
acc &= 0x07;
if (!readUtfDataBytes(3)) break; // break on malformed UTF-8
}
else if (b >= 0xE0) // 1110 0000
{
acc &= 0x0F;
if (!readUtfDataBytes(2)) break; // break on malformed UTF-8
}
else if (b >= 0xC0) // 1100 0000
{
acc &= 0x1F;
if (!readUtfDataBytes(1)) break; // break on malformed UTF-8
}
else if (b >= 0x80)
{
break; // break on malformed UTF-8
}
if (acc == 0xFEFF)
{
// ignore UTF-8 BOM
}
else
{
resultAsString.Append(Char.ConvertFromUtf32(acc));
}
}
}
I need to calculate a CRC checksum for a CAN BUS.
Scenario:
My input always looks like the following (where x is either 1 or 0, * marks x repeated multiple times, | marks a section, - is a change of input method, lb is Label, tb is TextBox, cb is ComboBox):
lb: 0
tb: 11x
cb: x
cb: x
lb: 1
tb: 4x => Convert.ToString(8-64 / 8, 2).PadLeft(4, '0');
tb: 8-64x, => 8-64 % 8 == 0
tb: 15x => CRC of above
lb: 1
lb: 11
lb: 1111111
lb: 111
Which returns this layout:
0|11*x-x|x-1-4*x|64*x|15*x-1|11|1111111|111
Example:
00101010101000100100101010100101010(missing 15*x CRC sum)1111111111111
This string will be processed by the following string extension, so that at most 5 equal digits follow each other:
public static string Correct4CRC(this string s)
{
return s.Replace("00000", "000001").Replace("11111", "111110");
}
After that following method returns the divisor:
public static BigInteger CreateDivisor(string s)
{
var i = BigInteger.Parse(s);
var d = BigInteger.Pow(i, 15) + BigInteger.Pow(i, 14) + BigInteger.Pow(i, 10) + BigInteger.Pow(i, 8) + BigInteger.Pow(i, 7) + BigInteger.Pow(i, 4) + BigInteger.Pow(i, 3) + 1;
return d;
}
The only problem I've got is the part with ^:
public static string CRC(this string s)
{
var dd = s.Correct4CRC();
var dr = dd.CreateDivisor().ToString();
int drl = dr.Length;
var d = dd.Substring(0, drl).CreateDivisor();
var f = d ^ dr.CreateDivisor();
var p = true;
while (p)
{
d = dd.Substring(0, drl).CreateDivisor();
f = d ^ dr.CreateDivisor();
p = d > dd.CreateDivisor();
}
return f.ToString();
}
I know this might be closed as asking for code, but please bear with me as I really don't get it. Another problem is that there is no real documentation that helped me figure it out.
Anyway, if you know one good doc which solves my problem, please add it as a comment. I'll check it out and close the answer myself if I get it.
I think your bitstuffing is wrong (the replacement of 00000 with 000001 and of 11111 with 111110), because it doesn't handle avalanche replacements, e.g. 0000011110000, which should become 0000011111000001.
There seems to be an example at http://blog.qartis.com/can-bus/. That page links to http://ghsi.de/CRC/index.php?Polynom=1100010110011001&Message=2AA80, which generates some C code for calculating the CRC-15.
// 0000011110000 becomes 0000011111000001
public static string BitStuff(string bits)
{
StringBuilder sb = null;
char last = ' ';
int count = 0;
for (int i = 0; i < bits.Length; i++)
{
char ch = bits[i];
if (ch == last)
{
count++;
if (count == 5)
{
if (sb == null)
{
// The maximum length is equal to the length of bits
// plus 1 for length 5, 2 for length 9, 3 for length 13...
// This is because the maximum expansion is for
// 00000111100001111... or 11111000011110000...
sb = new StringBuilder(bits.Length + (bits.Length - 1) / 4);
sb.Append(bits, 0, i);
}
sb.Append(ch);
last = ch == '0' ? '1' : '0';
sb.Append(last);
count = 1;
continue;
}
}
else
{
last = ch;
count = 1;
}
if (sb != null)
{
sb.Append(ch);
}
}
return sb != null ? sb.ToString() : bits;
}
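For example, the avalanche case from above:
string stuffed = BitStuff("0000011110000");
// stuffed == "0000011111000001"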
// Taken from http://ghsi.de/CRC/index.php?Polynom=1100010110011001&Message=2AA80
public static string Crc15(string bits)
{
var res = new char[15]; // CRC Result
var crc = new bool[15];
for (int i = 0; i < bits.Length; i++)
{
bool doInvert = (bits[i] == '1') ^ crc[14]; // XOR required?
crc[14] = crc[13] ^ doInvert;
crc[13] = crc[12];
crc[12] = crc[11];
crc[11] = crc[10];
crc[10] = crc[9] ^ doInvert;
crc[9] = crc[8];
crc[8] = crc[7] ^ doInvert;
crc[7] = crc[6] ^ doInvert;
crc[6] = crc[5];
crc[5] = crc[4];
crc[4] = crc[3] ^ doInvert;
crc[3] = crc[2] ^ doInvert;
crc[2] = crc[1];
crc[1] = crc[0];
crc[0] = doInvert;
}
// Convert binary to ASCII
for (int i = 0; i < 15; i++)
{
res[14 - i] = crc[i] ? '1' : '0';
}
return new string(res);
}
and then:
string bits = "0101010101010000000"; // Example data
string crc = Crc15(bits);
bits = bits + crc;
bits = BitStuff(bits);
bits += '1'; // CRC delimiter
bits += 'x'; // ACK slot TODO
bits += '1'; // ACK delimiter
bits += "1111111"; // EOF
Note that you have to fill in the value for the ACK slot.
I'm trying to decode a bitcoin address from a Base58 string into a byte array. To do that I rewrote the original function from the Satoshi repository (https://github.com/bitcoin/bitcoin/blob/master/src/base58.cpp), written in C++, into C# (which I'm using).
Original code
static const char* pszBase58 = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
bool DecodeBase58(const char *psz, std::vector<unsigned char>& vch) {
// Skip leading spaces.
while (*psz && isspace(*psz))
psz++;
// Skip and count leading '1's.
int zeroes = 0;
while (*psz == '1') {
zeroes++;
psz++;
}
// Allocate enough space in big-endian base256 representation.
std::vector<unsigned char> b256(strlen(psz) * 733 / 1000 + 1); // log(58) / log(256), rounded up.
// Process the characters.
while (*psz && !isspace(*psz)) {
// Decode base58 character
const char *ch = strchr(pszBase58, *psz);
if (ch == NULL)
return false;
// Apply "b256 = b256 * 58 + ch".
int carry = ch - pszBase58;
for (std::vector<unsigned char>::reverse_iterator it = b256.rbegin(); it != b256.rend(); it++) {
carry += 58 * (*it);
*it = carry % 256;
carry /= 256;
}
assert(carry == 0);
psz++;
}
// Skip trailing spaces.
while (isspace(*psz))
psz++;
if (*psz != 0)
return false;
// Skip leading zeroes in b256.
std::vector<unsigned char>::iterator it = b256.begin();
while (it != b256.end() && *it == 0)
it++;
// Copy result into output vector.
vch.reserve(zeroes + (b256.end() - it));
vch.assign(zeroes, 0x00);
while (it != b256.end())
vch.push_back(*(it++));
return true;
}
My rewritten C# version
private static string Base58characters = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
public static bool Decode(string source, ref byte[] destination)
{
int i = 0;
while (i < source.Length)
{
if (source[i] == 0 || !Char.IsWhiteSpace(source[i]))
{
break;
}
i++;
}
int zeros = 0;
while (source[i] == '1')
{
zeros++;
i++;
}
byte[] b256 = new byte[(source.Length - i) * 733 / 1000 + 1];
while (i < source.Length && !Char.IsWhiteSpace(source[i]))
{
int ch = Base58characters.IndexOf(source[i]);
if (ch == -1) //null
{
return false;
}
int carry = Base58characters.IndexOf(source[i]);
for (int k = b256.Length - 1; k > 0; k--)
{
carry += 58 * b256[k];
b256[k] = (byte)(carry % 256);
carry /= 256;
}
i++;
}
while (i < source.Length && Char.IsWhiteSpace(source[i]))
{
i++;
}
if (i != source.Length)
{
return false;
}
int j = 0;
while (j < b256.Length && b256[j] == 0)
{
j++;
}
destination = new byte[zeros + (b256.Length - j)];
for (int kk = 0; kk < destination.Length; kk++)
{
if (kk < zeros)
{
destination[kk] = 0x00;
}
else
{
destination[kk] = b256[j++];
}
}
return true;
}
The function that I'm using for converting from a byte array to a hex string:
public static string ByteArrayToHexString(byte[] source)
{
return BitConverter.ToString(source).Replace("-", "");
}
To test if everything is working correctly I've used the test cases found online here (https://github.com/ThePiachu/Bitcoin-Unit-Tests/blob/master/Address/Address%20Generation%20Test%201.txt). The good thing is that 97% of these tests pass correctly, but 3 of them fail with a small error and I do not know where it is coming from. So my ask is that you point out what could go wrong for these tests, or where I've made an error in the rewrite. Thank you in advance.
The test cases where errors occur are 1, 21 and 25.
1.
Input:
16UwLL9Risc3QfPqBUvKofHmBQ7wMtjvM
Output:
000966776006953D5567439E5E39F86A0D273BEED61967F6
Should be:
00010966776006953D5567439E5E39F86A0D273BEED61967F6
21.
Input:
1v3VUYGogXD7S1E8kipahj7QXgC568dz1
Output:
0008201462985DF5255E4A6C9D493C932FAC98EF791E2F22
Should be:
000A08201462985DF5255E4A6C9D493C932FAC98EF791E2F22
25.
Input:
1axVFjCkMWDFCHjQHf99AsszXTuzxLxxg
Output:
006C0B8995C7464E89F6760900EA6978DF18157388421561
Should be:
00066C0B8995C7464E89F6760900EA6978DF18157388421561
In your for-loop:
for (int k = b256.Length - 1; k > 0; k--)
The loop condition should be k >= 0 so that you don't skip the first byte in b256.
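Because index 0 is skipped, the most significant base-256 digit is dropped whenever the decoded number is long enough to reach it, which is exactly what happens in tests 1, 21 and 25. With the fix, the inner part of your while-loop reads:
int carry = Base58characters.IndexOf(source[i]);
for (int k = b256.Length - 1; k >= 0; k--) // k >= 0, so index 0 is included
{
    carry += 58 * b256[k];
    b256[k] = (byte)(carry % 256);
    carry /= 256;
}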
Based on an online CRC calculation, when I enter the hex string data
503002080000024400003886030400000000010100
I get the result CRC-CCITT (0xFFFF) =
0x354E (expected result).
I use the code below, but the result of CalcCRC16() is 0xACEE. What is missing in the script below?
using System;
using System.Windows.Forms;
using System.Runtime.Remoting.Metadata.W3cXsd2001;
using System.Diagnostics;
namespace WindowsFormsApplication1 {
public partial class Form1 : Form {
public Form1() {
InitializeComponent();
}
private void Form1_Load(object sender, EventArgs e) {
string result = CalcCRC16("503002080000024400003886030400000000010100");
Debug.Print(result);
// result = ACEE
// result expected = 354E
}
// CRC-CCITT (0xFFFF) with poly 0x1021
// input (hex string) = "503002080000024400003886030400000000010100"
// result expected (hex string) = "354E"
public string CalcCRC16(string strInput) {
ushort temp = 0;
ushort crc = 0xFFFF;
byte[] bytes = GetBytesFromHexString(strInput);
for (int j = 0; j < bytes.Length; j++) {
crc = (ushort)(crc ^ bytes[j]);
for (int i = 0; i < 8; i++) {
if ((crc & 0x0001) == 1)
crc = (ushort)((crc >> 1) ^ 0x1021);
else
crc >>= 1;
}
}
crc = (ushort)~(uint)crc;
temp = crc;
crc = (ushort)((crc << 8) | (temp >> 8 & 0xFF));
return crc.ToString("X4");
}
public Byte[] GetBytesFromHexString(string strInput) {
Byte[] bytArOutput = new Byte[] { };
if (!string.IsNullOrEmpty(strInput) && strInput.Length % 2 == 0) {
SoapHexBinary hexBinary = null;
try {
hexBinary = SoapHexBinary.Parse(strInput);
if (hexBinary != null)
bytArOutput = hexBinary.Value;
}
catch (Exception ex) {
MessageBox.Show(ex.Message);
}
}
return bytArOutput;
}
}
}
I found the answer and I will share it here; it may be useful to others.
strInput = 503002080000024400003886030400000000010100
initial = 0xFFFF
poly = 0x1021
strOutput = 354E
reference = Online CRC Calc
public string CalcCRC16(string strInput) {
ushort crc = 0xFFFF;
byte[] data = GetBytesFromHexString(strInput);
for (int i = 0; i < data.Length; i++) {
crc ^= (ushort)(data[i] << 8);
for (int j = 0; j < 8; j++) {
if ((crc & 0x8000) > 0)
crc = (ushort)((crc << 1) ^ 0x1021);
else
crc <<= 1;
}
}
return crc.ToString("X4");
}
public Byte[] GetBytesFromHexString(string strInput) {
Byte[] bytArOutput = new Byte[] { };
if (!string.IsNullOrEmpty(strInput) && strInput.Length % 2 == 0) {
SoapHexBinary hexBinary = null;
try {
hexBinary = SoapHexBinary.Parse(strInput);
if (hexBinary != null) {
bytArOutput = hexBinary.Value;
}
}
catch (Exception ex) {
MessageBox.Show(ex.Message);
}
}
return bytArOutput;
}
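If this runs often, a table-driven variant of the same CRC (one table lookup per byte instead of eight shifts) may be worth it. Here is a sketch, assuming the same polynomial 0x1021 and initial value 0xFFFF; the names Crc16Table, BuildCrc16Table and CalcCRC16Table are only illustrative:
static readonly ushort[] Crc16Table = BuildCrc16Table();
static ushort[] BuildCrc16Table() {
    var table = new ushort[256];
    for (int n = 0; n < 256; n++) {
        // Run the 8 shift/XOR steps once per possible top byte.
        ushort crc = (ushort)(n << 8);
        for (int bit = 0; bit < 8; bit++)
            crc = (crc & 0x8000) != 0 ? (ushort)((crc << 1) ^ 0x1021) : (ushort)(crc << 1);
        table[n] = crc;
    }
    return table;
}
public string CalcCRC16Table(string strInput) {
    ushort crc = 0xFFFF;
    foreach (byte b in GetBytesFromHexString(strInput))
        crc = (ushort)((crc << 8) ^ Crc16Table[((crc >> 8) ^ b) & 0xFF]);
    // Same result as the bit-by-bit version above, e.g. "354E" for the sample input.
    return crc.ToString("X4");
}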
Here's an example which works in my application.
I struggled somewhat; now I know it's because I had to use char pointers instead of 16-bit int pointers (CRC-CCITT takes the data one byte at a time, MSB first: we pick 1 byte from the buffer and shift it left 8 times to make it 16-bit, so the upper MSB bit 0x8000 can be tested).
Most causes found when people struggle with a 16-bit CRC (while 8-bit usually works):
The buffer should be addressed through an 8-bit pointer.
Shift BEFORE the XOR!
Never use int or unsigned int, but use short! My application runs on 16- and 32-bit Microchip PICs, and using ints gives 16-bit values on the 16-bit PIC and 32-bit values (so lots of zeros!) on the 32-bit platform.
BOOL = unsigned char.
UINT16 = unsigned short.
The function does one step per call (so there is no blocking while/for loop inside it).
When done, the CRC is copied to the address pointed to by *crc.
This way all other tasks (M95 modem, MCP's I2C, flash logs, TCP/IP etc.) are handled without too large delays.
BOOL CRC_16(UINT16 ui16_Bytes, char *src, UINT16 *crc)
{
static BOOL bNew = FALSE;
static UINT16 remainder = 0;
static UINT16 i = 0;
static UINT16 ui16_Loc_bytes;
static char *ptr;
static char locData;
if(!bNew)
{
ui16_Loc_bytes = ui16_Bytes;
ptr = src;
locData = *ptr;
i = 8;
remainder = 0x0000;
bNew = TRUE;
}
if(ui16_Loc_bytes)
{
if(i == 8)
{
remainder ^= (((UINT16)locData)<<8); //Only 8bits at a time filled with zeros
}
if(i)
{
if (remainder & 0x8000)
{
remainder = (remainder << 1);
remainder ^= POLYNOMIAL_16;
}
else
{
remainder = (remainder << 1);
}
i--;
}
else
{
ui16_Loc_bytes--;
ptr++;
locData = *ptr;
//ptr++;
i = 8;
}
}
else
{
bNew = FALSE;
*crc = remainder;
return TRUE;
}
return FALSE;
}
if(SDKaart.ui16_RecBytes >= SDKaart.ui16_ByteLen)//30-5-2018 edited SDKaart.CMD[SDKaart.ui8_ActiefCMD].ui16_RecLen)
{
SD_DESELECT;
if(SDKaart.bInitReady && SDKaart.b_BlockRead)
{
if(CRC_16(512,(char*)&SDKaart.Mem_Block.SD_Buffer[0], &SDKaart.ui16_MemBlock_CRC))
{
if((((UINT16)SDKaart.Mem_Block.SD_Buffer[512]<<8)|(UINT16)SDKaart.Mem_Block.SD_Buffer[513]) == SDKaart.ui16_MemBlock_CRC)
{
SDKaart.bRXReady = TRUE;
SDKaart.TXStat = SPI_IDLE;
printf("CRC16 OK %x\r\n",SDKaart.ui16_MemBlock_CRC);
}
else
{
SDKaart.bRXReady = TRUE;
SDKaart.TXStat = SPI_IDLE;
printf("CRC16 %u != 0x%x 0x%x\r\n",SDKaart.ui16_MemBlock_CRC,SDKaart.Mem_Block.SD_Buffer[512], SDKaart.Mem_Block.SD_Buffer[513] );
}
//printf("CRC citt: %u\r\n", Calculate_CRC_CCITT((char *)&SDKaart.Mem_Block.SD_Buffer[0],512));
}
}
else
{
SDKaart.bRXReady = TRUE;
SDKaart.TXStat = SPI_IDLE;
}
}
else
{
if(SD_SPI_TX_READY)
{
SDKaart.bNewSPIByte = TRUE;
SPI1BUF = SD_EMPTY_BYTE;
}
}
I have used many CRCs found online, but a lot of them didn't work.
Be aware that a lot of online "examples" do the <<1 after the XOR, but it must be done before the XOR.
POLYNOMIAL_16 is 0x1021.
The next opportunity is to build a table-driven version. :)
Greetz, John
I use Visual Studio 2010 and C# to read a Gmail inbox using IMAP. It works like a charm, but I think Unicode is not fully supported, as I cannot get Persian (Farsi) strings easily.
For instance I have the string سلام, but IMAP gives me "=?utf-8?B?2LPZhNin2YU=?=".
How can I convert it back to the original string? Any tips on converting UTF-8 to a string?
Let's have a look at the meaning of the MIME encoding:
=?utf-8?B?...something...?=
  ^     ^
  |     +--- The bytes are Base64 encoded
  |
  +--------- The string is UTF-8 encoded
So, to decode this, take the ...something... out of your string (2LPZhNin2YU= in your case) and then
reverse the Base64 encoding
var bytes = Convert.FromBase64String("2LPZhNin2YU=");
interpret the bytes as a UTF8 string
var text = Encoding.UTF8.GetString(bytes);
text should now contain the desired result.
A description of this format can be found in Wikipedia:
http://en.wikipedia.org/wiki/MIME#Encoded-Word
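Putting both steps together for the string from the question (a minimal sketch without error handling, assuming a well-formed encoded-word):
string encodedWord = "=?utf-8?B?2LPZhNin2YU=?=";
string payload = encodedWord.Split('?')[3];        // "2LPZhNin2YU="
byte[] bytes = Convert.FromBase64String(payload);  // undo the Base64 step
string text = Encoding.UTF8.GetString(bytes);      // "سلام"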
What you have is a MIME encoded string. .NET does not include libraries for MIME decoding, but you can either implement this yourself or use a library.
Here it is:
public static string Decode(string s)
{
return String.Join("", Regex.Matches(s ?? "", @"(?:=\?)([^\?]+)(?:\?B\?)([^\?]*)(?:\?=)").Cast<Match>().Select(m =>
{
string charset = m.Groups[1].Value;
string data = m.Groups[2].Value;
byte[] b = Convert.FromBase64String(data);
return Encoding.GetEncoding(charset).GetString(b);
}));
}
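For example:
string text = Decode("=?utf-8?B?2LPZhNin2YU=?=");
// text == "سلام"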
The following method decodes strings like "=?utf-8?B?..." or "=?utf-8?Q?..." into a normal string. The encoding (like "utf-8") is selected automatically. Regular expressions are not used.
public static string DecodeQuotedPrintables(string InputText)
{
var ResultChars = new List<char>();
Encoding encoding;
for (int i= 0; i < InputText.Length; i++)
{
var CurrentChar = InputText[i];
switch (CurrentChar)
{
case '=':
if((i + 1) < InputText.Length && InputText[i+1] == '?')
{
// Encoding
i += 2;
int StIndex = InputText.IndexOf('?', i);
int SubStringLength = StIndex - i;
string encodingName = InputText.Substring(i, SubStringLength);
encoding = Encoding.GetEncoding(encodingName);
i += SubStringLength + 1;
//Subencoding
StIndex = InputText.IndexOf('?', i);
SubStringLength = StIndex - i;
string SubEncoding = InputText.Substring(i, SubStringLength);
i += SubStringLength + 1;
//Text message
StIndex = InputText.IndexOf("?=", i);
SubStringLength = StIndex - i;
string Message = InputText.Substring(i, SubStringLength);
i += SubStringLength + 1;
// encoding
switch (SubEncoding)
{
case "B":
var base64EncodedBytes = Convert.FromBase64String(Message);
ResultChars.AddRange(encoding.GetString(base64EncodedBytes).ToCharArray());
// skip space #1
if ((i + 1) < InputText.Length && InputText[i + 1] == ' ')
{
i++;
}
break;
case "Q":
var CharByteList = new List<byte>();
for (int j = 0; j < Message.Length; j++)
{
var QChar = Message[j];
switch (QChar)
{
case '=':
j++;
string HexString = Message.Substring(j, 2);
byte CharByte = Convert.ToByte(HexString, 16);
CharByteList.Add(CharByte);
j += 1;
break;
default:
// Decode charbytes #1
if (CharByteList.Count > 0)
{
var CharString = encoding.GetString(CharByteList.ToArray());
ResultChars.AddRange(CharString.ToCharArray());
CharByteList.Clear();
}
ResultChars.Add(QChar);
break;
}
}
// Decode charbytes #2
if (CharByteList.Count > 0)
{
var CharString = encoding.GetString(CharByteList.ToArray());
ResultChars.AddRange(CharString.ToCharArray());
CharByteList.Clear();
}
// skip space #2
if ((i + 1) < InputText.Length && InputText[i + 1] == ' ')
{
i++;
}
break;
default:
throw new NotSupportedException($"Decode quoted printables: unsupported subencoding: '{SubEncoding}'");
}
}
else
ResultChars.Add(CurrentChar);
break;
default:
ResultChars.Add(CurrentChar);
break;
}
}
return new string(ResultChars.ToArray());
}
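Example usage with the string from the question:
string decoded = DecodeQuotedPrintables("=?utf-8?B?2LPZhNin2YU=?=");
// decoded == "سلام"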
I have a simple function that grabs the hard drive serial number from the C:\ drive and puts it into a string:
ManagementObject disk = new ManagementObject("win32_logicaldisk.deviceid=\"C:\"");
disk.Get();
string hdStr = Convert.ToString(disk["VolumeSerialNumber"]);
I'm then attempting to convert the string above into ASCII and write it out to a binary file. The issue I'm having is that when I convert this string, save the file using the writer, and open the file in a hex editor, I see more bytes than I originally wanted to write. So for example "16342D1F4A61BC"
will come out as: 08 16 34 2d 1f 4a 61 c2 bc
It's adding the 08 and c2 in there somehow...
The more complete version is as follows:
string constructor2 = "16342D1F4A61BC";
string StrValue = "";
while (constructor2.Length > 0)
{
StrValue += System.Convert.ToChar(System.Convert.ToUInt32(constructor2.Substring(0, 2), 16)).ToString();
// Remove from the hex object the converted value
constructor2 = constructor2.Substring(2, constructor2.Length - 2);
}
FileStream writeStream;
try
{
writeStream = new FileStream(Path.GetDirectoryName(Application.ExecutablePath) + "\\license.mgr", FileMode.Create);
BinaryWriter writeBinay = new BinaryWriter(writeStream);
writeBinay.Write(StrValue);
writeBinay.Close();
}
catch (Exception ex)
{
MessageBox.Show(ex.ToString());
}
Can anyone help me understand how these are getting added in?
The extra bytes come from BinaryWriter.Write(string): it first writes a length prefix (the 08 is the byte count of the encoded string), and it encodes the string as UTF-8, so the char 0xBC becomes the two bytes C2 BC. Write raw bytes instead of a string. Try this:
string constructor2 = "16342D1F4A61BC";
File.WriteAllBytes("test.bin", ToBytesFromHexa(constructor2));
With the following helper routines:
public static byte[] ToBytesFromHexa(string text)
{
if (text == null)
throw new ArgumentNullException("text");
List<byte> bytes = new List<byte>();
bool low = false;
byte prev = 0;
for (int i = 0; i < text.Length ; i++)
{
byte b = GetHexaByte(text[i]);
if (b == 0xFF)
continue;
if (low)
{
bytes.Add((byte)(prev * 16 + b));
}
else
{
prev = b;
}
low = !low;
}
return bytes.ToArray();
}
public static byte GetHexaByte(char c)
{
if ((c >= '0') && (c <= '9'))
return (byte)(c - '0');
if ((c >= 'A') && (c <= 'F'))
return (byte)(c - 'A' + 10);
if ((c >= 'a') && (c <= 'f'))
return (byte)(c - 'a' + 10);
return 0xFF;
}
Try using System.Text.Encoding.ASCII.GetBytes(hdStr) to get the bytes that represent the string in ASCII.
How important is endian-ness to you in the file?
Perhaps you can use something like:
byte[] b = BitConverter.GetBytes(Convert.ToUInt32(hdStr, 16));