Embedded resources (.resx) are giving me trouble - memory consumption - C#

Consider the following class file that I use to access a .resx that contains multiple sound files with different file sizes.
using System;
using System.Collections.Generic;
using System.Drawing;
using System.IO;
using System.Reflection;
using System.Text;

namespace Resources
{
    public class ResManager
    {
        public static void SoundCopy(String ResourceName, String Dest)
        {
            try
            {
                var rm = Resources.Sounds.ResourceManager;
                var resStream = rm.GetStream(ResourceName);
                if (!File.Exists(Dest))
                {
                    File.Create(Dest).Close();
                }
                FileStream fs = new FileStream(Dest, FileMode.OpenOrCreate);
                CopyStream(resStream, fs);
                resStream.Close();
                resStream = null;
                fs.Close();
                fs = null;
                rm.ReleaseAllResources();
                rm = null;
            }
            catch (Exception e)
            {
                // nothing
            }
        } // SoundCopy()

        private static void CopyStream(Stream from, Stream to)
        {
            int bufSize = 1024, count;
            byte[] buffer = new byte[bufSize];
            count = from.Read(buffer, 0, bufSize);
            while (count > 0)
            {
                // StackOverflow -- working set rises here
                to.Write(buffer, 0, count);
                count = from.Read(buffer, 0, bufSize);
            }
            // clean up
            buffer = null;
        } // CopyStream()
    } // class
} // namespace
Whenever I copy a different file to the filesystem, the program keeps a certain amount of working set permanently allocated afterwards; copying the same file more than twice, however, doesn't add to it.
I am trying to keep my app efficient. I also tried a variant of this class that has to be instantiated to do the copy, set the reference to null afterwards, and waited 40 minutes to see whether the GC would do something, but it didn't.
Normally my app runs at a 29 MB working set, jumping to ~37 MB every 10 s. After I copied 3 files to the temp folder, the working set rose to 133 MB and stayed there during my 40-minute pause.
Additional note: I am targeting .NET 2.0.
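For reference, here is the same copy routine with both streams wrapped in using blocks. This is only a sketch (it assumes the same generated Resources.Sounds class): it guarantees the streams are disposed even if an exception is thrown, but on its own it does not change how ResourceManager caches the decoded resource set.
public static void SoundCopy(String ResourceName, String Dest)
{
    var rm = Resources.Sounds.ResourceManager;
    // GetStream returns a stream over the embedded sound data.
    using (Stream resStream = rm.GetStream(ResourceName))
    using (FileStream fs = new FileStream(Dest, FileMode.Create))
    {
        byte[] buffer = new byte[1024];
        int count;
        while ((count = resStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            fs.Write(buffer, 0, count);
        }
    }
    // Drop the cached ResourceSet so the loaded data becomes collectible.
    rm.ReleaseAllResources();
}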

Related

GzipStream blockwise compression results in a corrupted file

I am trying to implement block-by-block compression using the GZipStream class (.NET Core 3.1, Visual Studio 2019, console app; my OS is Windows 10). I believe this should be possible because a gzip file can consist of independent members placed one after another, as per the format specification. But the resulting files I get are corrupted. Here's the code:
using System;
using System.Globalization;
using System.IO;
using System.IO.Compression;

namespace GzipToMemoryStreamExample
{
    class Program
    {
        private static int blockSize;
        private static string sourceFileName = "e:\\SomeFolder\\SomeFile.ext";
        private static byte[] currentBlock;
        private static FileStream readingStream;
        private static FileStream writingStream;

        static void Main(string[] args)
        {
            Console.WriteLine("Enter block size:");
            string blockSizeStr = Console.ReadLine();
            int.TryParse(blockSizeStr, out blockSize);
            readingStream = new FileStream(sourceFileName, FileMode.Open);
            string resultingFileName = Path.ChangeExtension(sourceFileName, ".gz");
            CreateAndOpenResultingFile(resultingFileName);
            while (ReadBlock())
            {
                byte[] processedBlock = ProcessBlock(currentBlock);
                writingStream.Write(processedBlock, 0, processedBlock.Length);
            }
            readingStream.Dispose();
            writingStream.Dispose();
            Console.WriteLine("Finished.");
            Console.ReadKey();
        }

        private static bool ReadBlock()
        {
            bool result;
            int bytesRead;
            currentBlock = new byte[blockSize];
            bytesRead = readingStream.Read(currentBlock, 0, blockSize);
            result = bytesRead > 0;
            return result;
        }

        private static byte[] ProcessBlock(byte[] sourceData)
        {
            byte[] result;
            using (var outputStream = new MemoryStream())
            {
                using var compressionStream = new GZipStream(outputStream, CompressionMode.Compress);
                compressionStream.Write(sourceData, 0, sourceData.Length);
                result = outputStream.ToArray();
            }
            return result;
        }

        private static void CreateAndOpenResultingFile(string fileName)
        {
            if (File.Exists(fileName))
            {
                File.Delete(fileName);
            }
            writingStream = File.Create(fileName);
        }
    }
}
When I look at the resulting files I see that the result somehow depends on the block size I choose. If it's smaller than ~100 KB, each resulting "compressed" block is only 10 bytes, which leads to an extremely small, useless file. If the block size is greater than ~100 KB, the file becomes reasonably large, about 80% of the original, but it is still corrupted.
I also checked the member headers and they look strange: the OS byte is set to TOPS-20 (value 0x0a), and the ISIZE at the end of each member is always completely wrong.
What is my mistake?
It was solved simply by moving the result = outputStream.ToArray(); line out of the compressionStream using scope, as Mark Adler suggested in the comments: GZipStream only writes its buffered data and the gzip trailer (CRC32 and ISIZE) when it is closed/disposed, so calling ToArray() inside the using block captures an incomplete member.
private static byte[] ProcessBlock(byte[] sourceData)
{
    byte[] result;
    using (var outputStream = new MemoryStream())
    {
        using (var compressionStream = new GZipStream(outputStream, CompressionMode.Compress))
        {
            compressionStream.Write(sourceData, 0, sourceData.Length);
        }
        result = outputStream.ToArray();
    }
    return result;
}
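As a quick sanity check (a sketch, not part of the original answer; DecompressBlock is a hypothetical helper), each block produced by the fixed ProcessBlock should now decompress cleanly on its own:
private static byte[] DecompressBlock(byte[] compressedBlock)
{
    // Decompress a single gzip member back into its raw bytes.
    using (var input = new MemoryStream(compressedBlock))
    using (var gz = new GZipStream(input, CompressionMode.Decompress))
    using (var output = new MemoryStream())
    {
        gz.CopyTo(output);
        return output.ToArray();
    }
}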

C# Decompress .GZip to file

I have this code:
using System.IO;
using System.IO.Compression;
...
UnGzip2File("input.gz","output.xls");
which runs this procedure. It runs without error, but afterwards input.gz is empty and the created output.xls is also empty. At the start, input.gz was 12 MB. What am I doing wrong? Or do you have a better, working solution?
public static void UnGzip2File(string inputPath, string outputPath)
{
    FileStream inputFileStream = new FileStream(inputPath, FileMode.Create);
    FileStream outputFileStream = new FileStream(outputPath, FileMode.Create);
    using (GZipStream gzipStream = new GZipStream(inputFileStream, CompressionMode.Decompress))
    {
        byte[] bytes = new byte[4096];
        int n;
        // To be sure the whole file is correctly read,
        // you should call FileStream.Read in a loop,
        // even if in most cases the whole file is read in a single call.
        while ((n = gzipStream.Read(bytes, 0, bytes.Length)) != 0)
        {
            outputFileStream.Write(bytes, 0, n);
        }
    }
    outputFileStream.Dispose();
    inputFileStream.Dispose();
}
Opening the input FileStream with FileMode.Create will overwrite (truncate) the existing file, as documented here. This causes the file to be empty when you try to decompress it, which in turn leads to an empty output file.
Below is a working code sample. Note that it is async; this can be changed by leaving out async/await, calling the regular CopyTo method instead, and changing the return type to void.
public static async Task DecompressGZip(string inputPath, string outputPath)
{
    using (var input = File.OpenRead(inputPath))
    using (var output = File.OpenWrite(outputPath))
    using (var gz = new GZipStream(input, CompressionMode.Decompress))
    {
        await gz.CopyToAsync(output);
    }
}
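For completeness, the synchronous variant described above might look like this (a sketch; the name DecompressGZipSync is only illustrative):
public static void DecompressGZipSync(string inputPath, string outputPath)
{
    using (var input = File.OpenRead(inputPath))
    using (var output = File.OpenWrite(outputPath))
    using (var gz = new GZipStream(input, CompressionMode.Decompress))
    {
        // Same pattern as above, but blocking instead of async.
        gz.CopyTo(output);
    }
}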

Read last line in open file [duplicate]

This question already has answers here:
How to read a text file reversely with iterator in C#
(11 answers)
Closed 8 years ago.
I'm fairly new to all this, but I feel like I'm pretty close to making this work; I just need a little help! I want to create a DLL which can read and return the last line of a file that is open in another application. This is what my code looks like; I just don't know what to put inside the while loop.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.IO;

namespace SharedAccess
{
    public class ReadShare
    {
        static void Main(string path)
        {
            FileStream stream = File.Open(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite);
            StreamReader reader = new StreamReader(stream);
            while (!reader.EndOfStream)
            {
                //What goes here?
            }
        }
    }
}
To read the last line:
var lastLine = File.ReadLines("YourFileName").Last();
If it's a large file:
public static String ReadLastLine(string path)
{
    return ReadLastLine(path, Encoding.ASCII, "\n");
}

public static String ReadLastLine(string path, Encoding encoding, string newline)
{
    int charsize = encoding.GetByteCount("\n");
    byte[] buffer = encoding.GetBytes(newline);
    using (FileStream stream = new FileStream(path, FileMode.Open))
    {
        long endpos = stream.Length / charsize;
        // Scan backwards from the end of the file, one character at a time,
        // until the newline sequence is found.
        for (long pos = charsize; pos < endpos; pos += charsize)
        {
            stream.Seek(-pos, SeekOrigin.End);
            stream.Read(buffer, 0, buffer.Length);
            if (encoding.GetString(buffer) == newline)
            {
                // Everything after this position is the last line.
                buffer = new byte[stream.Length - stream.Position];
                stream.Read(buffer, 0, buffer.Length);
                return encoding.GetString(buffer);
            }
        }
    }
    return null;
}
I referred to this:
How to read only last line of big text file
File.ReadLines should work for you.
var value = File.ReadLines("yourFile.txt").Last();
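And, to fill in the asker's original loop directly, something like this could work (a sketch; ReadLastLineShared is an illustrative name, and it keeps the FileShare.ReadWrite open mode from the question — it reads through the whole file, so for very large files the seek-from-the-end approach above is preferable):
public static string ReadLastLineShared(string path)
{
    string lastLine = null;
    // FileShare.ReadWrite lets us read while another application has the file open.
    using (FileStream stream = File.Open(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
    using (StreamReader reader = new StreamReader(stream))
    {
        while (!reader.EndOfStream)
        {
            // Keep overwriting until the last line remains.
            lastLine = reader.ReadLine();
        }
    }
    return lastLine;
}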

Download file with progressbar

I want to download a file in a method, and then continue working with that file using some data that is stored in variables in the first method.
I know you can use DownloadFileAsync, but then I need to continue my work in the DownloadFileCompleted handler, and the variables can't be reached from there (unless I declare some global ones and use those instead, though I suppose that isn't the right way).
So I googled and found another way: downloading the file manually, piece by piece. That would suit me quite well. What I want to know is whether there are other methods or solutions to my problem that are simpler.
Or if you can play around with the events and achieve something that suits me better :)
Oh, and please change my question's title if you can think of a better one; I couldn't.
You have to do it piece by piece to update a progress bar. This code does the trick.
public class WebDownloader
{
    private static readonly ILog log = LogManager.GetLogger(typeof(WebDownloader));

    public delegate void DownloadProgressDelegate(int percProgress);

    public static void Download(string uri, string localPath, DownloadProgressDelegate progressDelegate)
    {
        long remoteSize;
        string fullLocalPath; // Full local path including file name if only a directory was provided.

        log.InfoFormat("Attempting to download file (Uri={0}, LocalPath={1})", uri, localPath);

        try
        {
            // Get the name of the remote file.
            Uri remoteUri = new Uri(uri);
            string fileName = Path.GetFileName(remoteUri.LocalPath);

            if (Path.GetFileName(localPath).Length == 0)
                fullLocalPath = Path.Combine(localPath, fileName);
            else
                fullLocalPath = localPath;

            // Have to get the size of the remote object through a web request,
            // as it is not available on remote files (although it does work on local files).
            using (WebResponse response = WebRequest.Create(uri).GetResponse())
            using (Stream stream = response.GetResponseStream())
                remoteSize = response.ContentLength;

            log.InfoFormat("Downloading file (Uri={0}, Size={1}, FullLocalPath={2}).",
                uri, remoteSize, fullLocalPath);
        }
        catch (Exception ex)
        {
            throw new ApplicationException(string.Format("Error connecting to URI (Exception={0})", ex.Message), ex);
        }

        int bytesRead = 0, bytesReadTotal = 0;

        try
        {
            using (WebClient client = new WebClient())
            using (Stream streamRemote = client.OpenRead(new Uri(uri)))
            using (Stream streamLocal = new FileStream(fullLocalPath, FileMode.Create, FileAccess.Write, FileShare.None))
            {
                byte[] byteBuffer = new byte[1024 * 1024 * 2]; // 2 MB buffer, although in testing reads only reached about 10 KB each.
                int perc = 0;
                while ((bytesRead = streamRemote.Read(byteBuffer, 0, byteBuffer.Length)) > 0)
                {
                    bytesReadTotal += bytesRead;
                    streamLocal.Write(byteBuffer, 0, bytesRead);
                    int newPerc = (int)((double)bytesReadTotal / (double)remoteSize * 100);
                    if (newPerc > perc)
                    {
                        log.InfoFormat("...Downloading (BytesRead={0}, Perc={1})...", bytesReadTotal, perc);
                        perc = newPerc;
                        if (progressDelegate != null)
                            progressDelegate(perc);
                    }
                }
            }
        }
        catch (Exception ex)
        {
            throw new ApplicationException(string.Format("Error downloading file (Exception={0})", ex.Message), ex);
        }

        log.InfoFormat("File successfully downloaded (Uri={0}, BytesDownloaded={1}/{2}, FullLocalPath={3}).",
            uri, bytesReadTotal, remoteSize, fullLocalPath);
    }
}
You will need to spin off a thread (or task) to run this code, as it is synchronous, e.g.:
Task.Factory.StartNew(() => Download(...));
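For example, from a WinForms form it might be wired up roughly like this (a sketch; the URL, target folder, and the progressBar1 control are assumptions, not part of the original answer):
Task.Factory.StartNew(() =>
    WebDownloader.Download(
        "http://example.com/file.zip",   // placeholder URL
        @"C:\Temp",                      // local directory; the file name is taken from the URI
        perc => progressBar1.Invoke((Action)(() => progressBar1.Value = perc))));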

.Net Zip Up files

What's the best way to zip up files using C#? Ideally I want to be able to put separate files into a single archive.
You can use DotNetZip to achieve this. It's free to use in any application.
Here's some sample code:
try
{
    // for easy disposal
    using (ZipFile zip = new ZipFile())
    {
        // add this map file into the "images" directory in the zip archive
        zip.AddFile("c:\\images\\personal\\7440-N49th.png", "images");
        // add the report into a different directory in the archive
        zip.AddFile("c:\\Reports\\2008-Regional-Sales-Report.pdf", "files");
        zip.AddFile("ReadMe.txt");
        zip.Save("MyZipFile.zip");
    }
}
catch (System.Exception ex1)
{
    System.Console.Error.WriteLine("exception: " + ex1);
}
This is now built into the framework if you have version 4.5+: see the ZipArchive and ZipFile classes in System.IO.Compression. Otherwise, use Ionic (DotNetZip). The older System.IO.Packaging.ZipPackage class also exists.
See http://visualstudiomagazine.com/articles/2012/05/21/net-framework-gets-zip.aspx for a story.
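A minimal sketch with the built-in .NET 4.5+ classes (the paths are placeholders; it assumes references to System.IO.Compression and System.IO.Compression.FileSystem):
using System.IO.Compression;

// Zip everything in a folder into a single archive...
ZipFile.CreateFromDirectory(@"c:\ReportsToZip", @"c:\Reports.zip");

// ...or add individual files to a new archive.
using (ZipArchive archive = ZipFile.Open(@"c:\MyZipFile.zip", ZipArchiveMode.Create))
{
    archive.CreateEntryFromFile(@"c:\ReadMe.txt", "ReadMe.txt");
}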
Have you looked at SharpZipLib?
I believe you can build zip files with classes in the System.IO.Packaging namespace - but every time I've tried to look into it, I've found it rather confusing...
Take a look at this library:
http://www.icsharpcode.net/OpenSource/SharpZipLib/
It is pretty comprehensive, it deals with many formats, it is open source, and you can use it in closed-source commercial applications.
It is very simple to use:
byte[] data1 = new byte[...];
byte[] data2 = new byte[...];
/*...*/

var path = @"c:\test.zip";
var zip = new ZipOutputStream(new FileStream(path, FileMode.Create))
{
    IsStreamOwner = true
};
zip.PutNextEntry(new ZipEntry("File1.txt"));
zip.Write(data1, 0, data1.Length);
zip.PutNextEntry(new ZipEntry("File2.txt"));
zip.Write(data2, 0, data2.Length);
zip.Close();
zip.Dispose();
There are a few libraries around; the most popular are DotNetZip and SharpZipLib.
Hi, I created two methods with the SharpZipLib library (you can download it here: http://www.icsharpcode.net/opensource/sharpziplib/) that I would like to share. They are very easy to use: just pass the source and target paths (full paths including folder/file and extension). Hope it helps you!
//ALLYOURNAMESPACESHERE
using ...

//SHARPZIPLIB
using ICSharpCode.SharpZipLib.Zip;
using ICSharpCode.SharpZipLib.Zip.Compression.Streams;

public static class FileUtils
{
    /// <summary>
    ///
    /// </summary>
    /// <param name="sourcePath"></param>
    /// <param name="targetPath"></param>
    public static void ZipFile(string sourcePath, string targetPath)
    {
        string tempZipFilePath = targetPath;
        using (FileStream tempFileStream = File.Create(tempZipFilePath, 1024))
        {
            using (ZipOutputStream zipOutput = new ZipOutputStream(tempFileStream))
            {
                // Zip with highest compression.
                zipOutput.SetLevel(9);
                DirectoryInfo directory = new DirectoryInfo(sourcePath);
                foreach (System.IO.FileInfo file in directory.GetFiles())
                {
                    // Get local path and create a stream to it.
                    String localFilename = file.FullName;
                    // Ignore directories/folders.
                    // Ignore the Thumbs.db file, since it would probably throw an exception:
                    // another process could be using it, e.g. the Explorer.exe Windows process.
                    if (!file.Name.Contains("Thumbs") && !Directory.Exists(localFilename))
                    {
                        using (FileStream fileStream = new FileStream(localFilename, FileMode.Open, FileAccess.Read, FileShare.Read))
                        {
                            // Read the full stream into an in-memory buffer.
                            byte[] buffer = new byte[fileStream.Length];
                            fileStream.Read(buffer, 0, buffer.Length);
                            // Create a new entry for the current file.
                            ZipEntry entry = new ZipEntry(file.Name);
                            entry.DateTime = DateTime.Now;
                            // Set Size and the CRC, because this information
                            // should be stored in the header.
                            // If it is not set, it is automatically written in the footer
                            // (in this case size == crc == -1 in the header).
                            // Some ZIP programs have problems with zip files that don't store
                            // the size and CRC in the header.
                            entry.Size = fileStream.Length;
                            fileStream.Close();
                            // Update entry and write to the zip stream.
                            zipOutput.PutNextEntry(entry);
                            zipOutput.Write(buffer, 0, buffer.Length);
                            // Get rid of the buffer, because it
                            // has a huge impact on memory usage.
                            buffer = null;
                        }
                    }
                }
                // Finalize the zip output.
                zipOutput.Finish();
                // Flush, then close.
                zipOutput.Flush();
                zipOutput.Close();
            }
        }
    }

    public static void unZipFile(string sourcePath, string targetPath)
    {
        if (!Directory.Exists(targetPath))
            Directory.CreateDirectory(targetPath);
        using (ZipInputStream s = new ZipInputStream(File.OpenRead(sourcePath)))
        {
            ZipEntry theEntry;
            while ((theEntry = s.GetNextEntry()) != null)
            {
                if (theEntry.Name != String.Empty)
                {
                    using (FileStream streamWriter = File.Create(targetPath + "\\" + theEntry.Name))
                    {
                        int size = 2048;
                        byte[] data = new byte[2048];
                        while (true)
                        {
                            size = s.Read(data, 0, data.Length);
                            if (size > 0)
                            {
                                streamWriter.Write(data, 0, size);
                            }
                            else
                            {
                                break;
                            }
                        }
                    }
                }
            }
        }
    }
}
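Usage might look like this (a sketch; the folder and archive paths are placeholders):
// Zip every file in a folder into one archive, then extract it again.
FileUtils.ZipFile(@"C:\MyData", @"C:\MyData.zip");
FileUtils.unZipFile(@"C:\MyData.zip", @"C:\MyDataRestored");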
