I have array links. Each of link presents one XML file. How can I iterate each XML and save to folder with one call.
GetMesssageAttachments(userId) return array 6 links, but current code save only first file. What's wrong here? Thanks
public async void SaveXMLMessages(string userId)
{
try
{
if (_responseMessage.IsSuccessStatusCode)
{
string messagesFolder = #"C:\XMLMessages";
Directory.CreateDirectory(messagesFolder);
string messageFileName = Path.GetRandomFileName();
string messagesPath = Path.Combine(messagesFolder, messageFileName);
foreach (string xmlMessage in await GetMesssageAttachments(userId))
{
var xmlMessageResponse = await _client.GetAsync(xmlMessage);
using (FileStream fileStream = new FileStream(messagesPath, FileMode.Create))
{
await xmlMessageResponse.Content.CopyToAsync(fileStream);
}
}
}
}
catch (Exception e)
{
throw e.InnerException;
}
}
UPDATED
This is work..
public async void SaveXMLMessages(string userId)
{
try
{
if (_responseMessage.IsSuccessStatusCode)
{
string messagesFolder = #"C:\XMLMessages";
Directory.CreateDirectory(messagesFolder);
foreach (string xmlMessage in await GetMesssageAttachments(userId))
{
string messageFileName = Path.GetRandomFileName();
string messagesPath = Path.Combine(messagesFolder, messageFileName);
var xmlMessageResponse = await _client.GetAsync(xmlMessage);
using (FileStream fileStream = new FileStream(messagesPath, FileMode.Create))
{
await xmlMessageResponse.Content.CopyToAsync(fileStream);
}
}
}
}
catch (Exception e)
{
throw e.InnerException;
}
}
Same messagesPath used in foreach. That means there is only one file created in loop
You must reinitialize it in the loop like this:
foreach (string xmlMessage in await GetMesssageAttachments(userId))
{
string messageFileName = Path.GetRandomFileName();
string messagesPath = Path.Combine(messagesFolder, messageFileName);
var xmlMessageResponse = await _client.GetAsync(xmlMessage);
using (FileStream fileStream = new FileStream(messagesPath, FileMode.Create))
{
await xmlMessageResponse.Content.CopyToAsync(fileStream);
}
}
Maybe you are overwriting files in each iteration, try moving this block to inside your foreach:
string messageFileName = Path.GetRandomFileName();
string messagesPath = Path.Combine(messagesFolder, messageFileName);
Only small change in 'thierry v' code and what rsb55 is saying is right. Your code should look like as below
foreach (string xmlMessage in await GetMesssageAttachments(userId))
{
string messageFileName = Path.GetRandomFileName();
string messagesPath = Path.Combine(messagesFolder, messageFileName);
var xmlMessageResponse = await _client.GetAsync(xmlMessage);
using (FileStream fileStream = new FileStream(messagesPath, FileMode.Create))
{
await xmlMessageResponse.Content.CopyToAsync(fileStream);
}
}
Related
I am trying to save data from public Web API to a txt file. However, it seems that somwhere here
using (var fs = FileService.CreateFile("filename.txt"))
{
// Add some text to file
var title = new UTF8Encoding(true).GetBytes(strContent);
fs.WriteAsync(title, 0, strContent.Length);
}
I am making a mistakes as I am missing some data at the end.
public void GetData()
{
var path = "https://www.cnb.cz/cs/financni-trhy/devizovy-trh/kurzy-devizoveho-trhu/kurzy-devizoveho-trhu/denni_kurz.txt";
string strContent;
var webRequest = WebRequest.Create(path);
using (var response = webRequest.GetResponse())
using(var content = response.GetResponseStream())
using(var reader = new StreamReader(content))
{
strContent = reader.ReadToEnd();
}
using (var fs = FileService.CreateFile("filename.txt"))
{
// Add some text to file
var title = new UTF8Encoding(true).GetBytes(strContent);
fs.WriteAsync(title, 0, strContent.Length);
}
var file = File.ReadAllLines(FileService.ReturnBinLocation("filename.txt"));
var results = new List<string>();
for (var a = 0; a < file.Length; a++)
{
results.Add(file[a]);
File.WriteAllLines(data, results);
}
var sub2 = File.ReadAllText(data);
sub2 = sub2.Replace('\n', '|').TrimEnd('|');
var split = sub2.Split('|');
var list = new List<DailyCourse>();
var i= 0;
do
{
var model = new DailyCourse();
model.Country = split[i]; i++;
model.Currency = split[i]; i++;
model.Amount = split[i]; i++;
model.Code = split[i]; i++;
model.Course = split[i]; i++;
list.Add(model);
} while ( i < split.Length);
var json = JsonSerializer.Serialize(list);
}
public static class FileService
{
public static FileStream CreateFile(string fileName)
{
var wholePath = ReturnBinLocation(fileName);
if (File.Exists(wholePath))
{
File.Delete(wholePath);
}
return File.Create(wholePath);
}
public static string ReturnBinLocation( string fileName)
{
var binPath = Path.GetDirectoryName(System.Reflection.Assembly.GetExecutingAssembly().GetName().CodeBase );
var wholePath = Path.Combine(binPath, fileName);
int endIndex = wholePath. Length - 5;
var sub = wholePath.Substring(5, endIndex);
return sub;
}
}
I have actually found out that it was this "Encoding.UTF8.GetBytes"when I switched it to Encoding.ASCII.GetBytes. It worked
I have to create a zip file from set of urls. and it should have a proper folder structure.
So i tried like
public async Task<byte[]> CreateZip(Guid ownerId)
{
try
{
string startPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "zipFolder");//base folder
if (Directory.Exists(startPath))
{
DeleteAllFiles(startPath);
Directory.Delete(startPath);
}
Directory.CreateDirectory(startPath);
string zipPath = Path.Combine(AppDomain.CurrentDomain.BaseDirectory, $"{ownerId.ToString()}"); //folder based on ownerid
if (Directory.Exists(zipPath))
{
DeleteAllFiles(zipPath);
Directory.Delete(zipPath);
}
Directory.CreateDirectory(zipPath);
var attachemnts = await ReadByOwnerId(ownerId);
attachemnts.Data.ForEach(i =>
{
var fileLocalPath = $"{startPath}\\{i.Category}";
if (!Directory.Exists(fileLocalPath))
{
Directory.CreateDirectory(fileLocalPath);
}
using (var client = new WebClient())
{
client.DownloadFile(i.Url, $"{fileLocalPath}//{i.Flags ?? ""}_{i.FileName}");
}
});
var zipFilename = $"{zipPath}//result.zip";
if (File.Exists(zipFilename))
{
File.Delete(zipFilename);
}
ZipFile.CreateFromDirectory(startPath, zipFilename, CompressionLevel.Fastest, true);
var result = System.IO.File.ReadAllBytes(zipFilename);
return result;
}
catch (Exception ex)
{
var a = ex;
return null;
}
}
currently im writing all files in my base directory(may be not a good idea).corrently i have to manually delete all folders and files to avoid exception/unwanted files. Can everything be written in memory?
What changes required to write all files and folder structure in memory?
No you can't. Not with the built in Dotnet any way.
As per my comment I would recommend storing the files in a custom location based on a Guid or similar. Eg:
"/xxxx-xxxx-xxxx-xxxx/Folder-To-Zip/....".
This would ensure you could handle multiple requests with the same files or similar file / folder names.
Then you just have to cleanup and delete the folder again afterwards so you don't run out of space.
Hope the below code does the job.
public async Task<byte[]> CreateZip(Guid ownerId)
{
try
{
string startPath = Path.Combine(Path.GetTempPath(), $"{Guid.NewGuid()}_zipFolder");//folder to add
Directory.CreateDirectory(startPath);
var attachemnts = await ReadByOwnerId(ownerId);
attachemnts.Data = filterDuplicateAttachments(attachemnts.Data);
//filtering youtube urls
attachemnts.Data = attachemnts.Data.Where(i => !i.Flags.Equals("YoutubeUrl", StringComparison.OrdinalIgnoreCase)).ToList();
attachemnts.Data.ForEach(i =>
{
var fileLocalPath = $"{startPath}\\{i.Category}";
if (!Directory.Exists(fileLocalPath))
{
Directory.CreateDirectory(fileLocalPath);
}
using (var client = new WebClient())
{
client.DownloadFile(i.Url, $"{fileLocalPath}//{i.Flags ?? ""}_{i.FileName}");
}
});
using (var ms = new MemoryStream())
{
using (var zipArchive = new ZipArchive(ms, ZipArchiveMode.Create, true))
{
System.IO.DirectoryInfo di = new DirectoryInfo(startPath);
var allFiles = di.GetFiles("",SearchOption.AllDirectories);
foreach (var attachment in allFiles)
{
var file = File.OpenRead(attachment.FullName);
var type = attachemnts.Data.Where(i => $"{ i.Flags ?? ""}_{ i.FileName}".Equals(attachment.Name, StringComparison.OrdinalIgnoreCase)).FirstOrDefault();
var entry = zipArchive.CreateEntry($"{type.Category}/{attachment.Name}", CompressionLevel.Fastest);
using (var entryStream = entry.Open())
{
file.CopyTo(entryStream);
}
}
}
var result = ms.ToArray();
return result;
}
}
catch (Exception ex)
{
var a = ex;
return null;
}
}
I made a method which can scan an ftp server. It has two parts in an if...else statement. The first part in the if runs when the list of directories equals to 0, and separate the folders from the files( and put them into that list). Then(because the list is no more empty) the else statement should runs. It has a foreach loop which check all elements of the list and concat them to the ftp address and scan that folder. And here's the problem. Looks like it turn into an infinite loop. I only want to check the folders on the server and break the loop but looks like I cannot find a useful solution.
Here's the code:
internal void ListFilesOnServer()
{
ArrayList files = new ArrayList();
if (directories.Count == 0)
{
try
{
FtpWebRequest ftpwrq = (FtpWebRequest)WebRequest.Create(server);
ftpwrq.Credentials = new NetworkCredential(user, passw);
ftpwrq.Method = WebRequestMethods.Ftp.ListDirectory;
ftpwrq.KeepAlive = false;
FtpWebResponse fresponse = (FtpWebResponse)ftpwrq.GetResponse();
StreamReader sr = new StreamReader(fresponse.GetResponseStream());
string temp = "";
while ((temp = sr.ReadLine()) != null)
{
files.Add(temp);
}
temp = String.Empty;
sr.Close();
fresponse.Close();
DirOrFile(files);
}
catch (Exception e)
{
MessageBox.Show(e.Message);
}
}
else
{
foreach (string file in directories.ToArray())
{
try
{
FtpWebRequest ftpwrq = (FtpWebRequest)WebRequest.Create(server+"/"+file);
ftpwrq.Credentials = new NetworkCredential(user, passw);
ftpwrq.Method = WebRequestMethods.Ftp.ListDirectory;
ftpwrq.KeepAlive = false;
FtpWebResponse fresponse = (FtpWebResponse)ftpwrq.GetResponse();
StreamReader sr = new StreamReader(fresponse.GetResponseStream());
string temp = "";
while ((temp = sr.ReadLine()) != null)
{
files.Add(temp);
}
temp = String.Empty;
sr.Close();
fresponse.Close();
DirOrFile(files);
}
catch(ArgumentException)
{
}
catch (Exception e)
{
MessageBox.Show(e.Message);
}
}
}
ListFilesOnServer();
}
The infinite loop is because you don't have a way to break out of the recursion.
The pattern for recursion looks like this
MyRecursiveMethod()
{
if (conditions)
{
}
else
{
MyRecursiveMethod()
}
}
Here's how I might go about rewriting this code to make it work for you.
using System;
using System.Collections.Generic;
using System.IO;
using System.Net;
using System.Security;
using System.Windows.Forms;
namespace ConsoleApplication5
{
public class FtpTest
{
string server = "servier/";
string user = "user";
SecureString passw = new SecureString();
public List<string> GetFilesOnServer()
{
return GetFilesOnServer(server);
}
public List<string> GetFilesOnServer(string dir)
{
var root = GetDirectoryContents(dir);
var files = new List<string>();
foreach (string name in root)
{
var path = GetFullPath(dir, name);
if (IsDirectory(name))
{
var subFiles = GetFilesOnServer(path);
files.AddRange(subFiles);
}
else
{
files.Add(path);
}
}
return files;
}
public List<string> GetDirectoryContents(string dir)
{
var files = new List<string>();
try
{
var ftpwrq = (FtpWebRequest)WebRequest.Create(dir);
ftpwrq.Credentials = new NetworkCredential(user, passw);
ftpwrq.Method = WebRequestMethods.Ftp.ListDirectory;
ftpwrq.KeepAlive = false;
var fresponse = (FtpWebResponse)ftpwrq.GetResponse();
var sr = new StreamReader(fresponse.GetResponseStream());
string fileName = "";
while ((fileName = sr.ReadLine()) != null)
{
files.Add(fileName);
}
sr.Close();
fresponse.Close();
return files;
}
catch (ArgumentException)
{
}
catch (Exception e)
{
MessageBox.Show(e.Message);
}
return files;
}
public static string GetFullPath(string dir, string file)
{
string path = dir;
if (!path.EndsWith("/"))
path += "/";
path += file;
return path;
}
public static bool IsDirectory(string name)
{
return name.IndexOf(".") > 0;
}
}
}
Note that I only recursively call into GetFilesOnServer if the item retrieved is a directory. I've also refactored out the code that grabs the contents on the FTP server into a non-recursive method.
Hope this helps you out.
I'm trying to make an epub parsing app in a Windows Store with C#, and it won't wait for the archive (epubs are actually zip files) to finish extracting before it tries to parse the not-yet-existing table of contents. How do I make my app be a bit more patient?
I've tried making my UnZip() function return a task and having the epub constructor (epub is a class) use UnZip().Wait(), but that just freezes the app. What do I do?
Edit: Here's my relevant code:
public class epub
{
public string filename;
private StorageFolder unzipFolder;
private IList<epubChapter> _contents;
private bool _parsed = false;
public bool parsed { get { return _parsed; } } //Epub and contents are fully parsed
public epub(string newFilename)
{
_contents = new List<epubChapter>();
filename = newFilename;
UnZipFile().Wait();
getTableOfContents();
}
private async Task UnZipFile()
{
var sourceFolder = Windows.ApplicationModel.Package.Current.InstalledLocation;
StorageFolder localFolder = ApplicationData.Current.LocalFolder;
unzipFolder = await localFolder.CreateFolderAsync(filename, CreationCollisionOption.OpenIfExists);
using (var zipStream = await sourceFolder.OpenStreamForReadAsync(filename))
{
using (MemoryStream zipMemoryStream = new MemoryStream((int)zipStream.Length))
{
await zipStream.CopyToAsync(zipMemoryStream);
using (var archive = new ZipArchive(zipMemoryStream, ZipArchiveMode.Read))
{
foreach (ZipArchiveEntry entry in archive.Entries)
{
if (entry.Name != "")
{
using (Stream fileData = entry.Open())
{
try
{
await unzipFolder.GetFileAsync(entry.Name);
Debug.WriteLine("File at {0} already exists", entry.Name);
continue;
}
catch (FileNotFoundException)
{
Debug.WriteLine("Creating file {0}", entry.Name);
}
StorageFile outputFile = await unzipFolder.CreateFileAsync(entry.Name, CreationCollisionOption.OpenIfExists);
//Debug.WriteLine("Output file created at {0}", outputFile.Path);
using (Stream outputFileStream = await outputFile.OpenStreamForWriteAsync())
{
await fileData.CopyToAsync(outputFileStream);
await outputFileStream.FlushAsync();
}
}
if (entry.Name == "toc.ncx")
{
Debug.WriteLine("toc.ncx found in epub file; parsing it");
getTableOfContents();
}
}
}
}
}
}
}
public void getTableOfContents()
{
string contentsPath = unzipFolder.Path + #"\toc.ncx"; //The file is always called this in valid epubs
try
{
XDocument toc = XDocument.Load(contentsPath);
string nameSpace = getNameSpace(toc);
XElement navMap = firstElementNamed(toc.Root, "navMap");
parseNavPoints(navMap, nameSpace, 0);
_parsed = true;
}
catch(FileNotFoundException)
{
Debug.WriteLine("File toc.ncx was not found!");
}
}
Basically, your question seems to be: How do I call an async method from a constructor?
The short answer is that you don't, instead create an async factory method for your class.
Longer answer: As you noticed, if you call Wait(), your code will block. You can't use await, because constructors can't be async. And if you don't do anything, the constructor is going to return too early.
The solution here is to use an async factory method instead of a constructor. Something like:
private epub(string newFilename)
{
_contents = new List<epubChapter>();
filename = newFilename;
}
public static async Task<epub> CreateAsync(string newFilename)
{
var result = new epub(newFilename);
await result.UnZipFile();
result.getTableOfContents();
return result;
}
For some more information and alternative solutions, see Stephen Cleary's article about async and contructors.
I am trying to download everything (documents, lists, folders) of a web and its sub-webs and its sub-webs (if exists) and so on, I can do it for a single web but its not working for subwebs in it, code is given below,
private void downloadList(SPObjectData objectData)
{
using (SPWeb currentWeb = objectData.Web)
{
foreach (SPList list in currentWeb.Lists)
{
foreach (SPFolder oFolder in list.Folders)
{
if (oFolder != null)
{
foreach (SPFile file in oFolder.files)
{
if (CreateDirectoryStructure(tbDirectory.Text, file.Url))
{
var filepath = System.IO.Path.Combine(tbDirectory.Text, file.Url);
byte[] binFile = file.OpenBinary();
System.IO.FileStream fstream = System.IO.File.Create(filepath);
fstream.Write(binFile, 0, binFile.Length);
fstream.Close();
}
}
}
}
}
}
}
That is because you want to do it recursively for the subwebs
foreach(SPWeb oWeb in currentWeb.Webs){
downloadList(oWeb); //use same logic you used above to get all the stuff from the sub web
}
So, it would be like this for your recursive method:
//notice I overloaded
private void downloadList(SPWeb oWeb){
//get subwebs of subwebs
foreach(SPWeb oWeb in currentWeb.Webs){
downloadList(oWeb);
}
foreach (SPList list in oWeb.Lists)
{
foreach (SPFolder oFolder in list.Folders)
{
if (oFolder != null)
{
foreach (SPFile file in oFolder.files)
{
if (CreateDirectoryStructure(tbDirectory.Text, file.Url))
{
var filepath = System.IO.Path.Combine(tbDirectory.Text, file.Url);
byte[] binFile = file.OpenBinary();
System.IO.FileStream fstream = System.IO.File.Create(filepath);
fstream.Write(binFile, 0, binFile.Length);
fstream.Close();
}
}
}
}
}
}
}