How to set up a Web API controller for ZipFile - C#

I am trying to create and return a zip file with selected documents. The console shows that the selected DocumentIds are being sent from the Angular controller to the API, but I am getting a null error.
ApiController
public HttpResponseMessage Get(string[] id)
{
    List<Document> documents = new List<Document>();
    using (var context = new ApplicationDbContext())
    {
        Document document = context.Documents.Find(id);
        if (document == null)
        {
            throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.NotFound));
        }
        using (var zipFile = new ZipFile())
        {
            // Make zip file
            foreach (var d in documents)
            {
                var dt = d.DocumentDate.ToString("y").Replace('/', '-').Replace(':', '-');
                string fileName = String.Format("{0}-{1}-{2}.pdf", dt, d.PipeName, d.LocationAb);
                zipFile.AddEntry(fileName, d.DocumentUrl);
            }
            return ZipContentResult(zipFile);
        }
    }
}
protected HttpResponseMessage ZipContentResult(ZipFile zipFile)
{
    var pushStreamContent = new PushStreamContent((stream, content, context) =>
    {
        zipFile.Save(stream);
        stream.Close(); // After save we close the stream to signal that we are done writing.
    }, "application/zip");
    return new HttpResponseMessage(HttpStatusCode.OK) { Content = pushStreamContent };
}
UPDATE
public HttpResponseMessage Get([FromUri] string[] id)
{
    var documents = new List<Document>();
    using (var context = new ApplicationDbContext())
    {
        foreach (string doc in id)
        {
            Document document = context.Documents.Find(new object[] { doc });
            if (document == null)
            {
                throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.NotFound));
            }
            documents.Add(document);
        }
        using (var zipFile = new ZipFile())
        {
            // Make zip file
            foreach (var d in documents)
            {
                var dt = d.DocumentDate.ToString("y").Replace('/', '-').Replace(':', '-');
                string fileName = String.Format("{0}-{1}-{2}.pdf", dt, d.PipeName, d.LocationAb);
                zipFile.AddEntry(fileName, d.DocumentUrl);
            }
            return ZipContentResult(zipFile);
        }
    }
}
ERROR
{"The argument types 'Edm.Int32' and 'Edm.String' are incompatible for this operation. Near WHERE predicate, line 1, column 82."}
STACKTRACE
at System.Data.Entity.Internal.Linq.InternalSet`1.FindInStore(WrappedEntityKey key, String keyValuesParamName)
at System.Data.Entity.Internal.Linq.InternalSet`1.Find(Object[] keyValues)
at System.Data.Entity.DbSet`1.Find(Object[] keyValues)
at TransparentEnergy.ControllersAPI.apiZipPipeLineController.Get(String[] id) in e:\Development\TransparentEnergy\TransparentEnergy\ControllersAPI\BatchZipApi\apiZipPipeLineController.cs:line 25
at lambda_method(Closure , Object , Object[] )
at System.Web.Http.Controllers.ReflectedHttpActionDescriptor.ActionExecutor.<>c__DisplayClass10.<GetExecutor>b__9(Object instance, Object[] methodParameters)
at System.Web.Http.Controllers.ReflectedHttpActionDescriptor.ActionExecutor.Execute(Object instance, Object[] arguments)
at System.Web.Http.Controllers.ReflectedHttpActionDescriptor.ExecuteAsync(HttpControllerContext controllerContext, IDictionary`2 arguments, CancellationToken cancellationToken)
New screenshot of the watch window after changing string[] id to List<string> id

Unless you forgot to copy all the code into the question, you never add anything to the documents list object.
At the start you create a new List object named documents:
List<Document> documents = new List<Document>();
You then search for items and place them in a new document object:
Document document = context.Documents.Find(id);
Then, when attempting to make the zip file, you iterate the first created List object, which has nothing in it.
foreach (var d in documents)
I believe this is what then causes your save of the zip file to throw an exception:
zipFile.Save(stream);
In the Find line above,
Document document = context.Documents.Find(id);
did you intend to populate the documents list with the result? Note that Find returns a single entity, not a list, so each found document has to be added to documents (see UPDATE 2 below for a working version).
UPDATE 2
I set up a database with your information and created an MVC Web API that accepts a POST of JSON data. It populates the list with items pulled from the database by ID.
[HttpPost]
[ActionName("ZipFileAction")]
public HttpResponseMessage ZipFiles([FromBody]int[] id)
{
    if (id == null)
    {
        // Required IDs were not provided
        throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.BadRequest));
    }
    List<Document> documents = new List<Document>();
    using (var context = new ApplicationDbContext())
    {
        foreach (int NextDocument in id)
        {
            Document document = context.Documents.Find(NextDocument);
            if (document == null)
            {
                throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.NotFound));
            }
            documents.Add(document);
        }
        using (var zipFile = new ZipFile())
        {
            // Make zip file
            foreach (var d in documents)
            {
                var dt = d.DocumentDate.ToString("y").Replace('/', '-').Replace(':', '-');
                string fileName = String.Format("{0}-{1}-{2}.pdf", dt, d.PipeName, d.LocationAb);
                zipFile.AddEntry(fileName, d.DocumentUrl);
            }
            return ZipContentResult(zipFile);
        }
    }
}
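For completeness, here is a minimal sketch of POSTing the id array to this action from a C# client; the host and route are assumptions based on the controller name, so adjust them to your routing configuration:
using (var client = new HttpClient())
{
    // The JSON array binds to the [FromBody] int[] id parameter
    var payload = new StringContent("[1, 2, 3]", Encoding.UTF8, "application/json");
    HttpResponseMessage response = await client.PostAsync("http://localhost/api/apiZipPipeLine/ZipFileAction", payload);
    response.EnsureSuccessStatusCode();
    File.WriteAllBytes("documents.zip", await response.Content.ReadAsByteArrayAsync());
}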

{"The argument types 'Edm.Int32' and 'Edm.String' are incompatible ..."}
Sounds like your key column is an int type and you are searching it using a string. Try changing the type of the parameter from string to int. I renamed it ids as it seems to be a list of document identifiers.
An optimized version would get all needed documents in a single query :
public HttpResponseMessage Get([FromUri] int[] ids)
{
    using (var context = new ApplicationDbContext())
    {
        var documents = context.Documents.Where(doc => ids.Contains(doc.Id)).ToList();
        if (documents.Count != ids.Length)
        {
            throw new HttpResponseException(new HttpResponseMessage(HttpStatusCode.NotFound));
        }
        using (var zipFile = new ZipFile())
        {
            // Make zip file
            foreach (var d in documents)
            {
                var dt = d.DocumentDate.ToString("y").Replace('/', '-').Replace(':', '-');
                string fileName = String.Format("{0}-{1}-{2}.pdf", dt, d.PipeName, d.LocationAb);
                zipFile.AddEntry(fileName, d.DocumentUrl);
            }
            return ZipContentResult(zipFile);
        }
    }
}
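With [FromUri], the ids array binds from repeated query-string parameters. A minimal sketch of calling this endpoint (the URL is an assumption):
using (var client = new HttpClient())
{
    // Web API binds [FromUri] int[] ids from ?ids=1&ids=2&ids=3
    byte[] zipBytes = await client.GetByteArrayAsync("http://localhost/api/zip?ids=1&ids=2&ids=3");
    File.WriteAllBytes("documents.zip", zipBytes);
}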

Related

CsvHelper Error "No header record found" on reading a csv stream

Below is the code I am using to read a stream source of CSV files, but I get the error "No header record found". The library version is 15.0 and I am already using .ToList() as suggested in some solutions, but the error persists. Below are the method, the TableField class, and the ReadStream method.
Also note that I can get the desired result if I pass the source as a MemoryStream, but it fails if I pass it as a Stream, because I need to avoid writing to memory each time.
public async Task<Stream> DownloadBlob(string containerName, string fileName, string connectionString)
{
    // MemoryStream memoryStream = new MemoryStream();
    if (string.IsNullOrEmpty(connectionString))
    {
        connectionString = @"UseDevelopmentStorage=true";
        containerName = "testblobs";
    }
    Microsoft.Azure.Storage.CloudStorageAccount storageAccount = Microsoft.Azure.Storage.CloudStorageAccount.Parse(connectionString);
    CloudBlobClient serviceClient = storageAccount.CreateCloudBlobClient();
    CloudBlobContainer container = serviceClient.GetContainerReference(containerName);
    CloudBlockBlob blob = container.GetBlockBlobReference(fileName);
    if (!blob.Exists())
    {
        throw new Exception($"Blob Not found");
    }
    return await blob.OpenReadAsync();
}
public class TableField
{
    public string Name { get; set; }
    public string Type { get; set; }
    public Type DataType
    {
        get
        {
            switch (Type.ToUpper())
            {
                case "STRING":
                    return typeof(string);
                case "INT":
                    return typeof(int);
                case "BOOL":
                case "BOOLEAN":
                    return typeof(bool);
                case "FLOAT":
                case "SINGLE":
                case "DOUBLE":
                    return typeof(double);
                case "DATETIME":
                    return typeof(DateTime);
                default:
                    throw new NotSupportedException($"CSVColumn data type '{Type}' not supported");
            }
        }
    }
}
private IEnumerable<Dictionary<string, EntityProperty>> ReadCSV(Stream source, IEnumerable<TableField> cols)
{
    using (TextReader reader = new StreamReader(source, Encoding.UTF8))
    {
        var cache = new TypeConverterCache();
        cache.AddConverter<float>(new CSVSingleConverter());
        cache.AddConverter<double>(new CSVDoubleConverter());
        var csv = new CsvReader(reader,
            new CsvHelper.Configuration.CsvConfiguration(global::System.Globalization.CultureInfo.InvariantCulture)
            {
                Delimiter = ";",
                HasHeaderRecord = true,
                CultureInfo = global::System.Globalization.CultureInfo.InvariantCulture,
                TypeConverterCache = cache
            });
        csv.Read();
        csv.ReadHeader();
        var map = (
            from col in cols
            from src in col.Sources()
            let index = csv.GetFieldIndex(src, isTryGet: true)
            where index != -1
            select new { col.Name, Index = index, Type = col.DataType }).ToList();
        while (csv.Read())
        {
            yield return map.ToDictionary(
                col => col.Name,
                col => EntityProperty.CreateEntityPropertyFromObject(csv.GetField(col.Type, col.Index)));
        }
    }
}
StreamReading code:
public async Task<Stream> ReadStream(string containerName, string digestFileName, string fileName, string connectionString)
{
    string data = string.Empty;
    string fileExtension = Path.GetExtension(fileName);
    var contents = await DownloadBlob(containerName, digestFileName, connectionString);
    return contents;
}
Sample CSV to be read:
PartitionKey;Time;RowKey;State;RPM;Distance;RespirationConfidence;HeartBPM
te123;2020-11-06T13:33:37.593Z;10;1;8;20946;26;815
te123;2020-11-06T13:33:37.593Z;4;2;79944;8;36635;6
te123;2020-11-06T13:33:37.593Z;3;3;80042;9;8774;5
te123;2020-11-06T13:33:37.593Z;1;4;0;06642;6925;37
te123;2020-11-06T13:33:37.593Z;6;5;04740;74753;94628;21
te123;2020-11-06T13:33:37.593Z;7;6;6;2;14;629
te123;2020-11-06T13:33:37.593Z;9;7;126;86296;9157;05
te123;2020-11-06T13:33:37.593Z;5;8;5;3;7775;08
te123;2020-11-06T13:33:37.593Z;2;9;44363;65;70;229
te123;2020-11-06T13:33:37.593Z;8;10;02;24666;2;2
I have tried to reproduce the problem with version 15.0 of the library, but could not do so fully, since the classes CSVSingleConverter and CSVDoubleConverter were not provided. With the standard classes of CsvHelper, however, reading the header works:
using System;
using System.IO;
using System.Text;
using CsvHelper;
using CsvHelper.TypeConversion;

namespace ConsoleApp2
{
    class Program
    {
        static void Main(string[] args)
        {
            using (Stream stream = new FileStream(@"e:\demo.csv", FileMode.Open, FileAccess.Read))
            {
                ReadCSV(stream);
            }
        }

        private static void ReadCSV(Stream source)
        {
            using (TextReader reader = new StreamReader(source, Encoding.UTF8))
            {
                var cache = new TypeConverterCache();
                cache.AddConverter<float>(new SingleConverter());
                cache.AddConverter<double>(new DoubleConverter());
                var csv = new CsvReader(reader,
                    new CsvHelper.Configuration.CsvConfiguration(global::System.Globalization.CultureInfo.InvariantCulture)
                    {
                        Delimiter = ";",
                        HasHeaderRecord = true,
                        CultureInfo = global::System.Globalization.CultureInfo.InvariantCulture,
                        TypeConverterCache = cache
                    });
                csv.Read();
                csv.ReadHeader();
                foreach (string headerRow in csv.Context.HeaderRecord)
                {
                    Console.WriteLine(headerRow);
                }
            }
        }
    }
}
I've changed the lines ...
cache.AddConverter<float>(new CSVSingleConverter());
cache.AddConverter<double>(new CSVDoubleConverter());
... to ...
cache.AddConverter<float>(new SingleConverter());
cache.AddConverter<double>(new DoubleConverter());
I put the CSV data into a UTF-8 text file. Output at the console is:
PartitionKey
Time
RowKey
State
RPM
Distance
RespirationConfidence
HeartBPM
EDIT 2020-12-24:
Put the whole source text online, not just part of it.
Related to my answer to your other question (it has more detail; you can read it there): I didn't encounter any problem connecting CsvHelper to a blob-storage-sourced stream.
This was the code used (I took the CSV data you posted, added it to a file, and uploaded it to blob storage):
public partial class Form1 : Form
{
    public Form1()
    {
        InitializeComponent();
    }

    private async void button1_Click(object sender, EventArgs e)
    {
        var cstr = "YOUR CONNSTR HERE";
        var bbc = new BlockBlobClient(cstr, "temp", "ankit.csv");
        var s = await bbc.OpenReadAsync(new BlobOpenReadOptions(true) { BufferSize = 16384 });
        var sr = new StreamReader(s);
        var csv = new CsvHelper.CsvReader(sr, new CsvConfiguration(CultureInfo.CurrentCulture) { HasHeaderRecord = true, Delimiter = ";" });
        // try by read/getrecord
        while (await csv.ReadAsync())
        {
            var rec = csv.GetRecord<X>();
            Console.WriteLine(rec.PartitionKey);
        }
        var x = new X();
        // try by await foreach
        await foreach (var r in csv.EnumerateRecordsAsync(x))
        {
            Console.WriteLine(r.PartitionKey);
        }
    }
}
class X
{
    public string PartitionKey { get; set; }
}
Try setting the source stream back to the start.
private IEnumerable<Dictionary<string, EntityProperty>> ReadCSV(Stream source, IEnumerable<TableField> cols)
{
    source.Position = 0;
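If the incoming stream might not support seeking (for example, a raw network stream), a defensive variant of that rewind (my addition, not from the original answer) is:
// Only rewind when the stream is seekable; a non-seekable stream is already at its start
if (source.CanSeek)
{
    source.Position = 0;
}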
You also can't use yield return there. It delays execution of the code until you access the IEnumerable<Dictionary<string, EntityProperty>> returned from the ReadCSV method. The problem is at that point you have already closed the using statement with the TextReader that CsvHelper needs to read your data, so you get a NullReferenceException.
You either need to remove the yield return
var result = new List<Dictionary<string, EntityProperty>>();
while (csv.Read())
{
    // Build each row dictionary (as in the yield version) and collect it
    result.Add(map.ToDictionary(
        col => col.Name,
        col => EntityProperty.CreateEntityPropertyFromObject(csv.GetField(col.Type, col.Index))));
}
return result;
Or pass the TextReader to your method. Any enumeration of the IEnumerable<Dictionary<string, EntityProperty>> must occur before leaving the using statement, which would otherwise dispose of the TextReader needed by the CsvReader:
IEnumerable<Dictionary<string, EntityProperty>> result;
using (TextReader reader = new StreamReader(source, Encoding.UTF8))
{
    // Calling ToList() will enumerate your yield statement
    result = ReadCSV(reader, cols).ToList();
}
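This implies changing the ReadCSV signature to accept the reader instead of the stream, roughly private IEnumerable<Dictionary<string, EntityProperty>> ReadCSV(TextReader reader, IEnumerable<TableField> cols), and dropping the using block inside the method, since the caller now owns the reader's lifetime.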
I was getting the same error 'No header found...', and this was after several hundred successful reads of the same file. I added the delimiter=","
reader = csv.reader(filename, delimiter=",")
and that solved the problem. I think the CSV reader will attempt to determine the delimiter if it is not specified, and fails after a while, maybe a memory leak? The comma is the default, but if the reader has to determine it programmatically, it is more likely to fail.

Read Text file without copying to hard disk

I'm using ASP.NET Core 3.0, and I find myself in a situation where the client will pass text file(s) to my API. The API will then parse the text files into a data model using a function I created called ParseDataToModel(), and then store that data model in a database using Entity Framework. Since my code parses the files into a data model, I really don't need to copy them to the hard disk if it isn't necessary. I don't have a ton of knowledge when it comes to streams, and I've googled quite a bit, but is there a way to retrieve the string data of the uploaded files without actually copying them to the hard drive? It seems like a needless extra step... I apologize if this isn't possible, or if this question has been asked before. Below is my code for the file upload and insertion into the database:
[HttpPost("upload"), DisableRequestSizeLimit]
public IActionResult Upload()
{
var filePaths = new List<string>();
foreach(var formFile in Request.Form.Files)
{
if(formFile.Length > 0)
{
var filePath = Path.GetTempFileName();
filePaths.Add(filePath);
using(var stream = new FileStream(filePath, FileMode.Create))
{
formFile.CopyTo(stream);
}
}
}
BaiFiles lastFile = null;
foreach(string s in filePaths)
{
string contents = System.IO.File.ReadAllText(s);
BaiFiles fileToCreate = ParseFileToModel(contents);
if (fileToCreate == null)
return BadRequest(ModelState);
var file = _fileRepository.GetFiles().Where(t => t.FileId == fileToCreate.FileId).FirstOrDefault();
if (file != null)
{
ModelState.AddModelError("", $"File with id {fileToCreate.FileId} already exists");
return StatusCode(422, ModelState);
}
if (!ModelState.IsValid)
return BadRequest();
if (!_fileRepository.CreateFile(fileToCreate))
{
ModelState.AddModelError("", $"Something went wrong saving file with id {fileToCreate.FileId}");
return StatusCode(500, ModelState);
}
lastFile = fileToCreate;
}
return CreatedAtRoute("GetFile", new { fileId = lastFile.FileId }, lastFile);
}
It would be nice to just hold all of the data in memory instead of copying it to the hard drive, just to turn around and open it again to read the text... I could be wrong and it is already doing exactly what I want - but System.IO.File.ReadAllText() makes me feel like it's being copied to a temp directory somewhere.
After using John's answer below, here is the revised code for anyone interested:
[HttpPost("upload"), DisableRequestSizeLimit]
public IActionResult Upload()
{
var filePaths = new List<string>();
BaiFiles lastFile = null;
foreach (var formFile in Request.Form.Files)
{
if (formFile.Length > 0)
{
using (var stream = formFile.OpenReadStream())
{
using (var sr = new StreamReader(stream))
{
string contents = sr.ReadToEnd();
BaiFiles fileToCreate = ParseFileToModel(contents);
if (fileToCreate == null)
return BadRequest(ModelState);
var file = _fileRepository.GetFiles().Where(t => t.FileId == fileToCreate.FileId).FirstOrDefault();
if (file != null)
{
ModelState.AddModelError("", $"File with id {fileToCreate.FileId} already exists");
return StatusCode(422, ModelState);
}
if (!ModelState.IsValid)
return BadRequest();
if (!_fileRepository.CreateFile(fileToCreate))
{
ModelState.AddModelError("", $"Something went wrong saving file with id {fileToCreate.FileId}");
return StatusCode(500, ModelState);
}
lastFile = fileToCreate;
}
}
}
}
if(lastFile == null)
return NoContent();
else
return CreatedAtRoute("GetFile", new { fileId = lastFile.FileId }, lastFile);
}
System.IO.File.ReadAllText(filePath) is a convenience method. It essentially does this:
string text = null;
using (var stream = File.OpenRead(filePath))
using (var reader = new StreamReader(stream))
{
    text = reader.ReadToEnd();
}
FormFile implements an OpenReadStream method, so you can simply use this in place of stream in the above:
string text = null;
using (var stream = formFile.OpenReadStream())
using (var reader = new StreamReader(stream))
{
    text = reader.ReadToEnd();
}
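As an optional variant (my addition, assuming the action is made async), the same read can be done without blocking the request thread:
string text = null;
using (var stream = formFile.OpenReadStream())
using (var reader = new StreamReader(stream))
{
    // ReadToEndAsync avoids tying up a thread-pool thread during I/O
    text = await reader.ReadToEndAsync();
}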

How to return a filestream together with JSON model from ASPNETCore API call?

I have a controller action which returns a file using the following line of code. However, I need to return a file stream together with a model which may contain some other JSON-serialized data.
I've tried a few different approaches but haven't found a solution which seems to work very well. Has anyone done this before and what is considered the best approach?
this.File(fs, "application/zip", fileName);
Note: The question answered here refers to multiple FileStreams, whereas I need to return a FileStream and a model as JSON.
However, I have modified this solution with the following. What does everyone think?
return new MultipartResult()
{
    new MultipartFileContent()
    {
        ContentType = "application/zip",
        FileName = fileName,
        Stream = this.OpenFile(filePath)
    },
    new MultipartModelContent()
    {
        ContentType = "application/json",
        Model = model
    }
};
public async Task ExecuteResultAsync(ActionContext context)
{
    foreach (var item in this.OfType<MultipartFileContent>())
    {
        if (item.Stream != null)
        {
            var c = new StreamContent(item.Stream);
            if (item.ContentType != null)
            {
                c.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue(item.ContentType);
            }
            if (item.FileName != null)
            {
                // SetHttpFileName lives on Microsoft.Net.Http.Headers.ContentDispositionHeaderValue;
                // its values are copied to the System.Net.Http.Headers type that StreamContent expects.
                var contentDisposition = new ContentDispositionHeaderValue("attachment");
                contentDisposition.SetHttpFileName(item.FileName);
                c.Headers.ContentDisposition = new System.Net.Http.Headers.ContentDispositionHeaderValue("attachment");
                c.Headers.ContentDisposition.FileName = contentDisposition.FileName.Value;
                c.Headers.ContentDisposition.FileNameStar = contentDisposition.FileNameStar.Value;
            }
            this.content.Add(c);
        }
    }
    foreach (var item in this.OfType<MultipartModelContent>())
    {
        var c = new ObjectContent<object>(item, new JsonMediaTypeFormatter(), item.ContentType);
        if (item.ContentType != null)
        {
            c.Headers.ContentType = new System.Net.Http.Headers.MediaTypeHeaderValue(item.ContentType);
        }
        this.content.Add(c);
    }
    context.HttpContext.Response.ContentLength = content.Headers.ContentLength;
    context.HttpContext.Response.ContentType = content.Headers.ContentType.ToString();
    await content.CopyToAsync(context.HttpContext.Response.Body);
}
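The snippet above assumes a backing MultipartContent field on the result class, along the lines of the following (my assumption; it is not shown in the original code):
// Hypothetical backing field; "mixed" produces a multipart/mixed response body
private readonly MultipartContent content = new MultipartContent("mixed");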

Get all metadata from an existing PDF using iText7

How can I retrieve all metadata stored in a PDF with iText7?
using (var pdfReader = new iText.Kernel.Pdf.PdfReader("path-to-a-pdf-file"))
{
    var pdfDocument = new iText.Kernel.Pdf.PdfDocument(pdfReader);
    var pdfDocumentInfo = pdfDocument.GetDocumentInfo();
    // Getting basic metadata
    var author = pdfDocumentInfo.GetAuthor();
    var title = pdfDocumentInfo.GetTitle();
    // Getting everything else
    var someMetadata = pdfDocumentInfo.GetMoreInfo("need-a-key-here");
    // How to get all metadata ?
}
I was using this with iTextSharp but I can't figure how to do it with the new iText7.
using (var pdfReader = new iTextSharp.text.pdf.PdfReader("path-to-a-pdf-file"))
{
    // Getting basic metadata
    var author = pdfReader.Info.ContainsKey("Author") ? pdfReader.Info["Author"] : null;
    var title = pdfReader.Info.ContainsKey("Title") ? pdfReader.Info["Title"] : null;
    // Getting everything else
    var metadata = pdfReader.Info;
    metadata.Remove("Author");
    metadata.Remove("Title");
    // Print metadata
    Console.WriteLine($"Author: {author}");
    Console.WriteLine($"Title: {title}");
    foreach (var line in metadata)
    {
        Console.WriteLine($"{line.Key}: {line.Value}");
    }
}
I am using version 7.1.1 of iText7.
In iText 7 the PdfDocumentInfo class unfortunately does not expose a method to retrieve the keys in the underlying dictionary.
But you can simply retrieve the Info dictionary contents by accessing that dictionary directly from the trailer dictionary. E.g. for a PdfDocument pdfDocument:
PdfDictionary infoDictionary = pdfDocument.GetTrailer().GetAsDictionary(PdfName.Info);
foreach (PdfName key in infoDictionary.KeySet())
    Console.WriteLine($"{key}: {infoDictionary.GetAsString(key)}");
There is a problem with "UnicodeBig", "UTF-8" or "PDF" encoded strings. For example, if the PDF was created with Microsoft Word, the "/Creator" entry is encoded unreadably and needs to be converted.
iText7 has its own function for that conversion:
...ToUnicodeString()
But it is a method of the PdfString object, and the PdfDictionary value (a PdfObject) first has to be cast to the PdfString type.
Complete solution as an async, "unbreakable" and auto-disposing function:
public static async Task<(Dictionary<string, string> MetaInfo, string Error)> GetMetaInfoAsync(string path)
{
    try
    {
        var metaInfo = await Task.Run(() =>
        {
            var metaInfoDict = new Dictionary<string, string>();
            using (var pdfReader = new PdfReader(path))
            using (var pdfDocument = new PdfDocument(pdfReader))
            {
                metaInfoDict["PDF.PageCount"] = $"{pdfDocument.GetNumberOfPages():D}";
                metaInfoDict["PDF.Version"] = $"{pdfDocument.GetPdfVersion()}";
                var pdfTrailer = pdfDocument.GetTrailer();
                var pdfDictInfo = pdfTrailer.GetAsDictionary(PdfName.Info);
                foreach (var pdfEntryPair in pdfDictInfo.EntrySet())
                {
                    var key = "PDF." + pdfEntryPair.Key.ToString().Substring(1);
                    string value;
                    switch (pdfEntryPair.Value)
                    {
                        case PdfString pdfString:
                            value = pdfString.ToUnicodeString();
                            break;
                        default:
                            value = pdfEntryPair.Value.ToString();
                            break;
                    }
                    metaInfoDict[key] = value;
                }
                return metaInfoDict;
            }
        });
        return (metaInfo, null);
    }
    catch (Exception ex)
    {
        if (Debugger.IsAttached) Debugger.Break();
        return (null, ex.Message);
    }
}
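A minimal usage sketch of the function above (the file path is a placeholder):
var (metaInfo, error) = await GetMetaInfoAsync(@"C:\sample.pdf");
if (error != null)
{
    Console.WriteLine($"Failed to read metadata: {error}");
}
else
{
    foreach (var pair in metaInfo)
        Console.WriteLine($"{pair.Key}: {pair.Value}");
}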

Modifying existing resource file programmatically

I am using the following code to generate a resource file programmatically.
ResXResourceWriter resxWtr = new ResXResourceWriter(@"C:\CarResources.resx");
resxWtr.AddResource("Title", "Classic American Cars");
resxWtr.Generate();
resxWtr.Close();
Now, I want to modify the resource file created by the above code. If I use the same code, the existing resource file gets replaced. Please help me modify it without losing the existing contents.
Best Regards,
Ankit
public static void AddOrUpdateResource(string key, string value)
{
    var resx = new List<DictionaryEntry>();
    using (var reader = new ResXResourceReader(resourceFilepath))
    {
        resx = reader.Cast<DictionaryEntry>().ToList();
        var existingResource = resx.Where(r => r.Key.ToString() == key).FirstOrDefault();
        if (existingResource.Key == null && existingResource.Value == null) // NEW!
        {
            resx.Add(new DictionaryEntry() { Key = key, Value = value });
        }
        else // MODIFIED RESOURCE!
        {
            var modifiedResx = new DictionaryEntry() { Key = existingResource.Key, Value = value };
            resx.Remove(existingResource); // REMOVING RESOURCE!
            resx.Add(modifiedResx); // AND THEN ADDING RESOURCE!
        }
    }
    using (var writer = new ResXResourceWriter(resourceFilepath)) // same file the reader used
    {
        resx.ForEach(r =>
        {
            // Again adding all resources to generate the file with the final items
            writer.AddResource(r.Key.ToString(), r.Value.ToString());
        });
        writer.Generate();
    }
}
I had the same problem; this resolves it.
This will append to your existing .resx file:
var reader = new ResXResourceReader(@"C:\CarResources.resx"); // same file name
var node = reader.GetEnumerator();
var writer = new ResXResourceWriter(@"C:\CarResources.resx"); // same file name (not a new one)
while (node.MoveNext())
{
    writer.AddResource(node.Key.ToString(), node.Value.ToString());
}
reader.Close(); // reading is finished; release the file before saving
var newNode = new ResXDataNode("Title", "Classic American Cars");
writer.AddResource(newNode);
writer.Generate();
writer.Close();
Here is a function that would help you modify the resource file.
public static void UpdateResourceFile(Hashtable data, String path)
{
    Hashtable resourceEntries = new Hashtable();
    //Get existing resources
    ResXResourceReader reader = new ResXResourceReader(path);
    if (reader != null)
    {
        IDictionaryEnumerator id = reader.GetEnumerator();
        foreach (DictionaryEntry d in reader)
        {
            if (d.Value == null)
                resourceEntries.Add(d.Key.ToString(), "");
            else
                resourceEntries.Add(d.Key.ToString(), d.Value.ToString());
        }
        reader.Close();
    }
    //Add any new resources (existing keys are left untouched)
    foreach (String key in data.Keys)
    {
        if (!resourceEntries.ContainsKey(key))
        {
            String value = data[key] == null ? "" : data[key].ToString();
            resourceEntries.Add(key, value);
        }
    }
    //Write the combined resource file
    ResXResourceWriter resourceWriter = new ResXResourceWriter(path);
    foreach (String key in resourceEntries.Keys)
    {
        resourceWriter.AddResource(key, resourceEntries[key]);
    }
    resourceWriter.Generate();
    resourceWriter.Close();
}
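A minimal usage sketch (the path and keys are placeholders):
// Merge two entries into an existing .resx file; existing keys are preserved
Hashtable newEntries = new Hashtable();
newEntries.Add("Title", "Classic American Cars");
newEntries.Add("Subtitle", "1950s Convertibles");
UpdateResourceFile(newEntries, @"C:\CarResources.resx");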
