I used the code below to strip the quotes and split on commas.
The content of my csv file looks like this:
"1111","05-24-2017","08:30","0","TRAVEL"
"2222","05-25-2017","08:20","0","TRAVEL"
This is my code:
public bool ReadEntrie(int id, ref string name)
{
    int count = 0;
    CreateConfigFile();
    try
    {
        fs = new FileStream(data_path, FileMode.Open);
        sr = new StreamReader(fs);
        bool cond = true;
        string temp = "";
        while (cond == true)
        {
            if ((temp = sr.ReadLine()) == null)
            {
                sr.Close();
                fs.Close();
                cond = false;
                if (count == 0)
                    return false;
            }
            if (count == id)
            {
                string[] stringSplit = temp.Trim('\"').Split(new String[] { "," }, StringSplitOptions.None);
                //string[] stringSplit = temp.Split(',');
                int _maxIndex = stringSplit.Length;
                name = stringSplit[0];
            }
            count++;
        }
        sr.Close();
        fs.Close();
        return true;
    }
    catch
    {
        return false;
    }
}
If you don't have commas or quotation marks as part of the data itself (i.e. nothing like "12,34","56","a""bc", which should parse to 12,34, 56 and a"bc), you can use a simple LINQ query:
string[][] result = File
.ReadLines(#"C"\MyData.csv")
.Select(line => line
.Split(',')
.Select(item => item.Trim('"'))
.ToArray())
.ToArray();
A further improvement is to return an array of a tailored class:
MyClass[] result = File
.ReadLines(@"C:\MyData.csv")
.Select(line => line
.Split(','))
.Select(items => new MyClass() {
Id = items[0].Trim('"'),
Date = DateTime.ParseExact(items[1].Trim('"') + " " + items[2].Trim('"'),
"MM-dd-yyyy hh:mm",
CultureInfo.InvariantCulture),
Code = items[3].Trim('"'),
Text = items[4].Trim('"'),
})
.ToArray();
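For reference, the tailored class assumed above could look like this (a minimal sketch; the property names and types are inferred from the snippet itself):
// Hypothetical DTO matching the fields used in the query above.
public class MyClass
{
    public string Id { get; set; }      // e.g. "1111"
    public DateTime Date { get; set; }  // parsed from the date and time columns
    public string Code { get; set; }    // e.g. "0"
    public string Text { get; set; }    // e.g. "TRAVEL"
}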
The problem was not in that function; it was in another one, and I used Trim for it:
string[] stringSplit = temp.Split(',');
int _maxIndex = stringSplit.Length;
name = stringSplit[0].Trim('"');
lastname = stringSplit[1].Trim('"');
phone = stringSplit[2].Trim('"');
mail = stringSplit[3].Trim('"');
website = stringSplit[4].Trim('"');
Try this simple function. It takes care of double quotes and of commas between double quotes.
private string[] GetCommaSeperatedWords(string sep, string line)
{
    List<string> list = new List<string>();
    StringBuilder word = new StringBuilder();
    int doubleQuoteCount = 0;
    for (int i = 0; i < line.Length; i++)
    {
        string chr = line[i].ToString();
        if (chr == "\"")
        {
            if (doubleQuoteCount == 0)
                doubleQuoteCount++;
            else
                doubleQuoteCount--;
            continue;
        }
        if (chr == sep && doubleQuoteCount == 0)
        {
            list.Add(word.ToString());
            word = new StringBuilder();
            continue;
        }
        word.Append(chr);
    }
    list.Add(word.ToString());
    return list.ToArray();
}
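For example, applied to one of the sample lines from the question (a usage sketch):
// Splits on commas while ignoring commas inside double quotes; the quotes are dropped.
string line = "\"1111\",\"05-24-2017\",\"08:30\",\"0\",\"TRAVEL\"";
string[] fields = GetCommaSeperatedWords(",", line);
// fields: { "1111", "05-24-2017", "08:30", "0", "TRAVEL" }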
I get this error. Later I searched and found out that the cause is illegal characters in my XML, and what the usual fix is. But I don't have access to edit any of these files; my job is only to read them and fetch tag values, attribute values and similar things. So I can't replace the binary characters with escapes (e.g. replacing '\x01'). I also tried setting CheckCharacters = false in the XmlReaderSettings, but that doesn't help; it still throws the same error.
Is it not possible to fix this with XmlReader? I read about XmlTextReader, which can skip the exception, but I have already coded all of my features using XmlReader. It would be good if I could find a solution for this; otherwise I would have to change all my code.
My code:
private void button1_Click(object sender, EventArgs e)
{
    int i = 0;
    var filenames = System.IO.Directory
        .EnumerateFiles(textBox1.Text, "*.xml", System.IO.SearchOption.AllDirectories)
        .Select(System.IO.Path.GetFullPath);
    foreach (var f in filenames)
    {
        var resolver = new XmlUrlOverrideResolver();
        resolver.DtdFileMap[@"X1.DTD"] = @"\\location\X1.DTD";
        resolver.DtdFileMap[@"R2.DTD"] = @"\\location\X2.DTD";
        resolver.DtdFileMap[@"R5.DTD"] = @"\\location\R5.DTD";
        XmlReaderSettings settings = new XmlReaderSettings();
        settings.DtdProcessing = DtdProcessing.Parse;
        settings.XmlResolver = resolver;
        XmlReader doc = XmlReader.Create(f, settings);
        while (doc.Read())
        {
            if ((doc.NodeType == XmlNodeType.Element) && (doc.Name == "ap"))
            {
                if (doc.HasAttributes)
                {
                    String fin = doc.GetAttribute("ap");
                    if (fin == "no")
                    {
                        String[] array = new String[10000];
                        array[i] = (f);
                        File.AppendAllText(@"\\location\NAPP.txt", array[i] + Environment.NewLine);
                        i++;
                    }
                    else
                    {
                        String[] abs = new String[10000];
                        abs[i] = (f);
                        File.AppendAllText(@"\\location\APP.txt", abs[i] + Environment.NewLine);
                        i++;
                    }
                }
            }
        }
    }
    MessageBox.Show("Done");
}
This is a very simple example of a character "filter" that will replace the 0x06 character with a space:
public class MyStreamReader : StreamReader {
public MyStreamReader(string path)
: base(path) {
}
public override int Read(char[] buffer, int index, int count) {
int res = base.Read(buffer, index, count);
for (int i = 0; i < res; i++) {
if (buffer[i] == 0x06) {
buffer[i] = ' ';
}
}
return res;
}
}
You use it this way:
using (var sr = new MyStreamReader(f)) {
var doc = XmlReader.Create(sr, settings);
Note that it's very simple because it replaces a character (the 0x06) with another character of the same "length" (the space). If you wanted to replace a character with a "sequence" of characters (to escape it), it would get more complex (not impossible, just around 30 minutes of work).
(I have checked and it seems the XmlTextReader only uses that method and not the Read() method)
As always, when a programmer tells you 30 minutes, it means 0 minutes or 2 hours :-)
This is the "more complex" ReplacingStreamReader:
/// <summary>
/// Only the Read methods are supported!
/// </summary>
public class ReplacingStreamReader : StreamReader
{
public ReplacingStreamReader(string path)
: base(path)
{
}
public Func<char, string> ReplaceWith { get; set; }
protected char[] RemainingChars { get; set; }
protected int RemainingCharsIndex { get; set; }
public override int Read()
{
int ch;
if (RemainingChars != null)
{
ch = RemainingChars[RemainingCharsIndex];
RemainingCharsIndex++;
if (RemainingCharsIndex == RemainingChars.Length)
{
RemainingCharsIndex = 0;
RemainingChars = null;
}
}
else
{
ch = base.Read();
if (ch != -1)
{
string replace = ReplaceWith((char)ch);
if (replace == null)
{
// Do nothing
}
else if (replace.Length == 1)
{
ch = replace[0];
}
else
{
ch = replace[0];
RemainingChars = replace.ToCharArray(1, replace.Length - 1);
RemainingCharsIndex = 0;
}
}
}
return ch;
}
public override int Read(char[] buffer, int index, int count)
{
int res = 0;
// We leave error handling to the StreamReader :-)
// We handle only "working" parameters
if (RemainingChars != null && buffer != null && index >= 0 && count > 0 && index + count <= buffer.Length)
{
int remainingCharsCount = RemainingChars.Length - RemainingCharsIndex;
res = Math.Min(remainingCharsCount, count);
Array.Copy(RemainingChars, RemainingCharsIndex, buffer, index, res);
RemainingCharsIndex += res;
if (RemainingCharsIndex == RemainingChars.Length)
{
RemainingCharsIndex = 0;
RemainingChars = null;
}
if (res == count)
{
return res;
}
index += res;
count -= res;
}
while (true)
{
List<char> sb = null;
int res2 = base.Read(buffer, index, count);
if (res2 == 0 || ReplaceWith == null)
{
return res;
}
int j = 0;
for (int i = 0; i < res2; i++)
{
char ch = buffer[index + i];
string replace = ReplaceWith(ch);
if (sb != null)
{
if (replace == null)
{
sb.Add(ch);
}
else
{
sb.AddRange(replace);
}
}
else if (replace == null)
{
buffer[index + j] = ch;
j++;
}
else if (replace.Length == 1)
{
buffer[index + j] = replace[0];
j++;
}
else if (replace.Length == 0)
{
// We do not advance
}
else
{
sb = new List<char>();
sb.AddRange(replace);
}
}
res2 = j;
if (sb != null)
{
int res3 = Math.Min(sb.Count, count - res2);
sb.CopyTo(0, buffer, index + res2, res3);
if (res3 < sb.Count)
{
RemainingChars = new char[sb.Count - res3];
RemainingCharsIndex = 0;
sb.CopyTo(res3, RemainingChars, 0, RemainingChars.Length);
}
res += res3;
}
else
{
res2 = j;
// Can't happen if sb != null (at least a character must
// have been added)
if (res2 == 0)
{
continue;
}
}
res += res2;
return res;
}
}
}
Use it like:
using (var sr = new ReplacingStreamReader(f))
{
sr.ReplaceWith = x =>
{
return x == 0x6 ? " " : null;
// return x == '.' ? " " : null; // Replace all . with
};
var doc = XmlReader.Create(sr, settings);
Be aware that the ReplacingStreamReader doesn't "know" which part of the xml it is modifying, so a "blind" replace is rarely OK :-) Other than this limitation, you can replace any character with any string (returning null from ReplaceWith means "keep the current character", equivalent to returning x.ToString() in the example given; returning string.Empty is valid and means "remove the current character").
The class is quite interesting: it keeps a char[] RemainingChars with the chars that have been read (and filtered by ReplaceWith) but that haven't been returned by a Read() method because the buffer passed in was too small (the ReplaceWith method can "enlarge" the read string, making it too big for the buffer!). Note that sb is a List<char> instead of a StringBuilder; using one or the other would probably be nearly equivalent, code-wise.
You could first read the content into a string, replace (escape) the offending characters, and then load it into an XmlReader:
foreach (var f in filenames) {
    string text;
    using (StreamReader s = new StreamReader(f, Encoding.UTF8)) {
        text = s.ReadToEnd();
    }
    text = text.Replace("\x01", ""); // replace the content
    // load some settings
    var resolver = new XmlUrlOverrideResolver();
    resolver.DtdFileMap[@"X1.DTD"] = @"\\location\X1.DTD";
    resolver.DtdFileMap[@"R2.DTD"] = @"\\location\X2.DTD";
    resolver.DtdFileMap[@"R5.DTD"] = @"\\location\R5.DTD";
    XmlReaderSettings settings = new XmlReaderSettings();
    settings.DtdProcessing = DtdProcessing.Parse;
    settings.XmlResolver = resolver;
    // XmlReader.Create(string) expects a URI, so wrap the already-loaded text in a StringReader
    XmlReader doc = XmlReader.Create(new StringReader(text), settings);
    // perform processing task
    // ...
}
Where can I find a simple (as much as possible, but no simpler!) implementation of an LR(1) parser generator?
I'm not looking for performance, just the ability to generate the LR(1) states (item sets).
C++, C#, Java, and Python would all work for me.
I've written a very simple one in C# and wanted to share it here.
It basically populates the action lookup table, which tells you which state to shift to or which rule to use for reduction.
If the number is nonnegative, then it denotes a new state; if it's negative, then its one's complement (i.e. ~x) denotes the rule index.
All you need now is to make a lexer and to do the actual parsing with the action table.
Note 1: It might be quite slow at generating the states for a real-world grammar, so you might want to think twice before using it in production code!
Note 2: You might want to double-check its correctness, since I've only checked it a bit.
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using size_t = System.UInt32;
public class LRParser
{
private string[] symbols; // index => symbol
private IDictionary<string, size_t> interned = new SortedDictionary<string, size_t>(); // symbol => index
private int[/*state*/, /*lookahead*/] actions; // If >= 0, represents new state after shift. If < 0, represents one's complement (i.e. ~x) of reduction rule.
public LRParser(params KeyValuePair<string, string[]>[] grammar)
{
this.interned.Add(string.Empty, new size_t());
foreach (var rule in grammar)
{
if (!this.interned.ContainsKey(rule.Key))
{ this.interned.Add(rule.Key, (size_t)this.interned.Count); }
foreach (var symbol in rule.Value)
{
if (!this.interned.ContainsKey(symbol))
{ this.interned.Add(symbol, (size_t)this.interned.Count); }
}
}
this.symbols = this.interned.ToArray().OrderBy(p => p.Value).Select(p => p.Key).ToArray();
var syntax = Array.ConvertAll(grammar, r => new KeyValuePair<size_t, size_t[]>(this.interned[r.Key], Array.ConvertAll(r.Value, s => this.interned[s])));
var nonterminals = Array.ConvertAll(this.symbols, s => new List<size_t>());
for (size_t i = 0; i < syntax.Length; i++) { nonterminals[syntax[i].Key].Add(i); }
var firsts = Array.ConvertAll(Enumerable.Range(0, this.symbols.Length).ToArray(), s => nonterminals[s].Count > 0 ? new HashSet<size_t>() : new HashSet<size_t>() { (size_t)s });
int old;
do
{
old = firsts.Select(l => l.Count).Sum();
foreach (var rule in syntax)
{
foreach (var i in First(rule.Value, firsts))
{ firsts[rule.Key].Add(i); }
}
} while (old < firsts.Select(l => l.Count).Sum());
var actions = new Dictionary<int, IDictionary<size_t, IList<int>>>();
var states = new Dictionary<HashSet<Item>, int>(HashSet<Item>.CreateSetComparer());
var todo = new Stack<HashSet<Item>>();
var root = new Item(0, 0, new size_t());
todo.Push(new HashSet<Item>());
Closure(root, todo.Peek(), firsts, syntax, nonterminals);
states.Add(new HashSet<Item>(todo.Peek()), states.Count);
while (todo.Count > 0)
{
var set = todo.Pop();
var closure = new HashSet<Item>();
foreach (var item in set)
{ Closure(item, closure, firsts, syntax, nonterminals); }
var grouped = Array.ConvertAll(this.symbols, _ => new HashSet<Item>());
foreach (var item in closure)
{
if (item.Symbol >= syntax[item.Rule].Value.Length)
{
IDictionary<size_t, IList<int>> map;
if (!actions.TryGetValue(states[set], out map))
{ actions[states[set]] = map = new Dictionary<size_t, IList<int>>(); }
IList<int> list;
if (!map.TryGetValue(item.Lookahead, out list))
{ map[item.Lookahead] = list = new List<int>(); }
list.Add(~(int)item.Rule);
continue;
}
var next = item;
next.Symbol++;
grouped[syntax[item.Rule].Value[item.Symbol]].Add(next);
}
for (size_t symbol = 0; symbol < grouped.Length; symbol++)
{
var g = new HashSet<Item>();
foreach (var item in grouped[symbol])
{ Closure(item, g, firsts, syntax, nonterminals); }
if (g.Count > 0)
{
int state;
if (!states.TryGetValue(g, out state))
{
state = states.Count;
states.Add(g, state);
todo.Push(g);
}
IDictionary<size_t, IList<int>> map;
if (!actions.TryGetValue(states[set], out map))
{ actions[states[set]] = map = new Dictionary<size_t, IList<int>>(); }
IList<int> list;
if (!map.TryGetValue(symbol, out list))
{ map[symbol] = list = new List<int>(); }
list.Add(state);
}
}
}
this.actions = new int[states.Count, this.symbols.Length];
for (int i = 0; i < this.actions.GetLength(0); i++)
{
for (int j = 0; j < this.actions.GetLength(1); j++)
{ this.actions[i, j] = int.MinValue; }
}
foreach (var p in actions)
{
foreach (var q in p.Value)
{ this.actions[p.Key, q.Key] = q.Value.Single(); }
}
foreach (var state in states.OrderBy(p => p.Value))
{
Console.WriteLine("State {0}:", state.Value);
foreach (var item in state.Key.OrderBy(i => i.Rule).ThenBy(i => i.Symbol).ThenBy(i => i.Lookahead))
{
Console.WriteLine(
"\t{0}: {1} \xB7 {2} | {3} → {0}",
this.symbols[syntax[item.Rule].Key],
string.Join(" ", syntax[item.Rule].Value.Take((int)item.Symbol).Select(s => this.symbols[s]).ToArray()),
string.Join(" ", syntax[item.Rule].Value.Skip((int)item.Symbol).Select(s => this.symbols[s]).ToArray()),
this.symbols[item.Lookahead] == string.Empty ? "\x04" : this.symbols[item.Lookahead],
string.Join(
", ",
Array.ConvertAll(
actions[state.Value][item.Symbol < syntax[item.Rule].Value.Length ? syntax[item.Rule].Value[item.Symbol] : item.Lookahead].ToArray(),
a => a >= 0 ? string.Format("state {0}", a) : string.Format("{0} (rule {1})", this.symbols[syntax[~a].Key], ~a))));
}
Console.WriteLine();
}
}
private static void Closure(Item item, HashSet<Item> closure /*output*/, HashSet<size_t>[] firsts, KeyValuePair<size_t, size_t[]>[] syntax, IList<size_t>[] nonterminals)
{
if (closure.Add(item) && item.Symbol < syntax[item.Rule].Value.Length)
{
foreach (var r in nonterminals[syntax[item.Rule].Value[item.Symbol]])
{
foreach (var i in First(syntax[item.Rule].Value.Skip((int)(item.Symbol + 1)), firsts))
{ Closure(new Item(r, 0, i == new size_t() ? item.Lookahead : i), closure, firsts, syntax, nonterminals); }
}
}
}
private struct Item : IEquatable<Item>
{
public size_t Rule;
public size_t Symbol;
public size_t Lookahead;
public Item(size_t rule, size_t symbol, size_t lookahead)
{
this.Rule = rule;
this.Symbol = symbol;
this.Lookahead = lookahead;
}
public override bool Equals(object obj) { return obj is Item && this.Equals((Item)obj); }
public bool Equals(Item other)
{ return this.Rule == other.Rule && this.Symbol == other.Symbol && this.Lookahead == other.Lookahead; }
public override int GetHashCode()
{ return this.Rule.GetHashCode() ^ this.Symbol.GetHashCode() ^ this.Lookahead.GetHashCode(); }
}
private static IEnumerable<size_t> First(IEnumerable<size_t> symbols, IEnumerable<size_t>[] map)
{
foreach (var symbol in symbols)
{
bool epsilon = false;
foreach (var s in map[symbol])
{
if (s == new size_t()) { epsilon = true; }
else { yield return s; }
}
if (!epsilon) { yield break; }
}
yield return new size_t();
}
private static KeyValuePair<K, V> MakePair<K, V>(K k, V v) { return new KeyValuePair<K, V>(k, v); }
private static void Main(string[] args)
{
var sw = Stopwatch.StartNew();
var parser = new LRParser(
MakePair("start", new string[] { "exps" }),
MakePair("exps", new string[] { "exps", "exp" }),
MakePair("exps", new string[] { }),
MakePair("exp", new string[] { "INTEGER" })
);
Console.WriteLine(sw.ElapsedMilliseconds);
}
}
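To illustrate the encoding described above (a non-negative entry means shift to that state, a negative entry means reduce by rule ~entry), here is a minimal, hypothetical driver loop. It assumes the action table, the rule shapes and a token stream of interned symbol indices are accessible; the LRParser class above keeps them private, so treat this purely as a sketch of the parsing idea rather than working code against that class:
using System.Collections.Generic;

static class LRDriverSketch
{
    // Hypothetical LR driver loop. 'actions' is the [state, symbol] table (int.MinValue = error),
    // 'ruleLhs'/'ruleLength' describe each grammar rule, and 'input' is a stream of interned
    // symbol indices ending with the end-of-input marker (index 0 above).
    public static bool Parse(int[,] actions, int[] ruleLhs, int[] ruleLength, IList<int> input)
    {
        var states = new Stack<int>();
        states.Push(0);                                 // start state
        int pos = 0;
        while (true)
        {
            int action = actions[states.Peek(), input[pos]];
            if (action == int.MinValue) return false;   // no entry: syntax error
            if (action >= 0)                            // shift: push state, consume token
            {
                states.Push(action);
                pos++;
            }
            else                                        // reduce by rule ~action
            {
                int rule = ~action;
                if (rule == 0) return true;             // assuming rule 0 is the start rule: accept
                for (int k = 0; k < ruleLength[rule]; k++) states.Pop();
                // "goto": shift on the rule's left-hand-side nonterminal
                states.Push(actions[states.Peek(), ruleLhs[rule]]);
            }
        }
    }
}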
LRSTAR 9.1 is a minimal LR(1) and LR(*) parser generator. You can use it to verify that your parser generator produces the correct states by using the '/s' option. LRSTAR has been tested against HYACC and found to give the correct LR(1) states. It ships with 20 grammars and 6 Microsoft Visual Studio projects.
Which C# template engine uses 'pure' HTML, containing only text and markers, without any control flow such as if, while, loops or expressions, so that the HTML stays separated from the control code?
Below is an example phone-book listing that shows how this should be done:
string html=#"
<html><head><title>#title</title></head>
<body>
<table>
<tr>
<td> id</td> <td> name</td> <td> sex</td> <td>phones</td>
</tr><!--#contacts:-->
<tr>
<td>#id</td> <td>#name</td> <td>#sex</td>
<td>
<!--#phones:-->#phone <br/>
<!--:#phones-->
</td>
</tr><!--:#contacts-->
</table>
</body>
</html>";
var contacts = from c in db.contacts select c;
Marker m = new Marker(html);
Filler t = m.Mark("title");
t.Set("Phone book");
Filler c = m.Mark("contacts", "id,name,sex");
// **foreach** expressed in code, not in html
foreach(var contact in contacts) {
int id = contact.id;
c.Add(id, contact.name, contact.sex);
Filler p = c.Mark("phones", "phone");
var phones = from ph in db.phones
where ph.id == id
select new {ph.phone};
if (phones.Any()) {
foreach(var ph in phones) {
p.Add(ph);
}
} else {
p.Clear();
}
}
Console.Out.WriteLine(m.Get());
Use this code:
Templet.cs
using System;
using System.Collections.Generic;
using System.Text;
using System.Text.RegularExpressions;
namespace templaten.com.Templaten
{
public class tRange
{
public int head, toe;
public tRange(int _head, int _toe)
{
head = _head;
toe = _toe;
}
}
public enum AType
{
VALUE = 0,
NAME = 1,
OPEN = 2,
CLOSE = 3,
GROUP = 4
}
public class Atom
{
private AType kin;
private string tag;
private object data;
private List<Atom> bag;
public Atom(string _tag = "",
AType _kin = AType.VALUE,
object _data = null)
{
tag = _tag;
if (String.IsNullOrEmpty(_tag))
_kin = AType.GROUP;
kin = _kin;
if (_kin == AType.GROUP)
bag = new List<Atom>();
else
bag = null;
data = _data;
}
public AType Kin
{
get { return kin; }
}
public string Tag
{
get { return tag; }
set { tag = value; }
}
public List<Atom> Bag
{
get { return bag; }
}
public object Data
{
get { return data; }
set { data = value; }
}
public int Add(string _tag = "",
AType _kin = AType.VALUE,
object _data = null)
{
if (bag != null)
{
bag.Add(new Atom(_tag, _kin, _data));
return bag.Count - 1;
}
else
{
return -1;
}
}
}
public class Templet
{
private string content;
string namepat = "\\w+";
string justName = "(\\w+)";
string namePre = "#";
string namePost = "";
string comment0 = "\\<!--\\s*";
string comment1 = "\\s*--\\>";
private Atom tokens; // parsed contents
private Dictionary<string, int> iNames; // name index
private Dictionary<string, tRange> iGroups; // groups index
private Atom buffer; // output buffer
private Dictionary<string, int> _iname; // output name index
private Dictionary<string, tRange> _igroup; // output index
public Templet(string Content = null)
{
Init(Content);
}
private int[] mark(string[] names, string group)
{
if (names == null || names.Length < 1) return null;
tRange t = new tRange(0, buffer.Bag.Count - 1);
if (group != null)
{
if (!_igroup.ContainsKey(group)) return null;
t = _igroup[group];
}
int[] marks = new int[names.Length];
for (int i = 0; i < marks.Length; i++)
marks[i] = -1;
for (int i = t.head; i <= t.toe; i++)
{
if (buffer.Bag[i].Kin == AType.NAME)
{
for (int j = 0; j < names.Length; j++)
{
if (String.Compare(
names[j],
buffer.Bag[i].Tag,
true) == 0)
{
marks[j] = i;
break;
}
}
}
}
return marks;
}
public Filler Mark(string group, string names)
{
Filler f = new Filler(this, names);
f.di = mark(f.names, group);
f.Group = group;
tRange t = null;
if (_igroup.ContainsKey(group)) t = _igroup[group];
f.Range = t;
return f;
}
public Filler Mark(string names)
{
Filler f = new Filler(this, names);
f.di = mark(f.names, null);
f.Group = "";
f.Range = null;
return f;
}
public void Set(int[] locations, object[] x)
{
int j = Math.Min(x.Length, locations.Length);
for (int i = 0; i < j; i++)
{
int l = locations[i];
if ((l >= 0) && (buffer.Bag[l] != null))
buffer.Bag[l].Data = x[i];
}
}
public void New(string group, int seq = 0)
{
// place new group copied from old group just below it
if (!( iGroups.ContainsKey(group)
&& _igroup.ContainsKey(group)
&& seq > 0)) return;
tRange newT = null;
tRange t = iGroups[group];
int beginRange = _igroup[group].toe + 1;
for (int i = t.head; i <= t.toe; i++)
{
buffer.Bag.Insert(beginRange,
new Atom(tokens.Bag[i].Tag,
tokens.Bag[i].Kin,
tokens.Bag[i].Data));
beginRange++;
}
newT = new tRange(t.toe + 1, t.toe + (t.toe - t.head + 1));
// rename past group
string pastGroup = group + "_" + seq;
t = _igroup[group];
buffer.Bag[t.head].Tag = pastGroup;
buffer.Bag[t.toe].Tag = pastGroup;
_igroup[pastGroup] = t;
// change group indexes
_igroup[group] = newT;
}
public void ReMark(Filler f, string group)
{
if (!_igroup.ContainsKey(group)) return;
Map(buffer, _iname, _igroup);
f.di = mark(f.names, group);
f.Range = _igroup[group];
}
private static void Indexing(string aname,
AType kin,
int i,
Dictionary<string, int> dd,
Dictionary<string, tRange> gg)
{
switch (kin)
{
case AType.NAME: // index all names
dd[aname] = i;
break;
case AType.OPEN: // index all groups
if (!gg.ContainsKey(aname))
gg[aname] = new tRange(i, -1);
else
gg[aname].head = i;
break;
case AType.CLOSE:
if (!gg.ContainsKey(aname))
gg[aname] = new tRange(-1, i);
else
gg[aname].toe = i;
break;
default:
break;
}
}
private static void Map(Atom oo,
Dictionary<string, int> dd,
Dictionary<string, tRange> gg)
{
for (int i = 0; i < oo.Bag.Count; i++)
{
string aname = oo.Bag[i].Tag;
Indexing(oo.Bag[i].Tag, oo.Bag[i].Kin, i, dd, gg);
}
}
public void Init(string Content = null)
{
content = Content;
tokens = new Atom("", AType.GROUP);
iNames = new Dictionary<string, int>();
iGroups = new Dictionary<string, tRange>();
// parse content into tokens
string namePattern = namePre + namepat + namePost;
string patterns =
"(?<var>" + namePattern + ")|" +
"(?<head>" + comment0 + namePattern + ":" + comment1 + ")|" +
"(?<toe>" + comment0 + ":" + namePattern + comment1 + ")";
Regex jn = new Regex(justName, RegexOptions.Compiled);
Regex r = new Regex(patterns, RegexOptions.Compiled);
MatchCollection ms = r.Matches(content);
int pre = 0;
foreach (Match m in ms)
{
tokens.Add(content.Substring(pre, m.Index - pre));
int idx = -1;
if (m.Groups.Count >= 3)
{
string aname = "";
MatchCollection x = jn.Matches(m.Value);
if (x.Count > 0 && x[0].Groups.Count > 1)
aname = x[0].Groups[1].ToString();
AType t = AType.VALUE;
if (m.Groups[1].Length > 0) t = AType.NAME;
if (m.Groups[2].Length > 0) t = AType.OPEN;
if (m.Groups[3].Length > 0) t = AType.CLOSE;
if (aname.Length > 0)
{
tokens.Add(aname, t);
idx = tokens.Bag.Count - 1;
}
Indexing(aname, t, idx, iNames, iGroups);
}
pre = m.Index + m.Length;
}
if (pre < content.Length)
tokens.Add(content.Substring(pre, content.Length - pre));
// copy tokens into buffer
buffer = new Atom("", AType.GROUP);
for (int i = 0; i < tokens.Bag.Count; i++)
buffer.Add(tokens.Bag[i].Tag, tokens.Bag[i].Kin);
// initialize index of output names
_iname = new Dictionary<string, int>();
foreach (string k in iNames.Keys)
_iname[k] = iNames[k];
// initialize index of output groups
_igroup = new Dictionary<string, tRange>();
foreach (string k in iGroups.Keys)
{
tRange t = iGroups[k];
_igroup[k] = new tRange(t.head, t.toe);
}
}
public string Get()
{
StringBuilder sb = new StringBuilder("");
for (int i = 0; i < buffer.Bag.Count; i++)
{
switch (buffer.Bag[i].Kin)
{
case AType.VALUE:
sb.Append(buffer.Bag[i].Tag);
break;
case AType.NAME:
sb.Append(buffer.Bag[i].Data);
break;
case AType.OPEN:
case AType.CLOSE:
break;
default: break;
}
}
return sb.ToString();
}
}
public class Filler
{
private Templet t = null;
public int[] di;
public string[] names;
public string Group { get; set; }
public tRange Range { get; set; }
private int seq = 0;
public Filler(Templet tl, string markers = null)
{
t = tl;
if (markers != null)
names = markers.Split(new char[] { ',' },
StringSplitOptions.RemoveEmptyEntries);
else
names = null;
}
public void init(int length)
{
di = new int[length];
for (int i = 0; i < length; i++)
di[i] = -1;
seq = 0;
Group = "";
Range = null;
}
// clear contents inside marked object or group
public void Clear()
{
object[] x = new object[di.Length];
for (int i = 0; i < di.Length; i++)
x[i] = null;
t.Set(di, x);
}
// set value for marked object,
// or add row to group and set value to columns
public void Set(params object[] x)
{
t.Set(di, x);
}
public void Add(params object[] x)
{
if (Group.Length > 0)
{
t.New(Group, seq);
++seq;
t.ReMark(this, Group);
}
t.Set(di, x);
}
}
}
Testing program
Program.cs
Templet m = new Templet(html);
Filler f= m.Mark("title");
f.Set("Phone book");
Filler fcontacts = m.Mark("contacts", "id,name,sex,phone");
fcontacts.Add(1, "Akhmad", "M", "123456");
fcontacts.Add(2, "Barry", "M", "234567");
fcontacts.Add(1, "Charles", "M", "345678");
Console.Out.WriteLine(m.Get());
It still can't do nested loops yet.
Just use ASP.NET. Whether you use webforms or MVC, it's super easy to have C# in your .cs files, and HTML in your .aspx files.
As with anything in programming, it's 99% up to you to do things right. Flexible UI engines aren't going to enforce that you follow good coding practices.
In principle, almost any template engine you choose can separate HTML from control logic with the proper architecture. Using an MVC (or MVVM) pattern, if you construct your model in such a way that the controller contains the if/then logic instead of the view, you can eliminate it from the view.
That said, the syntax you use is very close to Razor syntax, which is easily available for ASP.NET MVC through NuGet packages.
I totally hear you. I built SharpFusion, which has some other stuff in it, but if you look at the template.cs file you will see the handler that parses an HTML file and simply replaces tokens with values that you've set in C#.
Because no XML parsing is done the way ASP.NET does it, the framework loads much faster than even an MVC site.
Another alternative is ServiceStack.
I am trying to read a *.csv file.
The *.csv file consists of two columns separated by a semicolon (";").
I am able to read the *.csv file using StreamReader and to separate each line using the Split() function. I want to store each column in a separate array and then display it.
Is it possible to do that?
You can do it like this:
using System.Collections.Generic;
using System.IO;
static void Main(string[] args)
{
using (var reader = new StreamReader(@"C:\test.csv"))
{
List<string> listA = new List<string>();
List<string> listB = new List<string>();
while (!reader.EndOfStream)
{
var line = reader.ReadLine();
var values = line.Split(';');
listA.Add(values[0]);
listB.Add(values[1]);
}
}
}
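If you then need actual arrays, as asked in the question, you can convert the lists once reading is finished. A small follow-up sketch, assuming listA and listB are declared where they remain in scope (e.g. before the using block):
// Arrays are easy to create once the row count is known, i.e. after reading.
string[] columnA = listA.ToArray();
string[] columnB = listB.ToArray();

// Display both columns.
Console.WriteLine("Column A: " + string.Join(", ", columnA));
Console.WriteLine("Column B: " + string.Join(", ", columnB));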
My favourite CSV parser is one built into the .NET library, a hidden treasure inside the Microsoft.VisualBasic namespace.
Below is a sample:
using Microsoft.VisualBasic.FileIO;
var path = @"C:\Person.csv"; // Habeeb, "Dubai Media City, Dubai"
using (TextFieldParser csvParser = new TextFieldParser(path))
{
csvParser.CommentTokens = new string[] { "#" };
csvParser.SetDelimiters(new string[] { "," });
csvParser.HasFieldsEnclosedInQuotes = true;
// Skip the row with the column names
csvParser.ReadLine();
while (!csvParser.EndOfData)
{
// Read current line fields, pointer moves to the next line.
string[] fields = csvParser.ReadFields();
string Name = fields[0];
string Address = fields[1];
}
}
Remember to add a reference to Microsoft.VisualBasic.
More details about the parser are given here: http://codeskaters.blogspot.ae/2015/11/c-easiest-csv-parser-built-in-net.html
LINQ way:
var lines = File.ReadAllLines("test.txt").Select(a => a.Split(';'));
var csv = from line in lines
select (from piece in line
select piece);
^^Wrong - Edit by Nick
It appears the original answerer was attempting to populate csv with a 2 dimensional array - an array containing arrays. Each item in the first array contains an array representing that line number with each item in the nested array containing the data for that specific column.
var csv = from line in lines
select (line.Split(',')).ToArray();
Just came across this library: https://github.com/JoshClose/CsvHelper
Very intuitive and easy to use. It has a nuget package too, which made it quick to implement: https://www.nuget.org/packages/CsvHelper/27.2.1. It also appears to be actively maintained, which I like.
Configuring it to use a semi-colon is easy: https://github.com/JoshClose/CsvHelper/wiki/Custom-Configurations
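For reference, a minimal sketch of reading the semicolon-separated file from the question with CsvHelper (based on the 27.x API linked above; the path and the output format are illustrative):
using System;
using System.Globalization;
using System.IO;
using CsvHelper;
using CsvHelper.Configuration;

public static class CsvHelperExample
{
    public static void ReadWithSemicolon(string path)
    {
        var config = new CsvConfiguration(CultureInfo.InvariantCulture)
        {
            Delimiter = ";",         // semicolon instead of the default comma
            HasHeaderRecord = false  // the sample file has no header row
        };
        using (var reader = new StreamReader(path))
        using (var csv = new CsvReader(reader, config))
        {
            while (csv.Read())
            {
                // fields by position: column 0 and column 1
                string first = csv.GetField(0);
                string second = csv.GetField(1);
                Console.WriteLine($"{first} | {second}");
            }
        }
    }
}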
You can't create an array immediately, because you would need to know the number of rows from the beginning (and that would require reading the csv file twice).
You can store the values in two List<T> and then use them, or convert them into arrays using List<T>.ToArray().
Very simple example:
var column1 = new List<string>();
var column2 = new List<string>();
using (var rd = new StreamReader("filename.csv"))
{
while (!rd.EndOfStream)
{
var splits = rd.ReadLine().Split(';');
column1.Add(splits[0]);
column2.Add(splits[1]);
}
}
// print column1
Console.WriteLine("Column 1:");
foreach (var element in column1)
Console.WriteLine(element);
// print column2
Console.WriteLine("Column 2:");
foreach (var element in column2)
Console.WriteLine(element);
N.B.
Please note that this is just a very simple example. Using string.Split does not account for cases where some records contain the separator ; inside them.
For a safer approach, consider using a csv-specific library like CsvHelper from nuget.
I usually use this parser from CodeProject, since it handles a bunch of character escapes and similar things for me.
Here is my variation of the top voted answer:
var contents = File.ReadAllText(filename).Split('\n');
var csv = from line in contents
select line.Split(',').ToArray();
The csv variable can then be used as in the following example:
int headerRows = 5;
foreach (var row in csv.Skip(headerRows)
.TakeWhile(r => r.Length > 1 && r.Last().Trim().Length > 0))
{
String zerothColumnValue = row[0]; // leftmost column
var firstColumnValue = row[1];
}
If you need to skip (head-)lines and/or columns, you can use this to create a 2-dimensional array:
var lines = File.ReadAllLines(path).Select(a => a.Split(';'));
var csv = (from line in lines
select (from col in line
select col).Skip(1).ToArray() // skip the first column
).Skip(2).ToArray(); // skip 2 headlines
This is quite useful if you need to shape the data before processing it further (assuming the first 2 lines are the headlines and the first column is a row title, which you don't need in the array because you only care about the data).
N.B. You can easily get the headlines and the 1st column by using the following code:
var coltitle = (from line in lines
select line.Skip(1).ToArray() // skip 1st column
).Skip(1).Take(1).FirstOrDefault().ToArray(); // take the 2nd row
var rowtitle = (from line in lines select line[0] // take 1st column
).Skip(2).ToArray(); // skip 2 headlines
This code example assumes a *.csv file where the first two rows are headlines and the first column contains row titles.
Note: If you need to skip empty rows - which can by handy sometimes, you can do so by inserting
where line.Any(a=>!string.IsNullOrWhiteSpace(a))
between the from and the select statement in the LINQ code examples above.
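Put together, the query with the empty-row filter inserted looks like this:
// Skip 2 headlines, skip the first column, and ignore completely empty rows.
var csv = (from line in lines
           where line.Any(a => !string.IsNullOrWhiteSpace(a)) // ignore empty rows
           select (from col in line
                   select col).Skip(1).ToArray() // skip the first column
          ).Skip(2).ToArray(); // skip 2 headlines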
You can use the Microsoft.VisualBasic.FileIO.TextFieldParser class in C# for better performance.
Here is a code example:
static void Main()
{
string csv_file_path = @"C:\Users\Administrator\Desktop\test.csv";
DataTable csvData = GetDataTabletFromCSVFile(csv_file_path);
Console.WriteLine("Rows count:" + csvData.Rows.Count);
Console.ReadLine();
}
private static DataTable GetDataTabletFromCSVFile(string csv_file_path)
{
DataTable csvData = new DataTable();
try
{
using(TextFieldParser csvReader = new TextFieldParser(csv_file_path))
{
csvReader.SetDelimiters(new string[] { "," });
csvReader.HasFieldsEnclosedInQuotes = true;
string[] colFields = csvReader.ReadFields();
foreach (string column in colFields)
{
DataColumn datecolumn = new DataColumn(column);
datecolumn.AllowDBNull = true;
csvData.Columns.Add(datecolumn);
}
while (!csvReader.EndOfData)
{
string[] fieldData = csvReader.ReadFields();
//Making empty value as null
for (int i = 0; i < fieldData.Length; i++)
{
if (fieldData[i] == "")
{
fieldData[i] = null;
}
}
csvData.Rows.Add(fieldData);
}
}
}
catch (Exception ex)
{
}
return csvData;
}
Hi all, I created a static class for doing this. It includes:
+ column count check
+ quote sign removal
public static class CSV
{
public static List<string[]> Import(string file, char csvDelimiter, bool ignoreHeadline, bool removeQuoteSign)
{
return ReadCSVFile(file, csvDelimiter, ignoreHeadline, removeQuoteSign);
}
private static List<string[]> ReadCSVFile(string filename, char csvDelimiter, bool ignoreHeadline, bool removeQuoteSign)
{
string[] result = new string[0];
List<string[]> lst = new List<string[]>();
string line;
int currentLineNumner = 0;
int columnCount = 0;
// Read the file and display it line by line.
using (System.IO.StreamReader file = new System.IO.StreamReader(filename))
{
while ((line = file.ReadLine()) != null)
{
currentLineNumner++;
string[] strAr = line.Split(csvDelimiter);
// save column count of first line
if (currentLineNumner == 1)
{
columnCount = strAr.Count();
}
else
{
//Check column count of every other lines
if (strAr.Count() != columnCount)
{
throw new Exception(string.Format("CSV Import Exception: Wrong column count in line {0}", currentLineNumner));
}
}
if (removeQuoteSign) strAr = RemoveQouteSign(strAr);
if (ignoreHeadline)
{
if(currentLineNumner !=1) lst.Add(strAr);
}
else
{
lst.Add(strAr);
}
}
}
return lst;
}
private static string[] RemoveQouteSign(string[] ar)
{
for (int i = 0;i< ar.Count() ; i++)
{
if (ar[i].StartsWith("\"") || ar[i].StartsWith("'")) ar[i] = ar[i].Substring(1);
if (ar[i].EndsWith("\"") || ar[i].EndsWith("'")) ar[i] = ar[i].Substring(0,ar[i].Length-1);
}
return ar;
}
}
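A quick usage sketch (the file path and delimiter are illustrative):
// Import a semicolon-separated file, skip the header line, and strip quote signs.
List<string[]> rows = CSV.Import(@"C:\data\file.csv", ';', true, true);
foreach (string[] row in rows)
    Console.WriteLine(string.Join(" | ", row));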
I have spent a few hours searching for the right library, but finally I wrote my own code :)
You can read the file (or database) with whatever tools you want and then apply the following routine to each line:
private static string[] SmartSplit(string line, char separator = ',')
{
var inQuotes = false;
var token = "";
var lines = new List<string>();
for (var i = 0; i < line.Length; i++) {
var ch = line[i];
if (inQuotes) // process string in quotes,
{
if (ch == '"') {
if (i<line.Length-1 && line[i + 1] == '"') {
i++;
token += '"';
}
else inQuotes = false;
} else token += ch;
} else {
if (ch == '"') inQuotes = true;
else if (ch == separator) {
lines.Add(token);
token = "";
} else token += ch;
}
}
lines.Add(token);
return lines.ToArray();
}
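For example (a small usage sketch; the input line is made up):
// Splits on ';' while keeping quoted fields intact and unescaping "" to ".
string[] parts = SmartSplit("\"John; Jr.\";\"555-1234\"", ';');
// parts[0] == "John; Jr.", parts[1] == "555-1234"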
var firstColumn = new List<string>();
var lastColumn = new List<string>();
// your code for reading CSV file
foreach(var line in file)
{
var array = line.Split(';');
firstColumn.Add(array[0]);
lastColumn.Add(array[1]);
}
var firstArray = firstColumn.ToArray();
var lastArray = lastColumn.ToArray();
Here's a special case where one of the data fields has a semicolon (";") as part of its data. In that case most of the answers above will fail.
A solution that works in that case:
string[] csvRows = System.IO.File.ReadAllLines(FullyQualifiedFileName);
string[] fields = null;
List<string> lstFields;
string field;
bool quoteStarted = false;
foreach (string csvRow in csvRows)
{
lstFields = new List<string>();
field = "";
for (int i = 0; i < csvRow.Length; i++)
{
string tmp = csvRow.ElementAt(i).ToString();
if(String.Compare(tmp,"\"")==0)
{
quoteStarted = !quoteStarted;
}
if (String.Compare(tmp, ";") == 0 && !quoteStarted)
{
lstFields.Add(field);
field = "";
}
else if (String.Compare(tmp, "\"") != 0)
{
field += tmp;
}
}
if(!string.IsNullOrEmpty(field))
{
lstFields.Add(field);
field = "";
}
// This will hold values for each column for current row under processing
fields = lstFields.ToArray();
}
The open-source Angara.Table library allows you to load CSV into typed columns, so you can get arrays from the columns. Each column can be indexed by name or by index. See http://predictionmachines.github.io/Angara.Table/saveload.html.
The library follows RFC 4180 for CSV; it supports type inference and multiline strings.
Example:
using System.Collections.Immutable;
using Angara.Data;
using Angara.Data.DelimitedFile;
...
ReadSettings settings = new ReadSettings(Delimiter.Semicolon, false, true, null, null);
Table table = Table.Load("data.csv", settings);
ImmutableArray<double> a = table["double-column-name"].Rows.AsReal;
for(int i = 0; i < a.Length; i++)
{
Console.WriteLine("{0}: {1}", i, a[i]);
}
You can see a column type using the type Column, e.g.
Column c = table["double-column-name"];
Console.WriteLine("Column {0} is double: {1}", c.Name, c.Rows.IsRealColumn);
Since the library is focused on F#, you might need to add a reference to the FSharp.Core 4.4 assembly; click 'Add Reference' on the project and choose FSharp.Core 4.4 under "Assemblies" -> "Extensions".
I have been using csvreader.com (a paid component) for years, and I have never had a problem. It is solid, small and fast, but you do have to pay for it. You can set the delimiter to whatever you like.
using (CsvReader reader = new CsvReader(s)) {
reader.Settings.Delimiter = ';';
reader.ReadHeaders(); // if headers on a line by themselves. Makes reader.Headers[] available
while (reader.ReadRecord())
... use reader.Values[col_i] ...
}
I am just a student working on my master's thesis, but this is the way I solved it and it worked well for me. First you select your file from a directory (only in csv format) and then you put the data into lists.
List<float> t = new List<float>();
List<float> SensorI = new List<float>();
List<float> SensorII = new List<float>();
List<float> SensorIII = new List<float>();
using (OpenFileDialog dialog = new OpenFileDialog())
{
try
{
dialog.Filter = "csv files (*.csv)|*.csv";
dialog.Multiselect = false;
dialog.InitialDirectory = ".";
dialog.Title = "Select file (only in csv format)";
if (dialog.ShowDialog() == DialogResult.OK)
{
var fs = File.ReadAllLines(dialog.FileName).Select(a => a.Split(';'));
int counter = 0;
foreach (var line in fs)
{
counter++;
if (counter > 2) // Skip first two header lines
{
this.t.Add(float.Parse(line[0]));
this.SensorI.Add(float.Parse(line[1]));
this.SensorII.Add(float.Parse(line[2]));
this.SensorIII.Add(float.Parse(line[3]));
}
}
}
}
catch (Exception exc)
{
MessageBox.Show(
"Error while opening the file.\n" + exc.Message,
this.Text,
MessageBoxButtons.OK,
MessageBoxIcon.Error
);
}
}
These are my two simple static methods to convert text from a csv file to List<List<string>> and vice versa. Each method uses a row converter.
This code should take into account all the possibilities of a csv file. You can define your own csv separator, and the methods try to handle escaped double quote characters, treat everything inside quotes as one cell even when the csv separator or line breaks appear inside the quoted string (i.e. multiple lines in one cell), and can ignore empty rows.
The last method is only for testing, so you can ignore it, or use it to test your own or other solutions :). For testing I used this hard csv with 2 rows spread over 4 lines:
0,a,""bc,d
"e, f",g,"this,is, o
ne ""lo
ng, cell""",h
This is the final code. For simplicity, I removed all try/catch blocks.
using System;
using System.Collections.Generic;
using System.Linq;
public static class Csv {
public static string FromListToString(List<List<string>> csv, string separator = ",", char quotation = '"', bool returnFirstRow = true)
{
string content = "";
for (int row = 0; row < csv.Count; row++) {
content += (row > 0 ? Environment.NewLine : "") + RowFromListToString(csv[row], separator, quotation);
}
return content;
}
public static List<List<string>> FromStringToList(string content, string separator = ",", char quotation = '"', bool returnFirstRow = true, bool ignoreEmptyRows = true)
{
List<List<string>> csv = new List<List<string>>();
string[] rows = content.Split(new string[] { Environment.NewLine }, StringSplitOptions.None);
if (rows.Length <= (returnFirstRow ? 0 : 1)) { return csv; }
List<string> csvRow = null;
for (int rowIndex = 0; rowIndex < rows.Length; rowIndex++) {
(List<string> row, bool rowClosed) = RowFromStringToList(rows[rowIndex], csvRow, separator, quotation);
if (rowClosed) { if (!ignoreEmptyRows || row.Any(rowItem => rowItem.Length > 0)) { csv.Add(row); csvRow = null; } } // row ok, add to list
else { csvRow = row; } // not fully created, continue
}
if (!returnFirstRow) { csv.RemoveAt(0); } // remove header
return csv;
}
public static string RowFromListToString(List<string> csvData, string separator = ",", char quotation = '"')
{
csvData = csvData.Select(element =>
{
if (element.Contains(quotation)) {
element = element.Replace(quotation.ToString(), quotation.ToString() + quotation.ToString());
}
if (element.Contains(separator) || element.Contains(Environment.NewLine)) {
element = "\"" + element + "\"";
}
return element;
}).ToList();
return string.Join(separator, csvData);
}
public static (List<string>, bool) RowFromStringToList(string csvRow, List<string> continueWithRow = null, string separator = ",", char quotation = '"')
{
bool rowClosed = true;
if (continueWithRow != null && continueWithRow.Count > 0) {
// in previous result quotation are fixed so i need convert back to double quotation
string previousCell = quotation.ToString() + continueWithRow.Last().Replace(quotation.ToString(), quotation.ToString() + quotation.ToString()) + Environment.NewLine;
continueWithRow.RemoveAt(continueWithRow.Count - 1);
csvRow = previousCell + csvRow;
}
char tempQuote = (char)162;
while (csvRow.Contains(tempQuote)) { tempQuote = (char)(tempQuote + 1); }
char tempSeparator = (char)(tempQuote + 1);
while (csvRow.Contains(tempSeparator)) { tempSeparator = (char)(tempSeparator + 1); }
csvRow = csvRow.Replace(quotation.ToString() + quotation.ToString(), tempQuote.ToString());
if(csvRow.Split(new char[] { quotation }, StringSplitOptions.None).Length % 2 == 0) { rowClosed = !rowClosed; }
string[] csvSplit = csvRow.Split(new string[] { separator }, StringSplitOptions.None);
List<string> csvList = csvSplit
.ToList()
.Aggregate("",
(string row, string item) => {
if (row.Count((ch) => ch == quotation) % 2 == 0) { return row + (row.Length > 0 ? tempSeparator.ToString() : "") + item; }
else { return row + separator + item; }
},
(string row) => row.Split(tempSeparator).Select((string item) => item.Trim(quotation).Replace(tempQuote, quotation))
).ToList();
if (continueWithRow != null && continueWithRow.Count > 0) {
return (continueWithRow.Concat(csvList).ToList(), rowClosed);
}
return (csvList, rowClosed);
}
public static bool Test()
{
string csvText = "0,a,\"\"bc,d" + Environment.NewLine + "\"e, f\",g,\"this,is, o" + Environment.NewLine + "ne \"\"lo" + Environment.NewLine + "ng, cell\"\"\",h";
List<List<string>> csvList = new List<List<string>>() { new List<string>() { "0", "a", "\"bc", "d" }, new List<string>() { "e, f", "g", "this,is, o" + Environment.NewLine + "ne \"lo" + Environment.NewLine + "ng, cell\"", "h" } };
List<List<string>> csvTextAsList = Csv.FromStringToList(csvText);
bool ok = Enumerable.SequenceEqual(csvList[0], csvTextAsList[0]) && Enumerable.SequenceEqual(csvList[1], csvTextAsList[1]);
string csvListAsText = Csv.FromListToString(csvList);
return ok && csvListAsText == csvText;
}
}
Usage examples:
// get List<List<string>> representation of csv
var csvFromText = Csv.FromStringToList(csvAsText);
// read csv file with custom separator and quote
// return no header and ignore empty rows
var csvFile = File.ReadAllText(csvFileFullPath);
var csvFromFile = Csv.FromStringToList(csvFile, ";", '"', false, false);
// get text representation of csvData from List<List<string>>
var csvAsText = Csv.FromListToString(csvData);
Notes:
The line char tempQuote = (char)162; starts from the first "rare" character in the ASCII table. The code then searches for this character, or the next following ASCII characters, that do NOT occur in the text, and uses them as temporary quote and separator characters.
Still wrong. You need to compensate for "" in quotes.
Here is my solution for Microsoft-style csv.
/// <summary>
/// Microsoft style csv file. " is the quote character, "" is an escaped quote.
/// </summary>
/// <param name="fileName"></param>
/// <param name="sepChar"></param>
/// <param name="quoteChar"></param>
/// <param name="escChar"></param>
/// <returns></returns>
public static List<string[]> ReadCSVFileMSStyle(string fileName, char sepChar = ',', char quoteChar = '"')
{
List<string[]> ret = new List<string[]>();
string[] csvRows = System.IO.File.ReadAllLines(fileName);
foreach (string csvRow in csvRows)
{
bool inQuotes = false;
List<string> fields = new List<string>();
string field = "";
for (int i = 0; i < csvRow.Length; i++)
{
if (inQuotes)
{
// Is it a "" inside quoted area? (escaped litteral quote)
if(i < csvRow.Length - 1 && csvRow[i] == quoteChar && csvRow[i+1] == quoteChar)
{
i++;
field += quoteChar;
}
else if(csvRow[i] == quoteChar)
{
inQuotes = false;
}
else
{
field += csvRow[i];
}
}
else // Not in quoted region
{
if (csvRow[i] == quoteChar)
{
inQuotes = true;
}
else if (csvRow[i] == sepChar)
{
fields.Add(field);
field = "";
}
else
{
field += csvRow[i];
}
}
}
if (!string.IsNullOrEmpty(field))
{
fields.Add(field);
field = "";
}
ret.Add(fields.ToArray());
}
return ret;
}
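A short usage sketch (the file name is illustrative; the question's file uses ';' as the separator):
// Parse a semicolon-separated, quote-aware csv file into a list of rows.
List<string[]> rows = ReadCSVFileMSStyle(@"C:\test.csv", sepChar: ';');
foreach (string[] row in rows)
    Console.WriteLine(string.Join(" | ", row));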
I have a library that does exactly what you need.
Some time ago I wrote a simple and fast enough library for working with CSV files. You can find it at the following link: https://github.com/ukushu/DataExporter/blob/master/Csv.cs
It works with CSV like a 2-dimensional array, exactly as you need.
For example, if you need all of the values of the 3rd row, all you need to write is:
Csv csv = new Csv();
csv.FileOpen("c:\\file1.csv");
var allValuesOf3rdRow = csv.Rows[2];
or to read the 2nd cell of the 3rd row:
var value = csv.Rows[2][1];
Headers are required in the csv for the json conversion in the code below.
You can use the code as-is without making any changes.
It works with either a two-row header or a one-row header.
The code reads the uploaded IFormFile and converts it to a memory stream.
If you want to use a file path instead of an uploaded file, you can replace
new StreamReader(ms, System.Text.Encoding.UTF8, true)) with new StreamReader("../../examplefilepath");
using (var ms = new MemoryStream())
{
administrativesViewModel.csvFile.CopyTo(ms);
ms.Position = 0;
using (StreamReader csvReader = new StreamReader(ms, System.Text.Encoding.UTF8, true))
{
List<string> lines = new List<string>();
while (!csvReader.EndOfStream)
{
var line = csvReader.ReadLine();
var values = line.Split(';');
if (values[0] != "" && values[0] != null)
{
lines.Add(values[0]);
}
}
var csv = new List<string[]>();
foreach (string item in lines)
{
csv.Add(item.Split(','));
}
var properties = lines[0].Split(',');
int csvI = 1;
var listObjResult = new List<Dictionary<string, string>>();
if (lines.Count() > 1)
{
var ln = lines[0].Substring(0, lines[0].Count() - 1);
var ln1 = lines[1].Substring(0, lines[1].Count() - 1);
var lnSplit = ln.Split(',');
var ln1Split = ln1.Split(',');
if (lnSplit.Count() != ln1Split.Count())
{
properties = lines[1].Split(',');
csvI = 2;
}
}
for (int i = csvI; i < csv.Count(); i++)
{
var objResult = new Dictionary<string, string>();
if (csvI > 0)
{
var splitProp = lines[0].Split(":");
if (splitProp.Count() > 1)
{
if (splitProp[0] != "" && splitProp[0] != null && splitProp[1] != "" && splitProp[1] != null)
{
objResult.Add(splitProp[0], splitProp[1]);
}
}
}
for (int j = 0; j < properties.Length; j++)
if (!properties[j].Contains(":"))
{
objResult.Add(properties[j], csv[i][j]);
}
listObjResult.Add(objResult);
}
var result = JsonConvert.SerializeObject(listObjResult);
var result2 = JArray.Parse(result);
Console.WriteLine(result2);
}
}
Look at this:
using CsvFramework;
using System.Collections.Generic;
namespace CvsParser
{
public class Customer
{
public int Id { get; set; }
public string Name { get; set; }
public List<Order> Orders { get; set; }
}
public class Order
{
public int Id { get; set; }
public int CustomerId { get; set; }
public int Quantity { get; set; }
public int Amount { get; set; }
public List<OrderItem> OrderItems { get; set; }
}
public class Address
{
public int Id { get; set; }
public int CustomerId { get; set; }
public string Name { get; set; }
}
public class OrderItem
{
public int Id { get; set; }
public int OrderId { get; set; }
public string ProductName { get; set; }
}
class Program
{
static void Main(string[] args)
{
var customerLines = System.IO.File.ReadAllLines(@"Customers.csv");
var orderLines = System.IO.File.ReadAllLines(@"Orders.csv");
var orderItemLines = System.IO.File.ReadAllLines(@"OrderItemLines.csv");
CsvFactory.Register<Customer>(builder =>
{
builder.Add(a => a.Id).Type(typeof(int)).Index(0).IsKey(true);
builder.Add(a => a.Name).Type(typeof(string)).Index(1);
builder.AddNavigation(n => n.Orders).RelationKey<Order, int>(k => k.CustomerId);
}, false, ',', customerLines);
CsvFactory.Register<Order>(builder =>
{
builder.Add(a => a.Id).Type(typeof(int)).Index(0).IsKey(true);
builder.Add(a => a.CustomerId).Type(typeof(int)).Index(1);
builder.Add(a => a.Quantity).Type(typeof(int)).Index(2);
builder.Add(a => a.Amount).Type(typeof(int)).Index(3);
builder.AddNavigation(n => n.OrderItems).RelationKey<OrderItem, int>(k => k.OrderId);
}, true, ',', orderLines);
CsvFactory.Register<OrderItem>(builder =>
{
builder.Add(a => a.Id).Type(typeof(int)).Index(0).IsKey(true);
builder.Add(a => a.OrderId).Type(typeof(int)).Index(1);
builder.Add(a => a.ProductName).Type(typeof(string)).Index(2);
}, false, ',', orderItemLines);
var customers = CsvFactory.Parse<Customer>();
}
}
}