So, I have this block of code in my app that scans a directory for files, builds a list of them, compares that list with a list of files from a database (if the path to that directory exists in the DB), and adds the differences to one of two other lists. Here it is:
if (id > 0)
{
var dbDrawingList = mdl_drawing.GetDrawingsByBaseId(id);
var counter = 0;
if (dbDrawingList.Count() < serverDrawingList.Count())
{
counter = serverDrawingList.Count();
}
else
{
counter = dbDrawingList.Count();
}
for (int i = 0; i < counter; i++)
{
if (i < serverDrawingList.Count())
{
if (dbDrawingList.Select(f => f.partNumber).Contains(serverDrawingList[i].partNumber) == false)
{
onServerAndNotDb.Add(serverDrawingList[i]);
}
}
if (i < dbDrawingList.Count())
{
if (serverDrawingList.Select(f => f.partNumber).Contains(dbDrawingList[i].partNumber) == false)
{
onDbAndNotServer.Add(dbDrawingList[i]);
}
}
}
serverDrawingList = null;
dbDrawingList = null;
}
Does anyone have a better way of doing this? (There can be more than one file with the same name, so the Except method doesn't work.)
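One alternative that copes with duplicates (a rough, untested sketch; it assumes partNumber is a string on both item types): build a HashSet of part numbers per side and filter each list against the other. Unlike Except, every entry is tested individually, so files sharing a name all survive.
// Sketch only: one lookup set per side makes each membership test O(1).
var dbPartNumbers = new HashSet<string>(dbDrawingList.Select(d => d.partNumber));
var serverPartNumbers = new HashSet<string>(serverDrawingList.Select(d => d.partNumber));
// Where keeps duplicates, unlike Except, because each item is checked on its own.
var onServerAndNotDb = serverDrawingList.Where(d => !dbPartNumbers.Contains(d.partNumber)).ToList();
var onDbAndNotServer = dbDrawingList.Where(d => !serverPartNumbers.Contains(d.partNumber)).ToList();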
I have one method that does exactly that; this is the way I implemented it, and it's working so far:
private void deleteRegistersFromFilesThatWasRemoved(string path)
{
// add local files to list
List<string> allFiles = new List<string>();
string[] dirs = Directory.GetDirectories(path, "*",
SearchOption.TopDirectoryOnly);
foreach (var dir in dirs)
{
string[] files = Directory.GetFiles(dir, "*", SearchOption.TopDirectoryOnly);
foreach (var file in files)
{
if(file != path)
{
allFiles.Add(file);
}
}
}
// list for file records in the database
List<string> record = new List<string>();
string queryfiles = "..."; // your query here
SqlCommand cmd = new SqlCommand(queryfiles, connection);
cmd.Connection.Open();
SqlDataReader read = cmd.ExecuteReader(); // one ExecuteReader call runs the query
while (read.Read())
{
// I have path and file name separated that's why here's a string sum
string r = read[2].ToString() + read[1].ToString();
record.Add(r);
}
cmd.Connection.Close();
for (int i = 0; i < record.Count; i++)
{
if (!allFiles.Contains(record[i]))
{
// do something if the record on the database is not in the local
//files list
}
}
}
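For the "do something" branch, one option is a parameterized DELETE (a sketch only; the Files table and its Path/Name columns are hypothetical, and the connection is assumed to be open):
// Hypothetical schema: adjust the table and column names to your database.
string deleteQuery = "DELETE FROM Files WHERE Path + Name = @fullPath";
using (SqlCommand deleteCmd = new SqlCommand(deleteQuery, connection))
{
deleteCmd.Parameters.AddWithValue("@fullPath", record[i]);
deleteCmd.ExecuteNonQuery();
}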
I have to import two CSVs.
CSV 1 [49]: includes about 50 tab-separated columns.
CSV 2 [2]: includes 3 columns, which should replace columns [3], [6] and [11] of my first CSV.
So here's what I do:
1) Import the CSV and split it into an array.
string employeedatabase = "MYPATH";
List<String> status = new List<String>();
StreamReader file2 = new System.IO.StreamReader(filename);
string line = file2.ReadLine();
while ((line = file2.ReadLine()) != null)
{
string[] ud = line.Split('\t');
status.Add(ud[0]);
}
String[] ud_status = status.ToArray();
PROBLEM 1: I have about 50 columns to handle, and ud_status is just the first. Do I need 50 lists and 50 string arrays?
2) Import the second CSV and split it into arrays.
List<String> vorname = new List<String>();
List<String> nachname = new List<String>();
List<String> username = new List<String>();
StreamReader file = new System.IO.StreamReader(employeedatabase);
string line3 = file.ReadLine();
while ((line3 = file.ReadLine()) != null)
{
string[] data = line3.Split(';');
vorname.Add(data[0]);
nachname.Add(data[1]);
username.Add(data[2]);
}
String[] db_vorname = vorname.ToArray();
String[] db_nachname = nachname.ToArray();
String[] db_username = username.ToArray();
PROBLEM 2: After loading these two CSVs I don't know how to combine them and replace the columns as mentioned above.
Something like this?
mynewArray = ud_status + "\t" + ud_xy[..n] + "\t" + changed_colum + ud_xy[..n];
Then save "mynewarray" into a tab-separated CSV with UTF-8 encoding.
To read the file into a meaningful format, you should set up a class that defines the format of your CSV:
public class CsvRow
{
public string vorname { get; set; }
public string nachname { get; set; }
public string username { get; set; }
public CsvRow (string[] data)
{
vorname = data[0];
nachname = data[1];
username = data[2];
}
}
Then populate a list of this:
List<CsvRow> rows = new List<CsvRow>();
using (var file = new System.IO.StreamReader(employeedatabase))
{
string line3 = file.ReadLine(); // skips the header row
while ((line3 = file.ReadLine()) != null)
{
rows.Add(new CsvRow(line3.Split(';')));
}
}
Similarly format your other CSV, including unused properties for the new fields. Once you have loaded both, you can populate the new properties in a loop, matching the records by whatever common field the CSVs hopefully share. Then finally output the resulting data to a new CSV file.
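As a rough sketch of that last step, suppose the 50-column file is loaded into a hypothetical StatusRow class (with a username property for matching, Column3/Column6/Column11 for the fields to overwrite, and a ToFields() helper that returns all 50 values; every one of those names is an assumption):
// Sketch only: StatusRow, statusRows, and ToFields() are hypothetical stand-ins.
foreach (var statusRow in statusRows)
{
// match the records on the shared field
CsvRow match = rows.FirstOrDefault(r => r.username == statusRow.username);
if (match == null) continue;
statusRow.Column3 = match.vorname; // replaces column [3]
statusRow.Column6 = match.nachname; // replaces column [6]
statusRow.Column11 = match.username; // replaces column [11]
}
// write the merged rows tab-separated with UTF-8 encoding
using (var writer = new StreamWriter("output.csv", false, Encoding.UTF8))
{
foreach (var statusRow in statusRows)
writer.WriteLine(string.Join("\t", statusRow.ToFields()));
}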
Your solution is not to use string arrays to do this. That will just drive you crazy. It's better to use the System.Data.DataTable object.
I didn't get a chance to test the LINQ lambda expression at the end of this (or really any of it, I wrote this on a break), but it should get you on the right track.
using (var ds = new System.Data.DataSet("My Data"))
{
ds.Tables.Add("File0");
ds.Tables.Add("File1");
string[] line;
string raw;
using (var reader = new System.IO.StreamReader("FirstFile"))
{
//first we get columns for table 0
foreach (string s in reader.ReadLine().Split('\t'))
ds.Tables["File0"].Columns.Add(s);
//and now the rest of the data: check for end-of-file before splitting.
while ((raw = reader.ReadLine()) != null)
{
line = raw.Split('\t');
var r = ds.Tables["File0"].NewRow();
for (int i = 0; i < line.Length; i++)
{
r[i] = line[i];
}
ds.Tables["File0"].Rows.Add(r);
}
}
//we could probably do these in a loop or a second method,
//but you may want subtle differences, so for now we just do it the same way
//for file1
using (var reader2 = new System.IO.StreamReader("SecondFile"))
{
foreach (string s in reader2.ReadLine().Split('\t'))
ds.Tables["File1"].Columns.Add(s);
//and now the rest of the data, same pattern as above.
while ((raw = reader2.ReadLine()) != null)
{
line = raw.Split('\t');
var r = ds.Tables["File1"].NewRow();
for (int i = 0; i < line.Length; i++)
{
r[i] = line[i];
}
ds.Tables["File1"].Rows.Add(r);
}
}
//you now have these in functioning datatables. Because we named columns,
//you can call them by name specifically, or by index, to replace in the first datatable.
string[] columnsToReplace = new string[] { "firstColumnName", "SecondColumnName", "ThirdColumnName" };
for(int i = 0; i < ds.Tables[0].Rows.Count; i++)
{
//you didn't give a sign of any relation between the two tables
//so this is just by row, and assumes the row count is equivalent.
//This is also not advised.
//if there is a key these sets of data share
//you should join on them instead.
var dr = ds.Tables[0].Rows[i];
dr[3] = ds.Tables[1].Rows[i][columnsToReplace[0]];
dr[6] = ds.Tables[1].Rows[i][columnsToReplace[1]];
dr[11] = ds.Tables[1].Rows[i][columnsToReplace[2]];
}
//ds.Tables[0] now has the output you want.
string output = String.Empty;
foreach (System.Data.DataColumn c in ds.Tables[0].Columns)
output = String.Concat(output, c.ColumnName, "\t");
output = String.Concat(output, Environment.NewLine); // columns ready, now the rows.
foreach (DataRow r in ds.Tables[0].Rows)
output = String.Concat(output, String.Join("\t", r.ItemArray), Environment.NewLine);
if (System.IO.File.Exists("MYPATH")) // note: as written, this only rewrites an existing file; drop the check to create one
using (System.IO.StreamWriter file = new System.IO.StreamWriter("MYPATH")) //or a variable instead of string literal
{
file.Write(output);
}
}
With Cinchoo ETL, an open source file helper library, you can merge the two CSV files as shown below. This assumes the two CSV files contain the same number of lines.
string CSV1 = @"Id Name City
1 Tom New York
2 Mark FairFax";
string CSV2 = @"Id City
1 Las Vegas
2 Dallas";
dynamic rec1 = null;
dynamic rec2 = null;
StringBuilder csv3 = new StringBuilder();
using (var csvOut = new ChoCSVWriter(new StringWriter(csv3))
.WithFirstLineHeader()
.WithDelimiter("\t")
)
{
using (var csv1 = new ChoCSVReader(new StringReader(CSV1))
.WithFirstLineHeader()
.WithDelimiter("\t")
)
{
using (var csv2 = new ChoCSVReader(new StringReader(CSV2))
.WithFirstLineHeader()
.WithDelimiter("\t")
)
{
while ((rec1 = csv1.Read()) != null && (rec2 = csv2.Read()) != null)
{
rec1.City = rec2.City;
csvOut.Write(rec1);
}
}
}
}
Console.WriteLine(csv3.ToString());
Hope it helps.
Disclaimer: I'm the author of this library.
I am currently working on a small project that extracts text from a .txt file based on criteria and then groups it before I export it to a database. In the text file I have:
c:\test\123
Other Lines...
c:\test\124
Problem: "description of error". (this error is for directory 124)
Problem: "description of error". (this error is for directory 124)
c:\test\125
...
I would like to group the 'problems' with their associated directory when importing them into the database. So far I have tried using 'foreach' to return the rows where the line contains/begins with a directory or problem. Although this passes the values in order, it does not let users see which directory a problem belongs to. Ideally I am after:
Directory(column1) Problem(column2)
c:\test\123 || Null
c:\test\124 || Problem: "description of Error".
c:\test\124 || Problem: "description of Error".
c:\test\125 || Null
Any help that you can give would be greatly appreciated. I have been racking my brains on this for the last week!
(current code)
var lines = File.ReadAllLines(filename);
foreach (var line in lines) // read once, then iterate the array we already have
{
String stringTest = line;
if (stringTest.Contains(directory))
{
String test = stringTest;
var csb = new SqlConnectionStringBuilder();
csb.DataSource = host;
csb.InitialCatalog = catalog;
csb.UserID = user;
csb.Password = pass;
using (var sc = new SqlConnection(csb.ConnectionString))
using (var cmd = sc.CreateCommand())
{
sc.Open();
cmd.CommandText = "DELETE FROM table";
cmd.CommandText = "INSERT INTO table (ID, Directory) values (NEWID(), #val)";
cmd.Parameters.AddWithValue("#VAL", test);
cmd.ExecuteNonQuery();
sc.Close();
}
}
if (stringTest.Contains(problem))
{
// same again for the problem lines...
Here is one solution:
Assuming that you have the following class to hold a result item:
public class ResultItem
{
public string Directory { get; set; }
public string Problem { get; set; }
}
You can do the following:
var lines = File.ReadAllLines(filename);
string current_directory = null;
List<ResultItem> results = new List<ResultItem>();
//maintain the number of results added for the current directory
int problems_added_for_current_directory = 0;
foreach (var line in lines)
{
if (line.StartsWith("c:\\test"))
{
//If we are changing to a new directory
//And we didn't add any items for current directory
//Add a null result item
if (current_directory != null && problems_added_for_current_directory == 0)
{
results.Add(new ResultItem
{
Directory = current_directory,
Problem = null
});
}
current_directory = line;
problems_added_for_current_directory = 0;
}
else if (line.StartsWith("Problem"))
{
results.Add(new ResultItem
{
Directory = current_directory,
Problem = line
});
problems_added_for_current_directory++;
}
}
//If we are done looping
//And we didn't add any items for current (last) directory
//Add a null result item
if (current_directory != null && problems_added_for_current_directory == 0)
{
results.Add(new ResultItem
{
Directory = current_directory,
Problem = null
});
}
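From there, loading the list into the two-column table from the question is one parameterized INSERT per item. A sketch (the Results table and its columns are placeholders, and csb is the connection string builder from the question):
// Sketch: placeholder table/column names; one open connection is reused for all rows.
using (var sc = new SqlConnection(csb.ConnectionString))
using (var cmd = sc.CreateCommand())
{
sc.Open();
cmd.CommandText = "INSERT INTO Results (ID, Directory, Problem) VALUES (NEWID(), @dir, @prob)";
cmd.Parameters.Add("@dir", SqlDbType.NVarChar, 260);
cmd.Parameters.Add("@prob", SqlDbType.NVarChar, 1000);
foreach (var item in results)
{
cmd.Parameters["@dir"].Value = item.Directory;
cmd.Parameters["@prob"].Value = (object)item.Problem ?? DBNull.Value;
cmd.ExecuteNonQuery(); // a null Problem is stored as NULL, matching the desired output
}
}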
I am trying to bulk insert a CSV file into a SQL Server database.
The process is .CSV file to DataTable to SqlBulkCopy to SQL Server.
When I run this I get this error:
The given ColumnMapping does not match up with any column in the source or destination
When I import the CSV directly into SQL Server via Management Studio it works! So I think the problem is my DataTable conversion?
Here is my code to go from .CSV to DataTable:
public DataTable CsvFileToDataTable(string filePath)
{
var csvData = new DataTable("Table1");
using (var csvReader = new TextFieldParser(filePath))
{
csvReader.SetDelimiters(new string[] { "," });
csvReader.HasFieldsEnclosedInQuotes = true;
var readFields = csvReader.ReadFields();
foreach (var dataColumn in readFields.Select(column => new DataColumn(column) {AllowDBNull = true }))
{
csvData.Columns.Add(dataColumn);
}
while (!csvReader.EndOfData)
{
var data = csvReader.ReadFields();
for (var i = 0; i < data.Length; i++)
{
if (data[i] == "")
{
data[i] = null;
}
}
csvData.Rows.Add(data);
}
}
return csvData;
}
And here is the code for the bulk copy insert:
public void InsertData(DataTable table)
{
using (var transactionScope = new TransactionScope())
{
using (var sqlConnection = new SqlConnection(this.ConnectionString))
{
sqlConnection.Open();
using (var sqlBulkCopy = new SqlBulkCopy(sqlConnection))
{
sqlBulkCopy.DestinationTableName = table.TableName;
foreach (var column in table.Columns)
{
sqlBulkCopy.ColumnMappings.Add(column.ToString(), column.ToString());
}
sqlBulkCopy.WriteToServer(table);
}
transactionScope.Complete();
}
}
}
Does anyone have any suggestions?
Thanks
It turns out it worked once I tweaked each method. In the first method:
foreach (var dataColumn in readFields.Select(column => new DataColumn(column, typeof(string)) { AllowDBNull = true, DefaultValue = string.Empty }))
{
csvData.Columns.Add(dataColumn);
}
and in the second method I changed it to use an index rather than the column name:
for (var count = 0; count < table.Columns.Count; count++)
{
sqlBulkCopy.ColumnMappings.Add(count, count);
}
I was able to use these code samples to create a working solution that reads a CSV, checks whether there is data, and if there is, clears out the existing data and imports all the CSV data into the table:
private static void ProcessFile(string FilePath, string TableName)
{
var dt = GetDataTable(FilePath, TableName);
if (dt == null)
{
return;
}
if (dt.Rows.Count == 0)
{
AuditLog.AddInfo("No rows imported after reading file " + FilePath);
return;
}
ClearData(TableName);
InsertData(dt);
}
private static DataTable GetDataTable(string FilePath, string TableName)
{
var dt = new DataTable(TableName);
using (var csvReader = new TextFieldParser(FilePath))
{
csvReader.SetDelimiters(new string[] { "," });
csvReader.HasFieldsEnclosedInQuotes = true;
var readFields = csvReader.ReadFields();
if (readFields == null)
{
AuditLog.AddInfo("Could not read header fields for file " + FilePath);
return null;
}
foreach (var dataColumn in readFields.Select(column => new DataColumn(column, typeof(string)) { AllowDBNull = true, DefaultValue = string.Empty }))
{
dt.Columns.Add(dataColumn);
}
while (!csvReader.EndOfData)
{
var data = csvReader.ReadFields();
if (data == null)
{
AuditLog.AddInfo(string.Format("Could not read fields on line {0} for file {1}", csvReader.LineNumber, FilePath));
continue;
}
var dr = dt.NewRow();
for (var i = 0; i < data.Length; i++)
{
if (!string.IsNullOrEmpty(data[i]))
{
dr[i] = data[i];
}
}
dt.Rows.Add(dr);
}
}
return dt;
}
private static void ClearData(string TableName)
{
SqlHelper.ExecuteNonQuery(ConfigurationUtil.ConnectionString, CommandType.Text, "TRUNCATE TABLE " + TableName);
}
private static void InsertData(DataTable table)
{
using (var sqlBulkCopy = new SqlBulkCopy(ConfigurationUtil.ConnectionString))
{
sqlBulkCopy.DestinationTableName = table.TableName;
for (var count = 0; count < table.Columns.Count; count++)
{
sqlBulkCopy.ColumnMappings.Add(count, count);
}
sqlBulkCopy.WriteToServer(table);
}
}
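Usage is then a single call per file (the path and table name here are only examples):
// example invocation with placeholder values
ProcessFile(@"C:\data\import.csv", "MyStagingTable");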
In the button click event I did:
if (File.Exists(@"d:\Keywords.txt"))
{
entries = File.ReadAllLines(@"d:\Keywords.txt");
foreach (string entry in entries)
{
string[] values = entry.Split(',');
if (LocalyKeyWords.Count == 0)
{
LocalyKeyWords[values[0]] = new List<string>();
}
else
{
LocalyKeyWords[values[0]].Clear();
}
for (int i = 1; i < values.Length; i++)
LocalyKeyWords[values[0]].Add(values[i]);
}
}
The part i added/changed is:
if (LocalyKeyWords.Count == 0)
{
LocalyKeyWords[values[0]] = new List<string>();
}
else
{
LocalyKeyWords[values[0]].Clear();
}
When the text file doesn't exist, the first run is fine, but when the file exists and already has URLs and keys inside, I get the same error on: LocalyKeyWords[values[0]].Clear();
The error is: "The given key was not present in the dictionary."
I can see that values contains two indexes, index [0] the URL and index [1] the key, and that LocalyKeyWords contains one entry, which is the values.
So how do I solve this problem?
Also, how do I load the text file in the constructor when the program starts, even if I'm not clicking the button?
How do I make it load the text file once when the program starts, in the constructor, and again when clicking the button?
Thanks.
You have to read the file first and store its values in your dictionary if you do not want to lose all changes. And if you want to overwrite existing keys for URLs, then you should clear your list every time. For example:
private void button6_Click(object sender, EventArgs e)
{
string[] entries = File.ReadAllLines(@"D:\Keywords.txt");
foreach (string entry in entries)
{
string[] values = entry.Split(',');
if (!LocalyKeyWords.ContainsKey(values[0]))
LocalyKeyWords[values[0]] = new List<string>();
else
LocalyKeyWords[values[0]].Clear();
for (int i = 1; i < values.Length; i++)
LocalyKeyWords[values[0]].Add(values[i]);
}
using (var w = new StreamWriter(@"D:\Keywords.txt"))
{
crawlLocaly1 = new CrawlLocaly();
crawlLocaly1.StartPosition = FormStartPosition.CenterParent;
DialogResult dr = crawlLocaly1.ShowDialog(this);
if (dr == DialogResult.OK)
{
if (LocalyKeyWords.ContainsKey(mainUrl))
{
LocalyKeyWords[mainUrl].Clear(); //probably you could skip this part and create new List everytime
LocalyKeyWords[mainUrl].Add(crawlLocaly1.getText());
}
else
{
LocalyKeyWords[mainUrl] = new List<string>();
LocalyKeyWords[mainUrl].Add(crawlLocaly1.getText());
}
foreach (KeyValuePair<string, List<string>> kvp in LocalyKeyWords)
{
w.WriteLine(kvp.Key + "," + string.Join(",", kvp.Value));
}
}
}
}
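As for loading the file from the constructor as well as from the button: pull the reading code into one method and call it from both places. A sketch (the Form1 class name is just an example):
public Form1()
{
InitializeComponent();
LoadKeywords(@"D:\Keywords.txt"); // load once at startup
}
private void LoadKeywords(string path)
{
if (!File.Exists(path))
return;
foreach (string entry in File.ReadAllLines(path))
{
string[] values = entry.Split(',');
// create or reset the list per key, so Clear() can never hit a missing key
LocalyKeyWords[values[0]] = new List<string>();
for (int i = 1; i < values.Length; i++)
LocalyKeyWords[values[0]].Add(values[i]);
}
}
The button click handler can then call LoadKeywords again before writing, instead of duplicating the parsing loop.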
I'm trying to read a file line by line, which works perfectly, but I want to separate the results I get into subitems in the ListView.
I am also searching for all .jar files in the folder so I can use those as the name (first column). The second column needs to have the "version", the third column the "author" and the fourth column the "description".
Here's one of the text files I receive from within the jar files:
name: AFK
main: com.github.alesvojta.AFK.AFK
version: 2.0.5
author: Ales Vojta / schneckk
description: Provides AFK messages
website: http://dev.bukkit.org/server-mods/afk/
commands:
afk:
description: Provides AFK message when player types /afk.
usage: /<command>
This is the code I have right now:
private List<string> GetInstalledPlugins()
{
List<string> list = new List<string>();
lvInstalledPlugins.Items.Clear();
string pluginPath = Environment.CurrentDirectory + "\\plugins";
// both branches did the same scan, so just ensure the folder exists first
if (!Directory.Exists(pluginPath))
{
Directory.CreateDirectory(pluginPath);
}
DirectoryInfo di = new DirectoryInfo(pluginPath);
FileInfo[] fileInfo = di.GetFiles("*.jar");
foreach (var info in fileInfo)
{
//lvInstalledPlugins.Items.Add(info.Name);
list.Add(info.Name);
}
return list;
}
private void test(IEnumerable<string> list)
{
List<ListViewItem> PluginList = new List<ListViewItem>();
var items = new string[4];
try
{
foreach (var ListItem in list)
{
Console.WriteLine(ListItem);
var name = Environment.CurrentDirectory + "\\plugins\\" + ListItem;
var zip = new ZipInputStream(File.OpenRead(name));
var filestream = new FileStream(name, FileMode.Open, FileAccess.Read);
var zipfile = new ZipFile(filestream);
ZipEntry item;
while ((item = zip.GetNextEntry()) != null)
{
if (item.Name == "plugin.yml")
{
using (var s = new StreamReader(zipfile.GetInputStream(item)))
{
string line;
while ((line = s.ReadLine()) != null)
{
if (line.Contains("name"))
{
items[0] = line;
}
if (line.Contains("version"))
{
items[1] = line;
}
if (line.Contains("author"))
{
items[2] = line;
}
if (line.Contains("description"))
{
items[3] = line;
}
try
{
var lvitem = new ListViewItem(items);
lvitem.Name = items[0];
lvitem.Text = items[0];
lvitem.SubItems.Add(items[1]);
lvitem.SubItems.Add(items[2]);
lvitem.SubItems.Add(items[3]);
PluginList.Add(lvitem);
}
catch (Exception)
{
}
}
lvInstalledPlugins.Items.AddRange(PluginList.ToArray());
}
}
}
}
}
catch (Exception ex)
{
Console.WriteLine(ex); // at least surface the error instead of swallowing it
}
}
This doesn't seem to work :/ Any ideas? I've been working on this all day and can't seem to get it to work :(.
Not exactly sure of your question, but going by the title, the answer to the question below may provide some assistance.
C# listView, how do I add items to columns 2, 3 and 4 etc?
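For what it's worth, two likely fixes in the code above: reset the four strings for every plugin, and build the ListViewItem only after the whole plugin.yml has been read, not once per line. A sketch of that inner part, reusing the StreamReader s and the PluginList from the question:
// Sketch: replaces the inner read loop; items is reset for every plugin.
var items = new string[4];
string line;
while ((line = s.ReadLine()) != null)
{
// StartsWith skips the indented description under "commands:", which Contains would match
if (line.StartsWith("name:")) items[0] = line;
if (line.StartsWith("version:")) items[1] = line;
if (line.StartsWith("author:")) items[2] = line;
if (line.StartsWith("description:")) items[3] = line;
}
// one ListViewItem per plugin: the first column is the name, the rest are subitems
var lvitem = new ListViewItem(items[0]) { Name = items[0] };
lvitem.SubItems.Add(items[1]); // version
lvitem.SubItems.Add(items[2]); // author
lvitem.SubItems.Add(items[3]); // description
PluginList.Add(lvitem);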