I have a script that imports a CSV file and reads each line to update the corresponding item in Sitecore. It works for many of the products, but it fails for rows where certain cells contain commas (such as the product description).
protected void SubmitButton_Click(object sender, EventArgs e)
{
    if (UpdateFile.PostedFile != null)
    {
        var file = UpdateFile.PostedFile;
        // check if valid csv file
        message.InnerText = "Updating...";
        Sitecore.Context.SetActiveSite("backedbybayer");
        _database = Database.GetDatabase("master");
        SitecoreContext context = new SitecoreContext(_database);
        Item homeNode = context.GetHomeItem<Item>();
        var productsItems =
            homeNode.Axes.GetDescendants()
                .Where(
                    child =>
                        child.TemplateID == new ID(TemplateFactory.FindTemplateId<IProductDetailPageItem>()));
        try
        {
            using (StreamReader sr = new StreamReader(file.InputStream))
            {
                var firstLine = true;
                string currentLine;
                var productIdIndex = 0;
                var industryIdIndex = 0;
                var categoryIdIndex = 0;
                var pestIdIndex = 0;
                var titleIndex = 0;
                string title;
                string productId;
                string categoryIds;
                string industryIds;
                while ((currentLine = sr.ReadLine()) != null)
                {
                    var data = currentLine.Split(',').ToList();
                    if (firstLine)
                    {
                        // find index of the important columns
                        productIdIndex = data.IndexOf("ProductId");
                        industryIdIndex = data.IndexOf("PrimaryIndustryId");
                        categoryIdIndex = data.IndexOf("PrimaryCategoryId");
                        titleIndex = data.IndexOf("Title");
                        firstLine = false;
                        continue;
                    }
                    title = data[titleIndex];
                    productId = data[productIdIndex];
                    categoryIds = data[categoryIdIndex];
                    industryIds = data[industryIdIndex];
                    var products = productsItems.Where(x => x.DisplayName == title);
                    foreach (var product in products)
                    {
                        product.Editing.BeginEdit();
                        try
                        {
                            product.Fields["Product Id"].Value = productId;
                            product.Fields["Product Industry Ids"].Value = industryIds;
                            product.Fields["Category Ids"].Value = categoryIds;
                        }
                        finally
                        {
                            product.Editing.EndEdit();
                        }
                    }
                }
            }
            // when done
            message.InnerText = "Complete";
        }
        catch (Exception ex)
        {
            message.InnerText = "Error reading file";
        }
    }
}
The problem is that when a description field has commas, like "Product is an effective, preventative biofungicide," it gets split as well and throws off the index, so categoryIds = data[8] gets the wrong value.
The spreadsheet is data that is provided by our client, so I would rather not require the client to edit the file unless necessary. Is there a way I can handle this in my code? Is there a different way I can read the file that won't split everything by comma?
I suggest using ADO.NET: if a field's data is enclosed in quotes, it will be parsed as a single field, and any commas inside the quotes will be ignored.
Code Example:
// Requires: System.Data, System.Data.OleDb, System.Globalization, System.IO
static DataTable GetDataTableFromCsv(string path, bool isFirstRowHeader)
{
    string header = isFirstRowHeader ? "Yes" : "No";
    string pathOnly = Path.GetDirectoryName(path);
    string fileName = Path.GetFileName(path);
    string sql = @"SELECT * FROM [" + fileName + "]";
    using (OleDbConnection connection = new OleDbConnection(
        @"Provider=Microsoft.Jet.OLEDB.4.0;Data Source=" + pathOnly +
        ";Extended Properties=\"Text;HDR=" + header + "\""))
    using (OleDbCommand command = new OleDbCommand(sql, connection))
    using (OleDbDataAdapter adapter = new OleDbDataAdapter(command))
    {
        DataTable dataTable = new DataTable();
        dataTable.Locale = CultureInfo.CurrentCulture;
        adapter.Fill(dataTable);
        return dataTable;
    }
}
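Note that the Jet text driver reads from disk (and the Jet 4.0 provider is 32-bit only; the ACE provider is the 64-bit alternative), so the uploaded stream has to be saved to a file first. A minimal sketch of how this could be wired to the UpdateFile control from the question; the temp-file handling and the "Title" lookup are illustrative assumptions, not part of the original code:

// Sketch: persist the uploaded CSV to a temp path, then parse it via OleDb.
string tempPath = Path.Combine(Path.GetTempPath(), Path.GetFileName(UpdateFile.PostedFile.FileName));
UpdateFile.PostedFile.SaveAs(tempPath);
DataTable csv = GetDataTableFromCsv(tempPath, true);
foreach (DataRow row in csv.Rows)
{
    // quoted fields arrive intact, commas and all
    string title = row["Title"].ToString();
    // ... look up and update the matching product as before ...
}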
I am trying to implement a script in my application that will dump the entire contents of a SQL db (running MS SQL Server Express 2014) to a .csv file. For now it dumps everything, but I am trying to write the code so that I can easily customize it to only grab certain columns.
Here is the code I have written currently:
public void doCsvWrite(string timeStamp){
    try {
        //specify file name of log file (csv).
        string newFileName = "C:/TestDirectory/DataExport-" + timeStamp + ".csv";
        //check to see if file exists, if not create an empty file with the specified file name.
        if (!File.Exists(newFileName)) {
            FileStream fs = new FileStream(newFileName, FileMode.CreateNew);
            fs.Close();
            //define header of new file, and write header to file.
            string csvHeader = "ITEM1,ITEM2,ITEM3,ITEM4,ITEM5";
            using (FileStream fsWHT = new FileStream(newFileName, FileMode.Append, FileAccess.Write))
            using (StreamWriter swT = new StreamWriter(fsWHT))
            {
                swT.WriteLine(csvHeader.ToString());
            }
        }
        //set up connection to database.
        SqlConnection myDEConnection;
        String cDEString = "Data Source=localhost\\NAMEDPIPE;Initial Catalog=db;User Id=user;Password=pwd";
        String strDEStatement = "SELECT * FROM table";
        try
        {
            myDEConnection = new SqlConnection(cDEString);
        }
        catch (Exception ex)
        {
            //error handling here.
            return;
        }
        try
        {
            myDEConnection.Open();
        }
        catch (Exception ex)
        {
            //error handling here.
            return;
        }
        SqlDataReader reader = null;
        SqlCommand myDECommand = new SqlCommand(strDEStatement, myDEConnection);
        try
        {
            reader = myDECommand.ExecuteReader();
            while (reader.Read())
            {
                for (int i = 0; i < reader.FieldCount; i++)
                {
                    if (reader["Column1"].ToString() == "") {
                        //does nothing if the current line is "bugged" (containing no values at all, typically happens after reboot of 3rd party equipment).
                    }
                    else {
                        //grab relevant tag data and set the csv line for the current row.
                        string csvDetails = reader["Column1"] + "," + reader["Column2"] + "," + String.Format("{0:0.0}", reader["Column3"]) + "," + String.Format("{0:0.000}", reader["Column4"]) + "," + reader["Column5"];
                        using (FileStream fsWDT = new FileStream(newFileName, FileMode.Append, FileAccess.Write))
                        using (StreamWriter swDT = new StreamWriter(fsWDT))
                        {
                            //write csv line to file.
                            swDT.WriteLine(csvDetails.ToString());
                        }
                    }
                }
            }
        }
        catch (Exception ex)
        {
            //error handling here.
            myDEConnection.Close();
            return;
        }
        myDEConnection.Close();
    }
    catch (Exception ex)
    {
        //error handling here.
        MessageBox.Show(ex.Message);
    }
}
Now, this was working fine when I was using it with a 3rd party SQLite-based database, but after modifying it for my MSSQL db the output looks something like this (ITEM1 is the primary key, a standard auto-incrementing ID field):
ITEM1,ITEM2,ITEM3,ITEM4,ITEM5
1,row1_item2,row1_item3,row1_item4,row1_item5
1,row1_item2,row1_item3,row1_item4,row1_item5
1,row1_item2,row1_item3,row1_item4,row1_item5
1,row1_item2,row1_item3,row1_item4,row1_item5
1,row1_item2,row1_item3,row1_item4,row1_item5
1,row1_item2,row1_item3,row1_item4,row1_item5
2,row2_item2,row2_item3,row2_item4,row2_item5
2,row2_item2,row2_item3,row2_item4,row2_item5
2,row2_item2,row2_item3,row2_item4,row2_item5
2,row2_item2,row2_item3,row2_item4,row2_item5
2,row2_item2,row2_item3,row2_item4,row2_item5
3,row3_item2,row3_item3,row3_item4,row3_item5
3,row3_item2,row3_item3,row3_item4,row3_item5
3,row3_item2,row3_item3,row3_item4,row3_item5
3,row3_item2,row3_item3,row3_item4,row3_item5
....
So it seems that it writes several copies of each row, where I would just like a single line per row. Any suggestions?
Thanks in advance.
edit: Thanks everyone for your answers!
The for loop isn't needed in the section below. Because it loops from 0 to FieldCount, I assume the loop was originally meant to append the text of each column in turn, but inside the loop a single statement already concatenates all the columns and assigns them to csvDetails, so every row gets written FieldCount times. A corrected sketch follows the quoted code.
try
{
    reader = myDECommand.ExecuteReader();
    while (reader.Read())
    {
        for (int i = 0; i < reader.FieldCount; i++)
        {
            if (reader["Column1"].ToString() == "") {
                //does nothing if the current line is "bugged" (containing no values at all, typically happens after reboot of 3rd party equipment).
            }
            else {
                //grab relevant tag data and set the csv line for the current row.
                string csvDetails = reader["Column1"] + "," + reader["Column2"] + "," + String.Format("{0:0.0}", reader["Column3"]) + "," + String.Format("{0:0.000}", reader["Column4"]) + "," + reader["Column5"];
                using (FileStream fsWDT = new FileStream(newFileName, FileMode.Append, FileAccess.Write))
                using (StreamWriter swDT = new StreamWriter(fsWDT))
                {
                    //write csv line to file.
                    swDT.WriteLine(csvDetails.ToString());
                }
            }
        }
    }
}
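With the loop removed, that section might look like this (a minimal sketch keeping the column names and formats exactly as in the question):

reader = myDECommand.ExecuteReader();
while (reader.Read())
{
    // skip "bugged" rows that contain no values at all
    if (reader["Column1"].ToString() == "")
        continue;
    //grab relevant tag data and set the csv line for the current row.
    string csvDetails = reader["Column1"] + "," + reader["Column2"] + "," + String.Format("{0:0.0}", reader["Column3"]) + "," + String.Format("{0:0.000}", reader["Column4"]) + "," + reader["Column5"];
    using (FileStream fsWDT = new FileStream(newFileName, FileMode.Append, FileAccess.Write))
    using (StreamWriter swDT = new StreamWriter(fsWDT))
    {
        //write csv line to file (one line per row).
        swDT.WriteLine(csvDetails);
    }
}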
Usually we use specially designed export/import utilities for dumping data.
However, if you have to implement your own routine, I suggest decomposing it:
private static IEnumerable<IDataRecord> SourceData(String sql) {
    using (SqlConnection con = new SqlConnection(ConnectionStringHere)) {
        con.Open();
        using (SqlCommand q = new SqlCommand(sql, con)) {
            using (var reader = q.ExecuteReader()) {
                while (reader.Read()) {
                    //TODO: you may want to add additional conditions here
                    yield return reader;
                }
            }
        }
    }
}

private static IEnumerable<String> ToCsv(IEnumerable<IDataRecord> data) {
    foreach (IDataRecord record in data) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < record.FieldCount; ++i) {
            String chunk = Convert.ToString(record.GetValue(i)); // note: GetValue(i), not GetValue(0)
            if (i > 0)
                sb.Append(',');
            // quote fields that contain separators or quotes, doubling embedded quotes
            if (chunk.Contains(',') || chunk.Contains(';') || chunk.Contains('"'))
                chunk = "\"" + chunk.Replace("\"", "\"\"") + "\"";
            sb.Append(chunk);
        }
        yield return sb.ToString();
    }
}
Having SourceData and ToCsv you can easily implement
private static void WriteMyCsv(String fileName) {
    var source = SourceData("SELECT * FROM table");
    File.WriteAllLines(fileName, ToCsv(source));
}
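A usage sketch (the output path is illustrative, not from the original answer). Because ToCsv is lazy and File.WriteAllLines accepts an IEnumerable<string>, rows stream straight from the reader into the file without buffering the whole table in memory:

WriteMyCsv(@"C:\TestDirectory\DataExport.csv");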
You have a for loop which is looping over the fieldcount.
for (int i = 0; i < reader.FieldCount; i++)
I think it will work if you remove the loop as you don't need to iterate through the columns.
it happens because output placed inside for-loop
for (int i = 0; i < reader.FieldCount; i++)
and every record repeats FieldCount-times
Complete example, verified working on .NET 4.8 (May 2022). Code simplified for the demo.
Why the DataTable? Under some circumstances it is useful: if you are converting hundreds of files at once with multiple threads, it works as a large buffer, and you can do fairly complex data mangling at the same time, should you need it.
Unfortunately, Microsoft tries to detect the column types, and if your data doesn't comply with that mechanism you end up with hard-to-correct errors. In that case use the second solution.
// Get the data from SQLite
SqliteConnection SQLiDataCon = new SqliteConnection(@"Data Source=c:\sqlite.db3");
SQLiDataCon.Open();
SqliteDataReader SQLiDtaReader = new SqliteCommand(@"SELECT * FROM stats;", SQLiDataCon).ExecuteReader();
// Load data to DataTable
DataTable csvTable = new DataTable();
csvTable.Load(SQLiDtaReader);
// Get "one" string with column names
string csvFields = @"""" + String.Join(@""",""", csvTable.Columns.Cast<DataColumn>().Select(dc => dc.ColumnName).ToArray()) + @"""";
// Prep "in memory the entire content of the CSV"
StringBuilder csvString = new StringBuilder();
// Write the header in
csvString.AppendLine(csvFields);
// Write the rows in
foreach (DataRow dr in csvTable.Rows)
{
    csvString.AppendLine(@"""" + String.Join(@""",""", dr.ItemArray) + @"""");
}
// Save to file (using ensures the writer is flushed and closed)
using (StreamWriter csvFile = new StreamWriter(@"c:\stats.csv"))
{
    csvFile.Write(csvString);
}
Without DataTable.
// SQLITE
SqliteConnection SQLiDataCon = new SqliteConnection(@"Data Source=c:\sqlite.db3");
SQLiDataCon.Open();
StringBuilder csvString = new StringBuilder();
Object[] csvRow;
SqliteDataReader SQLiDtaReader = new SqliteCommand(@"SELECT * FROM sometable;", SQLiDataCon).ExecuteReader();
// CSV HEADER
csvString.AppendLine(@"""" + String.Join(@""",""", SQLiDtaReader.GetSchemaTable().AsEnumerable().Select(dr => dr.Field<string>("ColumnName")).ToArray<string>()) + @"""");
// CSV BODY
while (SQLiDtaReader.Read())
{
    SQLiDtaReader.GetValues(csvRow = new Object[SQLiDtaReader.FieldCount]);
    csvString.AppendLine(@"""" + String.Join(@""",""", csvRow) + @"""");
}
// WRITE IT (using ensures the writer is flushed and closed)
using (StreamWriter csvFile = new StreamWriter(@"C:\somecsvfile.csv"))
{
    csvFile.Write(csvString);
}
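One caveat with both variants: the quote-and-join output breaks if a value itself contains a double quote. A small hypothetical helper (not part of the original answer) that doubles embedded quotes before joining, plus a usage sketch for the reader-based loop above (requires System.Linq):

// Hypothetical helper: CSV-escape one value by doubling embedded quotes.
static string CsvEscape(object value)
{
    return Convert.ToString(value).Replace("\"", "\"\"");
}

// Usage: escape each value before joining, e.g. in the CSV BODY loop:
csvString.AppendLine(@"""" + String.Join(@""",""", csvRow.Select(CsvEscape)) + @"""");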
I need to upload a CSV file to an ASP.NET application on an Azure server. Although it works fine on my local machine, when uploading to the server the following error is thrown:
"Process cannot access the file
'C:\inetpub\wwwroot\ImportFiles\9_11.csv' because it is being used by
another process"
My code:
string fileName = DateTime.Now.ToString().Replace(':', '_').Replace('/', '_').Replace(' ', '_') + Convert.ToString((new Random()).Next(0, 999) * (new Random()).Next(0, 999));
string path = Server.MapPath("ImportFiles") + "\\" + fileName + "" + FileImport.FileName.Substring(FileImport.FileName.IndexOf('.'));
FileImport.SaveAs(path);
string pathforSeconStream = path;
try
{
    Response.Write("<script> alert('In Try Block');</script>");
    bool flag = true;
    int visiblemessageCount = 0;
    int rollNo = 1;
    StreamReader ColLine = new StreamReader(path);
    string ColInputline = ColLine.ReadLine();
    String[] ColsInput = ColInputline.Split(',');
    ColLine.Close();
    ColLine.Dispose();
    string preFix = "", RollNumber = "";
    StreamReader sr = new StreamReader(pathforSeconStream);
}
catch (Exception ex)
{
}
The code that generates a unique filename is wrong; use Path.GetTempFileName instead.
P.S. Never eat an exception. Please remove the empty catch (Exception ex) { } block.
Revision
Instead of FileImport.SaveAs(...), just save the request into a MemoryStream and then work on it.
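A minimal sketch of that idea, assuming FileImport is the FileUpload control from the question; reading the upload into a MemoryStream means no file on disk is ever locked:

// Sketch: copy the posted file into memory and parse it from there.
using (var ms = new MemoryStream())
{
    FileImport.PostedFile.InputStream.CopyTo(ms);
    ms.Position = 0;
    using (var reader = new StreamReader(ms))
    {
        string colInputline = reader.ReadLine();   // header row
        String[] colsInput = colInputline.Split(',');
        // ... continue processing the remaining lines here ...
    }
}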
I am trying to create and write to a text file in a C# application using the following code:
System.IO.Directory.CreateDirectory(Server.MapPath("~\\count"));
using (System.IO.FileStream fs = new System.IO.FileStream("~/count/count.txt", System.IO.FileMode.Create))
using (System.IO.StreamWriter sw = new System.IO.StreamWriter("~/count/count.txt"))
{
    sw.Write("101");
}
string _count = System.IO.File.ReadAllText("~/count/count.txt");
Application["NoOfVisitors"] = _count;
but I get an error:
The process cannot access the file 'path' because it is being used by another process.
What is my error?
You're trying to open the file twice: your first using statement creates a FileStream that is never used, but it locks the file, so the second using fails.
Just delete your first using line, and it should all work fine.
However, I'd recommend replacing all of that with File.WriteAllText; then there would be no using in your code at all, and it'd be much simpler.
var dir = Server.MapPath("~\\count");
var file = Path.Combine(dir, "count.txt");
Directory.CreateDirectory(dir);
File.WriteAllText(file, "101");
var _count = File.ReadAllText(file);
Application["NoOfVisitors"] = _count;
private void ExportTextFile()
{
    #region Private variables.
    string lblName = null;
    string lblACNo = null;
    string lblIFSC = null;
    string lblBCode = null;
    string lblAdd1 = null;
    string lblAdd2 = null;
    string lblAdd3 = null;
    string lblMobileNo = null;
    string lblEmai = null;
    #endregion
    Response.Clear();
    Response.Buffer = true;
    Response.Charset = "";
    Response.ContentType = "application/text";
    StringBuilder Rowbind = new StringBuilder();
    for (int i = 0; i < GrdAcc.Rows.Count; i++)
    {
        CheckBox Chk_Status = (CheckBox)GrdAcc.Rows[i].FindControl("ChkStatus");
        Label lbl_Status = (Label)GrdAcc.Rows[i].FindControl("lblStatus");
        Label lbl_Code = (Label)GrdAcc.Rows[i].FindControl("lblEmpCode");
        if (Chk_Status.Checked == true)
        {
            #region Fetching ACC Details From Database.
            AccPL.Status = lbl_Status.Text;
            AccPL.EmpCode = lbl_Code.Text;
            DataTable dt = AccBL.ExportFileAcc(AccPL);
            if (dt.Rows.Count > 0)
            {
                lblName = dt.Rows[0]["Name"].ToString().Trim();
                lblACNo = dt.Rows[0]["ACNo"].ToString().Trim();
                lblBCode = dt.Rows[0]["BCode"].ToString().Trim();
                lblIFSC = dt.Rows[0]["IFSC"].ToString().Trim();
                lblAdd1 = dt.Rows[0]["ADd1"].ToString() + dt.Rows[0]["PPostTehsil"].ToString();
                lblAdd2 = dt.Rows[0]["Add2"].ToString().Trim() + dt.Rows[0]["PPIN"].ToString().Trim();
                lblAdd3 = dt.Rows[0]["Add3"].ToString() + dt.Rows[0]["State"].ToString().Trim();
                lblMobileNo = dt.Rows[0]["MobileNo"].ToString().Trim();
                lblEmai = dt.Rows[0]["Email"].ToString().Trim();
            }
            #endregion
            #region Generating Text File.
            if (ddlExportType.SelectedValue == "2")
            {
                Response.AddHeader("content-disposition", "attachment;filename=" + DateTime.Now.ToString("ddMMyyhhmmss") + ".txt");
                Rowbind.Append(lblName + "#" + lblACNo + "#" + lblIFSC + "#" + "#" + "#" + "#" + "#" + "#" + lbl_Code.Text);
                Rowbind.Append("\r\n");
            }
            #endregion
        }
    }
    // write the accumulated rows out once, after the loop;
    // Response.End() inside the loop would stop after the first checked row
    Response.Output.Write(Rowbind.ToString());
    Response.Flush();
    Response.End();
}
I'm getting access denied whenever I try to delete a file after finishing reading it at C:\inetpub\wwwroot\Project\temp\. I already Close() and Dispose() the StreamReader properly, and I gave full permission to the NETWORK SERVICE account. Can anyone help me?
reader = new StreamReader(path + fileName);
DataTable dt = new DataTable();
String line = null;
int i = 0;
while ((line = reader.ReadLine()) != null)
{
    String[] data = line.Split(',');
    if (data.Length > 0)
    {
        if (i == 0)
        {
            dt.Columns.Add(new DataColumn());
            foreach (object item in data)
            {
                DataColumn c = new DataColumn(Convert.ToString(item));
                if (Convert.ToString(item).Contains("DATE"))
                {
                    c.DataType = typeof(DateTime);
                }
                else { c.DataType = typeof(String); }
                dt.Columns.Add(c);
            }
            dt.Columns.Add(new DataColumn("CreatedDate", typeof(DateTime)));
            i++;
        }
        else
        {
            DataRow row = dt.NewRow();
            row[0] = "";
            for (int j = 0; j < data.Length; j++)
            {
                if (dt.Columns[j + 1].DataType == typeof(DateTime))
                {
                    row[j + 1] = Convert.ToDateTime(data[j]);
                }
                else
                {
                    row[j + 1] = data[j];
                }
            }
            row[data.Length + 1] = DateTime.Now.ToString();
            dt.Rows.Add(row);
        }
    }
}
DataAccess dataAccess = new DataAccess(Constant.CONNECTION_STRING_NAME);
dataAccess.WriteBulkData(dt, Constant.TABLE);
reader.Close();
reader.Dispose();
File.Delete(path);
Your File.Delete method call should take path + fileName as its parameter. According to http://msdn.microsoft.com/en-us/library/system.io.file.delete.aspx, the parameter is the full path including the filename, and your path variable contains only the folder name.
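A one-line sketch of the fix; Path.Combine is a safer way to build the full path than string concatenation:

File.Delete(Path.Combine(path, fileName));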
I also had this problem, which is how I stumbled on this post. I added the following lines of code before and after the Copy / Delete calls.
Delete
File.SetAttributes(file, FileAttributes.Normal);
File.Delete(file);
Copy
File.Copy(file, dest, true);
File.SetAttributes(dest, FileAttributes.Normal);
Link: Why is access to the path denied?
You're calling File.Delete(path); not File.Delete(path + fileName);
You are opening
reader = new StreamReader(path + fileName);
But you are deleting
File.Delete(path);
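Putting both observations together, a minimal corrected sketch: open the reader in a using block so the handle is guaranteed to be released, then delete with the same full path that was opened:

string fullPath = Path.Combine(path, fileName);
using (var reader = new StreamReader(fullPath))
{
    // ... read and process the file ...
}
File.Delete(fullPath);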