I've created a program to mine and import data from about 75 spreadsheets into an oracle table. I'm able to connect, iterate through sheets, and grab cells and rows seemingly fine. The problem is if the excel sheet was saved with grouped rows collapsed, it skips the rows.
I can't find anywhere whether there is an extended property or registry setting that would allow me to expand the groups on entry. I'm not sure how to get around a collapsed group (not a merged cell — those I can process without a problem).
code bits :
//Starting where I iterate through a particular sheet
//HDR=NO surfaces every row and names the columns F1..Fn; IMEX=1 reads all
//cells as text so mixed-type columns don't come back null.
var connectionString = string.Format("Provider=Microsoft.ACE.OLEDB.12.0; data source={0}; Extended Properties=\"Excel 12.0;HDR=NO;IMEX=1;ReadOnly=0\"", fileName);
OleDbConnection objConn = new OleDbConnection(connectionString);
try
{
    objConn.Open();
    //One schema row per sheet; named ranges (e.g. Print_Area) show up here too.
    System.Data.DataTable dt = objConn.GetOleDbSchemaTable(OleDbSchemaGuid.Tables, new object[] { null, null, null, "TABLE" });
    if (dt != null)
    {
        foreach (DataRow row in dt.Rows)
        {
            var adapter = new OleDbDataAdapter("SELECT F1,F2,F3,F4,F5,F6,F7,F8,F9,F10,F11,F12 FROM [" + row["TABLE_NAME"].ToString() + "]", connectionString);
            var ds = new DataSet();
            try
            {
                adapter.Fill(ds, "anyname");
            }
            catch
            {
                //NOTE(review): swallowing the exception and breaking aborts ALL
                //remaining sheets on the first bad one; logging plus 'continue'
                //would skip just the failing sheet.
                break;
            }
            DataTable data = ds.Tables[0];
            int rownum = 0;
            // <a bunch of variable declarations>
            foreach (DataRow row_b in data.Rows)
            {
                // start slogging through the rows
                //FIX: 'rownum = rownum++' is a no-op in C# (the value captured
                //before the increment is assigned straight back), so rownum was
                //stuck at 0 and none of the rownum-based branches below fired.
                rownum++;
                // <reset some variables>
                if (rownum == 1) // Catch valid scripts that contain a number
                {
                    foreach (DataColumn column in data.Columns)
                    {
                        if (column.ToString() == "F1")
                        {
                            // <processing code for this column>
                        }
                        if (column.ToString() == "F2")
                        {
                            // <processing code for this column>
                        }
                        if (column.ToString() == "F3")
                        {
                            // <you get the picture>
                        }
                    }
                }
                if (rownum == 3)
                {
                    // <moving along through the rows...different processing>
                }
                // <..rows 4-11..>
                if (rownum > 12)
                {
                    // <more value assignment>
                }
                string allvals = APPLICATION + E_USER + STEP_DESC + VARIATIONS + STATUS + STOPS_TESTING + ISSUE_NUM + ISSUE_COMMENTS + ADDITIONAL_INFO;
                allvals = allvals.Trim();
                //Don't want sheets that come across as Print Area this shouldn't affect the row processing
                isPrintArea = 0;
                if (BOOKSHEET.Contains("Print_Area"))
                {
                    isPrintArea = 1;
                }
                bool addornot = false;
                if (cb_forallscripts.Checked)
                {
                    addornot = (STEP_NUM != 0 &&
                                allvals != "" &&
                                isPrintArea == 0 &&
                                SCRIPT_NUM != 0);
                }
                else
                {
                    addornot = (STEP_NUM != 0 &&
                                allvals != "" &&
                                isPrintArea == 0 &&
                                SCRIPT_NUM != 0 &&
                                runScripts.Contains(SCRIPT_NUM.ToString()));
                }
                if (addornot)
                {
                    //<connect to our Oracle db, I set up oCmd outside this>
                    //Dispose the per-row command once its insert is done.
                    using (OracleCommand oCmd = new OracleCommand())
                    {
                        oCmd.Connection = oConn;
                        oCmd.CommandType = CommandType.Text;
                        oCmd.Parameters.Add("STEP_NUM", STEP_NUM);
                        // <... bunch of parameters ...>
                        oCmd.Parameters.Add("script", SCRIPT);
                        oCmd.CommandText = "<My insert statement> ";
                        oCmd.ExecuteNonQuery();
                    }
                }
            }
        }
    }
}
catch ( <error processing>)
{
    //NOTE(review): an empty catch hides every failure; at minimum log here.
}
Rows.OutlineLevel should be the VBA property you are looking for. It can be read or set. See this page for Microsoft's rather terse description. Range.ClearOutline will, as it says, clear the outline for a specified range, as explained here.
Related
I am getting data from excel and showing it in DataGridWiew.
I have two textboxes, one is for starting index for first record and other is for last record.
Code works fine. But lets suppose starting record is 1 and ending is 10 when I change 10 to 1 or 2 it gives me an error in this line:
adapter.Fill(dataTable);
Full Code is below:
// Loads every sheet of the workbook into a DataSet, optionally restricted to
// a SrNo range taken from the two textboxes.
public DataSet Parse(string fileName)
{
    // Jet 4.0 / Excel 8.0 only handles .xls workbooks; use ACE for .xlsx.
    string connectionString = string.Format("provider = Microsoft.Jet.OLEDB.4.0; data source = {0}; Extended Properties = Excel 8.0;", fileName);
    DataSet data = new DataSet();
    foreach (var sheetName in GetExcelSheetNames(connectionString))
    {
        using (OleDbConnection con = new OleDbConnection(connectionString))
        {
            string query = "";
            var dataTable = new DataTable();
            if (tbStarting.Text.Trim() == "" && tbEnding.Text.Trim() == "")
            {
                query = string.Format("SELECT * FROM [{0}]", sheetName);
            }
            else
            {
                // NOTE(review): this assumes every sheet has a SrNo column; a
                // sheet without one makes adapter.Fill throw ("no value given
                // for one or more required parameters"). int.Parse also throws
                // on non-numeric textbox input - TODO validate upstream.
                query = string.Format("SELECT * FROM [{0}] where SrNo between " + int.Parse(tbStarting.Text.Trim()) + " and " + int.Parse(tbEnding.Text.Trim()) + " order by SrNo", sheetName);
            }
            con.Open();
            // FIX: OleDbDataAdapter is IDisposable and was never disposed; the
            // explicit con.Close() was redundant inside the using block.
            using (OleDbDataAdapter adapter = new OleDbDataAdapter(query, con))
            {
                adapter.Fill(dataTable);
            }
            data.Tables.Add(dataTable);
        }
    }
    return data;
}
// Returns the sheet (table) names of the workbook behind connectionString,
// or null when no schema information is available.
static string[] GetExcelSheetNames(string connectionString)
{
    // FIX: neither the connection nor the schema DataTable was ever disposed;
    // using blocks close and release both even when an exception is thrown.
    using (OleDbConnection con = new OleDbConnection(connectionString))
    {
        con.Open();
        using (DataTable dt = con.GetOleDbSchemaTable(OleDbSchemaGuid.Tables, null))
        {
            if (dt == null)
            {
                return null; // preserve the original contract: null when no schema
            }
            String[] excelSheetNames = new String[dt.Rows.Count];
            int i = 0;
            foreach (DataRow row in dt.Rows)
            {
                excelSheetNames[i] = row["TABLE_NAME"].ToString();
                i++;
            }
            return excelSheetNames;
        }
    }
}
Why this is happening please help me?
Looking at the code, it seems that your procedure is working when you ask to retrieve all the records in each table. But you are not showing which table (Sheet) is actually used afterwards.
Chances are, you are using the first one only.
When you submit some parameters, only one of the tables (Sheets) can fulfill those requirements. The other(s) don't, possibly because a field named [SrNo] is not present.
This causes the More Parameters Required error when trying to apply a filter.
Not related to the error, but worth noting: you don't need to recreate the whole DataSet + DataTables to filter your DataSources.
The DataSet.Tables[N].DefaultView.RowFilter can be used to get the same result without destroying all the objects each time a filter is required.
RowFilter has some limitations in the language (e.g. does not support BETWEEN, Field >= Value1 AND Field <= Value2 must be used), but it's quite effective.
This is a possible setup:
(xDataSet is a placeholder for your actual DataSet)
//Gather the two range bounds from the TextBoxes and apply the filter.
private void button1_Click(object sender, EventArgs e)
{
    var bounds = new string[] { tbStarting.Text.Trim(), tbEnding.Text.Trim() };
    if (xDataSet != null)
    {
        FilterDataset(bounds);
    }
}
// Applies a SrNo row filter to the first table's DefaultView based on the
// two bounds in Ranges ([0]=start, [1]=end).
private void FilterDataset(string[] Ranges)
{
    // FIX: replaced the non-short-circuit '&'/'|' with the idiomatic
    // short-circuiting '&&'/'||' (same truth table for bools, clearer intent).
    // Both boxes empty: clear the filter and show everything.
    if (string.IsNullOrEmpty(Ranges[0]) && string.IsNullOrEmpty(Ranges[1]))
        xDataSet.Tables[0].DefaultView.RowFilter = null;
    // Only one box filled: open-ended ranges are not supported - do nothing.
    else if (string.IsNullOrEmpty(Ranges[0]) || string.IsNullOrEmpty(Ranges[1]))
        return;
    // RowFilter has no BETWEEN operator, so express the range as >= AND <=.
    else if (int.Parse(Ranges[0]) < int.Parse(Ranges[1]))
        xDataSet.Tables[0].DefaultView.RowFilter = string.Format("SrNo >= {0} AND SrNo <= {1}", Ranges[0], Ranges[1]);
    // Start >= end: fall back to an exact match on the start value.
    else
        xDataSet.Tables[0].DefaultView.RowFilter = string.Format("SrNo = {0}", Ranges[0]);
    this.dataGridView1.Update();
}
I've modified your code a bit to handle those requirements.
(I've left here those filters anyway; they're not used, but if you still want them, they are in a working condition)
DataSet xDataSet = new DataSet();
// FIX: '#"..."' is not valid C# - the verbatim string prefix is '@' (the '#'
// is a common markdown mangling of '@').
string WorkBookPath = @"[Excel WorkBook Path]";
//Query one Sheet only. More can be added if necessary
string[] WBSheetsNames = new string[] { "Sheet1" };
//Open the Excel document and assign the DataSource to a dataGridView
xDataSet = Parse(WorkBookPath, WBSheetsNames, null);
dataGridView1.DataSource = xDataSet.Tables[0];
dataGridView1.Refresh();
// Loads the requested worksheets into a DataSet, optionally restricted to a
// SrNo range (ranges[0]=start, ranges[1]=end; null or incomplete = no filter).
public DataSet Parse(string fileName, string[] WorkSheets, string[] ranges)
{
    if (!File.Exists(fileName)) return null;
    string connectionString = string.Format("provider = Microsoft.ACE.OLEDB.12.0; " +
                                            "data source = {0}; " +
                                            "Extended Properties = \"Excel 12.0;HDR=YES\"",
                                            fileName);
    DataSet data = new DataSet();
    string query = string.Empty;
    foreach (string sheetName in GetExcelSheetNames(connectionString))
    {
        foreach (string WorkSheet in WorkSheets)
        {
            // Schema table names carry a trailing '$'.
            if (sheetName != (WorkSheet + "$")) continue;
            using (OleDbConnection con = new OleDbConnection(connectionString))
            {
                DataTable dataTable = new DataTable();
                // No usable range -> read the whole sheet.
                if ((ranges == null) ||
                    string.IsNullOrEmpty(ranges[0]) || string.IsNullOrEmpty(ranges[1]) ||
                    (int.Parse(ranges[0]) > int.Parse(ranges[1])))
                    query = string.Format("SELECT * FROM [{0}]", sheetName);
                else if (int.Parse(ranges[0]) == int.Parse(ranges[1]))
                    query = string.Format("SELECT * FROM [{0}] WHERE SrNo = {1}", sheetName, ranges[0]);
                else
                    query = string.Format("SELECT * FROM [{0}] WHERE (SrNo BETWEEN {1} AND {2}) " +
                                          "ORDER BY SrNo", sheetName, ranges[0], ranges[1]);
                con.Open();
                // FIX: the adapter is IDisposable and was never disposed (and a
                // stray ';' followed the original using block).
                using (OleDbDataAdapter adapter = new OleDbDataAdapter(query, con))
                {
                    adapter.Fill(dataTable);
                }
                data.Tables.Add(dataTable);
            }
        }
    }
    return data;
}
// Reads the workbook's schema table and projects out every TABLE_NAME.
// Returns null when no schema information could be obtained.
static string[] GetExcelSheetNames(string connectionString)
{
    string[] excelSheetNames = null;
    using (OleDbConnection con = new OleDbConnection(connectionString))
    {
        con.Open();
        using (DataTable dt = con.GetOleDbSchemaTable(OleDbSchemaGuid.Tables, null))
        {
            if (dt != null)
            {
                excelSheetNames = new string[dt.Rows.Count];
                int index = 0;
                foreach (DataRow schemaRow in dt.Rows)
                {
                    excelSheetNames[index++] = schemaRow["TABLE_NAME"].ToString();
                }
            }
        }
    }
    return excelSheetNames;
}
First a bit of background:
I developed a method of retrieving rows from a table in SQL in batches or pages, by using IEnumerable and yield. It works great when you need read only access but not so well when you need to do updates to the underlying data as well.
So I wrote a method that takes in a generic DataTable, and builds up an update statement which then gets passed to SQL along with the entire DataTable as a table valued parameter.
The method looks like this:
// Whitelist check: only columns belonging to the target table may appear.
string[] validColumns = SQL_Columns.Split(',');
foreach (DataColumn column in p_UpdatesTable.Columns)
{
    if (!validColumns.Contains(column.ColumnName))
    {
        throw new Exception("Column '" + column.ColumnName + "' is not valid for this table");
    }
}
//Establish SQL Connection
using (SqlConnection sqlConnection = new SqlConnection(connectionString))
{
    sqlConnection.Open();
    StringBuilder commandBuilder = new StringBuilder();
    commandBuilder.Append("UPDATE Table SET ");
    List<string> columnsToUpdate = new List<string>(p_UpdatesTable.Columns.Count);
    foreach (DataColumn column in p_UpdatesTable.Columns)
    {
        // UID is the join key, never an updated column.
        if (!column.ColumnName.Equals("UID", StringComparison.InvariantCultureIgnoreCase))
        {
            columnsToUpdate.Add(column.ColumnName + " = U." + column.ColumnName);
        }
    }
    commandBuilder.Append(string.Join(",", columnsToUpdate.ToArray()));
    // FIX: a table-valued parameter is referenced as @UpdateTable in the SQL
    // text; '#UpdateTable' would be a temp table, which is never created here.
    commandBuilder.Append(" FROM @UpdateTable AS U WHERE UID = U.UID");
    using (SqlCommand sqlCommand = new SqlCommand(commandBuilder.ToString(), sqlConnection))
    {
        SqlParameter updateTableParameter = sqlCommand.Parameters.Add("@UpdateTable", SqlDbType.Structured);
        updateTableParameter.Value = p_UpdatesTable;
        // NOTE(review): a Structured parameter also needs TypeName set to a
        // server-side table type - confirm that is configured somewhere.
        int rowsAffected = sqlCommand.ExecuteNonQuery();
        if (rowsAffected != p_UpdatesTable.Rows.Count)
        {
            throw new Exception("Update command affected " + rowsAffected + " rows out of the " + p_UpdatesTable.Rows.Count + " expected.");
        }
    }
    // FIX: removed the redundant sqlConnection.Dispose(); the using block
    // already disposes the connection.
}
I then built this method to populate the update table:
// Records one column value for the row keyed by p_uid in the pending-updates
// table, creating the column on first use (type mirrored from the schema).
private void AddUpdate(ref DataTable p_UpdateTable, string p_ColumnName, long p_uid, object p_value)
{
    if (!StronglyTypedDataset.Columns.Contains(p_ColumnName))
    {
        // FIX: the message said "Table '...'", but it is the COLUMN that is
        // missing from the table.
        throw new ArgumentException("Column '" + p_ColumnName + "' does not exist in table", "p_ColumnName");
    }
    if (!p_UpdateTable.Columns.Contains(p_ColumnName))
    {
        DataColumn columnToAdd = p_UpdateTable.Columns.Add(p_ColumnName);
        // Mirror the data type from the strongly typed schema.
        columnToAdd.DataType = StronglyTypedDataset.Columns.Cast<DataColumn>().First(c => c.ColumnName.Equals(p_ColumnName)).DataType;
    }
    // One pending row per UID: update it if present, otherwise add a new one.
    var existingRow = p_UpdateTable.Rows.Cast<DataRow>().FirstOrDefault(r => Convert.ToInt64(r["UID"]) == p_uid);
    if (existingRow != null)
    {
        existingRow[p_ColumnName] = p_value;
    }
    else
    {
        DataRow newRow = p_UpdateTable.NewRow();
        newRow["UID"] = p_uid;
        newRow[p_ColumnName] = p_value;
        p_UpdateTable.Rows.Add(newRow);
    }
}
There are a few times where I need to call this so this is more of a convenience method than anything else.
Now the problem: there is a possibility where I add a bunch of columns and values for one UID, but for another I might add more columns or not add values for existing columns. The problem with this is the update as it is will obliterate whatever is in the database already with a null value, which I don't want unless I explicitly say "make this null".
I was thinking of getting around this by supplying a value as a default, which I can then check for in my update statement and then using a CASE in the UPDATE statement that checks for this value, and using the original value (so in essence I could just ignore the "U." before the column name). The problem is that the table is generic so there could be anything in there, and in the case of actual data that somehow matches my default value, things would break.
I should note that this update table will be built up into a batch and a batch updated at once, not on a row by row basis.
Is there a value that is guaranteed not to be used, perhaps a GUID (I know there could still be a collision) or something like that?
An example:
Say my table looks like this after one row:
| UID | column 1 | column 2 |
row 1 | 1 | x | y |
On the second row it looks like this:
| UID | column 1 | column 2 | column 3 |
row 1 | 1 | x | y | ? |
row 2 | 2 | x | y | z |
The value for row 1 column 3 never gets set, so it gets defaulted to null. When I use my update statement, SQL will go and set that value to null even if there is something already in the table, but I don't want it to update the field for that row at all since I didn't specify a value for it.
I want to be able to put a value in place of the ? instead of it defaulting to null, so then I can change the update statement to something like UPDATE Table SET Column1 = U.Column1, Column2 = U.Column2, Column3 = CASE WHEN U.Column3 = somevalue THEN Column3 ELSE U.Column3 END FROM #UpdateTable U.
You could make yourself a little Maybe<T> that can either be an actual value for T, in which case you push through the update, or it could be a special not-a-value. It could look something like this:
// Optional wrapper: either an actual value of T (HasValue == true) or the
// shared Nothing sentinel (HasValue == false).
public sealed class Maybe<T> {
    private readonly T value;
    private readonly bool hasValue;

    // Private: only used to build the Nothing sentinel.
    private Maybe() {
        hasValue = false;
    }

    // FIX: must be static (one sentinel per closed type) and needs the type
    // argument - 'public readonly Maybe<T> Nothing = new Maybe();' did not
    // compile (instance field + missing <T>).
    public static readonly Maybe<T> Nothing = new Maybe<T>();

    public Maybe(T value) {
        this.value = value;
        hasValue = true;
    }

    // The wrapped value; only meaningful when HasValue is true.
    public T Value {
        get {
            return value;
        }
    }

    public bool HasValue {
        get {
            // FIX: the original returned 'value' (a T) from a bool property -
            // a compile error; the flag is 'hasValue'.
            return hasValue;
        }
    }
}
Which you could use like this:
// Sketch: record p_value for the row keyed by p_uid, writing the cell only
// when the Maybe actually carries a value.
private void AddUpdate<T>(DataTable p_UpdateTable, string p_ColumnName, long p_uid, Maybe<T> p_value) {
    // ...
    if(existingRow == null) {
        DataRow freshRow = p_UpdateTable.NewRow();
        freshRow["UID"] = p_uid;
        if(p_value.HasValue)
            freshRow[p_ColumnName] = p_value.Value;
        p_UpdateTable.Rows.Add(freshRow);
    }
    else if(p_value.HasValue) {
        existingRow[p_ColumnName] = p_value.Value;
    }
    // ...
}
You don't need the ref for the DataTable parameter, by the way.
I wound up taking a slightly different route, not using an identifier but using another column entirely.
My AddUpdate looks like this now:
// Records one column value for the row keyed by p_uid. A companion
// "<column>_null" flag column distinguishes "explicitly set to NULL" from
// "never supplied" when the update statement is later built.
private void AddUpdate(DataTable p_UpdateTable, string p_ColumnName, long p_uid, object p_value)
{
    if (!StronglyTypedDataSet.Columns.Contains(p_ColumnName))
    {
        // FIX: the message said "Table '...'", but it is the COLUMN that is
        // missing from the table.
        throw new ArgumentException("Column '" + p_ColumnName + "' does not exist in table", "p_ColumnName");
    }
    if (!p_UpdateTable.Columns.Contains(p_ColumnName))
    {
        // Mirror type and max length from the strongly typed schema.
        var matchingColumn = StronglyTypedDataSet.Columns.Cast<DataColumn>().First(c => c.ColumnName.Equals(p_ColumnName));
        DataColumn columnToAdd = p_UpdateTable.Columns.Add(p_ColumnName, matchingColumn.DataType);
        columnToAdd.MaxLength = matchingColumn.MaxLength;
        // Companion flag defaults to false: "not explicitly nulled".
        DataColumn setNullColumn = p_UpdateTable.Columns.Add(p_ColumnName + "_null", typeof(bool));
        setNullColumn.DefaultValue = false;
    }
    var existingRow = p_UpdateTable.Rows.Cast<DataRow>().FirstOrDefault(r => Convert.ToInt64(r["UID"]) == p_uid);
    if (existingRow != null)
    {
        existingRow[p_ColumnName] = p_value;
        if (p_value == null || p_value == DBNull.Value)
        {
            existingRow[p_ColumnName + "_null"] = true;
        }
    }
    else
    {
        DataRow newRow = p_UpdateTable.NewRow();
        newRow["UID"] = p_uid;
        newRow[p_ColumnName] = p_value;
        if (p_value == null || p_value == DBNull.Value)
        {
            newRow[p_ColumnName + "_null"] = true;
        }
        p_UpdateTable.Rows.Add(newRow);
    }
}
This way, if a column gets defaulted to null because it gets added only after some rows get added to the table, I can do a check in my update statement that doesn't update the value.
The update statement looks like this now:
// Whitelist check; "_null" companion columns are bookkeeping, not real
// table columns, so they are exempt.
string[] validColumns = SQL_Columns.Split(',');
var trimmed = validColumns.Select(c => c.Trim());
foreach(DataColumn column in p_UpdatesTable.Columns)
{
    if(!column.ColumnName.EndsWith("_null") && !trimmed.Contains(column.ColumnName))
    {
        throw new Exception("Column '" + column.ColumnName + "' is not valid for table");
    }
}
// A uniquely named table type is created per call and dropped in the finally
// block below. "N" format = no dashes/braces (replaces the Replace() chain).
string tableTypeName = "dbo.UpdateSpecific" + Guid.NewGuid().ToString("N");
StringBuilder tableTypeBuilder = new StringBuilder();
tableTypeBuilder.Append("CREATE TYPE ");
tableTypeBuilder.Append(tableTypeName);
tableTypeBuilder.Append(" AS TABLE (");
List<string> tableTypeColumns = new List<string>(p_UpdatesTable.Columns.Count);
StringBuilder commandBuilder = new StringBuilder();
commandBuilder.Append("UPDATE Table SET ");
List<string> columnsToUpdate = new List<string>(p_UpdatesTable.Columns.Count);
foreach (DataColumn column in p_UpdatesTable.Columns)
{
    //build command to create table type
    StringBuilder columnTypeBuilder = new StringBuilder();
    columnTypeBuilder.Append("[");
    columnTypeBuilder.Append(column.ColumnName);
    columnTypeBuilder.Append("] ");
    // Map the CLR column type onto a SQL type.
    if(column.DataType == typeof(int))
    {
        columnTypeBuilder.Append("INT");
    }
    else if(column.DataType == typeof(long))
    {
        columnTypeBuilder.Append("BIGINT");
    }
    else if(column.DataType == typeof(bool))
    {
        columnTypeBuilder.Append("BIT");
    }
    else if(column.DataType == typeof(string))
    {
        columnTypeBuilder.Append("VARCHAR(");
        columnTypeBuilder.Append(column.MaxLength);
        columnTypeBuilder.Append(")");
    }
    else if(column.DataType == typeof(byte[]))
    {
        columnTypeBuilder.Append("IMAGE");
    }
    tableTypeColumns.Add(columnTypeBuilder.ToString());
    //build actual update statement: write U's value unless the column came
    //through as NULL without its "_null" flag set (i.e. never supplied),
    //in which case keep the current value C.<column>.
    if (!column.ColumnName.Equals("UID", StringComparison.InvariantCultureIgnoreCase) && !column.ColumnName.EndsWith("_null"))
    {
        StringBuilder columnBuilder = new StringBuilder();
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append(" = (CASE WHEN U.");
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append(" IS NULL THEN (CASE WHEN ISNULL(U.");
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append("_null, 0) = 1 THEN U.");
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append(" ELSE C.");
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append(" END) ELSE U.");
        columnBuilder.Append(column.ColumnName);
        columnBuilder.Append(" END)");
        columnsToUpdate.Add(columnBuilder.ToString());
    }
}
tableTypeBuilder.Append(string.Join(",", tableTypeColumns.ToArray()));
tableTypeBuilder.Append(")");
commandBuilder.Append(string.Join(",", columnsToUpdate.ToArray()));
// FIX: the table-valued parameter is referenced as @UpdateTable; the
// original '#UpdateTable' names a temp table that is never created.
commandBuilder.Append(" FROM Table AS C JOIN @UpdateTable AS U ON C.UID = U.UID");
//Establish SQL Connection
using (SqlConnection sqlConnection = new SqlConnection(context.strContext[(int)eCCE_Context._CONNECTION_STRING]))
{
    sqlConnection.Open();
    try
    {
        using (SqlCommand createTableTypeCommand = new SqlCommand(tableTypeBuilder.ToString(), sqlConnection))
        {
            createTableTypeCommand.ExecuteNonQuery();
        }
        using (SqlCommand sqlCommand = new SqlCommand(commandBuilder.ToString(), sqlConnection))
        {
            // FIX: the parameter name must be '@UpdateTable', not '#UpdateTable'.
            SqlParameter updateTableParameter = sqlCommand.Parameters.Add("@UpdateTable", SqlDbType.Structured);
            updateTableParameter.Value = p_UpdatesTable;
            updateTableParameter.TypeName = tableTypeName;
            int rowsAffected = sqlCommand.ExecuteNonQuery();
            if (rowsAffected != p_UpdatesTable.Rows.Count)
            {
                throw new Exception("Update command affected " + rowsAffected + " rows out of the " + p_UpdatesTable.Rows.Count + " expected.");
            }
        }
    }
    finally
    {
        // Always drop the throwaway table type, even when the update failed.
        string dropStatement = "IF EXISTS (SELECT * FROM sys.types st JOIN sys.schemas ss ON st.schema_id = ss.schema_id WHERE st.name = N'"+ tableTypeName.Substring(tableTypeName.IndexOf(".")+1) +"' AND ss.name = N'dbo') DROP TYPE " + tableTypeName;
        using (SqlCommand dropTableTypeCommand = new SqlCommand(dropStatement, sqlConnection))
        {
            dropTableTypeCommand.ExecuteNonQuery();
        }
    }
    // FIX: removed the redundant sqlConnection.Dispose(); the using block
    // already disposes the connection.
}
I have an Excel file that originally had about 600 rows, and I was able to convert the excel file to a data table and everything got inserted into the sql table correctly.
The Excel file now has 3,600 rows and is having some type of issues that is not throwing an error but after 5 mins or so all the rows are still not inserted into the sql table.
Now, converting the Excel file to an in-memory datatable happens very quickly, but looping over the datatable and inserting into the sql table is where I'm losing data and it is very slow — yet I am receiving no errors whatsoever.
For one, on each insert I've got to make a new connection to the database and insert the record, and I already know this is very VERY wrong, and I'm hoping to get some guidance from one of the sql pros on this one.
What is the correct way to process a in memory datatable with 3,600 records / rows with-out making 3,600 new connections?
--Here is the code the processes the excel file, and This happens very quickly.--
// Loads the first sheet of the given workbook into a DataTable, then deletes
// the (transient) uploaded file. hasHeader must be the literal "Yes" or "No"
// expected by the HDR= placeholder in the configured connection string.
public static async Task<DataTable> ProcessExcelToDataTableAsync(string pathAndNewFileName, string hasHeader/*Yes or No*/)
{
    // Task.Run keeps the caller responsive; OleDb itself is synchronous.
    return await Task.Run(() =>
    {
        // Pick the connection string template by file extension.
        string conStr = "", SheetName = "";
        switch (Path.GetExtension(pathAndNewFileName))
        {
            case ".xls": //Excel 97-03
                conStr = ConfigurationManager.ConnectionStrings["Excel03ConString"].ConnectionString;
                break;
            case ".xlsx":
                conStr = ConfigurationManager.ConnectionStrings["Excel07ConString"].ConnectionString;
                break;
        }
        conStr = String.Format(conStr, pathAndNewFileName, hasHeader);
        DataTable dt = new DataTable();
        // FIX: the connection, command and adapter were never disposed, and
        // the connection was opened and closed twice; one open inside using
        // blocks does both jobs and survives exceptions.
        using (OleDbConnection connExcel = new OleDbConnection(conStr))
        using (OleDbCommand cmdExcel = new OleDbCommand())
        using (OleDbDataAdapter oda = new OleDbDataAdapter())
        {
            cmdExcel.Connection = connExcel;
            connExcel.Open();
            //Get the name of First Sheet
            DataTable dtExcelSchema = connExcel.GetOleDbSchemaTable(OleDbSchemaGuid.Tables, null);
            SheetName = dtExcelSchema.Rows[0]["TABLE_NAME"].ToString();
            //Read Data from First Sheet
            cmdExcel.CommandText = "SELECT * From [" + SheetName + "]";
            oda.SelectCommand = cmdExcel;
            oda.Fill(dt);
        }
        // The upload is transient: remove the temp file once parsed.
        if (File.Exists(pathAndNewFileName))
        {
            File.Delete(pathAndNewFileName);
        }
        return dt;
    });
}
--Here is the code that processes the in memory datatable and inserts each new record into the sql table, and this is where things stop working, no visible errors, but just does not return or work--
**I am in need of a better way to optimize this function where the records get inserted into the the sql table.
// Validates each Excel row, inserts the valid ones into the catalog one at a
// time, and splits the input into "good" and "bad" tables for reporting.
// NOTE(review): this inserts ONE ROW PER InsertPart CALL - 3,600 rows means
// 3,600 database round-trips. A set-based approach (table-valued parameter
// or SqlBulkCopy) would remove that bottleneck; the slowness reported for
// large files presumably originates here - TODO confirm with timing.
static async Task<ProcessDataTablePartsResult> ProcessDataTablePartsAsync(int genericCatalogID, DataTable initialExcelData)
{
// Expected parameters of the per-part insert procedure:
//#GenericCatalogID INT,
//#Number VARCHAR(50),
//#Name VARCHAR(200),
//#Length DECIMAL(8,4),
//#Width DECIMAL(8,4),
//#Height DECIMAL(8,4),
//#ProfileID TINYINT,
//#PackageQty DECIMAL(9,4),
//#CategoryID INT,
//#UnitMeasure VARCHAR(10),
//#Cost MONEY,
//#PartID INT OUT
// Task.Run offloads the (synchronous) work from the caller's thread.
return await Task.Run(() =>
{
// goodDataTable copies the schema (not the data) of the input.
DataTable badDataTable = null,
goodDataTable = initialExcelData.Clone();
goodDataTable.Clear();
int newPartID = 0,
currIx = 0,
numGoodRows = initialExcelData.Rows.Count,
numBadRows = 0;
List<int> badIndexes = new List<int>();
List<int> goodIndexes = new List<int>();
// Lookup lists are fetched once up front, not per row.
List<Profile> profiles = GenericCatalogManagerBL.GetProfiles(_genericCNN);
List<Category> categories = GenericCatalogManagerBL.GetAllCategoryNameID(_genericCNN);
// Case/whitespace-insensitive name -> ID lookups.
// NOTE(review): FirstOrDefault() yields null when nothing matches, so '.ID'
// throws NullReferenceException - the catch below then counts the row bad.
Func<string, byte> getProfileID = delegate(string x)
{
return profiles.Where(p => p.TheProfile.ToLower().Replace(" ", "") == x.ToLower().Replace(" ", "")).FirstOrDefault().ID;
};
Func<string, int> getCategoryID = delegate(string x)
{
return categories.Where(c => c.Name.ToLower().Replace(" ", "") == x.ToLower().Replace(" ", "")).FirstOrDefault().ID;
};
foreach (DataRow r in initialExcelData.Rows)
{
try
{
// Any parse or lookup failure below throws and marks this row bad.
IPart p = new Part
{
GenericCatalogID = genericCatalogID,
Number = r["Number"].ToString(),
Name = r["Name"].ToString(),
Length = decimal.Parse(r["Length"].ToString()),
Width = decimal.Parse(r["Width"].ToString()),
Height = decimal.Parse(r["Height"].ToString()),
ProfileID = getProfileID(r["Profile"].ToString()),
CategoryID = getCategoryID(r["Category"].ToString()),
PackageQty = int.Parse(r["PackageQty"].ToString()),
UnitMeasure = r["UnitMeasure"].ToString(),
Cost = decimal.Parse(r["Cost"].ToString())
};
// Per-row database insert - see the performance note above.
GenericCatalogManagerBL.InsertPart(_genericCNN, p, out newPartID);
goodIndexes.Add(currIx);
}
catch (Exception)
{
// Exception-as-flow: count the row bad and keep going.
numBadRows++;
numGoodRows--;
badIndexes.Add(currIx);
}
currIx++;
}
// Move good rows into goodDataTable. Delete() only MARKS rows until
// AcceptChanges() runs, so the stored indexes remain valid in this loop.
for (int i = 0; i < goodIndexes.Count; i++)
{
goodDataTable.ImportRow(initialExcelData.Rows[goodIndexes[i]]);
initialExcelData.Rows[goodIndexes[i]].Delete();
}
initialExcelData.AcceptChanges();
goodDataTable.AcceptChanges();
// Whatever rows remain after the purge are the bad ones.
if (initialExcelData.Rows.Count > 0)
{
badDataTable = initialExcelData;
}
return new ProcessDataTablePartsResult(numGoodRows, numBadRows, badDataTable, goodDataTable);
});
}
**--Here is the entire flow of the function--**
// End-to-end flow: save the uploaded workbook to the user's temp folder,
// parse it, insert the parts, and build (and optionally e-mail) an HTML
// report of added/excluded rows.
public static async Task<GenericPartsReport> ProcessGenericPartsAsync(int genericCatalogID, MembershipUser user, HttpRequest request, bool emailReport, bool hasHeaders)
{
    byte[] fbytes = new byte[request.ContentLength];
    // FIX: Stream.Read may return fewer bytes than requested; the original
    // single call could leave the tail of the buffer zeroed. Loop until the
    // whole body is consumed (or the stream ends early).
    int offset = 0;
    while (offset < fbytes.Length)
    {
        int bytesRead = request.InputStream.Read(fbytes, offset, fbytes.Length - offset);
        if (bytesRead == 0)
        {
            break; // client sent fewer bytes than ContentLength promised
        }
        offset += bytesRead;
    }
    string pathAndNewFileName = Path.GetRandomFileName() + Path.GetExtension(request.Headers["X-FILE-NAME"]),
           badReportTableString = "",
           goodReportTableString = "";
    GenericPartsReport report = new GenericPartsReport();
    //get the users temp folder
    pathAndNewFileName = UtilCommon.SiteHelper.GetUserTempFolder(user, request) + pathAndNewFileName;
    File.WriteAllBytes(pathAndNewFileName, fbytes);
    //process the excel file first
    DataTable excelDataTable = await ProcessExcelToDataTableAsync(pathAndNewFileName, hasHeaders ? "Yes" : "No");
    ProcessDataTablePartsResult processedResult = await ProcessDataTablePartsAsync(genericCatalogID, excelDataTable);
    // Build the per-outcome report fragments, disposing each table when done.
    if (processedResult.BadDataTable != null && processedResult.BadDataTable.Rows.Count > 0)
    {
        badReportTableString = await BuildTableReportAsync(processedResult.BadDataTable, "AlumCloud Parts Not Added Report");
        processedResult.BadDataTable.Dispose();
    }
    if (processedResult.GoodDataTable != null && processedResult.GoodDataTable.Rows.Count > 0)
    {
        goodReportTableString = await BuildTableReportAsync(processedResult.GoodDataTable, "AlumCloud Parts Added Report");
        processedResult.GoodDataTable.Dispose();
    }
    report.Report = "A total number of (" + processedResult.NumberOfGoodRows + ") records was added to your generic catalog.<br/><br/>A total number of (" + processedResult.NumberOfBadRows + ") records were excluded from being added to your generic catalog.";
    if (processedResult.NumberOfBadRows > 0)
    {
        report.Report += "<br/><br/>You can review an excel file that meets the standards here: <a href='" + _exampleExcelFile + "'>How to format a part excel file</a>.";
        report.HasBadRows = true;
    }
    if (processedResult.NumberOfGoodRows > 0)
    {
        report.Report += "<br/><br/><b>Below is all of the parts that were added to your generic catalog<b/><br/><br/>" + goodReportTableString;
    }
    if (processedResult.NumberOfBadRows > 0)
    {
        report.Report += "<br/><br/><b>Below is all of the parts that were not added to your generic catalog</b><br/><br/>" + badReportTableString;
    }
    if (emailReport)
    {
        AFCCIncCommonUtil.EmailUtil.SendMailToThreadPool(user.Email, _supportEmail, report.Report, "AlumCloud Generic Catalog Parts Report", true);
    }
    excelDataTable.Dispose();
    return report;
}
--This is the function that never returns or is in some state of limbo--
ProcessDataTablePartsResult processedResult = await ProcessDataTablePartsAsync(genericCatalogID, excelDataTable);
In my Page I am fetching a value from the Database & filling the values in the DataTable. I am then comparing that values with the mac String in the IF.
Based upon the condition in the Query there will be no records fetched, it is stuck in the IF condition and throws the No row at Position 0 Exception rather than going into the Else part.
My code is:
string mac = GetMac();
// FIX: UserName.Text was concatenated straight into the SQL text - a SQL
// injection hole and a quoting bug waiting to happen. Use a parameter.
string Qry = "Select VUserid,Password from passtable where VUserid=@VUserid and Flag='A'";
string qry = "Select VUserid,Password from passtable where Flag='A'";
string strq = "Select Mac_id from Sysinfo Where Appflag='A'";
// NOTE(review): passwords appear to be stored and compared in plain text -
// worth revisiting, though out of scope for this fix.
using (SqlConnection conn = new SqlConnection(ConfigurationManager.ConnectionStrings["EvalCon"].ConnectionString))
{
    // The using block disposes the connection, making the original
    // try/finally with Close()+Dispose() redundant.
    SqlCommand cmd = new SqlCommand(Qry, conn);
    cmd.Parameters.AddWithValue("@VUserid", UserName.Text);
    SqlCommand cmd1 = new SqlCommand(qry, conn);
    SqlCommand cmd2 = new SqlCommand(strq, conn);
    conn.Open();
    SqlDataAdapter da = new SqlDataAdapter(cmd);
    SqlDataAdapter daa = new SqlDataAdapter(cmd1);
    SqlDataAdapter dap = new SqlDataAdapter(cmd2);
    DataTable dt = new DataTable();
    DataTable dtt = new DataTable();
    DataTable tab = new DataTable();
    da.Fill(dt);
    daa.Fill(dtt);
    dap.Fill(tab);
    for (int i = 0; i < tab.Rows.Count; i++)
    {
        for (int x = 0; x <= dtt.Rows.Count - 1; x++)
        {
            // FIX: the original test was 'mac == ... || tab.Rows.Count != 0'.
            // Inside this loop Count can never be 0, so the || made the test
            // always true and the else branch unreachable. Only the MAC match
            // matters here.
            if (mac == tab.Rows[i]["Mac_id"].ToString())
            {
                if (UserName.Text == dtt.Rows[x]["VUserid"].ToString() && Password.Text == dtt.Rows[x]["Password"].ToString())
                {
                    Response.Redirect("~/Changepass.aspx");
                    break;
                }
                else
                {
                    lblMessage.Visible = true;
                    lblMessage.ForeColor = System.Drawing.Color.Red;
                    lblMessage.Text = "Invalid Username or Password !!!";
                }
            }
            else
            {
                lblMessage.Visible = true;
                lblMessage.ForeColor = System.Drawing.Color.Red;
                lblMessage.Text = "Invalid Access Point for Evaluation !!!";
            }
        }
    }
}
First of all, you may want to give some more meaningful names to your variables.
On a side note, you may want to change your for loops into a foreach loop:
// FIX: the original iterated 'tab.Rows.Count' / 'dtt.Rows.Count' (ints),
// which does not compile - foreach needs the row collections themselves.
foreach (DataRow tabRow in tab.Rows)
{
    foreach (DataRow dttRow in dtt.Rows)
    {
        // logic here
        // tab.Rows[i]["Mac_id"] becomes tabRow["Mac_id"]
        // and
        // dtt.Rows[x]["VUserid"] becomes dttRow["VUserid"]
        // and so on...
    }
}
This way if there are no records fetched it won't go in.
After that, you may want to check the conditions on RowCount > 0 for the datatables before going inside the loops and act outside the loop if the RowCount is 0.
Just swap your OR condition in If statement:
if (tab.Rows.Count != 0 || mac == tab.Rows[i]["Mac_id"].ToString())
{
...
...
}
If you need to check against a null result I'd wrap my if statement in another if statement that does a simple check for a null result before doing anything else. Something like this will check if it's not null:
// FIX: 'tab.rows' does not compile - the property is 'Rows'. Also note a
// DataRow indexer reports database NULL as DBNull.Value, never C# null, so
// the meaningful emptiness check is against DBNull.Value.
if(tab.Rows[i]["Mac_id"] != DBNull.Value)
{
    //logic here
}
You could add this into your current if statement check so:
if(mac == tab.Rows[i]["Mac_id"].ToString() || tab.Rows.Count != 0)
becomes:
if(mac == tab.Rows[i]["Mac_id"].ToString() || (tab.Rows.Count != 0 && tab.Rows[i]["Mac_id"] != null)) // FIX: 'tab.rows' casing did not compile; parentheses make the &&-over-|| precedence explicit. NOTE(review): the indexer returns DBNull.Value for NULLs, so '!= null' is likely always true - confirm intent.
Though as Tallmaris says it might be better to restructure it using a foreach loop instead.
Apologies if this is a daft error - but have been struggling a few weeks with this and I am no further on.
The problem is - my update call from a DT based DS to an SQL Db is only inserting nerw records, not updating modifed ones.
I have a datasource (httpWebRequested html table) which I call periodically and update a DataTable belonging to a DataSet with it. Initially I parsed it all into a second DataTable and merged it with my DataSet table - but this wasnt working, so currently I have one DataTable only and either insert new rows into table or update existing values by iterating through the source and adding a row at a time to an array - checking for existance of current row, and either updating or inserting ([Session ID] is a PK, and Data[0] is corresponding unique value):-
foreach (var row in rows.Skip(1))
{
    //create new list collection
    var data = new List<string>();
    //collect the cell text of every <td> in this row
    foreach (var column in row.Descendants("td"))
    {
        //add data to list from Table
        data.Add(column.InnerText);
    }
    // FIX: escape embedded single quotes so a Session ID containing ' does
    // not break (or subvert) the DataTable.Select filter expression.
    string strSelect = "[Session ID] = '" + data[0].Replace("'", "''") + "'";
    DataRow[] myRow = dt.Select(strSelect);
    if (myRow.Length == 1)
    {
        // Existing session: write only cells whose value actually changed so
        // untouched rows keep RowState == Unchanged. The index list replaces
        // the original five copy-pasted if-blocks (columns 2,3,4,5 and 7).
        foreach (int col in new[] { 2, 3, 4, 5, 7 })
        {
            if (myRow[0][col].ToString() != data[col])
            {
                myRow[0][col] = data[col];
            }
        }
    }
    else
    {
        // New session: append the scraped values as a fresh row.
        dt.Rows.Add(data.ToArray());
    }
I put a bit on the end also which counts the Added and Modified:-
int modified = 0;
int added = 0;
// Tally the pending changes by RowState (a row is in exactly one state).
foreach (DataRow dr in pca.chatDataSetG.Tables[0].Rows)
{
    switch (dr.RowState)
    {
        case DataRowState.Modified:
            modified++;
            break;
        case DataRowState.Added:
            added++;
            break;
    }
}
At this point - the counts are fine - modified pick up the rows where attributes have changed and added is correct.
When I call my update Db method - things start to go wrong:-
// Pushes the pending Added/Modified rows of the in-memory chat table to the
// chatData table via a command-builder-equipped adapter.
public static void updateSqlTable()
{
    string connectionString = "Connection String here";
    // FIX: '#"select..."' is not valid C# - the verbatim prefix is '@'.
    string qry = @"select * from chatData";
    using (SqlConnection conn = new SqlConnection(connectionString))
    {
        try
        {
            using (SqlDataAdapter da = new SqlDataAdapter(qry, conn))
            using (SqlCommandBuilder sb = new SqlCommandBuilder(da))
            {
                log.Info("Building Queries...");
                da.UpdateCommand = sb.GetUpdateCommand();
                da.InsertCommand = sb.GetInsertCommand();
                da.DeleteCommand = sb.GetDeleteCommand();
                // FIX (the actual bug): da.Fill() was called here on the table
                // that already held the pending edits. Fill merges incoming
                // rows with matching existing rows (the table has a primary
                // key) and resets their RowState to Unchanged, so the
                // following Update() saw zero Modified rows and the database
                // never received the changes. Do NOT re-fill before updating.
                int modified = 0;
                int added = 0;
                // Diagnostic tally of what Update() is about to send.
                foreach (DataRow dr in pca.chatDataSetG.Tables[0].Rows)
                {
                    if (dr.RowState == DataRowState.Modified)
                    {
                        modified++;
                    }
                    if (dr.RowState == DataRowState.Added)
                    {
                        added++;
                    }
                }
                conn.Open();
                log.Info("Calling Update to DB...");
                int rowseffected = da.Update(pca.chatDataSetG, "Chat");
                log.Info("Update Complete - " + rowseffected + " rows effected........");
            }
        }
        catch (Exception ex)
        {
            log.Error("Error Updating Db with chat Data", ex);
        }
        // The using block replaces the original finally { conn.Close(); }.
    }
}
The counts are right before the fill, but after - the added count remains the same - good, but the modified count goes to 0 - bad :( . I have tried all shapes to try and see what is going on here - but honestly - am stumped. Any help would be very, very, very much appreciated. The rows in the Db are not reflecting new values.
Peter
You do re-fill your DataSet/Tables at this line of code
//This is where the modified count reverts
da.Fill(pca.chatDataSetG, "Chat");
Within MSDN DataAdapter fill I think there is an explanation for the behaviour described
You can use the Fill method multiple times on the same DataTable. If a
primary key exists, incoming rows are merged with matching rows that
already exist. If no primary key exists, incoming rows are appended to
the DataTable.
IMO the modified rows got merged with the original ones from the db and therefore lost their RowState, whereas inserted items got appended as described.