I am using the P4.NET API to generate some reports from Perforce metadata.
In one of the reports, I need to compute the number of changed lines for each changeset.
As the reporting tool I am using MS SQL Reporting Services 2008, and I have written a custom DLL that uses the P4.NET API to calculate the number of changed lines. It works locally without any problem. However, when I run the code on the server, it processes roughly the first 20% and then starts throwing this exception:
Unable to connect to the Perforce Server!
Unable to connect to Perforce!
I try the same credentials locally and it works. I use the command line with the same credentials on the server, and it works too.
Could anyone who has encountered this before help me with it?
Here is the code I use, in case it is needed:
public static class PerforceLib
{
    public static P4Connection p4conn = null;

    private static void CheckConn()
    {
        try
        {
            if (p4conn == null)
            {
                p4conn = new P4Connection();
                p4conn.Port = "address";
                p4conn.User = "user";
                p4conn.Password = "pwd*";
                p4conn.Connect();
                p4conn.Login("pwd");
            }
            else if (!p4conn.IsValidConnection(true, false))
            {
                Log("Check CONN : Connection is not valid, reconnecting");
                p4conn.Login("pwd*");
            }
        }
        catch (Exception ex)
        {
            Log(ex.Message);
        }
    }

    public static int DiffByChangeSetNumber(string ChangeSetNumber)
    {
        try
        {
            CheckConn();
            P4Record set = p4conn.Run("describe", "-s", ChangeSetNumber)[0];
            string[] files = set.ArrayFields["depotFile"].ToArray<string>();
            string[] revs = set.ArrayFields["rev"].ToArray<string>();
            string[] actions = set.ArrayFields["action"].ToArray<string>();
            int totalChanges = 0;
            List<P4File> lstFiles = new List<P4File>();
            for (int i = 0; i < files.Length; i++)
            {
                if (actions[i] == "edit")
                    lstFiles.Add(new P4File() { DepotFile = files[i], Revision = revs[i], Action = actions[i] });
            }
            foreach (var item in lstFiles)
            {
                if (item.Revision != "1")
                {
                    string firstfile = string.Format("{0}#{1}", item.DepotFile, (int.Parse(item.Revision) - 1).ToString());
                    string secondfile = string.Format("{0}#{1}", item.DepotFile, item.Revision);
                    P4UnParsedRecordSet rec = p4conn.RunUnParsed("diff2", "-ds", firstfile, secondfile);
                    if (rec.Messages.Count() > 1)
                    {
                        totalChanges = PerforceUtil.GetDiffResults(rec.Messages[1].ToString(), item.DepotFile);
                    }
                }
            }
            GC.SuppressFinalize(lstFiles);
            Log(string.Format("{0} / {1}", ChangeSetNumber, totalChanges.ToString() + Environment.NewLine));
            return totalChanges;
        }
        catch (Exception ex)
        {
            Log(ex.Message + Environment.NewLine);
            return -1;
        }
    }
}
Your help will be appreciated. Many thanks.
I have solved this issue. We identified that the code was cycling through the entire ephemeral port range in around two minutes; once it reached the highest ephemeral port, it tried to reuse the same ports. Because each Perforce command creates a new socket, the available ports ran out after about 1000 changesets had been processed.
I set the ReservedPorts value of HKLM\SYSTEM\CurrentControlSet\Services\Tcpip\Parameters to default(1433,143), which gave me a larger range of ephemeral ports.
I also implemented a singleton pattern for P4Conn, which helped because I no longer close the connection; I only check the validity of the connection and log in again if it is not valid.
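For reference, here is a minimal sketch of the singleton idea, using the same P4.NET calls as the code above (the connection settings are placeholders, as in the original):

// Sketch only: one shared P4Connection for the whole process. Reconnects by
// re-logging-in instead of opening a new socket, so the ephemeral port range
// is not exhausted. Settings ("address", "user", "pwd*") are placeholders.
public static class P4ConnectionHolder
{
    private static readonly object SyncRoot = new object();
    private static P4Connection conn;

    public static P4Connection Instance
    {
        get
        {
            lock (SyncRoot)
            {
                if (conn == null)
                {
                    conn = new P4Connection { Port = "address", User = "user", Password = "pwd*" };
                    conn.Connect();
                    conn.Login("pwd*");
                }
                else if (!conn.IsValidConnection(true, false))
                {
                    conn.Login("pwd*");
                }
                return conn;
            }
        }
    }
}

DiffByChangeSetNumber would then use P4ConnectionHolder.Instance instead of creating or holding its own connection.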
Please let me know if any of you needs help with this.
I wrote a program using CSOM to upload documents to SharePoint and set metadata on their properties. Once in a while (roughly every three months) the SharePoint server gets busy, or we reset IIS, or there is some other communication problem, and we get "The operation has timed out" on clientContext.ExecuteQuery(). To work around this I wrote an extension method for ExecuteQuery that retries every 10 seconds, up to 5 times. My code works in the Dev and QA environments without any problem, but in Prod, when the first attempt fails with the timeout error, the second attempt only uploads the document and does not update the properties, so all the properties end up empty in the library. ExecuteQuery() returns no error, but of the two requests in the batch, uploading the file and updating the properties, only the upload seems to happen and I don't know what becomes of the property update. It is as if it were removed from the batch on the second attempt!
I used both upload methods, docs.RootFolder.Files.Add and File.SaveBinaryDirect, in different parts of my code, but I am copying just one of them here so you can see what I have.
I appreciate your help.
public static void ExecuteSharePointQuery(ClientContext context)
{
    int cnt = 0;
    bool isExecute = false;
    while (cnt < 5)
    {
        try
        {
            context.ExecuteQuery();
            isExecute = true;
            break;
        }
        catch (Exception ex)
        {
            cnt++;
            Logger.Error(string.Format("Communication attempt with SharePoint failed. Attempt {0}", cnt));
            Logger.Error(ex.Message);
            Thread.Sleep(10000);
            if (cnt == 5 && isExecute == false)
            {
                Logger.Error(string.Format("Couldn't execute the query in SharePoint."));
                Logger.Error(ex.Message);
                throw;
            }
        }
    }
}
public static void UploadSPFileWithProperties(string siteURL, string listTitle, FieldMapper item)
{
    Logger.Info(string.Format("Uploading to SharePoint: {0}", item.pdfPath));
    using (ClientContext clientContext = new ClientContext(siteURL))
    {
        using (FileStream fs = new FileStream(item.pdfPath, FileMode.Open))
        {
            try
            {
                FileCreationInformation fileCreationInformation = new FileCreationInformation();
                fileCreationInformation.ContentStream = fs;
                fileCreationInformation.Url = Path.GetFileName(item.pdfPath);
                fileCreationInformation.Overwrite = true;
                List docs = clientContext.Web.Lists.GetByTitle(listTitle);
                Microsoft.SharePoint.Client.File uploadFile = docs.RootFolder.Files.Add(fileCreationInformation);
                uploadFile.CheckOut();
                // Update the metadata
                ListItem listItem = uploadFile.ListItemAllFields;
                // Set field values on the item
                foreach (List<string> list in item.fieldMappings)
                {
                    if (list[FieldMapper.SP_VALUE_INDEX] != null)
                    {
                        TrySet(ref listItem, list[FieldMapper.SP_FIELD_NAME_INDEX], (FieldType)Enum.Parse(typeof(FieldType), list[FieldMapper.SP_TYPE_INDEX]), list[FieldMapper.SP_VALUE_INDEX]);
                    }
                }
                listItem.Update();
                uploadFile.CheckIn(string.Empty, CheckinType.OverwriteCheckIn);
                SharePointUtilities.ExecuteSharePointQuery(clientContext);
            }
            catch (Exception ex)
            {
                // NOTE: exceptions are swallowed here; at minimum, log ex.
            }
        }
    }
}
There are too many possible reasons for me to really suggest a solution, especially considering it only happens in the prod environment.
What I can say is that it is probably easiest to keep a reference to the last uploaded file. If your code fails, check whether that last file was uploaded correctly before re-running the whole batch.
Side note: I'm not sure if this is relevant, but if it's a large file you want to upload it in slices, as sketched below.
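For completeness, here is a rough sketch of slice-based uploading. It assumes the StartUpload / ContinueUpload / FinishUpload methods that newer CSOM versions (SharePoint Online, SharePoint 2016 and later) expose on Microsoft.SharePoint.Client.File, and it assumes the file is larger than one slice; listTitle and filePath are illustrative parameters:

// Sketch only: upload a large file in ~8 MB slices.
public static void UploadFileInSlices(ClientContext ctx, string listTitle, string filePath)
{
    const int blockSize = 8 * 1024 * 1024; // 8 MB per slice
    Guid uploadId = Guid.NewGuid();
    string fileName = Path.GetFileName(filePath);

    List docs = ctx.Web.Lists.GetByTitle(listTitle);
    Microsoft.SharePoint.Client.File uploadFile = null;
    long fileOffset = 0;
    bool first = true;

    using (FileStream fs = System.IO.File.OpenRead(filePath))
    {
        byte[] buffer = new byte[blockSize];
        int bytesRead;
        while ((bytesRead = fs.Read(buffer, 0, buffer.Length)) > 0)
        {
            bool last = fs.Position == fs.Length;
            using (var slice = new MemoryStream(buffer, 0, bytesRead))
            {
                if (first)
                {
                    // The first slice creates an empty file and opens the upload session.
                    var fci = new FileCreationInformation
                    {
                        ContentStream = new MemoryStream(),
                        Url = fileName,
                        Overwrite = true
                    };
                    uploadFile = docs.RootFolder.Files.Add(fci);
                    ClientResult<long> result = uploadFile.StartUpload(uploadId, slice);
                    ctx.ExecuteQuery();
                    fileOffset = result.Value;
                    first = false;
                }
                else if (!last)
                {
                    ClientResult<long> result = uploadFile.ContinueUpload(uploadId, fileOffset, slice);
                    ctx.ExecuteQuery();
                    fileOffset = result.Value;
                }
                else
                {
                    // The final slice commits the file.
                    uploadFile = uploadFile.FinishUpload(uploadId, fileOffset, slice);
                    ctx.ExecuteQuery();
                }
            }
        }
    }
}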
My question follows this one: Writing large number of records (bulk insert) to Access in .NET/C#
I've tried the method indicated there with DAO. At first it did not work; I got this error: Retrieving the COM class factory for component with CLSID {681EF637-F129-4AE9-94BB-618937E3F6B6} failed due to the following error: 80040154. I searched and found a solution involving registry keys, and it worked, but when I tried it on another PC, the function made my application crash.
Here is my code, and where it fails:
using Microsoft.Office.Interop.Access.Dao;

public void ExportDataTableToAccess(System.Data.DataTable dtOutData)
{
    Microsoft.Office.Interop.Access.Application access = new Microsoft.Office.Interop.Access.Application();
    _DBEngine dbEngine = null;
    try
    {
        dbEngine = access.DBEngine; // Fails here
        // I have tried DBEngine dbEngine = access.DBEngine;
        // that did not work either.
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
    try
    {
        Boolean CheckFl = false;
        Database db = dbEngine.OpenDatabase(dbPath);
        Recordset AccesssRecordset = db.OpenRecordset(dtOutData.TableName);
        Field[] AccesssFields = new Field[dtOutData.Columns.Count];
        // Loop over each row of dtOutData
        for (Int32 rowCounter = 0; rowCounter < dtOutData.Rows.Count; rowCounter++)
        {
            AccesssRecordset.AddNew();
            // Loop over the columns
            for (Int32 colCounter = 0; colCounter < dtOutData.Columns.Count; colCounter++)
            {
                // The first time through, set up the field names.
                if (!CheckFl)
                    AccesssFields[colCounter] = AccesssRecordset.Fields[dtOutData.Columns[colCounter].ColumnName];
                AccesssFields[colCounter].Value = dtOutData.Rows[rowCounter][colCounter];
            }
            AccesssRecordset.Update();
            CheckFl = true;
        }
        AccesssRecordset.Close();
        db.Close();
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
    finally
    {
        System.Runtime.InteropServices.Marshal.ReleaseComObject(dbEngine);
    }
}
I've tried this solution too: How to repair COMException error 80040154?
It still works on my computer, but on no other one, and I don't know how to apply all the registry manipulations on each computer to make it work (I don't want that solution anyway, since it would take a huge amount of time).
I know this is a 32/64-bit problem somewhere, but even the x86 build doesn't work on other computers. How should I resolve this problem?
This is my second question on this site; please tell me if something is wrong with my question, and I will do everything possible to correct it :)
Install the Primary Interop Assemblies (PIA) to fix the DAO driver not being found.
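As a side note, and only as a sketch under the assumption that the DAO interop assembly is referenced directly: some code instantiates the DAO engine itself instead of going through the Access.Application object, which removes one COM dependency. The path and table below are hypothetical, and the interop's bitness (x86 vs x64) must match the installed Access Database Engine:

// Sketch only: create the DAO DBEngine directly (no Access.Application).
using Microsoft.Office.Interop.Access.Dao;

public void ExportViaDaoEngine(System.Data.DataTable dtOutData, string dbPath)
{
    DBEngine dbEngine = new DBEngine();
    Database db = dbEngine.OpenDatabase(dbPath); // dbPath is hypothetical
    try
    {
        Recordset rs = db.OpenRecordset(dtOutData.TableName);
        foreach (System.Data.DataRow row in dtOutData.Rows)
        {
            rs.AddNew();
            foreach (System.Data.DataColumn col in dtOutData.Columns)
            {
                rs.Fields[col.ColumnName].Value = row[col];
            }
            rs.Update();
        }
        rs.Close();
    }
    finally
    {
        db.Close();
        System.Runtime.InteropServices.Marshal.ReleaseComObject(dbEngine);
    }
}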
I want to make an application that recognizes license plates from an image. I use OpenCvSharp and Puma.NET.
But when I start my application, it reports that the number was not found.
When I use breakpoints, I get the exception "Recognition engine halted with code:0".
I loaded three DLLs: dibapi.dll, puma.net.dll, and puma.interop.dll.
Why are the numbers not recognized?
public void RecognizePlate()
{
    plateList.Clear();
    int i = 1;
    foreach (var plateImage in plate)
    {
        plateList.Add(i.ToString() + " ) " + RunPuma(plateImage));
        i++;
    }
}

string RunPuma(IplImage img)
{
    using (PumaPage page = new PumaPage(img.ToBitmap()))
    {
        page.FileFormat = PumaFileFormat.RtfAnsi;
        page.AutoRotateImage = true;
        page.FontSettings.DetectBold = true;
        page.FontSettings.DetectItalic = true;
        page.EnableSpeller = false;
        page.Language = PumaLanguage.English;
        try
        {
            return page.RecognizeToString();
        }
        catch (Exception)
        {
            return "This is NOT a NUMBER";
        }
    }
}
You will need to restart Visual Studio as Administrator, and then you should be able to work.
The problem is an unsuccessful registration.
According to the documentation, the APuma.dll component should be registered during installation, but the *.bat file seems to be wrong, at least on my computer.
I solved the problem by:
moving all files from Puma.NET\COM Server\Register to Puma.NET\COM Server
opening a console in the Puma.NET\COM Server directory
running this command: regsvr32 APuma.dll
If you get a successful registration message, George is your uncle!
I have been getting an out-of-memory exception for four months. My client has a web service and wants me to test it. The web service has an upload function, and I test it with 1500 users uploading at the same time. I have tried forcing garbage collection (GC). With a 2 MB file there is no exception, but with an 8 MB file there is still an out-of-memory exception. I have tried many solutions many times, but it still happens, and it is driving me crazy. While the upload is running I watch the memory on all the test computers, and they do not run out of memory, so I think the problem is in the web service and the server. But my client says I have to show that the cause lies in their web service and server. Also, the client does not share the code; I can only call the web service's functions to test it, and I have to use a VPS to connect to the web service, where the network is rather slow.
I have to make sure that my test script doesn't have any problem. Here is my test script for the upload function.
public void UploadNewJob(string HalID, string fileUID, string jobUID, string fileName, out List<string> errorMessages)
{
    errorMessages = null;
    try
    {
        int versionNumber;
        int newVersionNumber;
        string newRevisionTag;
        datasyncservice.ErrorObject errorObj = new datasyncservice.ErrorObject();
        PfgDbJob job = new PfgDbJob();
        job.CompanyName = Constant.SEARCH_CN;
        job.HalliburtonSalesOffice = Constant.SEARCH_SO;
        job.HalliburtonOperationsLocation = Constant.SEARCH_OL;
        job.UploadPersonHalId = HalID;
        job.CheckOutState = Constant.CHECKOUT_STATE;
        job.RevisionTag = Constant.NEW_REVISION_TAG;
        var manifestItems = new List<ManifestItem>();
        var newManifestItems = new List<ManifestItem>();
        var manifestItem = new ManifestItem();
        if (fileUID == "")
        {
            if (job.JobUid == Guid.Empty)
                job.JobUid = Guid.NewGuid();
            if (job.FileUid == Guid.Empty)
                job.FileUid = Guid.NewGuid();
        }
        else
        {
            Guid JobUid = new Guid(jobUID);
            job.JobUid = JobUid;
            Guid fileUid = new Guid(fileUID);
            job.FileUid = fileUid;
        }
        // Change the next line when we transfer .ssp files by parts
        manifestItem.PartUid = job.FileUid;
        job.JobFileName = fileName;
        manifestItem.BinaryFileName = job.JobFileName;
        manifestItem.FileUid = job.FileUid;
        manifestItem.JobUid = job.JobUid;
        manifestItem.PartName = string.Empty;
        manifestItem.SequenceNumber = 0;
        manifestItems.Add(manifestItem);
        errorMessages = DataSyncService.Instance.ValidateForUploadPfgDbJobToDatabase(out newVersionNumber, out newRevisionTag, out errorObj, out newManifestItems, HalID, job, false);
        if (manifestItems.Count == 0)
            manifestItems = newManifestItems;
        if (errorMessages.Count > 0)
        {
            if (errorMessages.Count > 1 || errorMessages[0].IndexOf("NOT AN ERROR") == -1)
            {
                return;
            }
        }
        // Upload the new job
        Guid transferUid;
        long a = GC.GetTotalMemory(false);
        byte[] fileContents = File.ReadAllBytes(fileName);
        fileContents = null;
        GC.Collect();
        long b = GC.GetTotalMemory(false);
        //Assert.Fail((b - a).ToString());
        //errorMessages = DataSyncService.Instance.UploadFileInAJob(out transferUid, out errorObj, job.UploadPersonHalId, job, manifestItem, fileContents);
        DataSyncService.Instance.UploadPfgDbJobToDatabase(out errorObj, out versionNumber, job.UploadPersonHalId, job, false, manifestItems);
    }
    catch (Exception ex)
    {
        Assert.Fail("Error from Test Scripts: " + ex.Message);
    }
}
Please review my test code; if there is no problem in it, I have to show that the cause is not coming from my test code. T_T
My guess would be that you hit the 2 GB object size limit of .NET (1500 * 8 MB > 4 GB).
You should consider changing to .NET 4.5 and using the large object mode, see here; the setting is called gcAllowVeryLargeObjects.
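For reference, the setting goes in the application's .config file (it requires .NET 4.5 or later and a 64-bit process, and lifts the 2 GB limit for arrays only):

<configuration>
  <runtime>
    <gcAllowVeryLargeObjects enabled="true" />
  </runtime>
</configuration>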
For instances when Active Directory takes too long to replicate data between sites, I need to ensure that the local AD replica contains the most up-to-date information.
How can I get a list of domain controllers for the current site?
I haven't found anything on CodeProject or StackOverflow.
Going to all this trouble is probably wasted effort. Unless you are experiencing issues with the built-in logic for finding a domain controller, you should just go with the built-in method that returns one. According to Microsoft, it automatically tries to find the closest one: http://technet.microsoft.com/en-us/library/cc978016.aspx.
Just use the static DomainController.FindOne method and pass in your DirectoryContext.
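For example, a minimal sketch (the domain name here is illustrative):

// Sketch: let the DC Locator pick a domain controller; it prefers the current site.
// "corp.example.com" is a placeholder for your domain's DNS name.
var context = new DirectoryContext(DirectoryContextType.Domain, "corp.example.com");
using (DomainController dc = DomainController.FindOne(context))
{
    Console.WriteLine(dc.Name);
}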
Update
Alright, try the code below and let me know how it works for you. It pings each DC and records the round-trip time, skipping any that do not respond (round-trip time of -1), and flags PDC status if present. The results are ordered by PDC status, then by ping round-trip time.
static void Main(string[] args)
{
    var dcsInOrder = (from DomainController c in Domain.GetCurrentDomain().DomainControllers
                      let responseTime = Pinger(c.Name)
                      where responseTime >= 0
                      let pdcStatus = c.Roles.Contains(ActiveDirectoryRole.PdcRole)
                      orderby pdcStatus, responseTime
                      select new { DC = c, ResponseTime = responseTime }
                     ).ToList();

    foreach (var dc in dcsInOrder)
    {
        System.Console.WriteLine(dc.DC.Name + " - " + dc.ResponseTime);
    }
    System.Console.ReadLine();
}

private static int Pinger(string address)
{
    Ping p = new Ping();
    try
    {
        PingReply reply = p.Send(address, 3000);
        if (reply.Status == IPStatus.Success)
            return (int)reply.RoundtripTime;
    }
    catch { }
    return -1;
}
First, I'll answer the question that you actually asked:
System.DirectoryServices.ActiveDirectory.ActiveDirectorySite.GetComputerSite().Servers
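For instance, a quick sketch that lists the directory servers registered in the current machine's site:

// Sketch: enumerate the servers of the AD site this computer belongs to.
using (ActiveDirectorySite site = ActiveDirectorySite.GetComputerSite())
{
    foreach (DirectoryServer server in site.Servers)
    {
        Console.WriteLine(server.Name);
    }
}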
But it seems like you're asking how to make sure you're talking to the closest domain controller possible. Windows doesn't exactly provide this functionality; the best it will do is give you a domain controller in the same site the code is running from.
I think the first thing to check is that your sites and subnets are configured correctly. Run Active Directory Sites and Services, and make sure that subnets and domain controllers are assigned to the correct sites.
This MSDN page (and the TechNet article in Peter's answer) says that you must search by the DNS name for the DC Locator to attempt to find a DC in the current site. I don't know whether the Name property of the Domain class is the DNS domain name.
I have to assume that DomainController.FindOne is a wrapper for DsGetDcName. At that link you can find how to turn on tracing for that function; you can use this if you still have problems, or maybe you should just P/Invoke that function directly.
Here is a code sample with no hard-coded DCs. Comments and criticism are welcome.
/// <summary>
/// For best results ensure all hosts are pingable, and turned on.
/// </summary>
/// <returns>An ordered list of DCs with the PDCE first</returns>
static LinkedList<DomainController> GetNearbyDCs()
{
    LinkedList<DomainController> preferredDCs = new LinkedList<DomainController>();
    List<string> TestedDCs = new List<string>();
    using (var mysite = ActiveDirectorySite.GetComputerSite())
    {
        using (var currentDomain = Domain.GetCurrentDomain())
        {
            DirectoryContext dctx = new DirectoryContext(DirectoryContextType.Domain, currentDomain.Name);
            var listOfDCs = DomainController.FindAll(dctx, mysite.Name);
            foreach (DomainController item in listOfDCs)
            {
                Console.WriteLine(item.Name);
                if (IsConnected(item.IPAddress))
                {
                    // Enumerating "Roles" will cause the object to bind to the server
                    ActiveDirectoryRoleCollection rollColl = item.Roles;
                    if (rollColl.Count > 0)
                    {
                        foreach (ActiveDirectoryRole roleItem in rollColl)
                        {
                            if (!TestedDCs.Contains(item.Name))
                            {
                                TestedDCs.Add(item.Name);
                                if (roleItem == ActiveDirectoryRole.PdcRole)
                                {
                                    preferredDCs.AddFirst(item);
                                    break;
                                }
                                else
                                {
                                    if (preferredDCs.Count > 0)
                                    {
                                        var tmp = preferredDCs.First;
                                        preferredDCs.AddBefore(tmp, item);
                                    }
                                    else
                                    {
                                        preferredDCs.AddFirst(item);
                                    }
                                    break;
                                }
                            }
                        }
                    }
                    else
                    {
                        // The DC exists but has no roles
                        TestedDCs.Add(item.Name);
                        if (preferredDCs.Count > 0)
                        {
                            var tmp = preferredDCs.First;
                            preferredDCs.AddBefore(tmp, item);
                        }
                        else
                        {
                            preferredDCs.AddFirst(item);
                        }
                    }
                }
                else
                {
                    preferredDCs.AddLast(item);
                }
            }
        }
    }
    return preferredDCs;
}
static bool IsConnected(string hostToPing)
{
    Ping p = new Ping();
    try
    {
        PingReply reply = p.Send(hostToPing, 3000);
        if (reply.Status == IPStatus.Success)
            return true;
    }
    catch { }
    return false;
}
Here's my approach using PowerShell, but I'm sure it's simple to implement in C#, etc. If DHCP is set up correctly, the primary DNS server in your subnet should be the closest domain controller, so the following code grabs the first DNS IP and resolves it to the hostname of the closest DC. This doesn't require RSAT or credentials, and contains nothing specific to the current domain.
$NetItems = @(Get-WmiObject -Class Win32_NetworkAdapterConfiguration -Filter "IPEnabled = 'True'" -ComputerName $env:COMPUTERNAME)
foreach ($objItem in $NetItems)
{
    if ($objItem.DNSServerSearchOrder.Count -ge 1)
    {
        $PrimaryDNS = $objItem.DNSServerSearchOrder[0]
        $domain = $objItem.DNSDomain
        break
    }
}
[System.Net.Dns]::GetHostByAddress($PrimaryDNS).HostName -replace "\.$($domain)",""