I have a list of URLs that I use to check whether any of our websites is down or offline, so that we get a notification. That works, except that some of the URLs crash at this line:
HttpWebResponse httpRes = (HttpWebResponse)httpReq.GetResponse();
But the rest work just fine. Can anyone tell me what I'm doing wrong? I've tried URLs with HTTPS, HTTP, and even with only www...
public void CheckUrl() // List of URLs
{
    List<string> urls = new List<string>() {
        "https//:www.example.com/something1/buy",
        "https//:www.example.com/something2/buy",
        "https//:www.example.com/something3/buy",
        "https//:www.example.com/something4/buy",
    };
    // Walk through all the URLs
    foreach (var url in urls)
    {
        // Create the URL request
        HttpWebRequest httpReq = (HttpWebRequest)WebRequest.Create(url);
        httpReq.AllowAutoRedirect = false;
        try
        {
            WebClient client = new WebClient();
            string downloadString = client.DownloadString(url);
            // Try to get a response
            HttpWebResponse httpRes = (HttpWebResponse)httpReq.GetResponse();
            if ((httpRes.StatusCode != HttpStatusCode.OK || httpRes.StatusCode != HttpStatusCode.Found) && downloadString.Contains("404 -") || downloadString.Contains("Server Error"))
            {
                // Code for NotFound resources goes here.
                SendVerificationLinkEmail(httpRes.StatusCode.ToString(), url);
                foreach (var number in Numbers)
                {
                    SendSms(url, number);
                }
            }
            // Close the response.
            httpRes.Close();
        }
        catch (Exception e)
        {
            // Sending only to admin to check it out first
            SendExeptionUrl(url);
            foreach (var number in Numbers)
            {
                SendSms(url, number);
            }
        }
    }
    Application.Exit();
}
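As an aside on the sanitized list above: https//: is not a valid scheme (presumably https:// was intended), and a malformed URL makes WebRequest.Create throw before any request is sent. Below is a minimal sketch, not a drop-in fix, of the same loop written so that error statuses (404, 500, ...) are read from the WebException rather than crashing; it assumes the question's urls list, SendVerificationLinkEmail, SendSms, and Numbers members:

foreach (var url in urls)
{
    HttpStatusCode status;
    try
    {
        var req = (HttpWebRequest)WebRequest.Create(url); // throws immediately on malformed URLs such as "https//:..."
        req.AllowAutoRedirect = false;
        using (var res = (HttpWebResponse)req.GetResponse())
        {
            status = res.StatusCode;
        }
    }
    catch (WebException ex)
    {
        // GetResponse throws on 4xx/5xx, but the status is still available on ex.Response.
        var errorRes = ex.Response as HttpWebResponse;
        if (errorRes == null) throw; // DNS failure, timeout, bad URL: no HTTP status to report
        status = errorRes.StatusCode;
    }
    if (status != HttpStatusCode.OK && status != HttpStatusCode.Found)
    {
        SendVerificationLinkEmail(status.ToString(), url);
        foreach (var number in Numbers)
            SendSms(url, number);
    }
}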
Related
I am very new to Selenium with C#. I was searching for ways to find broken links on a website using Selenium in C#. I could find a handful of solutions for Selenium in Java, but I was unable to find any for Selenium in C#. It would be really helpful if anyone could post a small snippet of code for this, or a link to any document that I could refer to and follow. Thanks in advance.
You can try iterating over the list of 'a' tags and checking for 200 OK in the HTTP response:
IList<IWebElement> links = driver.FindElements(By.TagName("a"));
foreach (IWebElement link in links)
{
    var url = link.GetAttribute("href");
    IsLinkWorking(url);
}
bool IsLinkWorking(string url)
{
    HttpWebRequest request = (HttpWebRequest)HttpWebRequest.Create(url);
    // You can set some parameters on the "request" object...
    request.AllowAutoRedirect = true;
    try
    {
        HttpWebResponse response = (HttpWebResponse)request.GetResponse();
        if (response.StatusCode == HttpStatusCode.OK)
        {
            Console.WriteLine("\r\nResponse Status Code is OK and StatusDescription is: {0}",
                response.StatusDescription);
            // Releases the resources of the response.
            response.Close();
            return true;
        }
        else
        {
            response.Close();
            return false;
        }
    }
    catch
    {
        // TODO: Check for the right exception here
        return false;
    }
}
IWebDriver webDriver = new ChromeDriver();
webDriver.Navigate().GoToUrl("https://www.google.co.in/maps/");
HttpWebRequest req = null;
var urls = webDriver.FindElements(By.TagName("a"));
foreach (var url in urls)
{
    if (!(url.Text.Contains("Email") || url.Text == ""))
    {
        req = (HttpWebRequest)WebRequest.Create(url.GetAttribute("href"));
        try
        {
            var response = (HttpWebResponse)req.GetResponse();
            System.Console.WriteLine($"URL: {url.GetAttribute("href")} status is: {response.StatusCode}");
        }
        catch (WebException e)
        {
            var errorResponse = (HttpWebResponse)e.Response;
            System.Console.WriteLine($"URL: {url.GetAttribute("href")} status is: {errorResponse.StatusCode}");
        }
    }
}
I have the following code that receives webhook messages:
// Read posted data
string requestBody;
using (var reader = new StreamReader(HttpContext.Current.Request.InputStream))
{
    requestBody = reader.ReadToEnd();
}
requestBody.Log();
// Attempt to forward request
context.CopyTo(Settings.Payments.Paypal.ScirraPaypalIPNEndpoint);
requestBody contains data which is logged. I then attempt to forward the request to another URL:
public static void CopyTo(this HttpContext source, string url)
{
    var destination = (HttpWebRequest)WebRequest.Create(url);
    var request = source.Request;
    destination.Method = request.HttpMethod;

    // Copy unrestricted headers
    foreach (var headerKey in request.Headers.AllKeys)
    {
        if (WebHeaderCollection.IsRestricted(headerKey)) continue;
        destination.Headers[headerKey] = request.Headers[headerKey];
    }

    // Copy restricted headers
    if (request.AcceptTypes != null && request.AcceptTypes.Any())
    {
        destination.Accept = string.Join(",", request.AcceptTypes);
    }
    destination.ContentType = request.ContentType;
    destination.Referer = request.UrlReferrer?.AbsoluteUri ?? string.Empty;
    destination.UserAgent = request.UserAgent;

    // Copy content (if content body is allowed)
    if (request.HttpMethod != "GET"
        && request.HttpMethod != "HEAD"
        && request.ContentLength > 0)
    {
        using (var destinationStream = destination.GetRequestStream())
        {
            request.InputStream.Position = 0;
            request.InputStream.CopyTo(destinationStream);
            destinationStream.Close();
        }
    }

    if (!Settings.Deployment.IsLive)
    {
        ServicePointManager.ServerCertificateValidationCallback =
            (sender, certificate, chain, sslPolicyErrors) => true;
    }

    using (var response = destination.GetResponse() as HttpWebResponse)
    {
        if (response == null) throw new Exception("Failed to post to " + url);
    }
}
The handler that receives this forwarded request has the code:
public void ProcessRequest(HttpContext context)
{
    string requestBody;
    using (var reader = new StreamReader(HttpContext.Current.Request.InputStream))
    {
        requestBody = reader.ReadToEnd();
    }
    requestBody.Log();
}
However, in the handler that is forwarded to, requestBody is always empty! What am I doing wrong here?
Both servers are hosted behind Cloudflare; when posting from one to the other I get a CF error 1000 (prohibited IP).
The solution is to add the target server's IP address to the requesting server's hosts file.
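For illustration, a hypothetical hosts entry on the requesting server (the IP address and hostname below are placeholders, not values from the question); this makes the requesting server resolve the target hostname straight to its origin IP instead of to Cloudflare:

# /etc/hosts (Linux) or C:\Windows\System32\drivers\etc\hosts (Windows)
# Placeholder values: substitute the target server's real origin IP and hostname.
203.0.113.10    webhooks.example.com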
Hi guys, I need help. I am trying to make a console application which checks whether a website is available or not. I am also trying to get the title of the page.
To do this I am using the HttpWebRequest class (for getting the status) and the WebClient class (for getting the title).
Note: the page I am trying to get is on a private server.
The URL format is (applicationname-environment.corporation.companyname.com),
for example: FIFO-dev.corp.tryit.com
When I try to get the status, it always gives me 401 even though the page is up and running.
List<int> Web_Status = new List<int>();
foreach (var URL in WEB_URL)
{
    try
    {
        HttpWebRequest Web_Test = (HttpWebRequest)WebRequest.Create("http://" + URL);
        Web_Test.AllowAutoRedirect = true;
        HttpWebResponse Web_response = (HttpWebResponse)Web_Test.GetResponse();
        Web_Status.Add((int)Web_response.StatusCode);
        Web_response.Close();
    }
    catch (System.Net.WebException ex)
    {
        HttpWebResponse Web_response = (HttpWebResponse)ex.Response;
        Web_Status.Add((int)Web_response.StatusCode);
    }
}
Also note that when supplying the URLs I make sure I am not re-entering http://.
The code below gives this error:

"System.Net.WebException: The remote server returned an error: (401) Unauthorized.
   at System.Net.WebClient.DownloadDataInternal(Uri address, WebRequest& request)
   at System.Net.WebClient.DownloadString(Uri address)
   at website_monitoring.Get_Title.Title(List`1 WEB_URL) in "
string source = "";
List<string> status = new List<string>();
WebClient x = new WebClient();
foreach (var item in WEB_URL)
{
    try
    {
        source = x.DownloadString("http://" + item);
        status.Add(Regex.Match(source, @"\<title\b[^>]*\>\s*(?<Title>[\s\S]*?)\</title\>", RegexOptions.IgnoreCase).Groups["Title"].Value);
    }
    catch (System.Net.WebException ex)
    {
        status.Add(ex.ToString());
    }
}
Sorry guys, I can't give the exact URL I am trying on.
This code works with all the common websites and blogs, like "stackoverflow.com", "http://understandingarduino.blogspot.in/" and so on.
Update 1: Following mammago's suggestion I was able to handle the 4xx issue, but now it gives a "too many redirects" error while getting the title.
I was able to handle the 302 status issue by setting the maximum automatic redirections property to 1000:
List<int> Web_Status = new List<int>();
foreach (var URL in WEB_URL)
{
    try
    {
        HttpWebRequest Web_Test = (HttpWebRequest)WebRequest.Create("http://" + URL);
        // Set credentials to use for this request.
        Web_Test.Credentials = CredentialCache.DefaultCredentials;
        Web_Test.CookieContainer = new CookieContainer();
        Web_Test.AllowAutoRedirect = true;
        Web_Test.MaximumAutomaticRedirections = 1000;
        //Web_Test.UserAgent =
        HttpWebResponse Web_response = (HttpWebResponse)Web_Test.GetResponse();
        Web_Status.Add((int)Web_response.StatusCode);
        Web_response.Close();
    }
    catch (System.Net.WebException ex)
    {
        HttpWebResponse Web_response = (HttpWebResponse)ex.Response;
        Web_Status.Add((int)Web_response.StatusCode);
    }
}
Now all I need help with is how to handle the auto-redirect issue in this segment:
string source = "";
List<string> status = new List<string>();
WebClient x = new WebClient();
// Letting the website know it's a known user who is accessing it (as if the website has auto-authentication)
x.Credentials = CredentialCache.DefaultCredentials;
foreach (var item in WEB_URL)
{
    try
    {
        source = x.DownloadString("http://" + item);
        status.Add(Regex.Match(source, @"\<title\b[^>]*\>\s*(?<Title>[\s\S]*?)\</title\>", RegexOptions.IgnoreCase).Groups["Title"].Value);
    }
    catch (System.Net.WebException ex)
    {
        status.Add(ex.ToString());
        //source = x.DownloadString("http://" + ex);
        //status.Add(Regex.Match(source, @"\<title\b[^>]*\>\s*(?<Title>[\s\S]*?)\</title\>", RegexOptions.IgnoreCase).Groups["Title"].Value);
    }
}
"System.Net.WebException: Too many automatic redirections were
attempted. at System.Net.WebClient.DownloadDataInternal(Uri
address, WebRequest& request) at
System.Net.WebClient.DownloadString(Uri address)
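WebClient itself exposes no cookie or redirect settings, which is usually why DownloadString loops where the configured HttpWebRequest above does not: without cookies, each redirect looks like a brand-new unauthenticated visit. A minimal sketch of one common workaround (not from the original thread; the class name is illustrative) is to subclass WebClient and configure the HttpWebRequest it creates internally:

using System;
using System.Net;

// Hypothetical helper class: lets DownloadString keep cookies and follow
// long redirect chains, mirroring the HttpWebRequest settings above.
class RedirectingWebClient : WebClient
{
    private readonly CookieContainer _cookies = new CookieContainer();

    protected override WebRequest GetWebRequest(Uri address)
    {
        var request = base.GetWebRequest(address);
        var httpRequest = request as HttpWebRequest;
        if (httpRequest != null)
        {
            httpRequest.CookieContainer = _cookies;          // keep session cookies between hops
            httpRequest.AllowAutoRedirect = true;
            httpRequest.MaximumAutomaticRedirections = 1000; // same limit as the status check above
            httpRequest.Credentials = CredentialCache.DefaultCredentials;
        }
        return request;
    }
}

Replacing new WebClient() with new RedirectingWebClient() in the title-scraping loop would then carry authentication cookies across the redirect chain, which is often what breaks the loop in the first place.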
I am attempting to load a page I've received from an RSS feed and I receive the following WebException:
Cannot handle redirect from HTTP/HTTPS protocols to other dissimilar ones.
with an inner exception:
Invalid URI: The hostname could not be parsed.
I wrote code that attempts to load the URL via an HttpWebRequest. Due to some suggestions I received, when the HttpWebRequest fails I set AllowAutoRedirect to false and manually loop through the redirect iterations until I find out what ultimately fails. Here's the code I'm using; please forgive the gratuitous Console.Write/WriteLine calls:
Uri url = new Uri(val);
bool result = true;
System.Net.HttpWebRequest req = (System.Net.HttpWebRequest)System.Net.HttpWebRequest.Create(url);
string source = String.Empty;
Uri responseURI;
try
{
    using (System.Net.WebResponse webResponse = req.GetResponse())
    {
        using (HttpWebResponse httpWebResponse = webResponse as HttpWebResponse)
        {
            responseURI = httpWebResponse.ResponseUri;
            StreamReader reader;
            if (httpWebResponse.ContentEncoding.ToLower().Contains("gzip"))
            {
                reader = new StreamReader(new GZipStream(httpWebResponse.GetResponseStream(), CompressionMode.Decompress));
            }
            else if (httpWebResponse.ContentEncoding.ToLower().Contains("deflate"))
            {
                reader = new StreamReader(new DeflateStream(httpWebResponse.GetResponseStream(), CompressionMode.Decompress));
            }
            else
            {
                reader = new StreamReader(httpWebResponse.GetResponseStream());
            }
            source = reader.ReadToEnd();
            reader.Close();
        }
    }
    req.Abort();
    HtmlAgilityPack.HtmlDocument doc = new HtmlAgilityPack.HtmlDocument();
    doc.LoadHtml(source);
    result = true;
}
catch (ArgumentException ae)
{
    Console.WriteLine(url + "\n--\n" + ae.Message);
    result = false;
}
catch (WebException we)
{
    Console.WriteLine(url + "\n--\n" + we.Message);
    result = false;
    string urlValue = url.ToString();
    try
    {
        bool cont = true;
        int count = 0;
        do
        {
            req = (System.Net.HttpWebRequest)System.Net.HttpWebRequest.Create(urlValue);
            req.Headers.Add("Accept-Language", "en-us,en;q=0.5");
            req.AllowAutoRedirect = false;
            using (System.Net.WebResponse webResponse = req.GetResponse())
            {
                using (HttpWebResponse httpWebResponse = webResponse as HttpWebResponse)
                {
                    responseURI = httpWebResponse.ResponseUri;
                    StreamReader reader;
                    if (httpWebResponse.ContentEncoding.ToLower().Contains("gzip"))
                    {
                        reader = new StreamReader(new GZipStream(httpWebResponse.GetResponseStream(), CompressionMode.Decompress));
                    }
                    else if (httpWebResponse.ContentEncoding.ToLower().Contains("deflate"))
                    {
                        reader = new StreamReader(new DeflateStream(httpWebResponse.GetResponseStream(), CompressionMode.Decompress));
                    }
                    else
                    {
                        reader = new StreamReader(httpWebResponse.GetResponseStream());
                    }
                    source = reader.ReadToEnd();
                    if (string.IsNullOrEmpty(source))
                    {
                        urlValue = httpWebResponse.Headers["Location"].ToString();
                        count++;
                        reader.Close();
                    }
                    else
                    {
                        cont = false;
                    }
                }
            }
        } while (cont);
    }
    catch (UriFormatException uriEx)
    {
        Console.WriteLine(urlValue + "\n--\n" + uriEx.Message + "\r\n");
        result = false;
    }
    catch (WebException innerWE)
    {
        Console.WriteLine(urlValue + "\n--\n" + innerWE.Message + "\r\n");
        result = false;
    }
}
if (result)
    Console.WriteLine("testing successful");
else
    Console.WriteLine("testing unsuccessful");
Since this is currently just test code I hardcode val as http://rss.nytimes.com/c/34625/f/642557/s/3d072012/sc/38/l/0Lartsbeat0Bblogs0Bnytimes0N0C20A140C0A70C30A0Csarah0Ekane0Eplay0Eamong0Eofferings0Eat0Est0Eanns0Ewarehouse0C0Dpartner0Frss0Gemc0Frss/story01.htm
the ending url that gives the UriFormatException is: http:////www-nc.nytimes.com/2014/07/30/sarah-kane-play-among-offerings-at-st-anns-warehouse/?=_php=true&_type=blogs&_php=true&_type=blogs&_php=true&_type=blogs&_php=true&_type=blogs&_php=true&_type=blogs&_php=true&_type=blogs&_php=true&_type=blogs&partner=rss&emc=rss&_r=6&
Now I'm not sure if I'm missing something or if I'm doing the looping wrong, but if I take val and just put it into a browser, the page loads fine; and if I take the URL that causes the exception and put it in a browser, I get taken to an account login for nytimes.
I have a number of these RSS feed URLs that result in this problem. I also have a large number of these RSS feed URLs that load with no problem at all. Let me know if there is any more information needed to help resolve this. Any help would be greatly appreciated.
Could it be that I need to have some sort of cookie capability enabled?
You need to keep track of the cookies while doing all your requests. You can use an instance of the CookieContainer class to achieve that.
At the top of your method I made the following changes:
Uri url = new Uri(val);
bool result = true;
// keep all our cookies for the duration of our calls
var cookies = new CookieContainer();
System.Net.HttpWebRequest req = (System.Net.HttpWebRequest)System.Net.HttpWebRequest.Create(url);
// assign our CookieContainer to the new request
req.CookieContainer = cookies;
string source = String.Empty;
Uri responseURI;
try
{
And in the exception handler, where you create a new HttpWebRequest, you assign our CookieContainer again:
do
{
    req = (System.Net.HttpWebRequest)System.Net.HttpWebRequest.Create(urlValue);
    // reuse our cookies!
    req.CookieContainer = cookies;
    req.Headers.Add("Accept-Language", "en-us,en;q=0.5");
    req.AllowAutoRedirect = false;
    using (System.Net.WebResponse webResponse = req.GetResponse())
    {
This makes sure that on each successive call the cookies already collected are resent with the next request. If you leave this out, no cookies are sent, so the site you are trying to visit assumes you are a fresh/new/unseen user and sends you down a kind of authentication path.
If you want to store/keep cookies beyond this method, you could move the cookie instance variable to a public static property so you can use all those cookies program-wide, like so:
public static class Cookies
{
    static readonly CookieContainer _cookies = new CookieContainer();

    public static CookieContainer All
    {
        get
        {
            return _cookies;
        }
    }
}
And to use it in a WebRequest:
var req = (System.Net.HttpWebRequest) WebRequest.Create(url);
req.CookieContainer = Cookies.All;
While googling for a solution to upload videos to YouTube from my Windows Phone 8 app, I came across YouTube API v2.0 – Direct Uploading. I followed the article and coded as follows.
private void UploadVideoYoutube(byte[] byteArr, string isoVideoFileName)
{
    Uri uri = new Uri("http://uploads.gdata.youtube.com/feeds/api/users/default/uploads");
    Dictionary<string, string> post_params = new Dictionary<string, string>();
    Dictionary<string, string> extra_headers = new Dictionary<string, string>();
    RESTSuccessCallback success_callback = new RESTSuccessCallback(Success);
    RESTErrorCallback error_callback = new RESTErrorCallback(Error);
    try
    {
        HttpWebRequest request = WebRequest.CreateHttp(uri);
        // We could move the content type into a function argument too.
        //request.ContentType = "application/atom+xml; charset=UTF-8";
        //request.ContentType = "application/x-www-form-urlencoded";
        request.ContentType = "video/mp4";
        request.Method = "POST";
        // Provide parameters
        post_params.Add("Authorization", "<ClientID>");
        // This might be helpful for APIs that require setting custom headers...
        if (extra_headers != null)
            try
            {
                foreach (String header in extra_headers.Keys)
                    request.Headers[header] = extra_headers[header];
            }
            catch (Exception) { }
        request.BeginGetRequestStream((IAsyncResult result) =>
        {
            HttpWebRequest preq = result.AsyncState as HttpWebRequest;
            if (preq != null)
            {
                Stream postStream = preq.EndGetRequestStream(result);
                // Allow for dynamic spec of post body
                StringBuilder postParamBuilder = new StringBuilder();
                if (post_params != null)
                    foreach (String key in post_params.Keys)
                        postParamBuilder.Append(String.Format("{0}={1}", key, post_params[key]));
                Byte[] requestByteArray = Encoding.UTF8.GetBytes(postParamBuilder.ToString());
                // Guess one could just accept a byte[] [via function argument] for arbitrary data types - images, audio, ...
                postStream.Write(requestByteArray, 0, requestByteArray.Length);
                postStream.Close();
                preq.BeginGetResponse((IAsyncResult final_result) =>
                {
                    HttpWebRequest req = final_result.AsyncState as HttpWebRequest;
                    if (req != null)
                    {
                        try
                        {
                            // We call the success callback as long as we get a response stream
                            WebResponse response = req.EndGetResponse(final_result);
                            success_callback(response.GetResponseStream());
                        }
                        catch (WebException e)
                        {
                            // Otherwise call the error/failure callback
                            error_callback(e.Message);
                            return;
                        }
                    }
                }, preq);
            }
        }, request);
    }
    catch (Exception ex)
    {
        AppHelper.ErrorOccured(ex);
    }
}
But it returned the following error; please guide me.
Also, please let me know if there is any YouTube library for Windows Phone.
Thanks.
The remote server returned an error: NotFound.
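One thing that stands out in the snippet, independent of the endpoint: the Authorization value is written into the POST body via post_params, but HTTP authorization travels in a request header. A minimal hedged sketch of that change is below; "<ClientID>" remains the placeholder from the question, and this alone may not clear the NotFound error, since the gdata v2 upload endpoint was later deprecated.

// Sketch only: move the credential out of the POST body and into a header.
// "<ClientID>" is the question's placeholder, not a real token format.
HttpWebRequest request = WebRequest.CreateHttp(uri);
request.Method = "POST";
request.ContentType = "video/mp4";
request.Headers["Authorization"] = "<ClientID>";
// ...then write the video bytes (byteArr) to the request stream instead of
// the "key=value" text that postParamBuilder builds.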