I want to be able to add attributes to a string of HTML without having to build a parser to handle the HTML. In one specific case, I want to be able to extract the id from the HTML or insert an id into the HTML server side.
Say I have:
string stringofhtml = "<img src=\"someimage.png\" alt=\"the image\" />";
I would like to be able to do something like:
HtmlGenericControl htmlcontrol = new HtmlGenericControl(stringofhtml);
htmlcontrol.Attributes["id"] = "newid";
OR
string theid = htmlcontrol.Attributes["id"];
This is just a way that I can access/add attributes of the html strings that I have.
You can do this:
HtmlGenericControl ctrl = new HtmlGenericControl();
ctrl.InnerHtml = "<img src=\"someimage.png\" alt=\"the image\" />";
You could always use a LiteralControl too, instead of an HtmlGenericControl:
LiteralControl lit = new LiteralControl(stringOfHtml);
I do not think there is a control available which will provide you with the functionality you are looking for.
Below I have made use of the HtmlAgilityPack to parse/query the HTML and created a new control subclassing the Literal control.
This control accepts an HTML string, checks to ensure it contains at least a single element, and provides access to get/set that element's attributes.
Example usage
string image = "<img src=\"someimage.png\" alt=\"the image\" />";
HtmlControlFromString htmlControlFromString = new HtmlControlFromString(image);
htmlControlFromString.Attributes["id"] = "image2";
string id = htmlControlFromString.Attributes["id"];
Control
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI.WebControls;
using HtmlAgilityPack;
public class HtmlControlFromString : Literal
{
private HtmlDocument _document = new HtmlDocument();
private HtmlNode _htmlElement;
public AttributesCollection Attributes { get; set; }
public HtmlControlFromString(string html)
{
_document.LoadHtml(html);
if (_document.DocumentNode.ChildNodes.Count > 0)
{
_htmlElement = _document.DocumentNode.ChildNodes[0];
Attributes = new AttributesCollection(_htmlElement);
Attributes.AttributeChanged += new EventHandler(Attributes_AttributeChanged);
SetHtml();
}
else
{
throw new InvalidOperationException("Argument does not contain a valid html element.");
}
}
void Attributes_AttributeChanged(object sender, EventArgs e)
{
SetHtml();
}
void SetHtml()
{
Text = _htmlElement.OuterHtml;
}
}
public class AttributesCollection
{
public event EventHandler AttributeChanged;
private HtmlNode _htmlElement;
public string this[string attribute]
{
get
{
HtmlAttribute htmlAttribute = _htmlElement.Attributes[attribute];
return htmlAttribute == null ? null : htmlAttribute.Value;
}
set
{
HtmlAttribute htmlAttribute = _htmlElement.Attributes[attribute];
if (htmlAttribute == null)
{
htmlAttribute = _htmlElement.OwnerDocument.CreateAttribute(attribute);
htmlAttribute.Value = value;
_htmlElement.Attributes.Add(htmlAttribute);
}
else
{
htmlAttribute.Value = value;
}
EventHandler attributeChangedHandler = AttributeChanged;
if (attributeChangedHandler != null)
attributeChangedHandler(this, new EventArgs());
}
}
public AttributesCollection(HtmlNode htmlElement)
{
_htmlElement = htmlElement;
}
}
Hope this helps.
I've been working on a web scraper as a Windows Forms application in C#. The user enters a search term, and the program then splits the search string into individual words and looks up the number of search results through Yahoo and Google.
My issue lies with navigating the huge HTML document. I've tried multiple approaches, such as
iterating recursively and comparing ids, as well as using lambdas with Where statements. Both result in null. I also manually looked into the HTML document to make sure the id of the div I want exists in the document.
The id I'm looking for is "resultStats", but it is suuuuuper nested. My code looks like this:
using HtmlAgilityPack;
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
namespace WebScraper2._0
{
public class Webscraper
{
private string Google = "http://google.com/#q=";
private string Yahoo = "http://search.yahoo.com/search?p=";
private HtmlWeb web = new HtmlWeb();
private HtmlDocument GoogleDoc = new HtmlDocument();
private HtmlDocument YahooDoc = new HtmlDocument();
public Webscraper()
{
Console.WriteLine("Init");
}
public int WebScrape(string searchterms)
{
//Console.WriteLine(searchterms);
string[] ssize = searchterms.Split(new char[0]);
int YahooMatches = 0;
int GoogleMatches = 0;
foreach (var term in ssize)
{
//Console.WriteLine(term);
var y = web.Load(Yahoo + term);
var g = web.Load(Google + term + "&cad=h");
YahooMatches += YahooFilter(y);
GoogleMatches += GoogleFilter(g);
}
Console.WriteLine("Yahoo found " + YahooMatches.ToString() + " matches");
Console.WriteLine("Google found " + GoogleMatches.ToString() + " matches");
return YahooMatches + GoogleMatches;
}
//Parse to get correct info
public int YahooFilter(HtmlDocument doc)
{
//Look for node with correct ID
IEnumerable<HtmlNode> nodes = doc.DocumentNode.Descendants().Where(n => n.HasClass("mw-jump-link"));
foreach (var item in nodes)
{
// displaying final output
Console.WriteLine(item.InnerText);
}
//TODO: Return search resultamount.
return 0;
}
int testCounter = 0;
string toReturn = "";
bool foundMatch = false;
//Parse to get correct info
public int GoogleFilter(HtmlDocument doc)
{
if (doc == null)
{
Console.WriteLine("Null");
}
foreach (var node in doc.DocumentNode.ChildNodes)
{
toReturn += Looper(node, testCounter, toReturn, foundMatch);
}
Console.WriteLine(toReturn);
/*
var stuff = doc.DocumentNode.Descendants("div")
.Where(node => node.GetAttributeValue("id", "")
.Equals("extabar")).ToList();
IEnumerable<HtmlNode> nodes = doc.DocumentNode.Descendants().Where(n => n.HasClass("appbar"));
*/
return 0;
}
public string Looper(HtmlNode node, int counter, string returnstring, bool foundMatch)
{
Console.WriteLine("Loop started" + counter.ToString());
counter++;
Console.WriteLine(node.Id);
if (node.Id == "resultStats")
{
returnstring += node.InnerText;
}
foreach (HtmlNode n in node.Descendants())
{
Looper(n, counter, returnstring, foundMatch);
}
return returnstring;
}
}
}
I made a Google HTML scraper a few weeks ago; a few things to consider.
First: Google doesn't like it when you try to scrape their search HTML. While I was running a list of companies trying to get their addresses and phone numbers, Google blocked my IP from accessing their website for a little bit (which caused a hilarious panic in the office).
Second: Google will change the HTML (id names etc.) of the page, so using IDs won't work. In my case I used a combination of HTML tags and specific text to parse the response and extract the information that I wanted.
Third: It's better to just use their API to grab the information you need; just make sure you respect their free-tier query limit and you should be golden.
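If you go the API route, a minimal sketch against Google's Custom Search JSON API might look like this (the API key and search engine id are placeholders, and pulling totalResults out with a regex is just to keep the example dependency-free; this is a sketch, not the code from my project):
// Minimal sketch: query the Custom Search JSON API and read the estimated
// result count. YOUR_API_KEY and YOUR_SEARCH_ENGINE_ID are placeholders.
using System;
using System.Net;
using System.Text.RegularExpressions;

public static class GoogleApiSearch
{
    public static long GetResultCount(string term)
    {
        string url = "https://www.googleapis.com/customsearch/v1" +
                     "?key=YOUR_API_KEY&cx=YOUR_SEARCH_ENGINE_ID" +
                     "&q=" + Uri.EscapeDataString(term);

        using (var client = new WebClient())
        {
            string json = client.DownloadString(url);

            // Pull totalResults out of the JSON response; a real JSON parser
            // (e.g. Json.NET) would be more robust than this regex.
            Match m = Regex.Match(json, "\"totalResults\":\\s*\"(\\d+)\"");
            return m.Success ? long.Parse(m.Groups[1].Value) : 0;
        }
    }
}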
Here is the scraping code I used:
public static string getBetween(string strSource, string strStart, string strEnd)
{
int Start, End;
if (strSource.Contains(strStart) && strSource.Contains(strEnd))
{
Start = strSource.IndexOf(strStart, 0) + strStart.Length;
End = strSource.IndexOf(strEnd, Start);
return strSource.Substring(Start, End - Start);
}
else
{
return "";
}
}
public void SearchResult()
{
//Run a Google Search
string uriString = "http://www.google.com/search";
string keywordString = "Search String";
WebClient webClient = new WebClient();
NameValueCollection nameValueCollection = new NameValueCollection();
nameValueCollection.Add("q", keywordString);
webClient.QueryString.Add(nameValueCollection);
string result = webClient.DownloadString(uriString);
string search = getBetween(result, "Address", "Hours");
rtbHtml.Text = getBetween(search, "\">", "<");
}
In my case I used the strings "Address" and "Hours" to limit what information I wanted to extract.
Edit: Fixed the logic and added the code I used.
Edit2: Forgot to add the getBetween method. (Sorry, it's my first answer.)
I'm trying to transfer data from a treenode (at least I think that's what it is) which contains much more data than I need. It would be very difficult for me to manipulate the data within the treenode. I would much rather have an array which provides me with only the necessary data for data manipulation.
I would like each array entry to have the following variables:
1. BookmarkNumber (integer)
2. Date (string)
3. DocumentType (string)
4. BookmarkPageNumberString (string)
5. BookmarkPageNumberInteger (integer)
I would like to fill the above-defined array from the data in the variable book_mark (as can be seen in my code).
I've been wrestling with this for two days. Any help would be much appreciated. I'm pretty sure the question wasn't phrased correctly, so please ask questions so that I may explain further if needed.
Thanks so much
BTW, what I'm trying to do is create a Windows Forms program which splits a PDF file that has multiple bookmarks into discrete PDF files, one for each bookmark/chapter, while saving each file in the correct folder with the correct naming convention; the folder and naming convention depend on the PDF name and the title of the bookmark/chapter being parsed.
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
using System.IO;
using itextsharp.pdfa;
using iTextSharp.awt;
using iTextSharp.testutils;
using iTextSharp.text;
using iTextSharp.xmp;
using iTextSharp.xtra;
namespace WindowsFormsApplication1
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void ChooseImageFileWrapper_Click(object sender, EventArgs e)
{
OpenFileDialog openFileDialog1 = new OpenFileDialog();
openFileDialog1.InitialDirectory = GlobalVariables.InitialDirectory;
openFileDialog1.Filter = "Pdf Files|*.pdf";
openFileDialog1.RestoreDirectory = true;
openFileDialog1.Title = "Image File Wrapper Chooser";
if (openFileDialog1.ShowDialog() == DialogResult.OK)
{
try
{
GlobalVariables.ImageFileWrapperPath = openFileDialog1.FileName;
}
catch (Exception ex)
{
MessageBox.Show("Error: Could not read file from disk. Original error: " + ex.Message);
}
}
ImageFileWrapperPath.Text = GlobalVariables.ImageFileWrapperPath;
}
private void ImageFileWrapperPath_TextChanged(object sender, EventArgs e)
{
}
private void button2_Click(object sender, EventArgs e)
{
iTextSharp.text.pdf.PdfReader pdfReader = new iTextSharp.text.pdf.PdfReader(GlobalVariables.ImageFileWrapperPath);
IList<Dictionary<string, object>> book_mark = iTextSharp.text.pdf.SimpleBookmark.GetBookmark(pdfReader);
List<ImageFileWrapperBookmarks> IFWBookmarks = new List<ImageFileWrapperBookmarks>();
foreach (Dictionary<string, object> bk in book_mark) // bk is a single instance of book_mark
{
ImageFileWrapperBookmarks.BookmarkNumber = ImageFileWrapperBookmarks.BookmarkNumber + 1;
foreach (KeyValuePair<string, object> kvr in bk) // kvr is the key/value in bk
{
if (kvr.Key == "Kids" || kvr.Key == "kids")
{
//create recursive program for children
}
else if (kvr.Key == "Title" || kvr.Key == "title")
{
}
else if (kvr.Key == "Page" || kvr.Key == "page")
{
}
}
}
MessageBox.Show(GlobalVariables.ImageFileWrapperPath);
}
}
}
Here's one way to parse a PDF and create a data structure similar to what you describe. First the data structure:
public class BookMark
{
static int _number;
public BookMark() { Number = ++_number; }
public int Number { get; private set; }
public string Title { get; set; }
public string PageNumberString { get; set; }
public int PageNumberInteger { get; set; }
public static void ResetNumber() { _number = 0; }
// bookmarks title may have illegal filename character(s)
public string GetFileName()
{
var fileTitle = Regex.Replace(
Regex.Replace(Title, @"\s+", "-"),
@"[^-\w]", ""
);
return string.Format("{0:D4}-{1}.pdf", Number, fileTitle);
}
}
A method to create a list of Bookmark (above):
List<BookMark> ParseBookMarks(IList<Dictionary<string, object>> bookmarks)
{
int page;
var result = new List<BookMark>();
foreach (var bookmark in bookmarks)
{
// add top-level bookmarks
var stringPage = bookmark["Page"].ToString();
if (Int32.TryParse(stringPage.Split()[0], out page))
{
result.Add(new BookMark() {
Title = bookmark["Title"].ToString(),
PageNumberString = stringPage,
PageNumberInteger = page
});
}
// recurse
if (bookmark.ContainsKey("Kids"))
{
var kids = bookmark["Kids"] as IList<Dictionary<string, object>>;
if (kids != null && kids.Count > 0)
{
result.AddRange(ParseBookMarks(kids));
}
}
}
return result;
}
Call the method above like this to dump the results to a text file (outputTextFile is the path to write to):
void DumpResults(string path, string outputTextFile)
{
using (var reader = new PdfReader(path))
{
// need this call to parse page numbers
reader.ConsolidateNamedDestinations();
var bookmarks = ParseBookMarks(SimpleBookmark.GetBookmark(reader));
var sb = new StringBuilder();
foreach (var bookmark in bookmarks)
{
sb.AppendLine(string.Format(
"{0, -4}{1, -100}{2, -25}{3}",
bookmark.Number, bookmark.Title,
bookmark.PageNumberString, bookmark.PageNumberInteger
));
}
File.WriteAllText(outputTextFile, sb.ToString());
}
}
The bigger problem is how to extract each Bookmark into a separate file. If every Bookmark starts a new page it's easy:
Iterate over the return value of ParseBookMarks()
Select a page range that begins at the current bookmark's PageNumberInteger and ends at the next bookmark's PageNumberInteger - 1
Use that page range to create separate files.
Something like this:
void ProcessPdf(string path)
{
using (var reader = new PdfReader(path))
{
// need this call to parse page numbers
reader.ConsolidateNamedDestinations();
var bookmarks = ParseBookMarks(SimpleBookmark.GetBookmark(reader));
for (int i = 0; i < bookmarks.Count; ++i)
{
int page = bookmarks[i].PageNumberInteger;
int nextPage = i + 1 < bookmarks.Count
// if not top of page will be missing content
? bookmarks[i + 1].PageNumberInteger - 1
/* alternative is to potentially add redundant content:
? bookmarks[i + 1].PageNumberInteger
*/
: reader.NumberOfPages;
string range = string.Format("{0}-{1}", page, nextPage);
// DEMO!
if (i < 10)
{
var outputPath = Path.Combine(OUTPUT_DIR, bookmarks[i].GetFileName());
using (var readerCopy = new PdfReader(reader))
{
var number = bookmarks[i].Number;
readerCopy.SelectPages(range);
using (FileStream stream = new FileStream(outputPath, FileMode.Create))
{
using (var document = new Document())
{
using (var copy = new PdfCopy(document, stream))
{
document.Open();
int n = readerCopy.NumberOfPages;
for (int j = 0; j < n; )
{
copy.AddPage(copy.GetImportedPage(readerCopy, ++j));
}
}
}
}
}
}
}
}
}
The problem is that it's highly unlikely all bookmarks are going to be at the top of every page of the PDF. To see what I mean, experiment with commenting / uncommenting the bookmarks[i + 1].PageNumberInteger lines.
I need a regex pattern for finding links in a string (with HTML code) to get the links with file endings like .gif or .png
Example String:
<a href="//site.com/folder/picture.png">picture.png</a>
For now I get everything between the " " and the text between the <a> and </a>.
I want to get this:
Href = //site.com/folder/picture.png
String = picture.png
My code so far:
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Diagnostics;
using System.Drawing;
using System.Linq;
using System.Net;
using System.Text;
using System.Text.RegularExpressions;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace downloader
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
private void button1_Click(object sender, EventArgs e)
{
string url = textBox1.Text;
string s = gethtmlcode(url);
foreach (LinkItem i in LinkFinder.Find(s))
{
richTextBox1.Text += Convert.ToString(i);
}
}
static string gethtmlcode(string url)
{
using (WebClient client = new WebClient())
{
string htmlCode = client.DownloadString(url);
return htmlCode;
}
}
public struct LinkItem
{
public string Href;
public string Text;
public override string ToString()
{
return Href + "\n\t" + Text + "\n\t";
}
}
static class LinkFinder
{
public static List<LinkItem> Find(string file)
{
List<LinkItem> list = new List<LinkItem>();
// 1.
// Find all matches in file.
MatchCollection m1 = Regex.Matches(file, @"(<a.*?>.*?</a>)",
RegexOptions.Singleline);
// 2.
// Loop over each match.
foreach (Match m in m1)
{
string value = m.Groups[1].Value;
LinkItem i = new LinkItem();
// 3.
// Get href attribute.
Match m2 = Regex.Match(value, @"href=\""(.*?)\""",
RegexOptions.Singleline);
if (m2.Success)
{
i.Href = m2.Groups[1].Value;
}
// 4.
// Remove inner tags from text.
string t = Regex.Replace(value, @"\s*<.*?>\s*", "",
RegexOptions.Singleline);
i.Text = t;
list.Add(i);
}
return list;
}
}
}
}
I can suggest using HtmlAgilityPack for this task. Install it using the Manage NuGet Packages for Solution menu, and add the following method:
/// <summary>
/// Collects a href attribute values and a node values if image extension is jpg or png
/// </summary>
/// <param name="html">HTML string or an URL</param>
/// <returns>A key-value pair list of href values and a node values</returns>
private List<KeyValuePair<string, string>> GetLinksWithHtmlAgilityPack(string html)
{
var result = new List<KeyValuePair<string, string>>();
HtmlAgilityPack.HtmlDocument hap;
Uri uriResult;
if (Uri.TryCreate(html, UriKind.Absolute, out uriResult) && (uriResult.Scheme == Uri.UriSchemeHttp || uriResult.Scheme == Uri.UriSchemeHttps))
{ // html is a URL
var doc = new HtmlAgilityPack.HtmlWeb();
hap = doc.Load(uriResult.AbsoluteUri);
}
else
{ // html is a string
hap = new HtmlAgilityPack.HtmlDocument();
hap.LoadHtml(html);
}
var nodes = hap.DocumentNode.SelectNodes("//a");
if (nodes != null)
foreach (var node in nodes)
if (Path.GetExtension(node.InnerText.Trim()).ToLower() == ".png" ||
Path.GetExtension(node.InnerText.Trim()).ToLower() == ".jpg")
result.Add(new KeyValuePair<string,string>(node.GetAttributeValue("href", null), node.InnerText));
return result;
}
Then use it like this (I am using a dummy string, just for demo; the hrefs are made up):
var result = GetLinksWithHtmlAgilityPack("<a href=\"http://some.site/picture.png\">picture.png</a><a href=\"http://some.site/picture.bmp\">picture.bmp</a>");
Output: one entry for the .png link (its href and text); the .bmp link is skipped because only .png and .jpg are collected.
Or, with a URL, something like:
var result = GetLinksWithHtmlAgilityPack("http://www.google.com");
I am scraping HTML DOM elements using HtmlAgilityPack in ASP.NET. Currently my code loads all the href links, which means sublinks of sublinks as well. But I only need the URLs that belong to my domain. I don't know how to write the code for it. Can anyone help me do this?
Here is my code:
public void GetURL(string strGetURL)
{
var getHtmlSource = new HtmlWeb();
var document = new HtmlDocument();
try
{
document = getHtmlSource.Load(strGetURL);
var aTags = document.DocumentNode.SelectNodes("//a");
if (aTags != null)
{
outputurl.Text = string.Empty;
int _count = 0;
foreach (var aTag in aTags)
{
string strURLTmp;
strURLTmp = aTag.Attributes["href"].Value;
if (_count != 0)
{
if (!CheckDuplicate(strURLTmp))
{
lstResults.Add(strURLTmp);
outputurl.Text += strURLTmp + "\n";
counter++;
GetURL(strURLTmp);
}
}
_count++;
}
}
}
// (catch block and closing brace assumed; the posted snippet was truncated here)
catch (Exception ex)
{
outputurl.Text += "Error: " + ex.Message + "\n";
}
}
If you meant to get URLs that contain a specific domain, you can change the XPath to:
//a[contains(@href, 'your domain here')]
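For example, plugged straight into SelectNodes (the domain string is a placeholder):
var aTags = document.DocumentNode.SelectNodes("//a[contains(@href, 'example.com')]");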
Or, if you prefer LINQ to XPath:
var aTags = document.DocumentNode.SelectNodes("//a");
if (aTags != null)
{
....
var relevantLinks = aTags.Where(o => o.GetAttributeValue("href", "")
.Contains("your domain here")
);
....
}
GetAttributeValue() is a better way to get the value of an attribute with HAP. Instead of returning null, which may cause an exception, this method returns the 2nd parameter when the attribute is not found on the context node.
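For example (a small illustration using the href attribute from the code above):
// Throws a NullReferenceException when the <a> element has no href attribute:
string hrefUnsafe = aTag.Attributes["href"].Value;
// Returns the fallback value ("") instead when href is missing:
string hrefSafe = aTag.GetAttributeValue("href", "");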
I have some markup that contains certain HTML image tags with the class featured. What I need is to find all those images, add an anchor tag around each image, set the href attribute of the anchor to the image's src value (the image path), and lastly replace the image's src value with a new value (I call a method that will return this value).
<p>Some text here <img src="/my/path/image.png" alt="image description" class="featured" />. Some more text and another image that should not be modified <img src="/my/path/image2.png" alt="image description" /></p>
Should become:
<p>Some text here <img src="/new/path/from/method.png" alt="image description" class="featured" />. Some more text and another image that should not be modified <img src="/my/path/image2.png" alt="image description" /></p>
Don't use RegEx to parse HTML. See this classic SO answer for the reasons.
Use the HTML Agility Pack instead - you can use XPath to query your HTML.
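For example, something along these lines would do the whole rewrite (a rough sketch; GetImageThumbnail is a placeholder for your method that returns the new path):
using HtmlAgilityPack;

public static class FeaturedImageLinker
{
    public static string WrapFeaturedImages(string input)
    {
        var doc = new HtmlDocument();
        doc.LoadHtml(input);

        // Select only the <img> elements whose class attribute contains "featured"
        var featuredImages = doc.DocumentNode.SelectNodes("//img[contains(@class, 'featured')]");
        if (featuredImages == null)
            return input;

        foreach (var img in featuredImages)
        {
            string originalSrc = img.GetAttributeValue("src", "");

            // Build an anchor pointing at the original image path
            var anchor = doc.CreateElement("a");
            anchor.SetAttributeValue("href", originalSrc);

            // Point the image at the new path, then move it inside the anchor
            img.SetAttributeValue("src", GetImageThumbnail(originalSrc));
            img.ParentNode.ReplaceChild(anchor, img);
            anchor.AppendChild(img);
        }

        return doc.DocumentNode.OuterHtml;
    }

    // Placeholder for the method that returns the new image path
    private static string GetImageThumbnail(string originalPath)
    {
        return originalPath; // replace with the real thumbnail lookup
    }
}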
Ended up with this code.
using System;
using System.Reflection;
using HtmlAgilityPack;
using log4net;
namespace Company.Web.Util
{
public static class HtmlParser
{
private static readonly ILog _log = LogManager.GetLogger(MethodBase.GetCurrentMethod().DeclaringType);
private static HtmlDocument _htmlDocument;
public static string Parse(string input)
{
_htmlDocument = new HtmlDocument();
_htmlDocument.LoadHtml(input);
ParseNode(_htmlDocument.DocumentNode);
return _htmlDocument.DocumentNode.WriteTo().Trim();
}
private static void ParseChildren(HtmlNode parentNode)
{
for (int i = parentNode.ChildNodes.Count - 1; i >= 0; i--)
{
ParseNode(parentNode.ChildNodes[i]);
}
}
private static void ParseNode(HtmlNode node)
{
if (node.NodeType == HtmlNodeType.Element)
{
if (node.Name == "img" && node.HasAttributes)
{
for (int i = node.Attributes.Count - 1; i >= 0; i--)
{
HtmlAttribute currentAttribute = node.Attributes[i];
if ("class" == currentAttribute.Name && currentAttribute.Value.ToLower().Contains("featured"))
{
try
{
string originaleImagePath = node.Attributes["src"].Value;
string imageThumbnailPath = GetImageThumbnail(originaleImagePath);
var anchorNode = HtmlNode.CreateNode("<a>");
var imageNode = HtmlNode.CreateNode("<img>");
imageNode.SetAttributeValue("alt", node.Attributes["alt"].Value);
imageNode.SetAttributeValue("src", imageThumbnailPath);
anchorNode.SetAttributeValue("href", originaleImagePath);
anchorNode.AppendChild(imageNode);
node.ParentNode.InsertBefore(anchorNode, node);
node.ParentNode.RemoveChild(node);
}
catch (Exception exception)
{
if (_log.IsDebugEnabled)
{
_log.WarnFormat("Some message: {0}", exception);
}
}
}
}
}
}
if (node.HasChildNodes)
{
ParseChildren(node);
}
}
}
}