inVINCEable Posted November 24, 2009 Share Posted November 24, 2009 $target_url = "****"; $userAgent = 'Googlebot/2.1 (http://www.googlebot.com/bot.html)'; // make the cURL request to $target_url $ch = curl_init(); curl_setopt($ch, CURLOPT_USERAGENT, $userAgent); curl_setopt($ch, CURLOPT_URL,$target_url); curl_setopt($ch, CURLOPT_FAILONERROR, true); curl_setopt($ch, CURLOPT_FOLLOWLOCATION, true); curl_setopt($ch, CURLOPT_AUTOREFERER, true); curl_setopt($ch, CURLOPT_RETURNTRANSFER,true); curl_setopt($ch, CURLOPT_TIMEOUT, 10); $html= curl_exec($ch); if (!$html) { echo " cURL error number:" .curl_errno($ch); echo " cURL error:" . curl_error($ch); exit; } // parse the html into a DOMDocument $dom = new DOMDocument(); @$dom->loadHTML($html); // grab all the textarea elements on the page $xpath = new DOMXPath($dom); $xml = simplexml_import_dom($dom); $names = $xml->xpath('//textarea'); foreach($names as $name) { echo "$name<br />"; } Ok, everything works perfectly except one thing. When I view my page (here listed as *** to remain private) in the textarea element I get my words that I am scraping AND the html elements that are to be displayed (not the html elements for the page formatting, but HTML elements to actually be viewed so users can copy and paste the code). This works fine when viewing it, but when using the above code to scrape the content, only the words show, not the actual HTML elements that appear in the textarea. Is there a workaround for this? I'm assuming XPATH thinks they are elements of the actual page. Any help is GREATLY appreciated. Quote Link to comment Share on other sites More sharing options...
inVINCEable Posted November 24, 2009 Author Share Posted November 24, 2009 Anyone? Quote Link to comment Share on other sites More sharing options...
Recommended Posts
Join the conversation
You can post now and register later. If you have an account, sign in now to post with your account.