<?php

// It may take a while to crawl a site ...
set_time_limit(10000);

// Include the phpcrawl main class
include("libs/PHPCrawler.class.php");

// Extend the class and override the handleDocumentInfo()-method
class MyCrawler extends PHPCrawler
{
  function handleDocumentInfo($DocInfo)
  {
    // Just detect the linebreak for output ("\n" in CLI-mode, otherwise "<br>").
    if (PHP_SAPI == "cli") $lb = "\n";
    else $lb = "<br />";

    // Print the URL and the HTTP status code
    echo "Page requested: ".$DocInfo->url." (".$DocInfo->http_status_code.")".$lb;

    // Print the referring URL
    echo "Referer-page: ".$DocInfo->referer_url.$lb;

    // Print the array of links found on the page
    // (print_r() needs true as its second argument to return a string)
    //echo "Links found: ".print_r($DocInfo->links_found, true).$lb;

    // Print whether the content of the document was received or not
    if ($DocInfo->received == true)
      echo "Content received: ".$DocInfo->bytes_received." bytes".$lb;
    else
      echo "Content not received".$lb;

    // Now you should do something with the content of the actual
    // received page or file ($DocInfo->source); a minimal sketch follows below.
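    // Illustration only (not part of the original example): a hedged sketch of
    // working with the received source, assuming $DocInfo->source holds the
    // raw HTML of the page (it is empty when the content was not received).
    if ($DocInfo->received == true && $DocInfo->source != "")
    {
      // As a simple example, pull the page title out of the HTML
      if (preg_match("#<title>(.*?)</title>#si", $DocInfo->source, $matches))
        echo "Page title: ".trim($matches[1]).$lb;
    }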

    echo $lb;

    flush();
  }
}

// Now, create an instance of your class, define the behaviour
// of the crawler (see class-reference for more options and details)
// and start the crawling-process.

$crawler = new MyCrawler();

// URL to crawl
$crawler->setURL("http://news.google.com");

// Only receive content of files with content-type "text/html"
$crawler->addContentTypeReceiveRule("#text/html#");

// Ignore links to pictures, don't even request pictures
$crawler->addURLFilterRule("#\.(jpg|jpeg|gif|png)$# i");

// Store and send cookie-data like a browser does
$crawler->enableCookieHandling(true);

// Set the traffic-limit to roughly 2 MB (in bytes,
// for testing we don't want to "suck" the whole site)
$crawler->setTrafficLimit(2000 * 1024);

// Follow-mode 0: follow every link found, even links to other hosts
$crawler->setFollowMode(0);

// That's enough, now here we go
$crawler->go();

// At the end, after the process is finished, we print a short
// report (see method getProcessReport() for more information)
$report = $crawler->getProcessReport();

if (PHP_SAPI == "cli") $lb = "\n";
else $lb = "<br />";

echo "Summary:".$lb;
echo "Links followed: ".$report->links_followed.$lb;
echo "Documents received: ".$report->files_received.$lb;
echo "Bytes received: ".$report->bytes_received." bytes".$lb;
echo "Process runtime: ".$report->process_runtime." sec".$lb;
?>
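The script runs both from the command line and through a web server (the PHP_SAPI check only switches the line breaks). For quick test runs it can also help to cap the number of requested pages in addition to the traffic limit. The lines below are not part of the original example and assume a PHPCrawl 0.8x version that provides setPageLimit(); they would go before the $crawler->go() call above:

// Hedged addition (assumes setPageLimit() is available in your PHPCrawl version):
// stop after 50 requested pages as well as after ~2 MB of traffic,
// so a test run against a large site ends quickly.
$crawler->setPageLimit(50);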