'''
    [(Latin - Italian) | Website archiver]
        Website: 'http://www.latin.it/'
        Functional on: 2018-09-12

    Copyright (c) 2018 <Iseefloatingstufftoo>

    Permission is hereby granted, free of charge, to any person
    obtaining a copy of this software and associated documentation
    files (the "Software"), to deal in the Software without
    restriction, including without limitation the rights to use,
    copy, modify, merge, publish, distribute, sublicense, and/or sell
    copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following
    conditions:

    The above copyright notice and this permission notice shall be
    included in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
    NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
    HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
    WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
    OTHER DEALINGS IN THE SOFTWARE.
'''

import os
import urllib.request

from bs4 import BeautifulSoup

'''
    crawlPage checks whether the current URL contains '.lat', which only the
    pages carrying a Latin text and its Italian translation(s) do. On such a
    page it parses the Latin and the Italian and saves them to separate files.

    On every other page it visits, the crawler collects all links containing
    'autore/', since every page we are interested in (authors, works,
    chapters, and the texts themselves) has 'autore/' in its link. It then
    recurses into each collected link that lies deeper in the folder
    structure than the current page, up to a maximum search depth of 8.
'''
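
# A minimal deduplication sketch (an assumption, not used by crawlPage below):
# the depth cap alone does not stop the same page from being fetched twice
# when several ancestor pages link to it, so a module-level set of seen URLs
# could complement it. crawlPage would call alreadyVisited(t) before recursing.
visitedURLs = set()

def alreadyVisited(url):
    # Returns True if this URL was offered before; records it otherwise.
    if url in visitedURLs:
        return True
    visitedURLs.add(url)
    return False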

def crawlPage(URL, COUNT):
    # If we have already followed 8 links to end up on this page, we stop.
    # This is a safety measure so that we don't end up in a long link chain
    # leading nowhere.
    if COUNT > 8:
        return

    # Some URLs on the site contain spaces for some reason. These are faulty
    # and would crash the request, so we skip any URL that contains a space.
    if ' ' in URL:
        return

    # Display the page we are on and load the website data.
    print(URL)
    response = urllib.request.urlopen('http://www.latin.it/' + URL)
    html = response.read()
    soup = BeautifulSoup(html, 'lxml')

    # Check whether this is a page with the Italian/Latin text on it.
    if '.lat' in URL:
        name = URL[1:]
        store = URL[1:URL.rfind('/')]

        # Recreate the directory structure the URL indicates so that the
        # texts remain sorted.
        if not os.path.exists(store):
            os.makedirs(store)

        # Find the body of text, which contains both the Italian and the Latin.
        body = soup.find('div', {'onselectstart': 'return(false);',
                                 'style': 'user-select:none;-moz-user-select:none;-khtml-user-select:none;'})
        if body is None:
            # The page does not have the expected layout, so we skip it.
            return

        # Filter out the Latin...
        lat = body.find('div', {'style': 'text-align:justify;'}).text

        # ...and write it to a file.
        with open(name + '.txt', 'w', encoding='utf-8', errors='ignore') as f:
            f.write(lat)

        # Find each Italian translation and write it to its own file.
        itCounter = 1
        for it in body.find_all('div', {'class': 'tdbox',
                                        'style': 'text-align:justify; color:#004ea2;'}):
            with open(name + '.it_' + str(itCounter) + '.txt', 'w',
                      encoding='utf-8', errors='ignore') as f:
                f.write(it.get_text())
            itCounter += 1

        return

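    # Worked example of the filter below (URLs are illustrative assumptions):
    # from URL = 'autore/cesare/', a link t = 'autore/cesare/opera/' passes
    # ('autore/cesare/' occurs in t and t is longer), while a sibling link
    # such as 'autore/cicerone/' or the page's own URL is skipped.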
    # Find all links containing 'autore/', and recurse into every link that
    # lies deeper in the folder structure than the current page.
    for link in soup.find_all('a'):
        t = link.get('href')
        if t is not None and 'autore/' in t:
            if URL in t and len(t) > (len(URL) + 1):
                crawlPage(t, COUNT + 1)

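# A possible hardening sketch (an assumption, not wired into crawlPage above):
# a single dead link or a slow server currently aborts the whole crawl, so the
# fetch could be wrapped in error handling plus a short delay to stay polite
# to the server. crawlPage could call fetchPage(URL) instead of calling
# urllib.request.urlopen directly, and skip the page when it returns None.
import time
import urllib.error

def fetchPage(url):
    # Returns the raw HTML of the page, or None if the request failed.
    try:
        time.sleep(0.5)  # pause between requests to avoid hammering the site
        return urllib.request.urlopen('http://www.latin.it/' + url).read()
    except urllib.error.URLError:
        return None
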
# Here we start the crawl.
crawlPage('', 0)
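
# Usage sketch (assumptions: Python 3 and a hypothetical file name):
#     pip install beautifulsoup4 lxml
#     python latin_archiver.py
# For each text page the script writes one <name>.txt file with the Latin
# and one <name>.it_<n>.txt file per Italian translation, mirroring the
# site's 'autore/...' folder structure under the current working directory.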