# scraper.py
import requests
from bs4 import BeautifulSoup

PAGE = 0
doParse = True
while doParse:
    PAGE += 1
    url = f"https://xakep.ru/{PAGE}"
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')

    # Collect article titles and teaser paragraphs from the listing page
    titles = soup.find_all('h3', class_='entry-title')
    descr = soup.find_all('p', class_='block-exb')

    for d in descr:
        print(d.text)

    # Stop when a page has no articles, or after the second page
    if (len(titles) == 0) or (PAGE == 2):
        break

    print("-" * 50, f"{PAGE=}", "-" * 30, "\n")
    for i in range(len(titles)):
        print(titles[i].text.strip())
        # print("-- ", descr[i])
        print("> ", titles[i].a['href'].strip())
        print()

print(f"Total: {PAGE - 1} pages")