server-rc2
kodish2
Dec 19th, 2019
import requests
from bs4 import BeautifulSoup
import re


def pegar_mp4(titulo, url):
    # Fetch the player page and pull the direct video source out of the
    # RedeCanaisPlayer <video> tag, printing it as a <channel>/<item> entry.
    pagina_de_busca2 = requests.get(url)
    soup2 = BeautifulSoup(pagina_de_busca2.text, "html.parser")

    for player in soup2.find_all('video', attrs={'id': 'RedeCanaisPlayer'}):
        if player.source:
            player2 = player.source.attrs['src']
            #print(player2)

            print('<channel>')
            print('<item>')
            print('<title>' + titulo + '</title>')
            print('<link>' + player2 + '</link>')
            print('<thumbnail>https://icon-icons.com/icons2/1056/PNG/256/movies_icon-icons.com_76714.png</thumbnail>')
            print('<fanart></fanart>')
            print('<info></info>')
            print('</item>')
            print('</channel>')
            print('\n')

            return player2


def conv_link(titulo, links):
    #print(links)
    # Fetch the movie page, recover the embedded player iframe URL and hand the
    # resulting "...player.php" address to pegar_mp4().
    base = "https://redecanais.pictures/"
    conn = requests.get(links)
    soup = BeautifulSoup(conn.text, "html.parser")

    for player in soup.find_all('iframe', attrs={'"="': ''}):
        # The iframe markup on the site is malformed, so strip it by hand.
        data2 = str(player)
        link = data2.replace('<iframe =""="" allowfullscreen="" frameborder="0" height="400" name="Player" scrolling="no" src="', '').replace('" width="640"> </iframe>', '').replace("&amp;", "&")

        urlp = base + link

        if "server" in link:  # only follow links that point at a server*.php player
            urlp2 = urlp.replace('.php', 'player.php')
            #print(urlp2)
            pegar_mp4(titulo, urlp2)

        break  # only the first iframe on the page is of interest


def main():
    headers = {
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36',
    }

    # Walk 50 listing pages of the "filmes" browse index, starting at page 281.
    for i in range(50):
        st = 281
        si = st + i
        li = str(si)

        url = "https://redecanais.pictures/browse-filmes-videos-" + li + "-date.html"
        pagina_de_busca2 = requests.get(url, headers=headers)
        soup = BeautifulSoup(pagina_de_busca2.text, "html.parser")
        #print(soup)

        # Each movie card sits in a <div class="caption">; split its anchor into
        # a title and an absolute link, joined temporarily by a ';'.
        for player in soup.find_all('div', attrs={'class': 'caption'}):
            data3 = str(player.a)
            b1 = data3.replace('<a class="ellipsis" href="/', 'https://redecanais.pictures/')
            b2 = re.sub('" title="', r';', b1)
            b3 = re.sub('">(.*)', r'', b2)

            titulo = re.sub('(.*);', r'', b3)
            link = re.sub(';(.*)', r'', b3)
            #print(b3)
            #print(titulo)
            conv_link(titulo, link)


main()
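
# Minimal usage sketch, assuming the printed <channel>/<item> blocks are meant to be
# collected into a single playlist-style XML file ("filmes.xml" is only an
# illustrative name): redirect stdout around the main() call, for example
#
#   import contextlib
#   with open("filmes.xml", "w", encoding="utf-8") as fh, contextlib.redirect_stdout(fh):
#       main()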