Advertisement
costaplus

New openload

Mar 21st, 2017
202
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 6.39 KB | None | 0 0
  1. # -*- coding: utf-8 -*-
  2. # ------------------------------------------------------------
  3. # streamondemand - XBMC Plugin
  4. # Conector for openload.co
  5. # http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
  6. # ------------------------------------------------------------
  7.  
  8. import re
  9.  
  10. from core import config
  11. from core import httptools
  12. from core import logger
  13. from core import scrapertools
  14.  
  15.  
# Browser-like User-Agent; also appended to the resolved stream URL as a
# download header (see header_down in get_video_url).
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:51.0) Gecko/20100101 Firefox/51.0'}
  17.  
  18.  
  19. def test_video_exists(page_url):
  20.     logger.info("(page_url='%s')" % page_url)
  21.  
  22.     header = {}
  23.     if "|" in page_url:
  24.         page_url, referer = page_url.split("|", 1)
  25.         header = {'Referer': referer}
  26.     data = httptools.downloadpage(page_url, headers=header, cookies=False).data
  27.     if 'We’re Sorry!' in data:
  28.         data = httptools.downloadpage(page_url.replace("/embed/", "/f/"), headers=header, cookies=False).data
  29.         if 'We’re Sorry!' in data:
  30.             return False, "[Openload] File non trovato"
  31.  
  32.     return True, ""
  33.  
  34.  
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve an Openload embed/file page into playable video URLs.

    Returns a list of entries [label, url, 0, subtitle] for the plugin's
    player.  First tries to decode the obfuscated stream id embedded in the
    page; on any failure it falls back to the public API via get_link_api().
    """
    logger.info("url=" + page_url)
    video_urls = []

    # page_url may carry an optional referer after a '|' separator.
    header = {}
    if "|" in page_url:
        page_url, referer = page_url.split("|", 1)
        header = {'Referer': referer}
    data = httptools.downloadpage(page_url, headers=header, cookies=False).data
    subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"')
    # Download header appended to the final URL so playback uses our UA.
    header_down = "|User-Agent=" + headers['User-Agent']

    try:
        from lib.aadecode import decode as aadecode
        if "videocontainer" not in data:
            # Embed page did not include the player markup; fetch /f/ instead.
            url = page_url.replace("/embed/", "/f/")
            data = httptools.downloadpage(url, cookies=False).data

        # The page hides its parameters inside AAEncoded ("aaencode") JS blobs.
        text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));')
        text_decode = ""
        for t in text_encode:
            text_decode += aadecode(t)

        # The decoded JS assigns window.r = "<id prefix>" naming the hidden
        # DOM element(s) whose text is the obfuscated stream token.
        var_r = scrapertools.find_single_match(text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]")
        var_encodes = scrapertools.find_multiple_matches(data, 'id="%s[^"]*">([^<]+)<' % var_r)

        videourl = ""
        for encode in var_encodes:
            text_decode = []
            try:
                # Locate a 30-char block inside the encoded string; its
                # position depends on the first character's code point.
                idx1 = max(2, ord(encode[0]) - 52)
                idx2 = min(idx1, len(encode) - 28)
                idx3 = encode[idx2:idx2+30]
                decode1 = []
                # Read that block as 3-digit octal numbers -> XOR key values.
                for i in range(0, len(idx3), 3):
                    decode1.append(int(idx3[i:i+3], 8))
                # The payload is everything surrounding the key block.
                idx4 = encode[0:idx2] + encode[idx2+30:]
                j = 0
                i = 0
                while i < len(idx4):
                    # Default encoding: two hex digits per output character.
                    data_1 = int(idx4[i:i+2], 16)
                    data_2 = idx4[i:i+3]
                    i += 2
                    if (j % 3) == 0:
                        # Every third output character uses 3 octal digits.
                        data_1 = int(data_2, 8)
                        i += 1
                    elif j % 2 == 0 and j != 0 and ord(idx4[j-1]) < 60:
                        # NOTE(review): indexes idx4 by j (output counter),
                        # not i (input cursor) — looks suspicious, presumably
                        # mirrors the site's JS of the time; confirm before
                        # "fixing".
                        data_1 = int(data_2, 10)
                        i += 1

                    # XOR with a fixed byte and the cycling octal key.
                    value = data_1 ^ 213 ^ decode1[j % 6]
                    j += 1
                    text_decode.append(chr(value))

                text_decode = "".join(text_decode)
            except:
                # Malformed candidate element; try the next one.
                continue

            #videourl = "https://openload.co/stream/%s?mime=true" % text_decode
            videourl = "https://oload.tv/stream/%s?mime=true" % text_decode
            # Follow the redirect by hand to capture the final CDN location
            # and content type from the response headers.
            resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True)
            videourl = resp_headers.headers["location"].replace("https", "http").replace("?mime=true", "")
            extension = resp_headers.headers["content-type"]
            break

        # Decode produced nothing: fall back to the API (per the original
        # author, often unavailable at peak hours).
        if not videourl:
            videourl, extension = get_link_api(page_url)
    except:
        import traceback
        logger.info(traceback.format_exc())
        # Decode raised: fall back to the API (often unavailable at peak hours).
        videourl, extension = get_link_api(page_url)

    # Normalize the MIME content-type into a file extension like ".mp4".
    extension = extension.replace("video/", ".").replace("application/x-", ".")
    if not extension:
        # Last resort: derive the extension from the page's meta description.
        try:
            extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"')
            extension = "." + extension.rsplit(".", 1)[1]
        except:
            pass

    if config.get_platform() != "plex":
        # Non-Plex players accept the extra "|User-Agent=..." download header.
        video_urls.append([extension + " [Openload] ", videourl + header_down, 0, subtitle])
    else:
        video_urls.append([extension + " [Openload] ", videourl, 0, subtitle])

    for video_url in video_urls:
        logger.info("%s - %s" % (video_url[0], video_url[1]))

    return video_urls
  127.  
  128.  
  129. # Encuentra vídeos del servidor en el texto pasado
  130. def find_videos(text):
  131.     encontrados = set()
  132.     devuelve = []
  133.  
  134.     referer = ""
  135.     if "|Referer=" in text:
  136.         referer = "|"+text.split("|Referer=", 1)[1]
  137.     patronvideos = '(?:openload|oload).../(?:embed|f)/([0-9a-zA-Z-_]+)'
  138.     logger.info("#" + patronvideos + "#")
  139.  
  140.     matches = re.compile(patronvideos, re.DOTALL).findall(text)
  141.     for media_id in matches:
  142.         titulo = "[Openload]"
  143.         url = 'https://oload.tv/embed/%s/%s' % (media_id, referer)
  144.         if url not in encontrados:
  145.             logger.info("  url=" + url)
  146.             devuelve.append([titulo, url, 'openload'])
  147.             encontrados.add(url)
  148.         else:
  149.             logger.info("  url duplicada=" + url)
  150.  
  151.     return devuelve
  152.  
  153.  
  154. def get_link_api(page_url):
  155.     from core import jsontools
  156.     file_id = scrapertools.find_single_match(page_url, '(?:embed|f)/([0-9a-zA-Z-_]+)')
  157.     login = "97b2326d7db81f0f"
  158.     key = "AQFO3QJQ"
  159.     data = httptools.downloadpage("https://api.openload.co/1/file/dlticket?file=%s&login=%s&key=%s" % (file_id, login, key)).data
  160.     data = jsontools.load_json(data)
  161.  
  162.     if data["status"] == 200:
  163.         ticket = data["result"]["ticket"]
  164.         data = httptools.downloadpage("https://api.openload.co/1/file/dl?file=%s&ticket=%s" % (file_id, ticket)).data
  165.         data = jsontools.load_json(data)
  166.         extension = data["result"]["content_type"]
  167.         videourl = data['result']['url']
  168.         videourl = videourl.replace("https", "http")
  169.         return videourl, extension
  170.  
  171.     return "", ""
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement