# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for seriehd - based on the guardaserie channel
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
# ------------------------------------------------------------
import re
import sys
import urllib2

from core import config
from core import logger
from core import scrapertools
from core.item import Item
from servers import servertools

__channel__ = "seriehd"
__category__ = "S"
__type__ = "generic"
__title__ = "Serie HD"
__language__ = "IT"

headers = [
    ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0'],
    ['Accept-Encoding', 'gzip, deflate']
]
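# NOTE: several functions below append extra ['Cookie', ...] / ['Referer', ...]
# entries to this module-level list before issuing requests, so the list
# accumulates entries across calls.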

host = "http://www.seriehd.org"


def isGeneric():
    return True


def mainlist(item):
    logger.info("[seriehd.py] mainlist")

    itemlist = [Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR azure]Serie TV[/COLOR]",
                     url=host + "/serie-tv-streaming/",
                     thumbnail="http://i.imgur.com/rO0ggX2.png"),
                Item(channel=__channel__,
                     action="sottomenu",
                     title="[COLOR orange]Sottomenu...[/COLOR]",
                     url=host,
                     thumbnail="http://i37.photobucket.com/albums/e88/xzener/NewIcons.png"),
                Item(channel=__channel__,
                     action="search",
                     title="[COLOR green]Cerca...[/COLOR]",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    return itemlist


def search(item, texto):
    logger.info("[seriehd.py] search")

    item.url = host + "/?s=" + texto

    try:
        return fichas(item)

    # Catch the exception so one failing channel does not break the global search.
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def sottomenu(item):
    logger.info("[seriehd.py] sottomenu")
    itemlist = []

    data = anti_cloudflare(item.url)

    patron = '<a href="([^"]+)">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title=scrapedtitle,
                     url=scrapedurl))

    # Remove 'Serie TV' from the 'sottomenu' list
    itemlist.pop(0)

    return itemlist


def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    # Collect the seriehd.org cookies from the cookie jar and urlencode
    # them together with the default headers.
    cookies = ""
    matches = re.compile('(.seriehd.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------
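    # XBMC/Kodi can fetch artwork with extra headers via the
    # "image_url|urlencoded_headers" convention, which is why _headers is
    # appended to each scrapedthumbnail below.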

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()

        tmdbtitle = scrapedtitle.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR orange]Successivo>>[/COLOR]",
                     url=next_page))

    return itemlist


def episodios(item):
    logger.info("[seriehd.py] episodios")

    itemlist = []

    data = anti_cloudflare(item.url)

    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'

    url = scrapertools.find_single_match(data, patron)
    url = scrapertools.decodeHtmlentities(url.replace("?seriehd", ""))

    headers.append(['Referer', url])
    data = scrapertools.cache_page(url, headers=headers)

    start = data.find('<ul>')
    end = data.find('</ul>', start)
    data = data[start:end]

    patron = '<li[^d]+data[^>]+><a href="[^&]+&idStagioni=(.*?)">(.*?)</a></li>'
    seasons = re.compile(patron, re.DOTALL).findall(data)

    for scrapedseason, scrapednum in seasons:
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<section id="seasons">')
        end = data.find('</div>', start)
        data = data[start:end]

        patron = '<li[^>]+><a href="[^&]+&idStagioni=%s[^>]+>(.*?)</a></li>' % scrapedseason
        episodes = re.compile(patron, re.DOTALL).findall(data)
        logger.info("[seriehd.py] episodios: season %s -> %s" % (scrapedseason, episodes))

        for scrapedepisode in episodes:

            season = str(int(scrapednum) + 1)
            episode = str(int(scrapedepisode) + 1)
            if len(episode) == 1:
                episode = "0" + episode

            title = season + "x" + episode

            # Pass 'findvideos' a url whose parts are separated by the "?" character:
            # [host+path]?[arguments]?[Referer]
            # Use a separate variable so the base 'url' is not overwritten
            # between iterations.
            video_url = "%s?idStagioni=%s&episode=%s?%s" % (url, scrapedseason, scrapedepisode, url)

            itemlist.append(
                   Item(channel=__channel__,
                        action="findvideos",
                        title=title,
                        url=video_url,
                        fulltitle=item.fulltitle,
                        show=item.show,
                        thumbnail=item.thumbnail))

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
                Item(channel=__channel__,
                     title=item.title,
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     show=item.show))
        itemlist.append(
                Item(channel=item.channel,
                     title="Scarica tutti gli episodi della serie",
                     url=item.url,
                     action="download_all_episodes",
                     extra="episodios",
                     show=item.show))

    return itemlist

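# Illustrative layout of item.url as packed by episodios() (values hypothetical):
#   http://hdpass.xyz/serie.php?id=1?idStagioni=0&episode=4?http://hdpass.xyz/serie.php?id=1
# Splitting on "?" yields: [1] the base query string, [2] the season/episode
# arguments, [3] the Referer's host+path.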
def findvideos(item):
    logger.info("[seriehd.py] findvideos")
    itemlist = []

    # Unpack the "?"-separated url built in episodios()
    url1 = item.url.split('?')[1]
    url = "http://hdpass.xyz/serie.php?" + url1
    post = item.url.split('?')[2]
    referer1 = item.url.split('?')[3]
    referer = referer1 + url1 + post

    headers.append(['Referer', referer])

    data = scrapertools.cache_page(url, post=post, headers=headers)

    patron = '<iframe id="iframeVid" width=".+?" height=".+?" src="([^"]+)" allowfullscreen="">'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass.xyz' in url:
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<ul id="mirrors">')
        end = data.find('</ul>', start)
        data = data[start:end]

        patron = '<form method="get" action="">\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/><input type="hidden" name="([^"]+)" value="([^"]+)"/> <input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*</form>'

        html = []
        for name1, val1, name2, val2, name3, val3, name4, val4, name5, val5 in re.compile(patron).findall(data):
            if name3 == '' and val3 == '':
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name4, val4, name5, val5)
            else:
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name3, val3, name4, val4, name5, val5)
            tmp_data = scrapertools.cache_page('http://hdpass.xyz/film.php?' + get_data, headers=headers)
            patron = r'; eval\(unescape\("(.*?)",(\[".*?;"\]),(\[".*?\])\)\);'
            try:
                [(par1, par2, par3)] = re.compile(patron, re.DOTALL).findall(tmp_data)
            except:
                # Fallback: the page exposes the embed url directly.
                patron = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"/>'
                for media_label, media_url in re.compile(patron).findall(tmp_data):
                    media_label = scrapertools.decodeHtmlentities(media_label.replace("hosting", "hdload"))

                    itemlist.append(
                            Item(server=media_label,
                                 action="play",
                                 title=' - [Player]' if media_label == '' else ' - [Player @%s]' % media_label,
                                 url=media_url,
                                 folder=False))
                continue

            # Evaluate the two javascript-style arrays with builtins disabled,
            # then undo the page's character-substitution obfuscation.
            par2 = eval(par2, {'__builtins__': None}, {})
            par3 = eval(par3, {'__builtins__': None}, {})
            tmp_data = unescape(par1, par2, par3)
            html.append(tmp_data.replace(r'\/', '/'))
        url = ''.join(html)

    itemlist.extend(servertools.find_video_items(data=url))

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.show = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist

def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    # A Cloudflare challenge answers with a 'Refresh: N; url=...' header:
    # wait the requested number of seconds, then follow the redirect target
    # before fetching the page itself.
    if 'refresh' in resp_headers:
        import time
        time.sleep(int(resp_headers['refresh'][:1]))

        scrapertools.get_headers_from_response(host + "/" + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)
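# Illustrative, assuming a single-digit delay and a "N; url=..." header value:
#   resp_headers['refresh'] == "8; url=/cdn-cgi/x"  ->  sleep(8), then
#   re-request host + "/" + "/cdn-cgi/x" before loading the page.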

def unescape(par1, par2, par3):
    # Reverse the page's obfuscation: apply each regex substitution
    # par2[i] -> par3[i], then restore a few urlencoded characters.
    var1 = par1
    for ii in xrange(0, len(par2)):
        var1 = re.sub(par2[ii], par3[ii], var1)

    var1 = re.sub("%26", "&", var1)
    var1 = re.sub("%3B", ";", var1)
    return var1.replace('<!--?--><?', '<!--?-->')
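# Illustrative (hypothetical) example of the substitution above:
#   unescape("a%26b", ["a"], ["x"])  ->  "x&b"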


def info(title):
    logger.info("streamondemand.seriehd info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title, tipo="tv", include_adult="false", idioma_busqueda="it")
        if oTmdb.total_results > 0:
            extrameta = {}
            extrameta["Year"] = oTmdb.result["release_date"][:4]
            extrameta["Genre"] = ", ".join(oTmdb.result["genres"])
            extrameta["Rating"] = float(oTmdb.result["vote_average"])
            fanart = oTmdb.get_backdrop()
            poster = oTmdb.get_poster()
            plot = oTmdb.get_sinopsis()
            return plot, fanart, poster, extrameta
    except:
        # On failure the implicit None return makes the caller's tuple
        # unpacking raise, which fichas() handles in its except branch.
        pass