# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Channel for seriehd - based on the guardaserie channel
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
# ------------------------------------------------------------
import re
import sys
import urllib
import urllib2

from core import config
from core import logger
from core import scrapertools
from core.item import Item
from servers import servertools

__channel__ = "seriehd"
__category__ = "S"
__type__ = "generic"
__title__ = "Serie HD"
__language__ = "IT"

headers = [
    ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0'],
    ['Accept-Encoding', 'gzip, deflate']
]
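# Note: several functions below append extra pairs to this list at runtime
# (a 'Cookie' pair in fichas(), a 'Referer' pair in episodios()/findvideos());
# scrapertools sends the list with every request, and fichas() also serializes
# it via dict()/urlencode() so the same headers can ride along on thumbnail URLs.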

host = "http://www.seriehd.org"


def isGeneric():
    return True


def mainlist(item):
    logger.info("[seriehd.py] mainlist")

    itemlist = [Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR azure]Serie TV[/COLOR]",
                     url=host + "/serie-tv-streaming/",
                     thumbnail="http://i.imgur.com/rO0ggX2.png"),
                Item(channel=__channel__,
                     action="sottomenu",
                     title="[COLOR orange]Sottomenu...[/COLOR]",
                     url=host,
                     thumbnail="http://i37.photobucket.com/albums/e88/xzener/NewIcons.png"),
                Item(channel=__channel__,
                     action="search",
                     title="[COLOR green]Cerca...[/COLOR]",
                     thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]

    return itemlist


def search(item, texto):
    logger.info("[seriehd.py] search")

    item.url = host + "/?s=" + texto

    try:
        return fichas(item)

    # Catch the exception so the global search is not interrupted when a channel fails.
    except:
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def sottomenu(item):
    logger.info("[seriehd.py] sottomenu")
    itemlist = []

    data = anti_cloudflare(item.url)

    patron = '<a href="([^"]+)">([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title=scrapedtitle,
                     url=scrapedurl))

    # Remove 'Serie TV' from the 'sottomenu' list (guarded so an empty
    # result cannot raise an IndexError).
    if itemlist:
        itemlist.pop(0)

    return itemlist


def fichas(item):
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    # Rebuild the seriehd.org cookies from the cookie jar on disk as a
    # single "name=value;..." string (fields 5 and 6 of a Netscape cookie
    # file line are the cookie name and value).
    cookies = ""
    matches = re.compile(r'(\.seriehd\.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    headers.append(['Cookie', cookies[:-1]])
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

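    # Kodi accepts HTTP headers appended to an art URL after a '|' separator
    # ("url|Header=Value&Header2=Value2"), so the urlencoded headers built
    # above are glued onto each thumbnail below; without the User-Agent and
    # Cookie headers Cloudflare would refuse to serve the images.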
    patron = r'<h2>(.*?)</h2>\s*'
    patron += r'<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += r'<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        scrapedthumbnail += "|" + _headers
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()

        # Use only the part of the title before any "(" for the TMDb lookup.
        tmdbtitle = scrapedtitle.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            # TMDb lookup failed: fall back to the scraped thumbnail.
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    patron = r"<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
                Item(channel=__channel__,
                     action="fichas",
                     title="[COLOR orange]Successivo>>[/COLOR]",
                     url=next_page))

    return itemlist


def episodios(item):

    #### seasons and episodes still need to be fixed
    #### view-source:http://hdpass.xyz/serie.php?idSerie=99?seriehd
    #### please, give us a hand

    logger.info("[seriehd.py] episodios")

    itemlist = []

    data = anti_cloudflare(item.url)

    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'

    url = scrapertools.find_single_match(data, patron)
    url = scrapertools.decodeHtmlentities(url.replace("?seriehd", ""))

    headers.append(['Referer', url])
    data = scrapertools.cache_page(url, headers=headers)

    start = data.find('</ul>')
    end = data.find('<section id="seasons">', start)
    data = data[start:end]

    seasons_data = scrapertools.get_match(data, '<select onchange="location = this.value;">(.*?)</select>')
    patron = '<option[^=]+="[^&]+&idStagioni=(.*?)">(.*?)</option>'

    seasons = re.compile(patron, re.DOTALL).findall(seasons_data)

    logger.info("[seriehd.py] episodios seasons=%s" % seasons)

    for scrapedseason, scrapednum in seasons:
        # Re-fetch the page for each season, then cut out the episode <select>.
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<section id="seasons">')
        end = data.find('</div>', start)
        data = data[start:end]

        episodes_data = scrapertools.get_match(data, '<select onchange="location = this.value;">(.*?)</select>')
        patron = '<option[^=]+="[^&]+&idStagioni=%s&episode=(.*?)">.*?</option>' % scrapedseason

        episodes = re.compile(patron, re.DOTALL).findall(episodes_data)

        logger.info("[seriehd.py] episodios episodes=%s" % episodes)

        for scrapedepisode in episodes:

            season = str(int(scrapednum))
            episode = str(int(scrapedepisode))
            if len(episode) == 1:
                episode = "0" + episode

            title = season + "x" + episode

            # Pass 'findvideos' a URL made of parts separated by the '?' character:
            # [host+path]?[arguments]?[Referer]
            # A separate variable keeps the base 'url' intact for the next iteration.
            ep_url = "%s?idStagioni=%s&episode=%s?%s" % (url, scrapedseason, scrapedepisode, url)

            logger.info("[seriehd.py] episodios %s -> %s" % (title, ep_url))

            itemlist.append(
                   Item(channel=__channel__,
                        action="findvideos",
                        title=title,
                        url=ep_url,
                        fulltitle=item.fulltitle,
                        show=item.show,
                        thumbnail=item.thumbnail))

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
                Item(channel=__channel__,
                     title=item.title,
                     url=item.url,
                     action="add_serie_to_library",
                     extra="episodios",
                     show=item.show))
        itemlist.append(
                Item(channel=item.channel,
                     title="Scarica tutti gli episodi della serie",
                     url=item.url,
                     action="download_all_episodes",
                     extra="episodios",
                     show=item.show))

    return itemlist

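# A concrete example of the packed URL format consumed by findvideos() below,
# using the illustrative id from the comment at the top of episodios()
# (url = "http://hdpass.xyz/serie.php?idSerie=99", season 1, episode 2):
#
#   ep_url = "http://hdpass.xyz/serie.php?idSerie=99?idStagioni=1&episode=2?http://hdpass.xyz/serie.php?idSerie=99"
#
# so that ep_url.split('?') yields
#   [1] "idSerie=99"                    (query string for serie.php)
#   [2] "idStagioni=1&episode=2"        (sent as the POST body)
#   [3] "http://hdpass.xyz/serie.php"   (first piece of the Referer)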

def findvideos(item):
    logger.info("[seriehd.py] findvideos")
    itemlist = []

    # Unpack the '?'-separated URL built in episodios():
    # [1] query string for serie.php, [2] POST arguments, [3] Referer base.
    url1 = item.url.split('?')[1]
    url = "http://hdpass.xyz/serie.php?" + url1
    post = item.url.split('?')[2]
    referer1 = item.url.split('?')[3]
    referer = referer1 + url1 + post

    headers.append(['Referer', referer])

    data = scrapertools.cache_page(url, post=post, headers=headers)

    patron = '<iframe id="iframeVid" width=".+?" height=".+?" src="([^"]+)" allowfullscreen="">'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass.xyz' in url:
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<ul id="mirrors">')
        end = data.find('</ul>', start)
        data = data[start:end]

        # Each mirror is a small GET form of hidden inputs; collect the
        # name/value pairs and replay them against film.php.
        patron = r'<form method="get" action="">\s*'
        patron += r'<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*'
        patron += r'<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*'
        patron += r'<input type="hidden" name="([^"]+)" value="([^"]+)"/>'
        patron += r'<input type="hidden" name="([^"]+)" value="([^"]+)"/> '
        patron += r'<input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*</form>'

        for name1, val1, name2, val2, name3, val3, name4, val4, name5, val5 in re.compile(patron).findall(data):
            if name3 == '' and val3 == '':
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name4, val4, name5, val5)
            else:
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name3, val3, name4, val4, name5, val5)
            tmp_data = scrapertools.cache_page('http://hdpass.xyz/film.php?' + get_data, headers=headers)
            patron = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"/>'
            for media_label, media_url in re.compile(patron).findall(tmp_data):
                media_label = scrapertools.decodeHtmlentities(media_label.replace("hosting", "hdload"))

                itemlist.append(
                        Item(server=media_label,
                             action="play",
                             title=' - [Player]' if media_label == '' else ' - [Player @%s]' % media_label,
                             url=media_url,
                             folder=False))

    itemlist.extend(servertools.find_video_items(data=url))

    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.show = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist

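# Cloudflare's browser check answers the first request with a "refresh"
# header shaped roughly like "8;url=/cdn-cgi/...": the slice [:1] below reads
# the delay in seconds (assuming a single-digit delay) and [7:] skips the
# "N;url=/" prefix to get the path to revisit. Once that redirect has been
# followed and the clearance cookie stored, the page is fetched normally.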
def anti_cloudflare(url):
    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        import time
        time.sleep(int(resp_headers['refresh'][:1]))

        scrapertools.get_headers_from_response(host + "/" + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)


def unescape(par1, par2, par3):
    # Apply each pattern/replacement pair in turn, then decode the two
    # percent-escapes the site uses.
    var1 = par1
    for ii in xrange(0, len(par2)):
        var1 = re.sub(par2[ii], par3[ii], var1)

    var1 = re.sub("%26", "&", var1)
    var1 = re.sub("%3B", ";", var1)
    return var1.replace('<!--?--><?', '<!--?-->')

def info(title):
    logger.info("streamondemand.seriehd info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title, tipo="tv", include_adult="false", idioma_busqueda="it")
        if oTmdb.total_results > 0:
            extrameta = {}
            extrameta["Year"] = oTmdb.result["release_date"][:4]
            extrameta["Genre"] = ", ".join(oTmdb.result["genres"])
            extrameta["Rating"] = float(oTmdb.result["vote_average"])
            fanart = oTmdb.get_backdrop()
            poster = oTmdb.get_poster()
            plot = oTmdb.get_sinopsis()
            return plot, fanart, poster, extrameta
    except:
        # When the lookup fails (or finds nothing) info() returns None; the
        # caller in fichas() catches the unpacking error and uses scraped data.
        pass