Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- # ------------------------------------------------------------
- # streamondemand.- XBMC Plugin
- # Canal para seriehd - based on guardaserie channel
- # http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
- # ------------------------------------------------------------
- import re
- import sys
- import urllib2
- from core import config
- from core import logger
- from core import scrapertools
- from core.item import Item
- from servers import servertools
# Channel registration metadata read by the plugin framework.
__channel__ = "seriehd"
__category__ = "S"
__type__ = "generic"
__title__ = "Serie HD"
__language__ = "IT"

# Default HTTP headers sent with every request.
# NOTE(review): this list is mutated in place (Cookie/Referer entries are
# appended) by fichas/episodios/findvideos below, so it accumulates entries
# across calls — confirm that is intentional.
headers = [
    ['User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:39.0) Gecko/20100101 Firefox/39.0'],
    ['Accept-Encoding', 'gzip, deflate']
]

# Base URL of the scraped site.
host = "http://www.seriehd.org"
def isGeneric():
    """Tell the plugin framework this is a generic (standard) channel."""
    return True
def mainlist(item):
    """Build the channel's root menu: show listing, site submenu, search."""
    logger.info("[seriehd.py] mainlist")

    entries = [
        ("fichas", "[COLOR azure]Serie TV[/COLOR]",
         host + "/serie-tv-streaming/",
         "http://i.imgur.com/rO0ggX2.png"),
        ("sottomenu", "[COLOR orange]Sottomenu...[/COLOR]",
         host,
         "http://i37.photobucket.com/albums/e88/xzener/NewIcons.png"),
    ]

    itemlist = [Item(channel=__channel__,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumb)
                for action, title, url, thumb in entries]

    # The search entry carries no url: it is filled in later by search().
    itemlist.append(Item(channel=__channel__,
                         action="search",
                         title="[COLOR green]Cerca...[/COLOR]",
                         thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
    return itemlist
def search(item, texto):
    """Run a site search for *texto* and return the matching items.

    Any failure is logged and swallowed (empty list returned) so that a
    broken channel does not interrupt the global multi-channel search.
    """
    logger.info("[seriehd.py] search")
    item.url = host + "/?s=" + texto
    try:
        return fichas(item)
    except Exception:
        # `sys` is already imported at module level; the original re-imported
        # it here and used a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt. Log the exception info and keep going.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def sottomenu(item):
    """Scrape the site's navigation links into a submenu.

    Returns one Item per <a> link found on the page, minus the first entry
    ('Serie TV'), which mainlist() already offers.
    """
    logger.info("[seriehd.py] sottomenu")
    itemlist = []

    data = anti_cloudflare(item.url)

    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title=scrapedtitle,
                 url=scrapedurl))

    # Drop the leading 'Serie TV' link (duplicated in mainlist). Guard the
    # pop: the original raised IndexError when the scrape matched nothing
    # (e.g. site layout change or Cloudflare block).
    if itemlist:
        itemlist.pop(0)

    return itemlist
def fichas(item):
    """List the show cards found at item.url, enriched with TMDb metadata.

    Scrapes (title, thumbnail, url) triples from the page, attempts a TMDb
    lookup via info() for each, and falls back to the raw scraped data when
    the lookup fails. Appends a 'next page' item when pagination is found.
    """
    logger.info("[seriehd.py] fichas")
    itemlist = []

    data = anti_cloudflare(item.url)

    # ------------------------------------------------
    # Collect the site's cookies from the cached cookie jar (Netscape
    # tab-separated format: fields 5/6 are name/value) so they can be
    # appended to the thumbnail URLs for art downloads.
    cookies = ""
    matches = re.compile('(.seriehd.org.*?)\n', re.DOTALL).findall(config.get_cookie_data())
    for cookie in matches:
        name = cookie.split('\t')[5]
        value = cookie.split('\t')[6]
        cookies += name + "=" + value + ";"
    # NOTE(review): appends one more 'Cookie' entry to the module-level
    # headers list on every call — entries accumulate; confirm intentional.
    headers.append(['Cookie', cookies[:-1]])
    import urllib
    _headers = urllib.urlencode(dict(headers))
    # ------------------------------------------------

    patron = '<h2>(.*?)</h2>\s*'
    patron += '<img src="([^"]+)" alt="[^"]*"/>\s*'
    patron += '<A HREF="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedtitle, scrapedthumbnail, scrapedurl in matches:
        # '|'-suffixed headers are passed along when the art is fetched.
        scrapedthumbnail += "|" + _headers

        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
        # Strip anything from '(' onward (usually the year) for the lookup.
        tmdbtitle = scrapedtitle.split("(")[0]
        try:
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(
                Item(channel=__channel__,
                     thumbnail=poster,
                     fanart=fanart if fanart != "" else poster,
                     extrameta=extrameta,
                     plot=str(plot),
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     url=scrapedurl,
                     fulltitle=scrapedtitle,
                     show=scrapedtitle,
                     folder=True))
        except:
            # info() returned None (unpack fails) or raised: fall back to
            # the scraped title/thumbnail only.
            itemlist.append(
                Item(channel=__channel__,
                     action="episodios",
                     title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                     fulltitle=scrapedtitle,
                     url=scrapedurl,
                     show=scrapedtitle,
                     thumbnail=scrapedthumbnail))

    # Pagination: emit a 'next page' item when the pager shows one.
    patron = "<span class='current'>\d+</span><a rel='nofollow' class='page larger' href='([^']+)'>\d+</a>"
    next_page = scrapertools.find_single_match(data, patron)
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title="[COLOR orange]Successivo>>[/COLOR]",
                 url=next_page))

    return itemlist
def episodios(item):
    """List season/episode entries for the show at item.url.

    The site embeds an hdpass.xyz iframe; season and episode numbers are
    scraped from its <select> pickers. Each episode item carries a URL of
    the form [host+path]?[arguments]?[Referer] which findvideos() splits
    on '?' to rebuild the request.
    """
    logger.info("[seriehd.py] episodios")
    itemlist = []

    data = anti_cloudflare(item.url)

    patron = r'<iframe width=".+?" height=".+?" src="([^"]+)" allowfullscreen frameborder="0">'
    url = scrapertools.find_single_match(data, patron)
    url = scrapertools.decodeHtmlentities(url.replace("?seriehd", ""))

    headers.append(['Referer', url])
    data = scrapertools.cache_page(url, headers=headers)

    # The season picker sits between the closing </ul> and the seasons section.
    start = data.find('</ul>')
    end = data.find('<section id="seasons">', start)
    seasons_block = data[start:end]

    seasons_data = scrapertools.get_match(
        seasons_block, '<select onchange="location = this.value;">(.*?)</select>')
    patron = '<option[^=]+="[^&]+&idStagioni=(.*?)">(.*?)</option>'
    seasons = re.compile(patron, re.DOTALL).findall(seasons_data)

    for scrapedseason, scrapednum in seasons:
        # Re-fetch the page and isolate this season's episode picker.
        season_page = scrapertools.cache_page(url, headers=headers)
        start = season_page.find('<section id="seasons">')
        end = season_page.find('</div>', start)
        episodes_block = season_page[start:end]

        episodes_data = scrapertools.get_match(
            episodes_block, '<select onchange="location = this.value;">(.*?)</select>')
        patron = '<option[^=]+="[^&]+&idStagioni=%s&episode=(.*?)">.*?</option>' % scrapedseason
        episodes = re.compile(patron, re.DOTALL).findall(episodes_data)

        for scrapedepisode in episodes:
            season = str(int(scrapednum))
            episode = str(int(scrapedepisode))
            if len(episode) == 1:
                episode = "0" + episode

            title = season + "x" + episode

            # Build the composite URL into a NEW variable. The original code
            # reassigned `url` here, so from the second episode on, every
            # season re-fetch and every generated URL was built from an
            # already-decorated (corrupted) base URL.
            # Layout: [host+path]?[arguments]?[Referer]
            episode_url = "%s?idStagioni=%s&episode=%s?%s" % (
                url, scrapedseason, scrapedepisode, url)

            itemlist.append(
                Item(channel=__channel__,
                     action="findvideos",
                     title=title,
                     url=episode_url,
                     fulltitle=item.fulltitle,
                     show=item.show,
                     thumbnail=item.thumbnail))

    if config.get_library_support() and len(itemlist) != 0:
        itemlist.append(
            Item(channel=__channel__,
                 title=item.title,
                 url=item.url,
                 action="add_serie_to_library",
                 extra="episodios",
                 show=item.show))
        itemlist.append(
            Item(channel=item.channel,
                 title="Scarica tutti gli episodi della serie",
                 url=item.url,
                 action="download_all_episodes",
                 extra="episodios",
                 show=item.show))

    return itemlist
def findvideos(item):
    """Resolve playable video links for the episode encoded in item.url.

    item.url has the form [host+path]?[arguments]?[Referer] (built by
    episodios()); the pieces are recovered positionally by splitting on '?'.
    """
    logger.info("[seriehd.py] findvideos")

    itemlist = []

    # Split the composite URL produced by episodios().
    url1 = item.url.split('?')[1]        # query arguments
    url = "http://hdpass.xyz/serie.php?" + url1
    post = item.url.split('?')[2]        # POST payload
    referer1 = item.url.split('?')[3]    # base referer URL
    referer = referer1 + url1 + post
    headers.append(['Referer', referer])

    data = scrapertools.cache_page(url, post=post, headers=headers)

    patron = '<iframe id="iframeVid" width=".+?" height=".+?" src="([^"]+)" allowfullscreen="">'
    url = scrapertools.find_single_match(data, patron)

    if 'hdpass.xyz' in url:
        # Iframe points back at hdpass: walk its mirror list and submit each
        # hidden-input form to obtain the real embed URLs.
        data = scrapertools.cache_page(url, headers=headers)

        start = data.find('<ul id="mirrors">')
        end = data.find('</ul>', start)
        data = data[start:end]

        patron = '<form method="get" action="">\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/>\s*<input type="hidden" name="([^"]+)" value="([^"]+)"/><input type="hidden" name="([^"]+)" value="([^"]+)"/> <input type="submit" class="[^"]*" name="([^"]+)" value="([^"]+)"/>\s*</form>'

        # html = []
        for name1, val1, name2, val2, name3, val3, name4, val4, name5, val5 in re.compile(patron).findall(data):
            # Some forms ship an empty third field; omit it from the query.
            if name3 == '' and val3 == '':
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name4, val4, name5, val5)
            else:
                get_data = '%s=%s&%s=%s&%s=%s&%s=%s&%s=%s' % (name1, val1, name2, val2, name3, val3, name4, val4, name5, val5)

            tmp_data = scrapertools.cache_page('http://hdpass.xyz/film.php?' + get_data, headers=headers)

            patron = r'<input type="hidden" name="urlEmbed" data-mirror="([^"]+)" id="urlEmbed" value="([^"]+)"/>'
            for media_label, media_url in re.compile(patron).findall(tmp_data):
                # The site labels one mirror 'hosting'; the matching server
                # module in this project is named 'hdload'.
                media_label = scrapertools.decodeHtmlentities(media_label.replace("hosting", "hdload"))
                itemlist.append(
                    Item(server=media_label,
                         action="play",
                         title=' - [Player]' if media_label == '' else ' - [Player @%s]' % media_label,
                         url=media_url,
                         folder=False))

    # Also let the generic server detector scan the iframe URL itself
    # (handles the non-hdpass case where it points at an external host).
    itemlist.extend(servertools.find_video_items(data=url))

    # Propagate the episode's display metadata onto every resolved link.
    for videoitem in itemlist:
        videoitem.title = item.title + videoitem.title
        videoitem.show = item.show
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.channel = __channel__

    return itemlist
def anti_cloudflare(url):
    """Fetch *url*, riding out the site's meta-refresh challenge page.

    Probes the response headers first; when a 'refresh' header is present,
    sleeps for the advertised delay, follows the refresh target (to collect
    the clearance cookie), then fetches the page normally.
    """
    try:
        resp_headers = dict(scrapertools.get_headers_from_response(url, headers=headers))
    except urllib2.HTTPError as e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        import time
        # 'refresh' looks like "N; url=/path": first char is the delay,
        # the target path starts at offset 7.
        time.sleep(int(resp_headers['refresh'][:1]))
        scrapertools.get_headers_from_response(
            host + "/" + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)
def unescape(par1, par2, par3):
    """Apply paired regex substitutions to *par1* and decode two escapes.

    Each pattern par2[i] is replaced by par3[i], in order; afterwards
    '%26' -> '&' and '%3B' -> ';' are always decoded, and a stray
    '<!--?--><?' marker is trimmed.
    """
    var1 = par1
    # zip() pairs the pattern/replacement lists directly; the original
    # indexed them with xrange, which does not exist on Python 3.
    for pattern, repl in zip(par2, par3):
        var1 = re.sub(pattern, repl, var1)
    var1 = re.sub("%26", "&", var1)
    var1 = re.sub("%3B", ";", var1)
    return var1.replace('<!--?--><?', '<!--?-->')
def info(title):
    """Look up *title* on TMDb (Italian TV search) and return metadata.

    Returns a (plot, fanart, poster, extrameta) tuple on success, or None
    when nothing is found or the lookup fails; callers (fichas) catch the
    resulting unpack error and fall back to scraped data.
    """
    logger.info("streamondemand.seriehd info")
    try:
        from core.tmdb import Tmdb
        oTmdb = Tmdb(texto_buscado=title, tipo="tv", include_adult="false", idioma_busqueda="it")
        if oTmdb.total_results > 0:
            extrameta = {}
            extrameta["Year"] = oTmdb.result["release_date"][:4]
            extrameta["Genre"] = ", ".join(oTmdb.result["genres"])
            extrameta["Rating"] = float(oTmdb.result["vote_average"])
            fanart = oTmdb.get_backdrop()
            poster = oTmdb.get_poster()
            plot = oTmdb.get_sinopsis()
            return plot, fanart, poster, extrameta
    except Exception:
        # The original `except: pass` hid every failure (and an unused
        # `count = 0`). TMDb errors stay non-fatal, but are now logged.
        for line in sys.exc_info():
            logger.error("%s" % line)
    return None
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement