# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for yaske
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys

from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools

__channel__ = "yaske"
__category__ = "F"
__type__ = "generic"
__title__ = "Yaske.net"
__language__ = "ES"

DEBUG = config.get_setting("debug")

def isGeneric():
    return True

def mainlist(item):
    logger.info("[yaske.py] mainlist")

    itemlist = []
    itemlist.append( Item(channel=__channel__, title="Portada"           , action="peliculas", url="http://www.yaske.net/es/peliculas/"))
    itemlist.append( Item(channel=__channel__, title="Categorías"        , action="categorias", url="http://www.yaske.net/es/peliculas/"))
    itemlist.append( Item(channel=__channel__, title="Últimas agregadas" , action="peliculas", url="http://www.yaske.net/es/peliculas/ultimas"))
    itemlist.append( Item(channel=__channel__, title="Buscar"            , action="search") )

    return itemlist

def search(item,texto):
    logger.info("[yaske.py] search")

    itemlist = []

    try:
        item.url = "http://www.yaske.net/es/peliculas/search/%s"
        item.url = item.url % texto
        item.extra = ""
        itemlist.extend(peliculas(item))

        # Sort the results alphabetically by title
        itemlist = sorted(itemlist, key=lambda Item: Item.title)

        return itemlist

    # Catch and log any error so a failing channel does not break the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
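# --- Illustrative sketch (not part of the original channel) -----------------
# search() interpolates "texto" straight into the search URL. If the query can
# contain spaces or reserved characters, escaping it first is safer. The helper
# below is hypothetical (it does not exist in the original file) and only shows
# how the URL could be built with Python 2's urllib.quote_plus.
def _build_search_url(texto):
    # quote_plus turns spaces into "+" and escapes reserved characters
    return "http://www.yaske.net/es/peliculas/search/%s" % urllib.quote_plus(texto)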
def peliculas(item):
    logger.info("[yaske.py] listado")

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries
    # NOTE: the HTML markup inside this pattern was lost in the source; only the
    # fragments "[^<]+", "([^<]+)" and "(.*?)" survive. The tags below are an
    # assumption and must be checked against the real page HTML. The five capture
    # groups are, in order: url, title, thumbnail, quality and languages.
    patron  = '<li[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)">[^<]+'
    patron += '<img src="([^"]+)"[^<]+'
    patron += '<span[^>]*>([^<]+)</span>[^<]+'
    patron += '<div[^>]*>(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)

    itemlist = []
    for scrapedurl,scrapedtitle,scrapedthumbnail,calidad,idiomas in matches:

        # Build the "[es/la/sub]" style language tag from the flag images.
        # NOTE: the original patronidiomas string and the loop that filled
        # idiomas_disponibles were lost in the source; this is a minimal, assumed
        # reconstruction that keeps the surviving lines untouched.
        patronidiomas = '<img src="[^"]*/([^/"]+)\.png"'
        matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(idiomas)

        idiomas_disponibles = ""
        for idioma in matchesidiomas:
            idiomas_disponibles = idiomas_disponibles + idioma + "/"

        if len(idiomas_disponibles)>0:
            idiomas_disponibles = "["+idiomas_disponibles[:-1]+"]"

        title = scrapedtitle.strip()+" "+idiomas_disponibles+"["+calidad+"]"
        url = scrapedurl
        thumbnail = scrapedthumbnail
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=__channel__, action="findvideos", title=title , url=url , thumbnail=thumbnail , plot=scrapedplot , viewmode="movie", folder=True) )

    # Extract the pager
    # NOTE: the tag markup of this pattern was also lost; it assumes the
    # "next page" link is labelled with the &raquo; entity.
    patronvideos = "<a href=\"([^\"]+)\">\&raquo\;</a>"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches)>0:
        scrapedurl = urlparse.urljoin(item.url,matches[0])
        itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Página siguiente" , url=scrapedurl , folder=True) )

    return itemlist
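# --- Illustrative sketch (not part of the original channel) -----------------
# Because the listing pattern above is a reconstruction, it is worth checking it
# against the live page before trusting the results. This hypothetical helper
# downloads a listing URL and reports how many entries a given pattern finds.
def _debug_count_matches(url, pattern):
    data = scrapertools.downloadpageGzip(url)
    matches = re.compile(pattern,re.DOTALL).findall(data)
    logger.info("[yaske.py] _debug_count_matches %d matches for %s" % (len(matches), url))
    return len(matches)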
\»\;" matches = re.compile(patronvideos,re.DOTALL).findall(data) scrapertools.printMatches(matches) if len(matches)>0: scrapedurl = urlparse.urljoin(item.url,matches[0]) itemlist.append( Item(channel=__channel__, action="peliculas", title=">> Página siguiente" , url=scrapedurl , folder=True) ) return itemlist def categorias(item): logger.info("[yaske.py] categorias") # Descarga la página data = scrapertools.downloadpageGzip(item.url) data = scrapertools.get_match(data,'
(.*?)
') # Extrae las entradas #
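# --- Illustrative sketch (not part of the original channel) -----------------
# categorias() passes scrapedurl straight to the next request. If the site ever
# emits relative hrefs, they can be resolved against the listing URL first.
# This hypothetical helper shows the idea with urlparse.urljoin, the same call
# peliculas() already uses for the pager link.
def _resolve_category_url(base_url, scrapedurl):
    # urljoin leaves absolute URLs untouched and resolves relative ones
    return urlparse.urljoin(base_url, scrapedurl)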
def findvideos(item):
    logger.info("[yaske.py] findvideos")

    # Download the page
    data = scrapertools.downloadpageGzip(item.url)

    # Extract the entries
    # Sample row of the links table (its HTML markup was lost in the source):
    '''
    Opcion 11   vk   Latino   dvd screener   Tweet
    '''
    # NOTE: the original pattern and the rest of this function's body were lost
    # in the source. As a minimal, assumed reconstruction, the embedded links
    # are located with the generic server detection from servertools.
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.folder = False

    return itemlist
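# --- Illustrative sketch (not part of the original channel) -----------------
# find_video_items() scans the whole page, so it can also pick up unrelated
# embeds (banners, trailers). If the mirror table can be isolated first, the
# detection gets cleaner. The table boundaries below are placeholders, since
# the real markup is unknown here.
def _find_links_in_table(data):
    try:
        # Restrict the scan to the block that holds the mirror table
        data = scrapertools.get_match(data,'<table[^>]+>(.*?)</table>')
    except:
        # If the assumed boundaries do not match, fall back to the whole page
        pass
    return servertools.find_video_items(data=data)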
def test():
    # NOTE: everything in this function except its final lines ("bien = True",
    # "break", "return bien") was lost in the source. The name and body below
    # follow the usual pelisalacarta channel convention and are an assumed
    # reconstruction: the channel is considered healthy if at least one movie
    # on the front page yields playable links.
    items_portada = peliculas(Item(url="http://www.yaske.net/es/peliculas/"))

    bien = False
    for item_pelicula in items_portada:
        enlaces = findvideos(item_pelicula)
        if len(enlaces)>0:
            bien = True
            break

    return bien
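# --- Illustrative sketch (not part of the original channel) -----------------
# Quick local smoke test: lists the main menu and the first page of movies
# without going through XBMC. It assumes the pelisalacarta "core" and "servers"
# packages are importable from the working directory, which is an assumption
# about the local checkout rather than part of the original file.
if __name__ == "__main__":
    for entrada in mainlist(Item()):
        logger.info("menu: "+entrada.title)
    for pelicula in peliculas(Item(url="http://www.yaske.net/es/peliculas/")):
        logger.info("movie: "+pelicula.title)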