# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for jkanime
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------

import urlparse,urllib2,urllib,re
import os, sys

from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools

DEBUG = config.get_setting("debug")

__category__ = "A"
__type__ = "generic"
__title__ = "JKanime"
__channel__ = "jkanime"
__language__ = "ES"
__creationdate__ = "20121015"

def isGeneric():
    return True

def mainlist(item):
    logger.info("[jkanime.py] mainlist")

    itemlist = []
    itemlist.append( Item(channel=__channel__, action="ultimos" , title="Últimos"           , url="http://jkanime.net/" ))
    itemlist.append( Item(channel=__channel__, action="letras"  , title="Listado Alfabetico", url="http://jkanime.net/" ))
    itemlist.append( Item(channel=__channel__, action="generos" , title="Listado por Genero", url="http://jkanime.net/" ))
    itemlist.append( Item(channel=__channel__, action="search"  , title="Buscar" ))

    return itemlist

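# Note on the dispatch model (an assumption about the pelisalacarta launcher,
# which is not shown in this file): each entry's "action" names the function
# in this module that gets called with the selected Item, roughly:
#
#   func = globals()[selected_item.action]   # hypothetical sketch
#   itemlist = func(selected_item)
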
def search(item,texto):
    logger.info("[jkanime.py] search")
    if item.url=="":
        item.url="http://jkanime.net/buscar/%s/"
    texto = texto.replace(" ","+")
    item.url = item.url % texto
    try:
        return series(item)
    # Catch the exception so the global search is not interrupted when a channel fails
    except:
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []

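# A minimal sketch, not part of the original channel: replacing spaces by hand
# only covers the simplest case. urllib.quote_plus() (urllib is already
# imported above) would also escape other reserved characters before the query
# is embedded in the search URL:
#
#   texto = urllib.quote_plus(texto)   # e.g. "full metal!" -> "full+metal%21"
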
def ultimos(item):
    logger.info("[jkanime.py] ultimos")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = scrapertools.get_match(data,'<ul class="latestul">(.*?)</ul>')

    patron = '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapedtitle.strip()
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="episodios" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist

def generos(item):
    logger.info("[jkanime.py] generos")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = scrapertools.get_match(data,'<div class="genres">(.*?)</div>')

    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="series" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist

def letras(item):
    logger.info("[jkanime.py] letras")
    itemlist = []

    data = scrapertools.cache_page(item.url)
    data = scrapertools.get_match(data,'<ul class="animelet">(.*?)</ul>')

    patron = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for scrapedurl,scrapedtitle in matches:
        title = scrapedtitle
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="series" , title=title , url=url, thumbnail=thumbnail, plot=plot))

    return itemlist

def series(item):
    logger.info("[jkanime.py] series")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries
    '''
    <table class="search">
    <tr>
    <td rowspan="2">
    <a href="http://jkanime.net/basilisk-kouga-ninpou-chou/"><img src="http://jkanime.net/assets/images/animes/thumbnail/basilisk-kouga-ninpou-chou.jpg" width="50" /></a>
    </td>
    <td><a class="titl" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">Basilisk: Kouga Ninpou Chou</a></td>
    <td rowspan="2" style="width:50px; text-align:center;">Serie</td>
    <td rowspan="2" style="width:50px; text-align:center;" >24 Eps</td>
    </tr>
    <tr>
    <td><p>Basilisk, considerada una de las mejores series del genero ninja, nos narra la historia de dos clanes ninja separados por el odio entre dos familias. Los actuales representantes, Kouga Danjo del clan Kouga y Ogen del clan&#8230; <a class="next" href="http://jkanime.net/basilisk-kouga-ninpou-chou/">seguir leyendo</a></p></td>
    </tr>
    </table>
    '''
    patron  = '<table class="search[^<]+'
    patron += '<tr[^<]+'
    patron += '<td[^<]+'
    patron += '<a href="([^"]+)"><img src="([^"]+)"[^<]+</a>[^<]+'
    patron += '</td>[^<]+'
    patron += '<td><a[^>]+>([^<]+)</a></td>[^<]+'
    patron += '<td[^>]+>([^<]+)</td>[^<]+'
    patron += '<td[^>]+>([^<]+)</td>[^<]+'
    patron += '</tr>[^<]+'
    patron += '<tr>[^<]+'
    patron += '<td>(.*?)</td>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []

    for scrapedurl, scrapedthumbnail,scrapedtitle,line1,line2,scrapedplot in matches:
        title = scrapedtitle.strip()+" ("+line1.strip()+") ("+line2.strip()+")"
        extra = line2.strip()
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = scrapedthumbnail
        plot = scrapertools.htmlclean(scrapedplot)
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="episodios" , title=title , url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot, extra=extra, viewmode="movie_with_plot"))

    # Paging: add a "next results" entry when the site offers one
    try:
        siguiente = scrapertools.get_match(data,'<a class="listsiguiente" href="([^"]+)" >Resultados Siguientes')
        scrapedurl = urlparse.urljoin(item.url,siguiente)
        scrapedtitle = ">> Pagina Siguiente"
        scrapedthumbnail = ""
        scrapedplot = ""

        itemlist.append( Item(channel=__channel__, action="series", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    except:
        pass
    return itemlist

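# A small hedged sketch (hypothetical helper, not part of the original
# channel): the multi-line "patron" built in series() can be exercised offline
# against a saved copy of the search page, like the sample HTML quoted above,
# to detect when the site markup changes:
def _check_series_pattern(sample_html, patron):
    # True while the series() pattern still matches the saved sample page
    return len(re.compile(patron, re.DOTALL).findall(sample_html)) > 0
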
def episodios(item):
    logger.info("[jkanime.py] episodios")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    scrapedplot = scrapertools.get_match(data,'<meta name="description" content="([^"]+)"/>')
    scrapedthumbnail = scrapertools.get_match(data,'<meta property="og.image" content="([^"]+)"/>')
    idserie = scrapertools.get_match(data,"ajax/pagination_episodes/(\d+)/")
    logger.info("idserie="+idserie)

    # The episode list is paginated in blocks of 10; derive the page count
    # from the "NN Eps" tag extracted in series()
    if " Eps" in item.extra:
        caps_x = item.extra
        caps_x = caps_x.replace(" Eps","")
        capitulos = int(caps_x)
        paginas = capitulos/10
        if capitulos%10>0:
            paginas += 1
    else:
        paginas = 1
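    # Note (hypothetical alternative, not in the original): the two-step page
    # count above is plain ceiling division and could be written in one line:
    #   paginas = (capitulos + 9) / 10   # Python 2 integer division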
    for numero in range(1,paginas + 1):

        numero_pagina = str(numero)
        headers = []
        headers.append( [ "User-Agent" , "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:16.0) Gecko/20100101 Firefox/16.0" ] )
        headers.append( [ "Referer" , item.url ] )
        # Pass the headers built above (they were previously never used);
        # assumes cache_page accepts a headers argument, as the standard
        # scrapertools does
        data = scrapertools.cache_page("http://jkanime.net/ajax/pagination_episodes/"+idserie+"/"+numero_pagina+"/",headers=headers)
        logger.info("data="+data)

        '''
        [{"id":"14199","title":"GetBackers - 1","number":"1","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14200","title":"GetBackers - 2","number":"2","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14201","title":"GetBackers - 3","number":"3","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14202","title":"GetBackers - 4","number":"4","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14203","title":"GetBackers - 5","number":"5","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14204","title":"GetBackers - 6","number":"6","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14205","title":"GetBackers - 7","number":"7","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14206","title":"GetBackers - 8","number":"8","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14207","title":"GetBackers - 9","number":"9","animes_id":"122","timestamp":"2012-01-04 16:59:30"},{"id":"14208","title":"GetBackers - 10","number":"10","animes_id":"122","timestamp":"2012-01-04 16:59:30"}]
        '''
        patron = '"id"\:"(\d+)","title"\:"([^"]+)","number"\:"(\d+)","animes_id"\:"(\d+)"'
        matches = re.compile(patron,re.DOTALL).findall(data)

        #http://jkanime.net/get-backers/1/
        for episodio_id,scrapedtitle,numero_episodio,animes_id in matches:
            title = scrapedtitle.strip()
            url = urlparse.urljoin(item.url,numero_episodio)
            thumbnail = scrapedthumbnail
            plot = scrapedplot
            if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

            itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, fanart=thumbnail, plot=plot))

    return itemlist

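# A minimal sketch (hypothetical helper, not part of the original channel)
# showing how the AJAX payload could be parsed with the json module instead of
# a regex, assuming the endpoint returns the JSON array shown in the sample
# above and that the json module is available (Python 2.6+):
def parse_episodes_json(data):
    import json
    episodes = []
    for entry in json.loads(data):
        # each entry carries "id", "title", "number" and "animes_id" as strings
        episodes.append((entry["id"], entry["title"], entry["number"], entry["animes_id"]))
    return episodes
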
def findvideos(item):
    logger.info("[jkanime.py] findvideos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    '''
    clip: {
        url: 'http://jkanime.net/stream/jkget/a958097878b2e53826241592d85ecefb/acaa607e676ddf97bc2e856b813b4762/?t=6e',
    '''
    try:
        mediaurl = scrapertools.get_match(data,"clip\: {\s+url\: '([^']+)'")
        itemlist.append( Item(channel=__channel__, action="play" , title="Ver el vídeo - Mirror 1" , url=mediaurl, thumbnail=item.thumbnail, fanart=item.thumbnail, plot=item.plot, server="directo", folder=False))
    except:
        pass

    #flashvars="file=http://jkanime.net/stream/jkget/a958097878b2e53826241592d85ecefb/acaa607e676ddf97bc2e856b813b4762/&
    try:
        mediaurl = scrapertools.get_match(data,'flashvars\="file\=([^\&]+)\&')
        itemlist.append( Item(channel=__channel__, action="play" , title="Ver el vídeo - Mirror 2" , url=mediaurl, thumbnail=item.thumbnail, fanart=item.thumbnail, plot=item.plot, server="directo", folder=False))
    except:
        pass

    return itemlist

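# A hedged alternative sketch (hypothetical helper, not part of the original
# channel): servertools is imported above but never used. Assuming the
# standard pelisalacarta API, findvideos() could fall back to
# servertools.find_video_items(), which scans a page for known video servers:
def findvideos_generic(item):
    data = scrapertools.cache_page(item.url)
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        videoitem.channel = __channel__
        videoitem.action = "play"
        videoitem.folder = False
    return itemlist
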
# Automatic channel check: this function must return True if the channel is OK.
def test():
    bien = True

    # mainlist
    mainlist_items = mainlist(Item())

    # Check that every option returns something (except the search entry)
    for mainlist_item in mainlist_items:
        if mainlist_item.action!="search":
            exec "itemlist = "+mainlist_item.action+"(mainlist_item)"
            if len(itemlist)==0:
                return False

    # Check whether any episode of the first "Últimos" entry returns mirrors
    ultimos_items = ultimos(mainlist_items[0])
    episodios_items = episodios(ultimos_items[0])

    bien = False
    for episodio_item in episodios_items:
        mirrors = findvideos(item=episodio_item)
        if len(mirrors)>0:
            bien = True
            break

    return bien