# -*- coding: utf-8 -*-

'''
Eggman Add-on

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''

import re, urllib, urlparse, json

from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import control
from resources.lib.modules import debrid
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
from resources.lib.modules import cfscrape

class source:
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['rlsbb.ru']
        self.base_link = 'http://rlsbb.ru'
        self.search_base_link = 'http://search.rlsbb.ru'
        self.search_cookie = 'serach_mode=rlsbb'
        self.search_link = '/lib/search526049.php?phrase=%s&pindex=1&content=true'

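    # The movie/tvshow/episode helpers below only serialize the metadata they
    # receive into a urlencoded query string; sources() parses it back later.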
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            url = {'imdb': imdb, 'title': title, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
            url = urllib.urlencode(url)
            return url
        except:
            return

    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return

            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
            url = urllib.urlencode(url)
            return url
        except:
            return

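    # sources() builds a release-post slug from the title metadata, fetches the
    # matching rlsbb post through cfscrape (to get past Cloudflare), and turns
    # every hoster link found in the post into a source dictionary.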
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            scraper = cfscrape.create_scraper()

            if url is None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            premDate = ''

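            # Build the post slug the site uses in its URLs, e.g.
            # "Some-Show-S01E01" for episodes or "Some-Movie-2018-1080p" for movies.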
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            # The search endpoint built above is immediately discarded in favour
            # of a direct post URL built from the slug.
            url = "http://rlsbb.ru/" + query
            if 'tvshowtitle' not in data: url = url + "-1080p"

            r = scraper.get(url).content

            if r is None and 'tvshowtitle' in data:
                # Retry with a season-level slug (e.g. "Some-Show-S01") when the
                # episode-level post is not found.
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://rlsbb.ru/" + query
                r = scraper.get(url).content

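            # Two passes: the first uses the SxxEyy/year slug fetched above; the
            # second falls back to a premiere-date slug on rlsbb.to when the
            # first fetch returned nothing for a show.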
            for loopCount in range(0, 2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):

                    premDate = re.sub('[ \.]', '-', data['premiered'])
                    query = re.sub('[\\\\:;*?"<>|/\-\']', '', data['tvshowtitle'])
                    query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")
                    query = query + "-" + premDate

                    url = "http://rlsbb.to/" + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert', 'Stephen-Colbert')

                    r = scraper.get(url).content

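                # Collect every hoster link in the post body whose name carries
                # the SxxEyy/year tag (or, on the fallback pass, the premiere date).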
                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper(): items.append(name)
                                elif len(premDate) > 0 and premDate in name.replace(".", "-"): items.append(name)
                            except:
                                pass
                    except:
                        pass

                if len(items) > 0: break

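            # Deduplicate the collected links, keep only known hosters, skip
            # archive files, and guess quality from the release name.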
            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls: continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict: raise Exception()
                    if any(x in host2 for x in ['.rar', '.zip', '.iso']): continue

                    if '720p' in host2:
                        quality = 'HD'
                    elif '1080p' in host2:
                        quality = '1080p'
                    elif '2160p' in host2:
                        quality = '4K'
                    else:
                        quality = 'SD'

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': host2, 'info': info, 'direct': False, 'debridonly': False})

                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check
            return sources
        except:
            return sources

    def resolve(self, url):
        return url
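
# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): inside the add-on, the
# scraper framework presumably drives this class roughly as below. The IDs,
# titles, and hoster names are placeholder assumptions, and hostDict /
# hostprDict would normally be supplied by the framework's resolver lists.
#
#   s = source()
#   u = s.tvshow('tt0903747', '81189', 'Breaking Bad', 'Breaking Bad', [], '2008')
#   u = s.episode(u, 'tt0903747', '81189', 'Pilot', '2008-01-20', '1', '1')
#   links = s.sources(u, hostDict=['rapidgator.net'], hostprDict=[])
#   for link in links:
#       print link['source'], link['quality'], link['url']
# ---------------------------------------------------------------------------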