Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- # -*- coding: utf-8 -*-
- """
- openload.io urlresolver plugin
- Copyright (C) 2015 tknorris
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
- """
- import cookielib
- import gzip
- import re
- import StringIO
- import urllib
- import urllib2
- import socket
# Set a process-wide default socket timeout (seconds) so slow hosts cannot
# hang requests indefinitely - useful for slow connections and Putlocker.
socket.setdefaulttimeout(10)
class Net:
    '''
    This class wraps :mod:`urllib2` and provides an easy way to make http
    requests while taking care of cookies, proxies, gzip compression and
    character encoding.

    Example::

        from addon.common.net import Net
        net = Net()
        response = net.http_GET('http://xbmc.org')
        print response.content
    '''
    # Class-level defaults. The cookie jar is re-created per instance in
    # __init__, otherwise every Net instance would share (and mutate) the
    # same jar.
    _cj = cookielib.LWPCookieJar()
    _proxy = None
    _user_agent = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
    _http_debug = False

    def __init__(self, cookie_file='', proxy='', user_agent='', http_debug=False):
        '''
        Kwargs:
            cookie_file (str): Full path to a file to be used to load and save
                cookies to.

            proxy (str): Proxy setting (eg.
                ``'http://user:pass@example.com:1234'``)

            user_agent (str): String to use as the User Agent header. If not
                supplied the class will use a default user agent.

            http_debug (bool): Set ``True`` to have HTTP header info written to
                the XBMC log for all requests.
        '''
        # Give this instance its own cookie jar (see class comment above).
        self._cj = cookielib.LWPCookieJar()
        if cookie_file:
            self.set_cookies(cookie_file)
        if proxy:
            self.set_proxy(proxy)
        if user_agent:
            self.set_user_agent(user_agent)
        self._http_debug = http_debug
        self._update_opener()

    def set_cookies(self, cookie_file):
        '''
        Set the cookie file and try to load cookies from it if it exists.

        Args:
            cookie_file (str): Full path to a file to be used to load and save
                cookies to.

        Returns:
            bool: ``True`` if cookies were loaded, ``False`` otherwise.
        '''
        try:
            self._cj.load(cookie_file, ignore_discard=True)
            self._update_opener()
            return True
        except Exception:
            # A missing or unreadable cookie file is not fatal; report failure.
            return False

    def get_cookies(self):
        '''Returns a dictionary containing all cookie information by domain.'''
        return self._cj._cookies

    def save_cookies(self, cookie_file):
        '''
        Saves cookies to a file.

        Args:
            cookie_file (str): Full path to a file to save cookies to.
        '''
        self._cj.save(cookie_file, ignore_discard=True)

    def set_proxy(self, proxy):
        '''
        Args:
            proxy (str): Proxy setting (eg.
                ``'http://user:pass@example.com:1234'``)
        '''
        self._proxy = proxy
        self._update_opener()

    def get_proxy(self):
        '''Returns string containing proxy details.'''
        return self._proxy

    def set_user_agent(self, user_agent):
        '''
        Args:
            user_agent (str): String to use as the User Agent header.
        '''
        self._user_agent = user_agent

    def get_user_agent(self):
        '''Returns user agent string.'''
        return self._user_agent

    def _update_opener(self):
        '''
        Builds and installs a new opener to be used by all future calls to
        :func:`urllib2.urlopen`.
        '''
        if self._http_debug:
            http = urllib2.HTTPHandler(debuglevel=1)
        else:
            http = urllib2.HTTPHandler()
        # Build the handler chain once instead of duplicating build_opener
        # calls for the proxy / no-proxy cases.
        handlers = [urllib2.HTTPCookieProcessor(self._cj)]
        if self._proxy:
            handlers.append(urllib2.ProxyHandler({'http': self._proxy}))
        handlers.append(urllib2.HTTPBasicAuthHandler())
        handlers.append(http)
        urllib2.install_opener(urllib2.build_opener(*handlers))

    def http_GET(self, url, headers=None, compression=True):
        '''
        Perform an HTTP GET request.

        Args:
            url (str): The URL to GET.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, headers=headers, compression=compression)

    def http_POST(self, url, form_data, headers=None, compression=True):
        '''
        Perform an HTTP POST request.

        Args:
            url (str): The URL to POST.

            form_data (dict): A dictionary of form data to POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        return self._fetch(url, form_data, headers=headers, compression=compression)

    def http_HEAD(self, url, headers=None):
        '''
        Perform an HTTP HEAD request.

        Args:
            url (str): The URL to HEAD.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page.
        '''
        request = urllib2.Request(url)
        request.get_method = lambda: 'HEAD'
        request.add_header('User-Agent', self._user_agent)
        for key, value in (headers or {}).items():
            request.add_header(key, value)
        response = urllib2.urlopen(request)
        return HttpResponse(response)

    def _fetch(self, url, form_data=None, headers=None, compression=True):
        '''
        Perform an HTTP GET or POST request.

        Args:
            url (str): The URL to GET or POST.

            form_data (dict or str): Form data to POST. If empty/None the
                request is a GET; if it contains data it is a POST.

        Kwargs:
            headers (dict): A dictionary describing any headers you would like
                to add to the request. (eg. ``{'X-Test': 'testing'}``)

            compression (bool): If ``True`` (default), try to use gzip
                compression.

        Returns:
            An :class:`HttpResponse` object containing headers and other
            meta-information about the page and the page content.
        '''
        req = urllib2.Request(url)
        if form_data:
            # Accept either a pre-encoded string or a dict of form fields.
            if not isinstance(form_data, basestring):
                form_data = urllib.urlencode(form_data, True)
            req = urllib2.Request(url, form_data)
        req.add_header('User-Agent', self._user_agent)
        for key, value in (headers or {}).items():
            req.add_header(key, value)
        if compression:
            req.add_header('Accept-Encoding', 'gzip')
        req.add_unredirected_header('Host', req.get_host())
        response = urllib2.urlopen(req)
        return HttpResponse(response)
class HttpResponse:
    '''
    This class represents a response from an HTTP request.

    The content is examined and every attempt is made to properly encode it to
    Unicode.

    .. seealso::
        :meth:`Net.http_GET`, :meth:`Net.http_HEAD` and :meth:`Net.http_POST`
    '''

    def __init__(self, response):
        '''
        Args:
            response (:class:`mimetools.Message`): The object returned by a call
                to :func:`urllib2.urlopen`.
        '''
        self._response = response
        # Cache for the decoded body: urllib2 responses can only be read
        # once, so ``content`` must not call read() on every access.
        self._content = None

    @property
    def content(self):
        '''Unicode string containing the body of the response.

        The raw body is read (and gunzipped if needed) exactly once;
        subsequent accesses return the cached value instead of the empty
        string a second read() would produce.
        '''
        if self._content is not None:
            return self._content
        html = self._response.read()
        encoding = None
        try:
            if self._response.headers['content-encoding'].lower() == 'gzip':
                html = gzip.GzipFile(fileobj=StringIO.StringIO(html)).read()
        except Exception:
            pass  # not gzip-compressed (or header missing): keep raw body
        try:
            content_type = self._response.headers['content-type']
            if 'charset=' in content_type:
                encoding = content_type.split('charset=')[-1]
        except Exception:
            pass  # no usable Content-Type header
        # A <meta http-equiv> charset inside the document overrides the header.
        r = re.search(r'<meta\s+http-equiv="Content-Type"\s+content="(?:.+?);\s+charset=(.+?)"', html, re.IGNORECASE)
        if r:
            encoding = r.group(1)
        if encoding is not None:
            try:
                html = html.decode(encoding)
            except Exception:
                pass  # unknown/bogus charset: return the undecoded body
        self._content = html
        return html

    def get_headers(self, as_dict=False):
        '''Returns headers returned by the server.

        If as_dict is True, headers are returned as a dictionary otherwise a
        list.
        '''
        if as_dict:
            return dict([(item[0].title(), item[1]) for item in self._response.info().items()])
        else:
            return self._response.info().headers

    def get_url(self):
        '''
        Return the URL of the resource retrieved, commonly used to determine if
        a redirect was followed.
        '''
        return self._response.geturl()
- import re
- import urllib2
- #from urlresolver9 import common
- #from urlresolver9.resolver import UrlResolver, ResolverError
- from HTMLParser import HTMLParser
- import time
- import urllib
- import base64
- #from lib.png import Reader as PNGReader
# Firefox desktop User-Agent sent with embed-page and stream requests
# (openload serves different payloads to unknown clients).
FF_USER_AGENT = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:49.0) Gecko/20100101 Firefox/49.0'
class OpenLoadResolver():
    '''Resolver for openload.io / openload.co embed pages.

    Decodes the obfuscated stream token hidden in the embed HTML and follows
    the stream redirect to obtain the direct video URL.
    '''
    name = "openload"
    domains = ["openload.io", "openload.co"]
    pattern = r'(?://|\.)(openload\.(?:io|co))/(?:embed|f)/([0-9a-zA-Z-_]+)'

    def __init__(self):
        self.net = Net()

    def get_media_url(self, host, media_id):
        '''Resolve the direct video URL for an openload media id.

        Args:
            host (str): Host name (unused; kept for the resolver interface).
            media_id (str): The openload file id from the embed URL.

        Returns:
            str: The direct (redirect-followed) video stream URL.

        Raises:
            Exception: Re-raises any error hit while fetching or decoding.
        '''
        try:
            myurl = 'http://openload.co/embed/%s' % media_id
            http_header = {
                'User-Agent': FF_USER_AGENT,
                'Referer': myurl}  # 'Connection': 'keep-alive'
            # The embed page sometimes serves a bad payload; retry up to three
            # times (sleeping between attempts) when the decoded link still
            # contains junk characters.
            bad_chars = set('[<>=!@#$%^&*()+{}":;\']+$')
            mylink = ''
            for attempt in range(3):
                html = self.net.http_GET(myurl, headers=http_header).content
                mylink = self.get_mylink(html)
                if not bad_chars.intersection(mylink):
                    break
                if attempt < 2:
                    time.sleep(2)
            video_url = 'https://openload.co/stream/{0}?mime=true'.format(mylink)
            headers = {'User-Agent': http_header['User-Agent'], 'Referer': myurl}
            req = urllib2.Request(video_url, None, headers)
            res = urllib2.urlopen(req)
            try:
                # geturl() reflects the redirect target: the real stream URL.
                video_url = res.geturl()
            finally:
                res.close()  # always release the connection
            return video_url
        except Exception as e:
            print("Error", e)
            raise

    def get_url(self, host, media_id):
        '''Return the canonical embed URL for a media id.'''
        return 'http://openload.io/embed/%s' % media_id

    def get_mylink(self, html):
        '''Extract and decode the hidden stream token from an embed page.

        The first ``<span id="...">`` payload is a digit stream: the first two
        digits are a key, then each 5-digit group encodes one character as
        ``chr(int(d[0:3]) - key * int(d[3:5]))``.

        Args:
            html (str): Raw HTML of the embed page.

        Returns:
            str: The decoded stream token.

        Raises:
            Exception: If the page reports the file was removed.
        '''
        try:
            html = html.encode('utf-8')
        except Exception:
            pass  # already a byte string
        if any(x in html for x in ['We are sorry', 'File not found']):
            raise Exception('The file was removed')
        spans = re.findall('<span id="(.*?)">(.*?)</span>', html)
        encoded = spans[0][1]

        def parse_int(sin):
            # Leading integer of a numeric string, or None when absent.
            m = re.search(r'^(\d+)[.,]?\d*?', str(sin))
            return int(m.groups()[-1]) if m and not callable(sin) else None

        key = parse_int(encoded[0:2])
        pos = 2
        txt = ''
        while pos < len(encoded):
            txt += chr(parse_int(encoded[pos:pos + 3]) - key * parse_int(encoded[pos + 3:pos + 5]))
            pos += 5
        return txt

    # If you want to use the code for openload please at least put the info from were you take it:
    # for example: "Code take from plugin IPTVPlayer: "https://gitlab.com/iptvplayer-for-e2/iptvplayer-for-e2/"
    # It will be very nice if you send also email to me samsamsam@o2.pl and inform were this code will be used
    # start https://github.com/whitecream01/WhiteCream-V0.0.1/blob/master/plugin.video.uwc/plugin.video.uwc-1.0.51.zip?raw=true
    def decode(self, encoded):
        '''Decode a backslash-separated octal escape string.

        Each ``\\NNN`` group becomes the character with that octal code;
        fragments that are not valid octal are passed through unchanged.
        '''
        ret = ''
        for item in encoded.split('\\'):
            try:
                ret += chr(int(item, 8))
            except Exception:
                ret += item
        return ret

    def base10toN(self, num, n):
        '''Convert a non-negative integer to its base-``n`` string.

        Digits 10-35 map to ``'a'``-``'z'``; digits >= 36 are emitted as
        ``'(NN)'``. Returns ``''`` when ``num`` is 0.
        '''
        num_rep = {10: 'a', 11: 'b', 12: 'c', 13: 'd', 14: 'e', 15: 'f', 16: 'g', 17: 'h', 18: 'i', 19: 'j', 20: 'k',
                   21: 'l', 22: 'm', 23: 'n', 24: 'o', 25: 'p', 26: 'q', 27: 'r', 28: 's', 29: 't', 30: 'u', 31: 'v',
                   32: 'w', 33: 'x', 34: 'y', 35: 'z'}
        new_num_string = ''
        current = num
        while current != 0:
            remainder = current % n
            if 36 > remainder > 9:
                remainder_string = num_rep[remainder]
            elif remainder >= 36:
                remainder_string = '(' + str(remainder) + ')'
            else:
                remainder_string = str(remainder)
            new_num_string = remainder_string + new_num_string
            # Floor division: '/' would produce a float (and wrong digits)
            # under Python 3; '//' is correct on both 2 and 3.
            current = current // n
        return new_num_string

    def decodeOpenLoad(self, aastring):
        # decodeOpenLoad made by mortael, please leave this line for proper credit :)
        # aastring = re.search(r"<video(?:.|\s)*?<script\s[^>]*?>((?:.|\s)*?)</script", html, re.DOTALL | re.IGNORECASE).group(1)
        # Translate the aaencode ("emoji JavaScript") primitives into their
        # numeric values, then pull out and decode the octal payload.
        aastring = aastring.replace("(゚Д゚)[゚ε゚]+(o゚ー゚o)+ ((c^_^o)-(c^_^o))+ (-~0)+ (゚Д゚) ['c']+ (-~-~1)+", "")
        aastring = aastring.replace("((゚ー゚) + (゚ー゚) + (゚Θ゚))", "9")
        aastring = aastring.replace("((゚ー゚) + (゚ー゚))", "8")
        aastring = aastring.replace("((゚ー゚) + (o^_^o))", "7")
        aastring = aastring.replace("((c^_^o)-(c^_^o))", "0")
        aastring = aastring.replace("((゚ー゚) + (゚Θ゚))", "5")
        aastring = aastring.replace("(゚ー゚)", "4")
        aastring = aastring.replace("((o^_^o) - (゚Θ゚))", "2")
        aastring = aastring.replace("(o^_^o)", "3")
        aastring = aastring.replace("(゚Θ゚)", "1")
        aastring = aastring.replace("(+!+[])", "1")
        aastring = aastring.replace("(c^_^o)", "0")
        aastring = aastring.replace("(0+0)", "0")
        aastring = aastring.replace("(゚Д゚)[゚ε゚]", "\\")
        aastring = aastring.replace("(3 +3 +0)", "6")
        aastring = aastring.replace("(3 - 1 +0)", "2")
        aastring = aastring.replace("(!+[]+!+[])", "2")
        aastring = aastring.replace("(-~-~2)", "4")
        aastring = aastring.replace("(-~-~1)", "3")
        aastring = aastring.replace("(-~0)", "1")
        aastring = aastring.replace("(-~1)", "2")
        aastring = aastring.replace("(-~3)", "4")
        aastring = aastring.replace("(0-0)", "0")
        aastring = aastring.replace("(゚Д゚).゚ω゚ノ", "10")
        aastring = aastring.replace("(゚Д゚).゚Θ゚ノ", "11")
        aastring = aastring.replace("(゚Д゚)[\'c\']", "12")
        aastring = aastring.replace("(゚Д゚).゚ー゚ノ", "13")
        aastring = aastring.replace("(゚Д゚).゚Д゚ノ", "14")
        aastring = aastring.replace("(゚Д゚)[゚Θ゚]", "15")
        # The payload starts at the first escaped group; strip the JS glue.
        decodestring = re.search(r"\\\+([^(]+)", aastring, re.DOTALL | re.IGNORECASE).group(1)
        decodestring = "\\+" + decodestring
        decodestring = decodestring.replace("+", "")
        decodestring = decodestring.replace(" ", "")
        decodestring = self.decode(decodestring)
        decodestring = decodestring.replace("\\/", "/")
        if 'toString' in decodestring:
            # Re-evaluate JS "(a,b).toString(base)" fragments in Python.
            base = re.compile(r"toString\(a\+(\d+)", re.DOTALL | re.IGNORECASE).findall(decodestring)[0]
            base = int(base)
            match = re.compile(r"(\(\d[^)]+\))", re.DOTALL | re.IGNORECASE).findall(decodestring)
            for repl in match:
                match1 = re.compile(r"(\d+),(\d+)", re.DOTALL | re.IGNORECASE).findall(repl)
                base2 = base + int(match1[0][0])
                repl2 = self.base10toN(int(match1[0][1]), base2)
                decodestring = decodestring.replace(repl, repl2)
            decodestring = decodestring.replace("+", "")
            decodestring = decodestring.replace("\"", "")
        return decodestring
def openloadresolv(url):
    '''Resolve an openload embed URL to a direct media URL.

    Args:
        url (str): An embed URL such as ``https://openload.co/embed/<id>/``
            (with or without the trailing slash).

    Returns:
        str: The resolved direct video URL.

    Raises:
        IndexError: If ``url`` contains no ``embed/<id>`` segment.
    '''
    # The media id is everything after 'embed/': prefer the slash-terminated
    # form, fall back to end-of-string for URLs without a trailing slash.
    match = re.findall('embed/(.*?)/', url)
    if match:
        media_id = match[0]
    else:
        media_id = re.findall('embed/(.*?)$', url)[0]
    resolver = OpenLoadResolver()
    return resolver.get_media_url('Host', media_id)
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement