SHARE
TWEET

XBMC Free Cable CBS 19.09.2013 MB 2

a guest Sep 19th, 2013 94 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. import xbmcplugin
  2. import xbmcplugin
  3. import xbmc
  4. import xbmcgui
  5. import urllib
  6. import urllib2
  7. import sys
  8. import os
  9. import re
  10. import cookielib
  11. from datetime import datetime
  12. import time
  13.  
  14.  
  15. import demjson
  16. from BeautifulSoup import BeautifulSoup
  17. from BeautifulSoup import MinimalSoup
  18. import resources.lib._common as common
  19. from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
  20.  
  21. #mblanchette 19.09.2013
  22. #andyman 16.04.2013
# Handle for this plugin instance, passed by XBMC as the second argv entry.
pluginhandle = int (sys.argv[1])

# CBS site endpoints scraped throughout this module.
NEW_BASE_URL = "http://www.cbs.com/watch/"  # show directory landing page
BASE_URL = "http://www.cbs.com/video/"      # legacy video index
BASE = "http://www.cbs.com"                 # site root, used to absolutize relative links
  28.  
  29. # def masterlist():
  30.     # print "DEBUG Entering masterlist function"
  31.     # data = common.getURL(BASE_URL)
  32.     # tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  33.     # menu=tree.find(attrs={'id' : 'videoContent'})
  34.     # categories=menu.findAll('div', attrs={'id' : True}, recursive=False)
  35.     # db_shows = []
  36.     # for item in categories:
  37.         # shows = item.findAll(attrs={'id' : 'show_block_interior'})
  38.         # for show in shows:
  39.             # name = show.find('img')['alt'].encode('utf-8')
  40.             # thumb = BASE_URL + show.find('img')['src']
  41.             # url = BASE + show.find('a')['href']
  42.             # print name+'|'+thumb+'|'+url
  43.             # if 'MacGyver' in name:
  44.                 # url += '?vs=Full%20Episodes'
  45.             # #if 'daytime/lets_make_a_deal' in url:
  46.                 # #url = url.replace('daytime/lets_make_a_deal','shows/lets_make_a_deal')
  47.             # if 'cbs_evening_news/video/' in url:
  48.                 # url = 'http://www.cbs.com/shows/cbs_evening_news/video/'
  49.             # elif 'shows/dogs_in_the_city/' in url:
  50.                 # url+='video/'
  51.             # elif '/shows/3/' in url:
  52.                 # url+='video/'
  53.             # elif '/shows/3/' in url:
  54.                 # url+='video/'
  55.             # elif '/shows/nyc_22' in url:
  56.                 # name = 'NYC 22'
  57.                 # url+='video/'
  58.             # db_shows.append((name,'cbs','showcats',url))
  59.     # for show in stShows('http://startrek.com/videos',db=True):
  60.         # db_shows.append(show)
  61.     # return db_shows
  62.  
  63. def rootlist():
  64.     print "DEBUG Entering rootlist function"
  65.     data = common.getURL(NEW_BASE_URL)
  66.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  67.     menu=tree.find(attrs={'id' : 'cbs-show-nav-inner'})
  68.     categories=menu.findAll(attrs={'class' : 'cbs-show-category'})
  69.     for index,item in enumerate(categories):
  70.         catid = str(index)
  71.         name = item.string
  72.         common.addDirectory(name, 'cbs', 'shows', catid)
  73.     common.setView('seasons')
  74.  
def shows(catid = common.args.url):
    """Add a show entry for every show in the nav category matching catid.

    catid is the stringified index produced by rootlist().  Thumbnails are
    matched by title against the carousel box when one exists.
    """
    print "DEBUG Entering shows function"
    xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
    data = common.getURL(NEW_BASE_URL)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    topmenu=tree.find(attrs={'id' : 'cbs-show-nav-inner'})
    showsmenu=tree.find(attrs={'class' : 'video-carousel-box'})
    categorylists=topmenu.findAll(attrs={'class' : 'cbs-show-list'})
    for index,item in enumerate(categorylists):
        if str(index) == catid:
            shows = item.findAll(attrs={'class' : 'show-nav-link'})
            for show in shows:
                name = show.string #.encode('utf-8')
                imagelink=showsmenu.find('a', {'title':name})
                if imagelink == None:
                    # No carousel entry: build the video URL from the nav link.
                    url = BASE + show['href'] + 'video/'
                else:
                    thumbnail = imagelink.find('img')['src']
                    url = imagelink['href']
                # Per-show URL fixups for pages that deviate from the pattern;
                # order matters, only the first matching rule is applied.
                if 'MacGyver' in name:
                    url += '?vs=Full%20Episodes'
                elif 'shows/dogs_in_the_city/' in url:
                    url+='video/'
                elif '/shows/partners/' in url:
                    url+='video/'
                elif '/shows/nyc_22' in url:
                    name = 'NYC 22'
                    url+='video/'
                common.addShow(name, 'cbs', 'showcats', url)#, thumb=thumbnail)
            break
    #if catid == 'classics':
        #stShows('http://startrek.com/videos')
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
    common.setView('tvshows')
  109.  
  110. def stShows(url = common.args.url,db=False):
  111.     stbase = 'http://www.startrek.com'
  112.     data = common.getURL(url)
  113.     remove = re.compile('<!.*?">')
  114.     data = re.sub(remove, '', data)
  115.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  116.     stshows=tree.find('div',attrs={'id' : 'channels'}).findAll('li', attrs={'class' : True})
  117.     st_shows = []      
  118.     for show in stshows:
  119.         name = show['class'].replace('-',' ').title()
  120.         thumb = stbase+show.find('img')['src']
  121.         url = stbase+show.find('a')['href']
  122.         if 'Star Trek' not in name:
  123.             name = 'Star Trek '+name
  124.         if db:
  125.             st_shows.append((name,'cbs','stshowcats',url))
  126.         else:
  127.             common.addShow(name, 'cbs', 'stshowcats', url)#, thumb=thumb)
  128.     if db:
  129.         return st_shows
  130.  
  131. def stshowcats(url = common.args.url):
  132.     stbase = 'http://www.startrek.com'
  133.     data = common.getURL(url)
  134.     remove = re.compile('<!.*?">')
  135.     data = re.sub(remove, '', data)
  136.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  137.     stcats=tree.find('div',attrs={'id' : 'content'}).findAll('div', attrs={'class' : 'box_news'})      
  138.     for cat in stcats:
  139.         name = cat.find('h4').contents[1].strip()
  140.         common.addDirectory(name, 'cbs', 'stvideos', url+'<name>'+name)
  141.     common.setView('seasons')
  142.  
  143. def stvideos(url = common.args.url):
  144.     stbase = 'http://www.startrek.com'
  145.     argname = url.split('<name>')[1]
  146.     url = url.split('<name>')[0]
  147.     stbase = 'http://www.startrek.com'
  148.     data = common.getURL(url)
  149.     remove = re.compile('<!.*?">')
  150.     data = re.sub(remove, '', data)
  151.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  152.     stcats=tree.find('div',attrs={'id' : 'content'}).findAll('div', attrs={'class' : 'box_news'})      
  153.     for cat in stcats:
  154.         name = cat.find('h4').contents[1].strip()
  155.         if name == argname:
  156.             titleUrl=stbase+cat.find('a',attrs={'class' : 'title '})['onclick'].split("url:'")[1].split("'}); return")[0]
  157.             if 'Full Episodes' in argname:
  158.                 titleUrl += '/page_full/1'
  159.             stprocessvideos(titleUrl)
  160.     common.setView('episodes')
  161.  
def stprocessvideos(purl):
    """Add video entries for one startrek.com listing page, then recurse
    into the next page while full pages (4 items) keep coming back.

    purl -- absolute listing URL, optionally ending in /page_full/N or
            /page_other/N once pagination has started.
    """
    print "enter stprocessvideos"
    stbase = 'http://www.startrek.com'
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    data = common.getURL(purl)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    videos=tree.find(attrs={'class' : 'videos_container'}).findAll('li')
    for video in videos:
        thumb = video.find('img')['src']
        url = stbase+video.find('a')['href']
        # The second anchor text is "Show - Episode"; fall back to the
        # whole string when there is no dash to split on.
        try:
            showname,name = video.findAll('a')[1].string.split('-')
        except:
            name = video.findAll('a')[1].string
            showname = ''
        # Expect a "Season X Ep. Y" paragraph plus a "(duration)" one;
        # default everything to empty/zero when the markup is missing.
        try:
            seasonepisode, duration = video.findAll('p')
            seasonepisode = seasonepisode.string.replace('Season ','').split(' Ep. ')
            season = int(seasonepisode[0])
            episode = int(seasonepisode[1])
            duration = duration.string.split('(')[1].replace(')','')
        except:
            season = 0
            episode = 0
            duration = ''
        if season <> 0 or episode <> 0:
            displayname = '%sx%s - %s' % (str(season),str(episode),name)
        else:
            displayname = name
        # Plugin callback URL, resolved by playST().
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(url)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="playST"'
        infoLabels={ "Title":displayname,
                     "Season":season,
                     "Episode":episode,
                     #"premiered":aired,
                     "Duration":duration,
                     "TVShowTitle":showname
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    # A full page holds 4 items; anything shorter is the last page.
    if len(videos) == 4:
        if '/page_full/' not in purl and '/page_other/' not in purl:
            nurl = purl+'/page_other/2'
        else:
            page = int(purl.split('/')[-1])
            nextpage = page + 1
            nurl = purl.replace('/'+str(page),'/'+str(nextpage))
        stprocessvideos(nurl)
  211.  
  212. def showcats(url = common.args.url):
  213.     data = common.getURL(url)
  214.     #try:
  215.     #    var show  = new CBS.Show({id:111381});
  216.     #    show_id = re.compile("new CBS\.Show\(id:(.*?)\);").findall(data)
  217.     #    url = 'http://www.cbs.com/carousels/'+dir3+'/video/'+dir1+'/'+dir2+'/0/400/
  218.     try:
  219.         print 'CBS: Trying New Carousel'
  220.         carousels = re.compile("loadUpCarousel\('(.*?)','(.*?)', '(.*?)', (.*?), true, stored").findall(data)
  221.         carousels[0][0]
  222.         for name,dir1,dir2,dir3 in carousels:
  223.             url = 'http://www.cbs.com/carousels/'+dir3+'/video/'+dir1+'/'+dir2+'/0/400/'
  224.             common.addDirectory(name, 'cbs', 'newvideos', url)
  225.     except:
  226.         print 'CBS: Carousel Failed'
  227.         tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  228.         try:
  229.             print 'CBS: trying secondary-show-nav-wrapper'
  230.             options = tree.find(attrs={'id' : 'secondary-show-nav-wrapper'})
  231.             options = options.findAll('a')
  232.             for option in options:
  233.                 name = option.string.encode('utf-8')
  234.                 url = BASE + option['href']
  235.                 common.addDirectory(name, 'cbs', 'videos', url)
  236.             print 'CBS: trying vid_module'
  237.             options = tree.findAll(attrs={'class' : 'vid_module'})
  238.             for option in options:
  239.                 moduleid = option['id']
  240.                 name = option.find(attrs={'class' : 'hdr'}).string
  241.                 common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)
  242.         except:
  243.             print 'CBS: secondary-show-nav-wrapper failed'
  244.             print 'CBS: trying vid_module secondary'
  245.             try:
  246.                 options = tree.findAll(attrs={'class' : 'vid_module'})
  247.                 print 'DEBUG: '+options
  248.                 for option in options:
  249.                     moduleid = option['id']
  250.                     name = option.find(attrs={'class' : 'hdr'}).string
  251.                     common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)
  252.             except:
  253.                 print 'CBS: vid_module secondary failed'
  254.                 print 'CBS: trying 19.09.2013 MB id-carousel'
  255.                 idpattern = re.compile("id-carousel-(\d+)")
  256.                 categories = tree.findAll(id=idpattern)
  257.                 for cat in categories:
  258.                     catid = idpattern.match(cat['id']).group(1)
  259.                     thisUrl = 'http://www.cbs.com/carousels/videosBySection/'+catid+'/offset/0/limit/40/xs/{excludeShow}'
  260.                     data = common.getURL(thisUrl)
  261.                     name = demjson.decode(data)['result']['title']
  262.                     common.addDirectory(name, 'cbs', 'newvideos2', thisUrl)
  263.     common.setView('seasons')                                      
  264.  
  265. def showsubcats(url = common.args.url):
  266.     moduleid = url.split('<moduleid>')[1]
  267.     url      = url.split('<moduleid>')[0]
  268.     data = common.getURL(url)
  269.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  270.     vid_module = tree.find(attrs={'id' : moduleid})
  271.     PAGES(vid_module)
  272.     common.setView('episodes')
  273.    
  274. def videos(url = common.args.url):
  275.     xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
  276.     data = common.getURL(url)
  277.     tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
  278.     print 'CBS: trying vid_module'
  279.     try:
  280.         options = tree.findAll(attrs={'class' : 'vid_module'})
  281.         if len(options) == 1:
  282.             PAGES(tree)
  283.         else:
  284.             for option in options:
  285.                 moduleid = option['id']
  286.                 name = option.find(attrs={'class' : 'hdr'}).string
  287.                 common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)                                        
  288.     except:
  289.         PAGES(tree)
  290.     common.setView('episodes')
  291.  
  292. def newvideos(url = common.args.url):
  293.     data = common.getURL(url)
  294.     itemList = demjson.decode(data)['itemList']
  295.     for video in itemList:
  296.         url = video['pid']
  297.         description = video['description']
  298.         thumb = video['thumbnail']
  299.         seriesTitle = video['seriesTitle']
  300.         title = video['label']
  301.         try:episodeNum = int(video['episodeNum'])
  302.         except:episodeNum = 0
  303.         try:seasonNum = int(video['seasonNum'])
  304.         except:seasonNum = 0
  305.         duration = int(video['duration'])
  306.         airDate = video['_airDate']
  307.         rating = video['rating']
  308.         u = sys.argv[0]
  309.         u += '?url="'+urllib.quote_plus(url)+'"'
  310.         u += '&mode="cbs"'
  311.         u += '&sitemode="play"'
  312.         displayname = '%sx%s - %s' % (seasonNum,episodeNum,title)
  313.         infoLabels={ "Title":title,
  314.                      "Plot":description,
  315.                      "Season":seasonNum,
  316.                      "Episode":episodeNum,
  317.                      "premiered":airDate,
  318.                      "Duration":str(duration),
  319.                      "mpaa":rating,
  320.                      "TVShowTitle":seriesTitle
  321.                      }
  322.         common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
  323.     common.setView('episodes')  
  324.  
def newvideos2(url = common.args.url):
    """Add video entries from a videosBySection JSON feed (2013 layout).

    The JSON only carries title/thumb/series, so each video's own page is
    fetched to scrape description, air date and season/episode/duration.
    """
    data = common.getURL(url)
    itemList = demjson.decode(data)['result']['data']
    for video in itemList:
        # data from JSON file
        vurl = BASE + video['url']
        thumb = video['thumb']['large']
        seriesTitle = video['series_title']
        title = video['label']
        # need to fetch the video URL for the rest of the meta data
        videodata = common.getURL(vurl)
        videotree=BeautifulSoup(videodata, convertEntities=BeautifulSoup.HTML_ENTITIES)
        description = videotree.find('meta',attrs={'name' : 'description'})['content'].replace("\\'",'"')
        #16.04.2013 - airdate is now in JSON
        # Reformat the scraped MM/DD/YY air date as ISO YYYY-MM-DD,
        # defaulting to 0 when the airdate div is missing or malformed.
        try:
            airdatediv = videotree.find('div',attrs={'class' : 'airdate'})
            aird1 = str(airdatediv).split('<')[1].split(':')[1].strip()
            aird2 = datetime.strptime(aird1, '%m/%d/%y')
            airDate = datetime.strftime(aird2, '%Y-%m-%d')
        except:
            airDate = 0
        metadiv = videotree.find('div',attrs={'class' : 'title'})
        # <span>S6 Ep18 (20:12)  -->  [(u'6', u'18', u'20', u'12')]
        meta = re.compile("<span>S(\d+)\D+(\d+)\D+(\d+)\:(\d+)").findall(str(metadiv))
        try:episodeNum = int(meta[0][1])
        except:episodeNum = 0
        try:seasonNum = int(meta[0][0])
        except:seasonNum = 0
        try:duration = int(meta[0][2])
        except:duration = int('0')
        #rating = video['rating']
        rating = 0
        # Plugin callback URL, resolved by play().
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(vurl)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="play"'
        displayname = '%sx%s - %s' % (seasonNum,episodeNum,title)
        infoLabels={ "Title":title,
                     "Plot":description,
                     "Season":seasonNum,
                     "Episode":episodeNum,
                     "premiered":airDate,
                     "Duration":str(duration),
                     "mpaa":rating,
                     "TVShowTitle":seriesTitle
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    common.setView('episodes')
  373.    
def PAGES( tree ):
    """Walk every pagination page of a carousel and emit its videos.

    tree -- BeautifulSoup node for one vid_module (or a whole page) holding
            the hidden searchEl/returnEl form values and the pagination bar.
    """
    try:
        print 'starting PAGES'
        try:
            search_elements = tree.find(attrs={'name' : 'searchEl'})['value']
            return_elements = tree.find(attrs={'name' : 'returnEl'})['value']
        except:
            # NOTE(review): when this fails, search_elements/return_elements
            # stay unbound and the loop below raises NameError, which the
            # outer except turns into 'Pages Failed' — confirm intended.
            print 'CBS: search and return elements failed'
        try:
            last_page = tree.find(attrs={'id' : 'pagination0'}).findAll(attrs={'class' : 'vids_pag_off'})[-1].string
            last_page = int(last_page) + 1
        except:
            print 'CBS: last page failed reverting to default'
            last_page = 2
        for pageNum in range(1,last_page):
            # POST values for CBS's carousel data endpoint.
            values = {'pg' : str(pageNum),
                      'repub' : 'yes',
                      'displayType' : 'twoby',
                      'search_elements' : search_elements,
                      'return_elements' : return_elements,
                      'carouselId' : '0',
                      'vs' : 'Default',
                      'play' : 'true'
                      }
            url = 'http://www.cbs.com/sitecommon/includes/video/2009_carousel_data_multiple.php'
            data = common.getURL(url, values)
            VIDEOLINKS(data)
    except:
        print 'Pages Failed'
  403.  
def VIDEOLINKS( data ):
    """Parse one carousel data page and add a video entry per item.

    data -- HTML fragment returned by the 2009_carousel_data_multiple.php
            endpoint (fetched by PAGES()).
    """
    print "Entering VIDEOLINKS function"
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    vidfeed=tree.find(attrs={'class' : 'vids_feed'})
    videos = vidfeed.findAll(attrs={'class' : 'floatLeft','style' : True})
    for video in videos:
        thumb = video.find('img')['src']
        vidtitle = video.find(attrs={'class' : 'vidtitle'})
        pid = vidtitle['href'].split('pid=')[1].split('&')[0]
        displayname = vidtitle.string.encode('utf-8')
        # Titles are formatted "Series - Episode"; fall back to the whole
        # string when there is no dash to split on.
        try:
            title = displayname.split('-')[1].strip()
            series = displayname.split('-')[0].strip()
        except:
            print 'title/series metadata failure'
            title = displayname
            series = ''

        metadata = video.find(attrs={'class' : 'season_episode'}).renderContents()
        try:
            duration = metadata.split('(')[1].replace(')','')
        except:
            print 'duration metadata failure'
            duration = ''
        try:
            aired = metadata.split('<')[0].split(':')[1].strip()
        except:
            print 'air date metadata failure'
            aired = ''
        # Season/episode are encoded as digits in the thumbnail filename:
        # 3 digits -> 1-digit season + 2-digit episode, 4 digits -> 2 + 2.
        try:
            seasonepisode = thumb.split('/')[-1].split('_')[2]
            if 3 == len(seasonepisode):
                season = int(seasonepisode[:1])
                episode = int(seasonepisode[-2:])
            elif 4 == len(seasonepisode):
                season = int(seasonepisode[:2])
                episode = int(seasonepisode[-2:])
            if season <> 0 or episode <> 0:
                displayname = '%sx%s - %s' % (str(season),str(episode),title)
        except:
            print 'season/episode metadata failed'
            season = 0
            episode = 0
        # Plugin callback URL, resolved by play().
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(pid)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="play"'
        infoLabels={ "Title":title,
                     "Season":season,
                     "Episode":episode,
                     "premiered":aired,
                     "Duration":duration,
                     "TVShowTitle":series
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    common.setView('episodes')
  460.  
  461.  
  462. def clean_subs(data):
  463.         br = re.compile(r'<br.*?>')
  464.         br_2 = re.compile(r'\n')
  465.         tag = re.compile(r'<.*?>')
  466.         space = re.compile(r'\s\s\s+')
  467.         sub = br.sub('\n', data)
  468.         sub = tag.sub(' ', sub)
  469.         sub = br_2.sub('<br/>', sub)
  470.         sub = space.sub(' ', sub)
  471.         return sub
  472.    
  473.        
  474. def convert_subtitles(subtitles, output):
  475.     subtitle_data = subtitles
  476.     subtitle_data = subtitle_data.replace("\n","").replace("\r","")
  477.     subtitle_data = BeautifulStoneSoup(subtitle_data)
  478.     subtitle_array = []
  479.     srt_output = ''
  480.  
  481.     print "CBS: --> Converting subtitles to SRT"
  482.     #self.update_dialog('Converting Subtitles to SRT')
  483.     lines = subtitle_data.findAll('p') #split the file into lines
  484.     for line in lines:
  485.         if line is not None:
  486.             #print "LINE: " + str(line)
  487.             #print "LINE BEGIN: " + str(line['begin'])
  488.            
  489.             sub=str(clean_subs(str(line)))
  490.             try:
  491.                 newsub=sub
  492.                 sub = BeautifulStoneSoup(sub, convertEntities=BeautifulStoneSoup.ALL_ENTITIES)
  493.             except:
  494.                 sub=newsub
  495.             #print "CURRENT SUB: " + str(sub)
  496.             begin_time = line['begin']
  497.             end_time = line['end']
  498.             start_split =begin_time.split(".")                        
  499.             end_split =end_time.split(".")                        
  500.             timestamp = "%s,%s" % (start_split[0], start_split[1])
  501.             end_timestamp = "%s,%s" % (end_split[0], end_split[1])
  502.             #print "TIMESTAMP " + str(timestamp) + " " + str(end_timestamp)
  503.    
  504.             temp_dict = {'start':timestamp, 'end':end_timestamp, 'text':sub}
  505.             subtitle_array.append(temp_dict)
  506.                
  507.     for i, subtitle in enumerate(subtitle_array):
  508.         line = str(i+1)+"\n"+str(subtitle['start'])+" --> "+str(subtitle['end'])+"\n"+str(subtitle['text'])+"\n\n"
  509.         srt_output += line
  510.                    
  511.     file = open(os.path.join(common.pluginpath,'resources','cache',output+'.srt'), 'w')
  512.     file.write(srt_output)
  513.     file.close()
  514.     print "CBS: --> Successfully converted subtitles to SRT"
  515.     #self.update_dialog('Conversion Complete')
  516.     return True
  517.    
  518. def playST(url = common.args.url):
  519.     print "Entering playST function"
  520.  
  521.     if 'watch_episode' in url:
  522.         pid=url.split('/')[-1]
  523.         play(pid)
  524.     else:
  525.         data=common.getURL(url)
  526.         url = re.compile("flowplayer\\('flow_player', '.*?', '(.*?)'\\)").findall(data)[0]
  527.         item = xbmcgui.ListItem(path=url)
  528.         xbmcplugin.setResolvedUrl(pluginhandle, True, item)  
  529.  
def play(url = common.args.url):
    """Resolve a CBS pid (or episode page URL) to an RTMP stream and play
    it, optionally attaching converted SRT subtitles.
    """
    print "DEBUG Entering play function"
    swfUrl = 'http://can.cbs.com/thunder/player/chrome/canplayer.swf'
    # Accept either a full episode page (scrape the pid) or a bare pid.
    if 'http://' in url:
        data=common.getURL(url)
        try:
            pid = re.compile('video.settings.pid = "(.*?)";').findall(data)[0]
        except:
            pid = re.compile("video.settings.pid = '(.*?)';").findall(data)[0]
    else:
        pid = url
    # OLD URL
    #url = "http://release.theplatform.com/content.select?format=SMIL&Tracking=true&balance=true&MBR=true&pid=" + pid
    url = "http://link.theplatform.com/s/dJ5BDC/%s?format=SMIL&Tracking=true&mbr=true" % pid
    if (common.settings['enableproxy'] == 'true'):
        proxy = True
    else:
        proxy = False
    data=common.getURL(url,proxy=proxy)
    tree=BeautifulStoneSoup(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)

    # Fetch and convert closed captions before playback starts.
    if (common.settings['enablesubtitles'] == 'true'):
        closedcaption = tree.find('param',attrs={'name':'ClosedCaptionURL'})
        if (closedcaption is not None):
            xml_closedcaption = common.getURL(closedcaption['value'])
            convert_subtitles(xml_closedcaption,pid)

    # Pick the highest bitrate at or below the configured quality cap.
    rtmpbase = tree.find('meta')
    if rtmpbase:
        rtmpbase = rtmpbase['base']
        items=tree.find('switch').findAll('video')
        hbitrate = -1
        sbitrate = int(common.settings['quality']) * 1024
        for item in items:
            bitrate = int(item['system-bitrate'])
            if bitrate > hbitrate and bitrate <= sbitrate:
                hbitrate = bitrate
                playpath = item['src']
                if '.mp4' in playpath:
                    playpath = 'mp4:'+playpath
                else:
                    playpath = playpath.replace('.flv','')
                finalurl = rtmpbase+' playpath='+playpath + " swfurl=" + swfUrl + " swfvfy=true"
    # NOTE(review): if no <meta> base exists or no rendition fits under the
    # cap, finalurl is never assigned and the next line raises NameError —
    # consider an explicit fallback or error message.
    item = xbmcgui.ListItem(path=finalurl)
    xbmcplugin.setResolvedUrl(pluginhandle, True, item)
    if (common.settings['enablesubtitles'] == 'true') and (closedcaption is not None):
        # Wait until playback actually starts before attaching subtitles.
        while not xbmc.Player().isPlaying():
            print 'CBS--> Not Playing'
            xbmc.sleep(100)

        subtitles = os.path.join(common.pluginpath,'resources','cache',pid+'.srt')
        print "CBS --> Setting subtitles"
        xbmc.Player().setSubtitles(subtitles)
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
Top