cbs.py (16.04.2013)
import xbmcplugin
import xbmc
import xbmcgui
import urllib
import urllib2
import sys
import os
import re
import cookielib
from datetime import datetime
import time

import demjson
from BeautifulSoup import BeautifulSoup, BeautifulStoneSoup
import resources.lib._common as common

# andyman 16.04.2013
pluginhandle = int(sys.argv[1])

BASE_URL = "http://www.cbs.com/video/"
BASE = "http://www.cbs.com"

# def masterlist():
    # print "DEBUG Entering masterlist function"
    # data = common.getURL(BASE_URL)
    # tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    # menu=tree.find(attrs={'id' : 'videoContent'})
    # categories=menu.findAll('div', attrs={'id' : True}, recursive=False)
    # db_shows = []
    # for item in categories:
        # shows = item.findAll(attrs={'id' : 'show_block_interior'})
        # for show in shows:
            # name = show.find('img')['alt'].encode('utf-8')
            # thumb = BASE_URL + show.find('img')['src']
            # url = BASE + show.find('a')['href']
            # print name+'|'+thumb+'|'+url
            # if 'MacGyver' in name:
                # url += '?vs=Full%20Episodes'
            # #if 'daytime/lets_make_a_deal' in url:
                # #url = url.replace('daytime/lets_make_a_deal','shows/lets_make_a_deal')
            # if 'cbs_evening_news/video/' in url:
                # url = 'http://www.cbs.com/shows/cbs_evening_news/video/'
            # elif 'shows/dogs_in_the_city/' in url:
                # url+='video/'
            # elif '/shows/3/' in url:
                # url+='video/'
            # elif '/shows/nyc_22' in url:
                # name = 'NYC 22'
                # url+='video/'
            # db_shows.append((name,'cbs','showcats',url))
    # for show in stShows('http://startrek.com/videos',db=True):
        # db_shows.append(show)
    # return db_shows

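# Directory flow: rootlist() lists the dayparts from the /video/ page nav,
# shows() lists the shows in a daypart, showcats()/showsubcats()/videos() drill
# into a show's video carousels, and play()/playST() resolve the final stream.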
def rootlist():
    print "DEBUG Entering rootlist function"
    data = common.getURL(BASE_URL)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    menu=tree.find(attrs={'id' : 'daypart_nav'})
    categories=menu.findAll('a')
    for item in categories:
        if item['href'].find('javascript') == 0:
            catid = item['onclick'].replace("showDaypart('",'').replace("');",'')
            name = re.compile('<a.*>(.+)</a>').findall(str(item))[0].title()
            common.addDirectory(name, 'cbs', 'shows', catid)
    common.setView('seasons')

def shows(catid = common.args.url):
    print "DEBUG Entering shows function"
    xbmcplugin.setContent(int(sys.argv[1]), 'tvshows')
    data = common.getURL(BASE_URL)
    data = re.compile('<!-- SHOWS LIST -->(.*?)<!-- END SHOWS LIST -->',re.DOTALL).findall(data)[0]
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    categories=tree.findAll('div', attrs={'id' : True}, recursive=False)
    for item in categories:
        if item['id'] == catid:
            shows = item.findAll(attrs={'id' : 'show_block_interior'})
            for show in shows:
                name = show.find('img')['alt'].encode('utf-8')
                thumbnail = BASE + show.find('img')['src']
                url = show.find('a')['href']
                if 'MacGyver' in name:
                    url += '?vs=Full%20Episodes'
                #if 'daytime/lets_make_a_deal' in url:
                    #url = url.replace('daytime/lets_make_a_deal','shows/lets_make_a_deal')
                if 'cbs_evening_news' in url:
                    url = 'http://www.cbs.com/shows/cbs_evening_news/video/'
                elif 'shows/dogs_in_the_city/' in url:
                    url+='video/'
                elif '/shows/partners/' in url:
                    url+='video/'
                elif '/shows/elementary/' in url:
                    url+='video/'
                elif '/shows/vegas/' in url:
                    url+='video/'
                elif '/shows/nyc_22' in url:
                    name = 'NYC 22'
                    url+='video/'
                common.addShow(name, 'cbs', 'showcats', url)#, thumb=thumbnail)
            break
    #if catid == 'classics':
        #stShows('http://startrek.com/videos')
    xbmcplugin.addSortMethod(int(sys.argv[1]), xbmcplugin.SORT_METHOD_LABEL)
    common.setView('tvshows')

def stShows(url = common.args.url,db=False):
    stbase = 'http://www.startrek.com'
    data = common.getURL(url)
    remove = re.compile('<!.*?">')
    data = re.sub(remove, '', data)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    stshows=tree.find('div',attrs={'id' : 'channels'}).findAll('li', attrs={'class' : True})
    st_shows = []
    for show in stshows:
        name = show['class'].replace('-',' ').title()
        thumb = stbase+show.find('img')['src']
        url = stbase+show.find('a')['href']
        if 'Star Trek' not in name:
            name = 'Star Trek '+name
        if db:
            st_shows.append((name,'cbs','stshowcats',url))
        else:
            common.addShow(name, 'cbs', 'stshowcats', url)#, thumb=thumb)
    if db:
        return st_shows

def stshowcats(url = common.args.url):
    stbase = 'http://www.startrek.com'
    data = common.getURL(url)
    remove = re.compile('<!.*?">')
    data = re.sub(remove, '', data)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    stcats=tree.find('div',attrs={'id' : 'content'}).findAll('div', attrs={'class' : 'box_news'})
    for cat in stcats:
        name = cat.find('h4').contents[1].strip()
        common.addDirectory(name, 'cbs', 'stvideos', url+'<name>'+name)
    common.setView('seasons')

def stvideos(url = common.args.url):
    stbase = 'http://www.startrek.com'
    argname = url.split('<name>')[1]
    url = url.split('<name>')[0]
    data = common.getURL(url)
    remove = re.compile('<!.*?">')
    data = re.sub(remove, '', data)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    stcats=tree.find('div',attrs={'id' : 'content'}).findAll('div', attrs={'class' : 'box_news'})
    for cat in stcats:
        name = cat.find('h4').contents[1].strip()
        if name == argname:
            titleUrl=stbase+cat.find('a',attrs={'class' : 'title '})['onclick'].split("url:'")[1].split("'}); return")[0]
            if 'Full Episodes' in argname:
                titleUrl += '/page_full/1'
            stprocessvideos(titleUrl)
    common.setView('episodes')

def stprocessvideos(purl):
    print "enter stprocessvideos"
    stbase = 'http://www.startrek.com'
    xbmcplugin.addSortMethod(pluginhandle, xbmcplugin.SORT_METHOD_LABEL)
    data = common.getURL(purl)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    videos=tree.find(attrs={'class' : 'videos_container'}).findAll('li')
    for video in videos:
        thumb = video.find('img')['src']
        url = stbase+video.find('a')['href']
        try:
            showname,name = video.findAll('a')[1].string.split('-')
        except:
            name = video.findAll('a')[1].string
            showname = ''
        try:
            seasonepisode, duration = video.findAll('p')
            seasonepisode = seasonepisode.string.replace('Season ','').split(' Ep. ')
            season = int(seasonepisode[0])
            episode = int(seasonepisode[1])
            duration = duration.string.split('(')[1].replace(')','')
        except:
            season = 0
            episode = 0
            duration = ''
        if season != 0 or episode != 0:
            displayname = '%sx%s - %s' % (str(season),str(episode),name)
        else:
            displayname = name
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(url)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="playST"'
        infoLabels={ "Title":displayname,
                     "Season":season,
                     "Episode":episode,
                     #"premiered":aired,
                     "Duration":duration,
                     "TVShowTitle":showname
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    # startrek.com returns four videos per page; keep paging until a shorter page comes back
    if len(videos) == 4:
        if '/page_full/' not in purl and '/page_other/' not in purl:
            nurl = purl+'/page_other/2'
        else:
            page = int(purl.split('/')[-1])
            nextpage = page + 1
            nurl = purl.replace('/'+str(page),'/'+str(nextpage))
        stprocessvideos(nurl)

def showcats(url = common.args.url):
    data = common.getURL(url)
    #try:
    #    var show  = new CBS.Show({id:111381});
    #    show_id = re.compile("new CBS\.Show\(id:(.*?)\);").findall(data)
    #    url = 'http://www.cbs.com/carousels/'+dir3+'/video/'+dir1+'/'+dir2+'/0/400/
    # Try the newest carousel layout first, then fall back through the older page layouts.
    try:
        print 'CBS: Trying New Carousel'
        carousels = re.compile("loadUpCarousel\('(.*?)','(.*?)', '(.*?)', (.*?), true, stored").findall(data)
        carousels[0][0]  # raises IndexError when no carousels matched, triggering the fallbacks below
        for name,dir1,dir2,dir3 in carousels:
            url = 'http://www.cbs.com/carousels/'+dir3+'/video/'+dir1+'/'+dir2+'/0/400/'
            common.addDirectory(name, 'cbs', 'newvideos', url)
    except:
        print 'CBS: Carousel Failed'
        tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
        try:
            print 'CBS: trying secondary-show-nav-wrapper'
            options = tree.find(attrs={'id' : 'secondary-show-nav-wrapper'})
            options = options.findAll('a')
            for option in options:
                name = option.string.encode('utf-8')
                url = BASE + option['href']
                common.addDirectory(name, 'cbs', 'videos', url)
            print 'CBS: trying vid_module'
            options = tree.findAll(attrs={'class' : 'vid_module'})
            for option in options:
                moduleid = option['id']
                name = option.find(attrs={'class' : 'hdr'}).string
                common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)
        except:
            print 'CBS: secondary-show-nav-wrapper failed'
            print 'CBS: trying vid_module secondary'
            try:
                options = tree.findAll(attrs={'class' : 'vid_module'})
                print 'DEBUG: '+str(options)
                for option in options:
                    moduleid = option['id']
                    name = option.find(attrs={'class' : 'hdr'}).string
                    common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)
            except:
                print 'CBS: vid_module secondary failed'
                print 'CBS: trying 16.04.2013 id-carousel'
                categories = re.compile("id-carousel-(\d+)").findall(str(tree))
                for catid in categories:
                    thisUrl = 'http://www.cbs.com/carousels/videosBySection/'+catid+'/offset/0/limit/40/'
                    data = common.getURL(thisUrl)
                    name = demjson.decode(data)['result']['title']
                    common.addDirectory(name, 'cbs', 'newvideos2', thisUrl)
    common.setView('seasons')

def showsubcats(url = common.args.url):
    moduleid = url.split('<moduleid>')[1]
    url      = url.split('<moduleid>')[0]
    data = common.getURL(url)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    vid_module = tree.find(attrs={'id' : moduleid})
    PAGES(vid_module)
    common.setView('episodes')

def videos(url = common.args.url):
    xbmcplugin.setContent(int(sys.argv[1]), 'episodes')
    data = common.getURL(url)
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    print 'CBS: trying vid_module'
    try:
        options = tree.findAll(attrs={'class' : 'vid_module'})
        if len(options) == 1:
            PAGES(tree)
        else:
            for option in options:
                moduleid = option['id']
                name = option.find(attrs={'class' : 'hdr'}).string
                common.addDirectory(name, 'cbs', 'showsubcats', url+'<moduleid>'+moduleid)
    except:
        PAGES(tree)
    common.setView('episodes')

def newvideos(url = common.args.url):
    data = common.getURL(url)
    itemList = demjson.decode(data)['itemList']
    for video in itemList:
        url = video['pid']
        description = video['description']
        thumb = video['thumbnail']
        seriesTitle = video['seriesTitle']
        title = video['label']
        try:episodeNum = int(video['episodeNum'])
        except:episodeNum = 0
        try:seasonNum = int(video['seasonNum'])
        except:seasonNum = 0
        duration = int(video['duration'])
        airDate = video['_airDate']
        rating = video['rating']
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(url)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="play"'
        displayname = '%sx%s - %s' % (seasonNum,episodeNum,title)
        infoLabels={ "Title":title,
                     "Plot":description,
                     "Season":seasonNum,
                     "Episode":episodeNum,
                     "premiered":airDate,
                     "Duration":str(duration),
                     "mpaa":rating,
                     "TVShowTitle":seriesTitle
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    common.setView('episodes')

def newvideos2(url = common.args.url):
    data = common.getURL(url)
    itemList = demjson.decode(data)['result']['data']
    for video in itemList:
        # data from JSON file
        vurl = BASE + video['url']
        thumb = video['thumb']['large']
        seriesTitle = video['series_title']
        title = video['label']
        # need to fetch the video URL for the rest of the meta data
        videodata = common.getURL(vurl)
        videotree=BeautifulSoup(videodata, convertEntities=BeautifulSoup.HTML_ENTITIES)
        description = videotree.find('meta',attrs={'name' : 'description'})['content'].replace("\\'",'"')
        #16.04.2013 - airdate is now in JSON
        try:
            airdatediv = videotree.find('div',attrs={'class' : 'airdate'})
            aird1 = str(airdatediv).split('<')[1].split(':')[1].strip()
            aird2 = datetime.strptime(aird1, '%m/%d/%y')
            airDate = datetime.strftime(aird2, '%Y-%m-%d')
        except:
            airDate = 0
        metadiv = videotree.find('div',attrs={'class' : 'title'})
        # <span>S6 Ep18 (20:12)  -->  [(u'6', u'18', u'20', u'12')]
        meta = re.compile("<span>S(\d+)\D+(\d+)\D+(\d+)\:(\d+)").findall(str(metadiv))
        try:episodeNum = int(meta[0][1])
        except:episodeNum = 0
        try:seasonNum = int(meta[0][0])
        except:seasonNum = 0
        try:duration = int(meta[0][2])
        except:duration = 0
        #rating = video['rating']
        rating = 0
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(vurl)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="play"'
        displayname = '%sx%s - %s' % (seasonNum,episodeNum,title)
        infoLabels={ "Title":title,
                     "Plot":description,
                     "Season":seasonNum,
                     "Episode":episodeNum,
                     "premiered":airDate,
                     "Duration":str(duration),
                     "mpaa":rating,
                     "TVShowTitle":seriesTitle
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    common.setView('episodes')

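# PAGES walks a show's paginated carousel listing: it reads the hidden
# searchEl/returnEl form values and the pagination links out of the parsed
# page fragment, requests each page from the 2009_carousel_data_multiple.php
# endpoint, and hands each returned HTML chunk to VIDEOLINKS for scraping.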
def PAGES( tree ):
    try:
        print 'starting PAGES'
        try:
            search_elements = tree.find(attrs={'name' : 'searchEl'})['value']
            return_elements = tree.find(attrs={'name' : 'returnEl'})['value']
        except:
            print 'CBS: search and return elements failed'
        try:
            last_page = tree.find(attrs={'id' : 'pagination0'}).findAll(attrs={'class' : 'vids_pag_off'})[-1].string
            last_page = int(last_page) + 1
        except:
            print 'CBS: last page failed reverting to default'
            last_page = 2
        for pageNum in range(1,last_page):
            values = {'pg' : str(pageNum),
                      'repub' : 'yes',
                      'displayType' : 'twoby',
                      'search_elements' : search_elements,
                      'return_elements' : return_elements,
                      'carouselId' : '0',
                      'vs' : 'Default',
                      'play' : 'true'
                      }
            url = 'http://www.cbs.com/sitecommon/includes/video/2009_carousel_data_multiple.php'
            data = common.getURL(url, values)
            VIDEOLINKS(data)
    except:
        print 'Pages Failed'

def VIDEOLINKS( data ):
    print "Entering VIDEOLINKS function"
    tree=BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
    vidfeed=tree.find(attrs={'class' : 'vids_feed'})
    videos = vidfeed.findAll(attrs={'class' : 'floatLeft','style' : True})
    for video in videos:
        thumb = video.find('img')['src']
        vidtitle = video.find(attrs={'class' : 'vidtitle'})
        pid = vidtitle['href'].split('pid=')[1].split('&')[0]
        displayname = vidtitle.string.encode('utf-8')
        try:
            title = displayname.split('-')[1].strip()
            series = displayname.split('-')[0].strip()
        except:
            print 'title/series metadata failure'
            title = displayname
            series = ''

        metadata = video.find(attrs={'class' : 'season_episode'}).renderContents()
        try:
            duration = metadata.split('(')[1].replace(')','')
        except:
            print 'duration metadata failure'
            duration = ''
        try:
            aired = metadata.split('<')[0].split(':')[1].strip()
        except:
            print 'air date metadata failure'
            aired = ''
        try:
            # season/episode appear to be encoded in the thumbnail filename (3rd '_' field: SEE or SSEE)
            seasonepisode = thumb.split('/')[-1].split('_')[2]
            if 3 == len(seasonepisode):
                season = int(seasonepisode[:1])
                episode = int(seasonepisode[-2:])
            elif 4 == len(seasonepisode):
                season = int(seasonepisode[:2])
                episode = int(seasonepisode[-2:])
            if season != 0 or episode != 0:
                displayname = '%sx%s - %s' % (str(season),str(episode),title)
        except:
            print 'season/episode metadata failed'
            season = 0
            episode = 0
        u = sys.argv[0]
        u += '?url="'+urllib.quote_plus(pid)+'"'
        u += '&mode="cbs"'
        u += '&sitemode="play"'
        infoLabels={ "Title":title,
                     "Season":season,
                     "Episode":episode,
                     "premiered":aired,
                     "Duration":duration,
                     "TVShowTitle":series
                     }
        common.addVideo(u,displayname,thumb,infoLabels=infoLabels)
    common.setView('episodes')

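# Subtitle handling: CBS serves closed captions as timed-text XML. clean_subs()
# strips the markup from a single caption <p> element, and convert_subtitles()
# writes the whole document out as an .srt file in the add-on's resources/cache
# directory so XBMC can load it during playback.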
def clean_subs(data):
    br = re.compile(r'<br.*?>')
    br_2 = re.compile(r'\n')
    tag = re.compile(r'<.*?>')
    space = re.compile(r'\s\s\s+')
    sub = br.sub('\n', data)
    sub = tag.sub(' ', sub)
    sub = br_2.sub('<br/>', sub)
    sub = space.sub(' ', sub)
    return sub

def convert_subtitles(subtitles, output):
    subtitle_data = subtitles
    subtitle_data = subtitle_data.replace("\n","").replace("\r","")
    subtitle_data = BeautifulStoneSoup(subtitle_data)
    subtitle_array = []
    srt_output = ''

    print "CBS: --> Converting subtitles to SRT"
    #self.update_dialog('Converting Subtitles to SRT')
    lines = subtitle_data.findAll('p') #split the file into lines
    for line in lines:
        if line is not None:
            #print "LINE: " + str(line)
            #print "LINE BEGIN: " + str(line['begin'])

            sub=str(clean_subs(str(line)))
            try:
                newsub=sub
                sub = BeautifulStoneSoup(sub, convertEntities=BeautifulStoneSoup.ALL_ENTITIES)
            except:
                sub=newsub
            #print "CURRENT SUB: " + str(sub)
            begin_time = line['begin']
            end_time = line['end']
            start_split = begin_time.split(".")
            end_split = end_time.split(".")
            timestamp = "%s,%s" % (start_split[0], start_split[1])
            end_timestamp = "%s,%s" % (end_split[0], end_split[1])
            #print "TIMESTAMP " + str(timestamp) + " " + str(end_timestamp)

            temp_dict = {'start':timestamp, 'end':end_timestamp, 'text':sub}
            subtitle_array.append(temp_dict)

    for i, subtitle in enumerate(subtitle_array):
        line = str(i+1)+"\n"+str(subtitle['start'])+" --> "+str(subtitle['end'])+"\n"+str(subtitle['text'])+"\n\n"
        srt_output += line

    srt_file = open(os.path.join(common.pluginpath,'resources','cache',output+'.srt'), 'w')
    srt_file.write(srt_output)
    srt_file.close()
    print "CBS: --> Successfully converted subtitles to SRT"
    #self.update_dialog('Conversion Complete')
    return True

def playST(url = common.args.url):
    print "Entering playST function"

    if 'watch_episode' in url:
        pid=url.split('/')[-1]
        play(pid)
    else:
        data=common.getURL(url)
        url = re.compile("flowplayer\\('flow_player', '.*?', '(.*?)'\\)").findall(data)[0]
        item = xbmcgui.ListItem(path=url)
        xbmcplugin.setResolvedUrl(pluginhandle, True, item)

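# play() resolves a CBS pid to a playable stream: it requests the SMIL
# description from theplatform's link service, optionally converts the
# ClosedCaptionURL captions to SRT, then picks the highest <video> bitrate at
# or below the user's quality setting and builds an rtmp URL (base + playpath
# + swfurl) for XBMC to play.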
def play(url = common.args.url):
    print "DEBUG Entering play function"
    swfUrl = 'http://can.cbs.com/thunder/player/chrome/canplayer.swf'
    if 'http://' in url:
        data=common.getURL(url)
        try:
            pid = re.compile('video.settings.pid = "(.*?)";').findall(data)[0]
        except:
            pid = re.compile("video.settings.pid = '(.*?)';").findall(data)[0]
    else:
        pid = url
    # OLD URL
    #url = "http://release.theplatform.com/content.select?format=SMIL&Tracking=true&balance=true&MBR=true&pid=" + pid
    url = "http://link.theplatform.com/s/dJ5BDC/%s?format=SMIL&Tracking=true&mbr=true" % pid
    if (common.settings['enableproxy'] == 'true'):
        proxy = True
    else:
        proxy = False
    data=common.getURL(url,proxy=proxy)
    tree=BeautifulStoneSoup(data, convertEntities=BeautifulStoneSoup.XML_ENTITIES)

    if (common.settings['enablesubtitles'] == 'true'):
        closedcaption = tree.find('param',attrs={'name':'ClosedCaptionURL'})
        if (closedcaption is not None):
            xml_closedcaption = common.getURL(closedcaption['value'])
            convert_subtitles(xml_closedcaption,pid)

    rtmpbase = tree.find('meta')
    if rtmpbase:
        rtmpbase = rtmpbase['base']
        items=tree.find('switch').findAll('video')
        hbitrate = -1
        sbitrate = int(common.settings['quality']) * 1024
        for item in items:
            bitrate = int(item['system-bitrate'])
            if bitrate > hbitrate and bitrate <= sbitrate:
                hbitrate = bitrate
                playpath = item['src']
                if '.mp4' in playpath:
                    playpath = 'mp4:'+playpath
                else:
                    playpath = playpath.replace('.flv','')
                finalurl = rtmpbase+' playpath='+playpath + " swfurl=" + swfUrl + " swfvfy=true"
    item = xbmcgui.ListItem(path=finalurl)
    xbmcplugin.setResolvedUrl(pluginhandle, True, item)
    if (common.settings['enablesubtitles'] == 'true') and (closedcaption is not None):
        while not xbmc.Player().isPlaying():
            print 'CBS--> Not Playing'
            xbmc.sleep(100)

        subtitles = os.path.join(common.pluginpath,'resources','cache',pid+'.srt')
        print "CBS --> Setting subtitles"
        xbmc.Player().setSubtitles(subtitles)