Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #!/usr/bin/python
- import bs4, requests, urllib, glob, base64
- from random import randint
- from google import google
- import sys, re, os
- reload(sys)
- sys.setdefaultencoding("utf-8")
- import urllib
- import ftplib
- from ftplib import FTP
- import paramiko
- import hashlib
- import MySQLdb
- from pydrive.auth import GoogleAuth
- from pydrive.drive import GoogleDrive
- from datetime import datetime
- from bs4 import BeautifulSoup
- import urllib2, json, cookielib
def get_soup(url, header):
    """Fetch *url* with the given request headers and return a parsed BeautifulSoup tree."""
    request = urllib2.Request(url, headers=header)
    response = urllib2.urlopen(request)
    return BeautifulSoup(response, 'html.parser')
def get_images(query):
    """Search Google Images for *query* and save every result into ./img.

    Scrapes the image-search results page, extracts each original-image URL
    and file type from the ``div.rg_meta`` JSON blobs, and downloads each
    image into the ``img`` directory as ``ActiOn_<n>.<ext>``.
    NOTE(review): depends on Google's HTML layout, which changes frequently —
    the rg_meta scraping may silently yield zero results.
    """
    image_type = "ActiOn"
    query = '+'.join(query.split())
    url = "https://www.google.com/search?q=" + query + "&as_st=y&tbs=islt:qsvga,isz:ex,iszw:130,iszh:130&tbm=isch&source=lnt"
    print('Search URL is: ' + url)
    # add the directory for your image here
    DIR = "img"
    header = {'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
    soup = get_soup(url, header)
    ActualImages = []  # (link, type) pairs for the large original images
    for a in soup.find_all("div", {"class": "rg_meta"}):
        meta = json.loads(a.text)  # parse once instead of twice per entry
        ActualImages.append((meta["ou"], meta["ity"]))
    print("There are total " + str(len(ActualImages)) + " images")
    # BUG FIX: the existence check / mkdir pair was duplicated verbatim.
    if not os.path.exists(DIR):
        os.mkdir(DIR)
    for i, (img, Type) in enumerate(ActualImages):
        try:
            # BUG FIX: the original passed headers={'User-Agent': header}
            # where *header* is already a complete headers dict, so the
            # User-Agent actually sent was the repr of a dict.
            req = urllib2.Request(img, headers=header)
            raw_img = urllib2.urlopen(req).read()
            # next free sequence number among already-saved images
            # (renamed comprehension variable: it shadowed the loop's `i`)
            cntr = len([n for n in os.listdir(DIR) if image_type in n]) + 1
            print(cntr)
            # fall back to .jpg when the result carries no image type
            ext = Type if len(Type) != 0 else "jpg"
            f = open(os.path.join(DIR, image_type + "_" + str(cntr) + "." + ext), 'wb')
            f.write(raw_img)
            f.close()
        except Exception as e:
            print("could not load : " + img)
            print(e)
def get_pages(url):
    """Collect YouTube pagination links from one results page.

    Downloads *url*, keeps every anchor that carries both
    'data-visibility-tracking' and 'aria-label' attributes (YouTube's
    pagination buttons), and returns their hrefs as absolute URLs.
    NOTE(review): the loop stops at len(aa)-1, skipping the last anchor —
    presumably the "Next" button; confirm against a live results page.
    """
    text = requests.get(url).text
    soup = bs4.BeautifulSoup(text, 'html.parser')
    #aa = soup.find_all('a')
    aa = [d for d in soup.find_all('a') if d.has_attr('data-visibility-tracking') and d.has_attr('aria-label')]
    link_list = []
    for a in xrange(len(aa)-1):
        # get page no
        link_list.append('https://www.youtube.com' + aa[a]['href'])
    return link_list
def get_wo(jumper):
    """Build a de-duplicated list of YouTube search-result page URLs.

    Starts from a search for sys.argv[1] and follows pagination *jumper*
    times, each round expanding from the last page collected so far.
    Returns the unique page URLs; order is NOT preserved (set()).
    """
    searchTerm = sys.argv[1].replace(' ', '+')
    url = 'https://www.youtube.com/results?search_query='+searchTerm
    link_list = [url,] + get_pages(url)
    for i in xrange(jumper):
        # expand from the most recently discovered page
        url = link_list[-1]
        link_list = link_list + get_pages(url)
    #return link_list
    return list(set(link_list))
def get_video_url(page_url):
    """Return [absolute_url, title] pairs for every video on one YouTube results page."""
    html = requests.get(page_url).text
    parsed = bs4.BeautifulSoup(html, 'html.parser')
    # each result video lives inside a div carrying the yt-lockup-dismissable class
    lockups = [node for node in parsed.find_all('div')
               if node.has_attr('class') and 'yt-lockup-dismissable' in node['class']]
    video_urls = []
    for lockup in lockups:
        titled_links = [anchor for anchor in lockup.find_all('a') if anchor.has_attr('title')]
        for anchor in titled_links:
            video_urls.append(['https://www.youtube.com' + anchor['href'], anchor['title']])
    return video_urls
# return a download link to mp3 file
def ftp_ssh_mp3(mp3_file, ARTIST_SLUG):
    """Upload *mp3_file* to the media server and return its public URL.

    The remote name is md5(local file name) + '.mp3' stored under
    /var/www/html/mp3/<ARTIST_SLUG>/.  Existence of the directory and of
    the file is probed over plain HTTP; the directory is created over SSH
    and the file uploaded over FTP only when missing.
    SECURITY(review): host, username and password are hard-coded in source
    and the same credentials are reused for SSH and FTP.
    """
    host = '173.208.200.58'
    # strip spaces and stray carriage returns from the slug used in paths/URLs
    ARTIST_SLUG = ARTIST_SLUG.replace(' ','').replace(' ','').replace('\r','')
    # make an ssh connection
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    # SECURITY(review): AutoAddPolicy blindly trusts unknown host keys
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username='customer', password='anhyeuem12AB')
    #
    # remote file name = md5 of the *local file name* (not of its contents)
    m = hashlib.md5()
    m.update(mp3_file)
    old_mp3_file = mp3_file
    mp3_file = m.hexdigest() + '.mp3'
    # check if ex_dir existed, if not create it
    ex_dir = '/var/www/html/mp3/' + ARTIST_SLUG
    ex_dir = ex_dir.replace('\r', '')
    remote_mp3_file = ('').join([ex_dir,'/',mp3_file])
    print 'remote_mp3_file: ' + remote_mp3_file
    url = 'http://'+host+'/'+'mp3/'+ ARTIST_SLUG
    print 'Check response code of : ' + url
    # probe the directory over HTTP instead of listing over SSH/FTP
    ret = requests.get(url).status_code
    if ret != 200:
        print 'Directory not existed: ' + ex_dir
        command = 'mkdir "' + ex_dir + '"'
        print 'Executing commmand on Server: ' + command
        ssh.exec_command(command)
    else:
        print 'Directory existed'
    # check if mp3_file existed, if not upload it.
    url = 'http://' + host + '/mp3/' + ARTIST_SLUG + '/' + mp3_file
    print 'Check if mp3 file existed: ' + remote_mp3_file
    print 'Check response code of : ' + url
    ret = requests.head(url).status_code
    if ret != 200:
        print 'mp3 file not exsited yet'
        print 'FTP to server & uploading file ...'
        session = ftplib.FTP(host,'customer','anhyeuem12AB')
        session.cwd(ex_dir)
        file = open(old_mp3_file,'rb') # file to send
        session.storbinary('STOR ' + mp3_file, file) # send the file
        file.close() # close file and FTP
        session.quit()
        # make the uploaded file world-readable so the web server can serve it
        command = 'chmod +r "' + remote_mp3_file + '"'
        print 'Executing commmand on Server: ' + command
        ssh.exec_command(command)
    else:
        print 'mp3 file existed.'
    return 'http://173.208.200.58/mp3/' + ARTIST_SLUG + '/' + urllib.quote_plus(mp3_file)
def image_upload(media_file):
    """Upload an image to the WordPress media endpoint.

    Returns [media_id, media_link] parsed out of the JSON response.
    Exits the process for unsupported file extensions.
    NOTE(review): the response is parsed by naive string splitting rather
    than json.loads — it breaks if WordPress reorders the JSON keys.
    SECURITY(review): Basic-auth credentials are hard-coded in source.
    """
    print('Try uploading ' + media_file)
    # map extension -> Content-Type once instead of three duplicated dicts
    # (original sent 'image/jpg' for both jpg and jpeg; kept byte-identical)
    if media_file.endswith('.png'):
        content_type = "image/png"
    elif media_file.endswith('.jpg') or media_file.endswith('.jpeg'):
        content_type = "image/jpg"
    else:
        print('Not supported media type, exit: ' + media_file)
        sys.exit(1)
    headers = {
        'Authorization' : 'Basic ' + base64.b64encode( 'van7hu' + ':' + 'bongda.com.vn22AB' ),
        "Content-Disposition" : "attachment;filename=\"" + media_file + "\"",
        "Content-Type" : content_type
    }
    endpoint = 'http://apkapp.info/wp-json/wp/v2/media/'
    data = open(media_file, 'rb').read()
    res = requests.post(endpoint, data = data, headers = headers)
    # BUG FIX: the original split res.text on ' ' and immediately overwrote
    # the result with the split on ',' — the first split was dead code.
    res_lst = res.text.split(',')
    media_id = res_lst[0].replace('\r\n{"id":', '')
    media_link = res_lst[3].replace('"guid":{"rendered":"', '').replace('"', '').replace('\\', '')
    return [int(media_id.replace('{"id":','')), str(media_link)]
def get_meta_info():
    """Read meta_info.txt from the current directory.

    Returns its lines with newlines removed.  By convention line 0 is the
    artist name, line 1 the artist slug, and later lines are
    'thumb_url,media_id' pairs written by do_the_work().
    Raises an IOError/OSError when the file does not exist.
    """
    # idiomatic rewrite: strip newlines in a comprehension instead of
    # index-mutating the readlines() result with an xrange loop
    with open('meta_info.txt', 'r') as f:
        return [line.replace('\n', '') for line in f]
# as webm and mp3
def download_youtube_mp3(ytb_link):
    """Shell out to youtube-dl to extract the best-quality mp3 for *ytb_link*.

    SECURITY(review): ytb_link is interpolated into a shell command unquoted.
    """
    cmd = 'youtube-dl -x --audio-format mp3 --audio-quality 0 ' + ytb_link
    print('Executing command: ' + cmd)
    os.system(cmd)
def download_youtube_mp4(ytb_link):
    """Shell out to youtube-dl, recoding the download to mp4 (originals kept with -k).

    SECURITY(review): ytb_link is interpolated into a shell command unquoted.
    """
    cmd = 'youtube-dl --recode-video mp4 -k --postprocessor-args "-strict experimental" ' + ytb_link
    print('Executing command: ' + cmd)
    os.system(cmd)
def remove_downloaded_youtube():
    """Delete leftover downloaded media (webm/mp3/mp4/mkv) from the current directory."""
    cmd = 'rm -rf *.webm *.mp3 *.mp4 *.mkv'
    print('Executing command: ' + cmd)
    os.system(cmd)
def prepare_post_content_mp3(ARTIST_SLUG, ARTIST_NAME, SONG_NAME, THUMB_URL, mp3_url):
    """Render the WordPress post body for an mp3 post.

    Fills the HTML template's placeholders (ARTIST_SLUG, ARTIST_NAME,
    mp3_url, SONG_NAME, THUMB_URL) with the supplied values; the AD_*
    tokens are left in place for a later substitution pass.
    """
    template = """Artist: <font size='3'><a href='http://apkapp.info/?s=ARTIST_SLUG'>ARTIST_NAME</a></font>
<table border=0 id='pointer'>
<tr>
<td onclick='window.location.href="AD_URL1"' id='width20'>
<a href="AD_URL1"><img src="AD_IMG1" id="img_ad"><span id='ad_text_size'>AD_TITLE1</span></a>
</td>
<td onclick='window.location.href="AD_URL2"' id='width20'>
<a href="AD_URL2"><img src="AD_IMG2" id="img_ad"><span id='ad_text_size'>AD_TITLE2</span></a>
</td>
<td onclick='window.location.href="AD_URL3"' id='width20'>
<a href="AD_URL3"><img src="AD_IMG3" id="img_ad"><span id='ad_text_size'>AD_TITLE3</span></a>
</td>
<td onclick='window.location.href="AD_URL4"' id='width20'>
<a href="AD_URL4"><img src="AD_IMG4" id="img_ad"><span id='ad_text_size'>AD_TITLE4</span></a>
</td>
</tr>
</table>
<button class='xemsaox' onclick="location.href='mp3_url'">Get mp3</button>
[zoomsounds_player source='mp3_url' config='skinwavewithcomments' playerid='118800' waveformbg='https://lh3.googleusercontent.com/OCkCqtmYpqevOPlhNY4R8oy37CmypYXtsM6CdwstJp-2X8y4O_MdmnOOyTZ2dODVq7sfxLqoRG2H-fGJ8GAwYDp7jtiyyesUiMjIZA4czV7dDqnaw0qhpkRBpfSmqW_uOkQtGvhJUn9nYAK2MQwQ_PtCfl4uHgb1cae5n7qNC8DjRgVorBBr_gZVLg0IZFXbLW0UTp-8KsqrZSyGHAgxbh7Q40-CKFvBKxZ7KblCTfwsEun4LElkYFe5ZPZOsn1EBrxsbXrSyAZVmm0VX7UXRnEQR-5YTIzZ6ttugwYonTFNwmiGxOCsg5RyYpwTNWMLE1v2fBUsBgSStiLrnwQqrK4VAfV-irLXdfXsy6ZG174u0uPdjGJq3qw3PcJUHatmxZDC5PbSrxTHR-K6OqTOV7bM641t40ZVNZfZmjOTzzL-eDWkKCUu5q5VBm254sJ4FK63bP5QbxOQem6nPadxEayRSKfyF4z4HUnoqsR1giPk8eWI63LcgGOZeSWGVw0T27N_Ugwz37Twr5Ilyk7q66elCiyOxK7IUuiur6-QYi0=w1170-h140-no' waveformprog='https://lh3.googleusercontent.com/3ZCeepH9HAhs1ojwrMVKRW4poGaqPSbeczAAs8XjBl8E4zh0vSzXY4ou7KtRXUoMDff70qz8vEa5YLwq_4kp4ufRHcTK8_7lbs5Ux4jTETAkhluI75nUweiBYztNkwtxRggzTLnu2kdyVn3lubZGDbe4-pxyvBtz2tWauKs9fw7wiMCcrkFz5BFi_X1q7ViGA205qTfuTLjltWzom09Xm8vgt5EsTHyInFoMAeSobImMrG5j67VTgrX_9vYDNu3RE_TbISRY9c7wdEXOplQZXJDHH3c86rdVaoclhGAbli3mHJ92iZmGrZM1JH0glyj-ymSSq8RU1Tw2Slb1QFYEwzJpr_wOR9BqqccLAf-yLawNG5TqTQLhrYekNfPaWEtUrcYvHMDeg2R_x7zZg0Q_FI4qvUjBrTu8ClZIf_fml4mer7KEl3uhNEDNr7pe9suucRGO_f_whT8bqjFsRCvh9obFhvj0Suvc-SNFTeLavV6EwIqFVYdHCwyedHxdmOGTsruvXw3CRqon0UFb2jqR2GO6ZUSQ9k9emXdGCZAVzqY=w1170-h140-no' thumb='THUMB_URL' autoplay='on' cue='on' enable_likes='on' enable_views='on' songname="SONG_NAME" artistname="ARTIST_NAME"]"""
    # apply the substitutions in the original (significant) order
    substitutions = [
        ('ARTIST_SLUG', ARTIST_SLUG),
        ('ARTIST_NAME', ARTIST_NAME),
        ('mp3_url', mp3_url),
        ('SONG_NAME', SONG_NAME),
        ('THUMB_URL', THUMB_URL),
    ]
    for placeholder, value in substitutions:
        template = template.replace(placeholder, value)
    return template
def prepare_post_content_mp4(ARTIST_SLUG, ARTIST_NAME, SONG_NAME, THUMB_URL, GOOGLE_DRIVE_LINK_ID, LINK_DESKTOP, LINK_MOBILE):
    """Render the WordPress post body for an mp4 (video) post.

    Fills the HTML template's placeholders in the original order; SONG_NAME
    and THUMB_URL substitutions are kept even though the current template
    contains no such tokens (no-ops, preserved for compatibility).  The
    final pass rewrites any 'azlyrics.com' occurrence to 'apkapp.info'.
    """
    template = """Artist: <font size='3'><a href='http://apkapp.info/?s=ARTIST_SLUG'>ARTIST_NAME</font></a>
<table border=0 id='pointer'>
<tr>
<td onclick='window.location.href="AD_URL1"' id='width20'>
<a href="AD_URL1"><img src="AD_IMG1" id="img_ad"><span id='ad_text_size'>AD_TITLE1</span></a>
</td>
<td onclick='window.location.href="AD_URL2"' id='width20'>
<a href="AD_URL2"><img src="AD_IMG2" id="img_ad"><span id='ad_text_size'>AD_TITLE2</span></a>
</td>
<td onclick='window.location.href="AD_URL3"' id='width20'>
<a href="AD_URL3"><img src="AD_IMG3" id="img_ad"><span id='ad_text_size'>AD_TITLE3</span></a>
</td>
<td onclick='window.location.href="AD_URL4"' id='width20'>
<a href="AD_URL4"><img src="AD_IMG4" id="img_ad"><span id='ad_text_size'>AD_TITLE4</span></a>
</td>
</tr>
</table>
<button class='xemsaox' onclick="location.href='GOOGLE_DRIVE_LINK_ID'">Get mp4</button>
<div id='no-mobile'>[fvplayer autoplay="true" src="LINK_DESKTOP" controlbar="yes" ad='<div class="xss"><a href="AD_URL"><img src="AD_IMG" id="feature_img_ad"><br/><span id="ad_text_size">AD_TITLE</span></a></div>' width='100%']</div>
<div id='no-desktop'>
[videojs_video url="LINK_MOBILE"]
</div>
"""
    # apply the substitutions in the original (significant) order
    substitutions = [
        ('ARTIST_SLUG', ARTIST_SLUG),
        ('ARTIST_NAME', ARTIST_NAME),
        ('GOOGLE_DRIVE_LINK_ID', GOOGLE_DRIVE_LINK_ID),
        ('SONG_NAME', SONG_NAME),
        ('THUMB_URL', THUMB_URL),
        ('LINK_DESKTOP', LINK_DESKTOP),
        ('LINK_MOBILE', LINK_MOBILE),
        ('azlyrics.com', 'apkapp.info'),
    ]
    for placeholder, value in substitutions:
        template = template.replace(placeholder, value)
    return template
def prepare_post_excerpt(ARTIST_SLUG, ARTIST_NAME, mp3_url):
    """Render the short post excerpt: artist search link plus a Get-mp3 button."""
    excerpt = ("<a href='http://apkapp.info/?s=ARTIST_SLUG'>ARTIST_NAME</a><br/>\n"
               "<button class='xemsaox' onclick=\"location.href='mp3_url'\">Get mp3</button>")
    for token, value in (('ARTIST_SLUG', ARTIST_SLUG), ('ARTIST_NAME', ARTIST_NAME), ('mp3_url', mp3_url)):
        excerpt = excerpt.replace(token, value)
    return excerpt
def prepare_post_excerpt_mp4(ARTIST_SLUG, ARTIST_NAME, mp3_url):
    """Render the short post excerpt for a video post: artist link plus a Get-mp4 button.

    The parameter is still called mp3_url for interface compatibility; callers
    pass the mp4 link token here.
    """
    excerpt = ("<a href='http://apkapp.info/?s=ARTIST_SLUG'>ARTIST_NAME</a><br/>\n"
               "<button class='xemsaox' onclick=\"location.href='mp3_url'\">Get mp4</button>")
    for token, value in (('ARTIST_SLUG', ARTIST_SLUG), ('ARTIST_NAME', ARTIST_NAME), ('mp3_url', mp3_url)):
        excerpt = excerpt.replace(token, value)
    return excerpt
# this function used inside function do_the_work(drive)
def gd_check_exist(drive, file_name):
    """Return True when a non-trashed file named *file_name* exists in the Drive root."""
    print('Checking existence for file: ' + file_name)
    # list every non-trashed file directly under the root folder
    entries = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
    return any(entry['title'] == file_name for entry in entries)
def gd_get_dlink(drive, file_name):
    """Return the 'alternateLink' of *file_name* in the Drive root, or False when absent."""
    print('Checking existence for file: ' + file_name)
    # list every non-trashed file directly under the root folder
    entries = drive.ListFile({'q': "'root' in parents and trashed=false"}).GetList()
    return next((entry['alternateLink'] for entry in entries if entry['title'] == file_name), False)
def upload_gd(media_file, drive):
    """Upload *media_file* to the Google Drive root, make it link-shareable,
    and return its shareable 'alternateLink'.
    """
    print 'Try uploading ' + media_file
    xfile = drive.CreateFile()
    xfile.SetContentFile(media_file)
    xfile.Upload()
    print('Created file %s with mimeType %s' % (xfile['title'], xfile['mimeType']))
    # anyone with the link may read; the file stays undiscoverable by search
    permission = xfile.InsertPermission({
        'type': 'anyone',
        'value': 'anyone',
        'withLink': True, # do not allow discovery on Internet
        'role': 'reader'})
    print 'Sharable link (to view) is: ' + xfile['alternateLink']
    return xfile['alternateLink']
def insert_dlink(dlink):
    """Insert *dlink* into the link table and return the newest link id.

    NOTE(review): returns the globally-latest id (ORDER BY id DESC LIMIT 1),
    which equals the inserted row's id only when there is no concurrent
    writer — consider cursor.lastrowid instead.
    SECURITY(review): DB host and credentials are hard-coded in source.
    """
    print('Connect to database')
    # Open database connection
    db = MySQLdb.connect("103.3.60.141","root","anhyeuem12AB","links" )
    # prepare a cursor object using cursor() method
    cursor = db.cursor()
    print('Executing SQL insert for dlink: ' + dlink)
    try:
        # BUG FIX: the original interpolated dlink straight into the SQL
        # string (SQL injection; also breaks on links containing a quote).
        # Use a parameterized query instead.
        cursor.execute("insert into link(dlink) values(%s)", (dlink,))
        db.commit()
    except Exception:
        db.rollback()
    # fetch the most recently assigned id
    cursor.execute("SELECT id FROM link ORDER BY id DESC LIMIT 1")
    # Fetch a single row using fetchone() method.
    data = cursor.fetchone()
    # disconnect from server
    db.close()
    return int(data[0])
def getid_from_dlink(dlink):
    """Look up the id of *dlink* in the link table.

    Returns the matching row's id; falls back to the latest id in the table
    when no row matches (the original code's unconditional behavior).
    SECURITY(review): DB host and credentials are hard-coded in source.
    """
    print('Connect to database')
    # Open database connection
    db = MySQLdb.connect("103.3.60.141","root","anhyeuem12AB","links" )
    # prepare a cursor object using cursor() method
    cursor = db.cursor()
    print('Executing SQL lookup for dlink: ' + dlink)
    # Parameterized query: BUG FIX for SQL injection / quote breakage.
    cursor.execute("select id from link where dlink = %s", (dlink,))
    # BUG FIX: the original executed this lookup, discarded its result, then
    # returned the globally-latest id — wrong whenever *dlink* is not the
    # most recent insert.  Use the lookup's own result.
    data = cursor.fetchone()
    if data is None:
        # preserve the old latest-id behavior when nothing matches
        cursor.execute("SELECT id FROM link ORDER BY id DESC LIMIT 1")
        data = cursor.fetchone()
    # disconnect from server
    db.close()
    return int(data[0])
def post_to_server(title, content, excerpt, feature_img_id, categories):
    """Publish a post via the WordPress REST API and return the HTTP status code.

    *categories* is passed straight through (an int or list of category ids);
    the tag list is fixed to [198].
    SECURITY(review): Basic-auth credentials are hard-coded in source.
    """
    headers = {
        "Authorization" : "Basic " + base64.b64encode( "van7hu" + ":" + "bongda.com.vn22AB" ),
    }
    endpoint = 'http://apkapp.info/wp-json/wp/v2/posts'
    tags = [198,]
    data = {
        'title' : title,
        'status': 'publish', # publish
        'content': content,
        'excerpt': excerpt,
        'featured_media': feature_img_id,
        'categories': categories,
        'tags': tags,
    }
    res = requests.post(endpoint, data = data, headers = headers)
    # BUG FIX: the original split and filter()'ed res.text into res_content
    # and never used it — dead code removed.
    return res.status_code
def search(term):
    """Google-search *term* restricted to azlyrics.com.

    Returns the first result's .link when there is at least one result;
    otherwise falls through and implicitly returns None.
    NOTE(review): the trailing comment claims "return 0", but no 0 is ever
    returned — callers detect the no-lyrics case by `result + 1` succeeding,
    which never happens for None/str; confirm the intended contract.
    """
    term = term + " site:http://www.azlyrics.com/"
    print 'Search google for: ' + term
    search_results = google.search(term, 1)
    if len(search_results) !=0:
        return search_results[0].link
    # no lyrics, return 0 (not list)
def get_lyrics(link):
    """Scrape the lyric text from an azlyrics.com song page.

    Tokenizes the page on spaces, finds the token containing 'ringtone'
    (a marker that precedes the lyric block on azlyrics pages), then
    returns everything between the first and second '</div>' after it.
    NOTE(review): extremely layout-dependent — breaks whenever azlyrics
    changes its markup.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
    r = requests.get(link, headers=headers)
    data = r.text
    data = data.split(" ")
    idx_start = 0
    idx_stop = 0
    tmp = 0
    # locate the 'ringtone' marker preceding the lyrics container
    for tmp in xrange(len(data)):
        if 'ringtone' in data[tmp]:
            idx_start = tmp
            break
    adx = 0
    # first '</div>' after the marker closes the preceding block (lyrics
    # start right after it); the second '</div>' ends the lyrics themselves
    for tmp in xrange(idx_start, len(data)):
        if '</div>' in data[tmp]:
            if adx == 0:
                idx_start = tmp+1
                adx = 1
            else:
                idx_stop = tmp
                break
    return ' '.join(data[idx_start: idx_stop])
def list_all_files(xdir):
    """Return the names of all '*.*' entries directly inside *xdir*.

    Returns [] when *xdir* is not a directory.  The '*.*' pattern skips
    extensionless entries, matching the original behavior.
    """
    if not os.path.isdir(xdir):
        return []
    # BUG FIX: the original os.chdir()'d into xdir and back, mutating
    # process-global state and leaving the cwd changed if glob raised.
    # Globbing a joined path needs no chdir at all.
    return [os.path.basename(p) for p in glob.glob(os.path.join(xdir, "*.*"))]
def write_info(singer, singer_slug):
    """Write the singer name and slug as the first two lines of meta_info.txt.

    When the file already exists, interactively asks (Y/N) before
    overwriting; 'n'/'N' aborts, any other answer re-prompts.
    """
    if os.path.isfile('meta_info.txt'):
        while True:
            answer = raw_input('meta_info.txt existed, do you want to continue? (Y/N): ')
            if answer == 'y' or answer == 'Y':
                break
            elif answer == 'n' or answer == 'N':
                return
            else:
                continue
    with open('meta_info.txt', 'w') as f:
        f.write(singer + "\n")
        f.write(singer_slug + "\n")
def check_client_secret():
    """Ensure the PyDrive OAuth client secret exists in the cwd,
    copying it from the fixed backup location when it is missing.
    """
    print('Checking if client secret existed')
    if not os.path.isfile('client_secrets.json'):
        command = 'cp /home/van7hu/zzz/abc/tmp/client_secrets.json client_secrets.json'
        print('Executing command: ' + command)
        os.system(command)
    else:
        print('File existed')
def do_the_work():
    """Main driver: scrape YouTube for sys.argv[1], then for every video
    found, download+post the mp3 (via the FTP media server) and the mp4
    (via Google Drive) as WordPress posts.

    argv: 1 = search term, 2 = pagination depth, 3 = working directory,
          4 = singer name (optional), 5 = singer slug (optional).
    NOTE(review): indentation of the original was lost in the paste; the
    nesting below is a conservative reconstruction — confirm against the
    pre-paste source where flagged.
    """
    searchTerm = sys.argv[1]  # NOTE(review): unused; get_wo() re-reads sys.argv[1]
    jumper = sys.argv[2]      # NOTE(review): unused; re-read as int below
    directory = sys.argv[3]
    if len(sys.argv) >= 5:
        singer = sys.argv[4]
    cwd = os.getcwd()
    # all subsequent relative paths live inside *directory*
    if not os.path.isdir(directory):
        os.mkdir(directory)
    os.chdir(directory)
    if len(sys.argv) >= 6:
        singer_slug = sys.argv[5]
        write_info(singer, singer_slug)
    check_client_secret()
    # init gd for google drive upload
    gauth = GoogleAuth()
    gauth.LoadCredentialsFile("mycreds.txt")
    if gauth.credentials is None:
        # Authenticate if they're not there
        gauth.LocalWebserverAuth()
    elif gauth.access_token_expired:
        # Refresh them if expired
        gauth.Refresh()
    else:
        # Initialize the saved creds
        gauth.Authorize()
    drive = GoogleDrive(gauth)
    # some auxilary steps: ensure thumbnail images exist locally
    if len(list_all_files('img/')) < 7:
        print 'You have less than image, download 100 images.'
        get_images(singer)
    info = get_meta_info()
    for xz in xrange(len(info)):
        info[xz] = info[xz].replace('\n', '')
    # fewer than 10 meta lines => thumbnails not yet uploaded to WordPress;
    # upload them and append 'url,id' lines to meta_info.txt
    if len(info) < 10:
        with open('meta_info.txt', 'w') as f:
            print 'Uploading image, and write meta_info'
            for inf in info:
                f.write(inf + '\n')
            for img in list_all_files('img/'):
                if img.endswith('jpg') or img.endswith('jpeg') or img.endswith('png'):
                    [img_id, img_url] = image_upload('img/'+img)
                    f.write(img_url+','+str(img_id)+'\n')
                else:
                    pass
    # reload the information
    info = get_meta_info()
    ARTIST_NAME = info[0]
    ARTIST_SLUG = info[1]
    # END auxilary steps
    page_urls = get_wo(int(sys.argv[2]))
    video_urls = []
    for page in page_urls:
        video_urls = video_urls + get_video_url(page)
    print 'Total: ' + str(len(video_urls)) + ' files to get'
    raw_input('Press any key to continue or Ctrl-C to cancel.')
    counter = 0  # NOTE(review): never incremented or read
    for v in xrange(len(video_urls)):
        try:
            v_url = video_urls[v]  # [url, title]
            print '---------------'
            print 'Begin iteration ' + str(v+1) + '/' + str(len(video_urls))
            print 'Current time: ' + str(datetime.now())
            print 'Checking the valid of url: ' + v_url[0]
            if 'https://www.youtube.com/watch?v=' not in v_url[0]:
                print 'The link is invalid, continue to next iteration'
                continue
            print 'The link is valid, continue iteration'
            remove_downloaded_youtube()
            SONG_NAME = v_url[1]
            # pick a random thumbnail line (lines 2+ are 'url,id' pairs)
            # NOTE(review): randint(2, len(info)) is inclusive of len(info),
            # hence the re-roll loop; randint(2, len(info)-1) would avoid it
            idx = randint(2,len(info))
            while idx >= len(info):
                print 'idx is rather too big. len(info): ' + str(len(info)) + ', idx: ' + str(idx)
                idx = randint(2,len(info))
            tmp = info[idx].split(',')
            try:
                THUMB_URL, FEATURE_IMG_ID = tmp[0], int(tmp[1])
            except:
                print 'Error setting THUMB_URL & FEATURE_IMG_ID'
                print tmp[0]
                print tmp[1]
                sys.exit(0)
            lyrics = ''
            lyrics_link = search(SONG_NAME)
            # check if is integer: an int result means "no lyrics"; anything
            # else (string/None) raises on +1 and we attempt to scrape
            try:
                lyrics_link = lyrics_link + 1
                print 'Song has no lyrics'
                lyrics = ''
            except:
                print 'Song my have Lyrics, link is: ' + str(lyrics_link)
                try:
                    lyrics = '<br/><strong>' + SONG_NAME + '</strong><br/>' + get_lyrics(lyrics_link)
                except:
                    lyrics = ''
                    pass
            print 'Downloading url: ' + v_url[0]
            # working with mp3
            download_youtube_mp3(v_url[0])
            flist = []
            for f in glob.glob("*.mp3"):
                flist.append(f)
            print 'Uploading mp3 file: "' + flist[0] + '"'
            mp3_url = ftp_ssh_mp3(flist[0], ARTIST_SLUG)
            print 'mp3_url: '+mp3_url
            excerpt = prepare_post_excerpt(ARTIST_SLUG, ARTIST_NAME, mp3_url)
            content = prepare_post_content_mp3(ARTIST_SLUG, ARTIST_NAME, SONG_NAME, THUMB_URL, mp3_url) + lyrics
            content = content.replace('azlyrics.com', 'apkapp.info')
            content = content.replace('Usage of', '#Usage of')
            print 'Making post for MP3 file'
            status = post_to_server(SONG_NAME, content, excerpt, FEATURE_IMG_ID, 194) # 194 is music category
            print 'Post_status: ' + str(status)
            # working with mp4: the Drive file name is md5(video URL) + '.mp4'
            m = hashlib.md5()
            m.update(v_url[0])
            mp4_file = m.hexdigest() + '.mp4'
            # check if mp4 file existed already.
            if gd_check_exist(drive, mp4_file) == False:
                print 'Mp4 file not existed yet, download it.'
                download_youtube_mp4(v_url[0])
                print 'Get feature image for MP4 file'
                idx = randint(2,len(info))
                while idx >= len(info):
                    print 'idx is rather too big. len(info): ' + str(len(info)) + ', idx: ' + str(idx)
                    idx = randint(2,len(info))
                tmp = info[idx].split(',')
                try:
                    THUMB_URL, FEATURE_IMG_ID = tmp[0], int(tmp[1])
                except:
                    print 'Error setting THUMB_URL & FEATURE_IMG_ID'
                    print tmp[0]
                    print tmp[1]
                    sys.exit(0)
                flist = []
                for f in glob.glob('*.mp4'):
                    flist.append(f)
                old_mp4_file = flist[0]
                # rename the download to its md5-based name before uploading
                command = 'cp "' + old_mp4_file + '" ' + mp4_file
                print 'Executing command: ' + command
                os.system(command)
                mp4_url = upload_gd(mp4_file, drive)
                print 'Insert shared link into link table'
                mp4_database_id = insert_dlink(mp4_url)
                # LINK_ID:A<id>A tokens are resolved into real links by the site
                LINK_ID = "LINK_ID:A" + str(mp4_database_id) + "A"
                LINK_DESKTOP = "LINK_ID:AA" + str(mp4_database_id) + "A"
                LINK_MOBILE = "LINK_ID:AAA" + str(mp4_database_id) + "A"
                content = prepare_post_content_mp4(ARTIST_SLUG, ARTIST_NAME, SONG_NAME, THUMB_URL, LINK_ID, LINK_DESKTOP, LINK_MOBILE) + lyrics
                content = content.replace('azlyrics.com', 'apkapp.info')
                content = content.replace('Usage of', '#Usage of')
                excerpt = prepare_post_excerpt_mp4(ARTIST_SLUG, ARTIST_NAME, LINK_ID)
                print 'Making post for MP4 file'
                status = post_to_server(SONG_NAME, content, excerpt, FEATURE_IMG_ID, 218) # musics-videoclip
                print 'Post_status: ' + str(status)
            else:
                print 'Mp4 file existed, do not download the file, go to next round.'
                continue
                # NOTE(review): everything below is unreachable because of
                # the continue above; it looks like an alternate "repost an
                # already-uploaded mp4" path — confirm intended placement
                # (the original paste's indentation was ambiguous here)
                print 'Get feature image for MP4 file'
                idx = randint(2,len(info))
                while idx >= len(info):
                    print 'idx is rather too big. len(info): ' + str(len(info)) + ', idx: ' + str(idx)
                    idx = randint(2,len(info))
                tmp = info[idx].split(',')
                try:
                    THUMB_URL, FEATURE_IMG_ID = tmp[0], int(tmp[1])
                except:
                    print 'Error setting THUMB_URL & FEATURE_IMG_ID'
                    print tmp[0]
                    print tmp[1]
                    sys.exit(0)
                # TODO: Get mp4 link id
                print 'Get mp4 url'
                mp4_url = gd_get_dlink(drive, mp4_file)
                if mp4_url == False:
                    print 'Could not get mp4_url, there is error'
                    continue
                print 'Mp4 url: ' + mp4_url
                print 'Get mp4 ID from database'
                mp4_database_id = getid_from_dlink(mp4_url)
                print 'mp4_id: ' + str(mp4_database_id)
                LINK_ID = "LINK_ID:A" + str(mp4_database_id) + "A"
                LINK_DESKTOP = "LINK_ID:AA" + str(mp4_database_id) + "A"
                LINK_MOBILE = "LINK_ID:AAA" + str(mp4_database_id) + "A"
                content = prepare_post_content_mp4(ARTIST_SLUG, ARTIST_NAME, SONG_NAME, THUMB_URL, LINK_ID, LINK_DESKTOP, LINK_MOBILE) + lyrics
                content = content.replace('azlyrics.com', 'apkapp.info')
                content = content.replace('Usage of', '#Usage of')
                excerpt = prepare_post_excerpt_mp4(ARTIST_SLUG, ARTIST_NAME, LINK_ID)
                print 'Making post for MP4 file'
                status = post_to_server(SONG_NAME, content, excerpt, FEATURE_IMG_ID, 218) # musics-videoclip
                print 'Post_status: ' + str(status)
            remove_downloaded_youtube()
        except Exception as e:
            # best-effort batch: log the failure and move to the next video
            print 'Iteration error'
            print 'Error: ' + str(e)
            continue
    os.chdir(cwd)
do_the_work()
Add Comment
Please, Sign In to add comment