#version 0.6
"""Imgur scraper: collects direct image links from subdomains, albums and galleries.

NOTE(review): this file was recovered from a single mangled source line and was
truncated inside parseGallery's inner loop; that loop is a best-effort
reconstruction (see the TODO there) based on the visible slice offsets.
"""
import json
import os
import re
import time

try:  # Python 3
    from urllib.request import urlopen
except ImportError:  # Python 2 fallback (original used urllib.urlopen)
    from urllib import urlopen


def _fetch(url):
    """Download *url* and return the body as text (decoded on Python 3)."""
    data = urlopen(url).read()
    if isinstance(data, bytes):
        data = data.decode("utf-8", "replace")
    return data


class imgur_downloader():

    def __init__(self):
        # Per-instance state. The original declared these as class attributes,
        # so the mutable ``images`` list leaked between instances.
        self.images = []
        self.subdomain = ''
        self.counter = False

    @staticmethod
    def _extract_cover_links(page):
        """Return album links ('http:...'/all') found on a subdomain page.

        Scans for every '"cover"' marker and grabs the following
        href="..." target, exactly as the original scraping loop did.
        """
        links = []
        pos = 0
        while True:
            pos = page.find('"cover"', pos)
            if pos == -1:
                break
            href_start = page.find('href=', pos) + 6   # skip past 'href="'
            href_end = page.find('">', pos + 9)
            links.append("http:" + page[href_start:href_end] + "/all")
            pos += 9
        return links

    def parseSubdomain(self, url):
        """Fetch a subdomain page and return the album links found on it."""
        return self._extract_cover_links(_fetch(url))

    @staticmethod
    def _extract_album_images(page):
        """Parse an album page; return [album title, image URL, ...].

        The embedded album JSON is decoded with json.loads. The original
        eval()'d the raw page slice -- a code-injection hole on remote
        data -- and pre-bound ``null = False`` just to make JSON ``null``
        evaluate; json.loads handles null/true/false natively.
        """
        title_start = page.find("data-title") + 12  # skip 'data-title="'
        album_name = page[title_start:page.find('"', title_start)]
        images = [album_name]
        # The album JSON blob follows the "images:" key inside ImgurAlbum.
        images_key = page.find("images:", page.find("ImgurAlbum"))
        start = page.find("{", images_key)
        raw = page[start:page.find("]}", start) + 2]
        for item in json.loads(raw)["items"]:
            images.append("http://i.imgur.com/" + item["hash"] + item["ext"])
        return images

    def parseAlbum(self, url):
        """Fetch an album page and return [album title, image URL, ...]."""
        album = self._extract_album_images(_fetch(url))
        print("parsing album: %s" % album[0])
        return album

    def parseGallery(self, url):
        """Fetch a gallery and return [gallery name, image URL, ...].

        NOTE(review): the recovered source was truncated inside the inner
        loop below; the <hash>/<ext> tag names are reconstructed from the
        visible slice offsets (+6 == len('<hash>'), +5 == len('<ext>')) --
        TODO confirm against a live gallery XML page.
        """
        gallery = _fetch(url)
        base = gallery.find("baseURL:")
        base_value = gallery[base + 8:gallery.find(",", base)]
        url = "http://www.imgur.com" + base_value.replace(' ', '').replace("'", '')
        gallery_name = base_value.replace(' ', '').replace('/', '').replace("'", '')
        print("parsing gallery: %s" % gallery_name)
        gallery_images = [gallery_name]
        maxpage = gallery.find("maxPage:")
        # int() instead of the original eval() on page-derived text.
        page_count = int(gallery[maxpage + 8:gallery.find(",", maxpage)].replace(' ', ''))
        for page_no in range(page_count):
            sep = "" if url.endswith("/") else "/"
            xml = _fetch(url + sep + "hot/page/" + str(page_no) + ".xml")
            print("Page %s" % page_no)
            last = 0
            while True:
                tag_pos = xml.find("<hash>", last)  # reconstructed tag name
                if tag_pos == -1:
                    break
                img_hash = xml[tag_pos + 6:xml.find("</hash>", tag_pos)]
                ext_pos = xml.find("<ext>", tag_pos)
                ext = xml[ext_pos + 5:xml.find("</ext>", ext_pos)]
                gallery_images.append("http://i.imgur.com/" + img_hash + ext)
                last = tag_pos + 6
        return gallery_images