## an example of how to scrape multiple pages with selenium via js hyperlinks (without simple-url hrefs) - see the rough sketch just below
## scroll to bottom for some example usages
## to view some sample outputs, go to https://bit.ly/proniSc_gdf
#### [list of inputs and more in "proniScraper_logs.json" -> converted to "_proniScraper_logs.csv"]
#### ABOUT OUTPUT/S: The "Image Viewer" sheet is added later - it's not automatically generated (next step?) ####
## [an alternate to https://stackoverflow.com/questions/74232047 ]
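## [a rough sketch of the core pattern only, for orientation - placeholder search term, no waits or error handling;
##  proni_scraper() below is the actual implementation and uses the same element ids]
'''
driver = webdriver.Chrome()
driver.get('https://apps.proni.gov.uk/Val12B/Search.aspx')
driver.find_element(By.ID, 'txtSearch').send_keys('some search term')
driver.find_element(By.ID, 'btnSearch').click()  # results load via a js postback, not a new url
while True:
    pgSoup = BeautifulSoup(driver.page_source, 'html.parser')  # scrape rows from the current page
    nextPage = driver.find_elements(By.ID, 'NextBtn')  # "next" is a js-backed button, not an <a href>
    if not nextPage:
        break
    nextPage[0].click()
driver.quit()
# '''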
import os
import sys
import copy
import json
import pandas
import urllib.parse
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
def getErrorMssage(e, excInf, e2Pref=''):
    errMsg = f'{type(e)} on UNKNOWN_LINE - Message: "{str(e)}"'
    try:
        et, em, tb = excInf
        errMsg = f'{et} on line {tb.tb_lineno} - Message: "{em}"'
    except Exception as e2:
        print(f'\n[{e2Pref}]failed to get errorline -', str(e2))
    return errMsg
def getObjAttrs(inpObj, isv=False):
    aDict = {}
    for d in dir(inpObj):
        try:
            a = getattr(inpObj, d, None)
            if d[0] == '_' or callable(a):
                if isv:
                    print(f'--> [not added] {d} {type(a)} {a}')
                continue
            if type(a) not in [str, dict] and hasattr(a, '__iter__'):
                aDict[d] = [i for i in a]
            else:
                aDict[d] = a
            if isv:
                print(f'--> [added] {d} {type(a)} {a}')
        except Exception as e:
            if isv:
                print(f'!--> {d} {e}')
            aDict[d] = getErrorMssage(e, tuple(sys.exc_info()))
    if isv:
        print(f'\n{aDict}')
    return aDict
def prepForJson(inpObj, foldThresh=50, omitIter=[]):
    if type(inpObj) in [bool, int, float]:
        return inpObj
    if type(inpObj) == dict:
        return {k: (
            f'omitted [{type(v)} with {len(v)} items]'
            if k in omitIter and type(v) in [list, tuple, set]
            else prepForJson(v, foldThresh, omitIter)
        ) for k, v in inpObj.items()}
    if type(inpObj) in [list, tuple, set]:
        return [prepForJson(x, foldThresh, omitIter) for x in inpObj]
    ioStr = str(inpObj)
    return ioStr if len(ioStr) < foldThresh else [ioStr]
def get_opFilename(nameRoot='proni_scraped', folderPath='', ext='.xlsx'):  # fnr, fp, ext
    if type(folderPath) != str or not folderPath.strip():
        folderPath = None
    prevFileNums = [int(fnc) for fnc in [
        f.replace(nameRoot+'_', '', 1)[:-1*len(ext)]
        for f in os.listdir(folderPath)
        if f.startswith(nameRoot+'_') and f.endswith(ext)
    ] if fnc.isdigit()]
    fileNum = (max(prevFileNums)+1) if prevFileNums else 0
    folderPath = '' if folderPath is None else folderPath.strip()
    opfilename = os.path.join(folderPath, f'{nameRoot}_{fileNum}{ext}')
    print(f'saving to {opfilename}')
    return opfilename
def downloadRefImgs(valInp, folderp=''):
    startTime = datetime.now()

    def checkIfImgKey(k):
        specialImgs = ['cover_image', 'index_image', 'last_image']
        if type(k) == str:
            if k in specialImgs:
                return True
            if k.startswith('image_'):
                return k.replace('image_', '', 1).isdigit()
        return False
    dlg = [{'[rel]imagePath': 'InputFile:'},
           {'[rel]imagePath': 'Total Time (seconds)'},
           {'[rel]imagePath': 'success'}]  # [extra info in top 3 rows]
    if type(valInp) == str:
        valImgs = pandas.read_excel(
            valInp, sheet_name='Reference Images'
        ).to_dict('records')
    elif type(valInp) == list:
        valImgs = valInp[:]
        dlg[0]['status'] = '[direct input]'
    else:
        valImgs = []
    valImgs = [
        vi for vi in valImgs if type(vi) == dict and
        'ProniReference' in vi and type(vi['ProniReference']) == str
        and 'cover_image' in vi and 'total_images' in vi
    ]
    vln = len(valImgs)
    dlg[0]['imageLink'] = f'with {vln} references'
    dlSuccess = dlFails = dlUnkn = 0
    for i, vi in enumerate(valImgs):
        valRef = vi['ProniReference'].replace('/', '', 2).replace('/', '-')
        ict = len([c for c in vi.keys() if checkIfImgKey(c)])
        aboutVal = f'{ict} images for {valRef} [{i+1} of {vln}]{" "*10}'
        if ict > 0:
            if not os.path.isdir(os.path.join(folderp, valRef)):
                os.mkdir(os.path.join(folderp, valRef))
        for col, v in vi.items():
            if not (checkIfImgKey(col) and type(v) == str):
                continue
            print('', end=f'\rDownloading {col} of {aboutVal}')
            allowedExts = ['jpg', 'jpeg', 'png', 'svg']
            ifne = v.split('.')[-1]
            if not (v.startswith('http') and ifne in allowedExts):
                continue
            img_fn = os.path.join(folderp, valRef)
            img_fn = os.path.join(img_fn, f'{valRef}_{col}.{ifne}')
            for_dlg = {'[rel]imagePath': img_fn,
                       'status': '?unsaved', 'imageLink': v}
            try:
                with open(img_fn, "wb") as f:
                    f.write(requests.get(v).content)
                if os.path.isfile(img_fn):
                    dlSuccess += 1
                    for_dlg['status'] = 'saved'
                else:
                    dlUnkn += 1
            except Exception as err:
                erMsg = getErrorMssage(err, tuple(sys.exc_info()))
                for_dlg['status'] = erMsg
                print('', end=erMsg)
                dlFails += 1
            dlg.append(for_dlg)
    print('')
    totalTime = (datetime.now() - startTime).total_seconds()
    dlg[1]['status'] = totalTime
    dlg[2]['[rel]imagePath'] = f'{dlSuccess} successful'
    dlg[2]['status'] = f'{dlFails} known errors'
    dlg[2]['imageLink'] = f'{dlUnkn} unknown errors'
    dlgfn = get_opFilename('proni_imgdl_logs', folderp, '.csv')
    try:
        pandas.DataFrame(dlg).to_csv(dlgfn, index=False)
        return dlgfn, totalTime
    except Exception as err:
        emsg = getErrorMssage(err, tuple(sys.exc_info()))
        print(f'[failed to save imageDownload logs] {emsg}')
        return emsg, totalTime
def proni_scraper(searchFor, max_pages=10, max_images=5, conf=None, sInd=None):
    # a fresh dict per call - a shared mutable default would leak retry state between calls
    conf = {} if conf is None else conf
    for_sl, pageTimes = {
        '[inp] searchFor': searchFor, '[inp] max_pages': max_pages,
        '[inp] max_images': max_images, '[inp] conf': conf
    }, []
    # rootUrl = 'https://apps.proni.gov.uk'
    searchUrl = 'https://apps.proni.gov.uk/Val12B/Search.aspx'
    errorMsg = 'No Errors'
    total_refImages = 'UNKNOWN'
    total_scrapedImages = 'UNKNOWN'
    total_res = 'UNKNOWN'
    # initialised up-front so the summary below still works if the scrape errors out
    imgdl_logpath = imgdl_time = 'N/A [No images downloaded]'
    startTime = conf['startTime'] if 'startTime' in conf else 0
    if not isinstance(startTime, datetime):
        startTime = datetime.now()
    for_sl['startTime'] = startTime.isoformat()
    logSearch = conf['logSearch'] if 'logSearch' in conf else 'proniScraper_logs.json'
    if type(logSearch) == str and logSearch.endswith('.json'):
        try:
            searchLog = json.load(open(logSearch, 'r'))
            searchLog = searchLog if type(
                searchLog) == list else [copy.deepcopy(searchLog)]
            print(f'{len(searchLog)} items in searchLog')
        except Exception as err:
            slEm = getErrorMssage(err, tuple(sys.exc_info()))
            print('Could not retrieve searchLog', slEm)
            searchLog = []
    else:
        print('scrape will not be logged')
        logSearch = False
    errorLog = conf['errorLog'] if 'errorLog' in conf else []
    tryCt = len(errorLog) + 1
    remRetries = conf['remRetries'] if 'remRetries' in conf else 5
    remRetries = remRetries if type(
        remRetries) == int and remRetries > 0 else 5
    fnr = conf['fnr'] if 'fnr' in conf else 'proni_scraped'
    fp = conf['fp'] if 'fp' in conf else ''
    resDets = conf['resDets'] if 'resDets' in conf else []
    valImgs = conf['valImgs'] if 'valImgs' in conf else []
    val_scraped = conf['val_scraped'] if 'val_scraped' in conf else []
    probRefVals = conf['probRefVals'] if 'probRefVals' in conf else []
    scrollElToTop = "arguments[0].scrollIntoView(true);"
    # scrollElToBottom = "arguments[0].scrollIntoView(false);"
    forStr = f'[for "{searchFor}"]'
    try:
        driver = webdriver.Chrome()
        driver.get(searchUrl)
        # in case it opens on a set of search results
        searchAgain = driver.find_elements(By.ID, 'searchAgain')
        if searchAgain:
            searchAgain[0].click()
        pageStart = datetime.now()
        # new search
        driver.find_element(By.ID, 'txtSearch').send_keys(str(searchFor))
        searchBtn = driver.find_element(By.ID, 'btnSearch')
        driver.execute_script(scrollElToTop, searchBtn)
        searchBtn.click()
        total_res = driver.find_elements(
            By.CSS_SELECTOR, '.resultNavButtons .display > span')
        if total_res:
            total_res = total_res[0].get_attribute('innerText').strip()
            total_res = int(total_res[1:-1].split(' of ')[1])
        elif driver.find_elements(By.ID, 'pnlNoResults'):
            total_res = 0
            print('NO RESULTS FOR', searchFor)
            driver.quit()  # close the browser before bailing out on an empty result set
            return
        else:
            total_res = 'UNKNOWN'
        print(f'try#{tryCt} [{remRetries} left]')
        # page loop
        for pn in range(max_pages):
            pgSoup = BeautifulSoup(driver.page_source, 'html.parser')
            pageRows = [{
                c.span.get('id').replace('lbl', '', 1)
                if not c.input else 'ProniReference':
                c.span.get_text(strip=True) if not
                c.input else c.input.get('value')
                for c in r.select('td') if c.select('span,input')
            } for r in pgSoup.select('table#gvSearchResults tr:has(td)')]
            resDets += pageRows
            print('page', pn + 1, '-', len(pageRows), 'rows added of',
                  f'min({total_res} rows, {max_pages} pages){forStr:30}')
            if max_images > 0:
                val_new, scrapeVal = list(set([
                    r['ProniReference'] for r in pageRows
                    if r['ProniReference'] not in (val_scraped + probRefVals)
                ])), True
            else:
                val_new, scrapeVal = 'N/A', False
            pageTimes.append((datetime.now() - pageStart).total_seconds())
            for r in (range(len(pageRows)) if scrapeVal else []):
                viStart = datetime.now()
                val_toScrape = [v for v in driver.find_elements(
                    By.CSS_SELECTOR, 'table#gvSearchResults td input[value]'
                ) if v.get_attribute('value') not in (val_scraped + probRefVals)]
                if val_toScrape == []:
                    break
                row_images = {
                    'ProniReference': val_toScrape[0].get_attribute('value'),
                    'total_images': 'UNKNOWN', 'scrapeTime': '?',
                    'cover_image': None, 'index_image': None, 'last_image': None
                }
                if val_toScrape[0].get_attribute('disabled'):
                    sibSpan = val_toScrape[0].find_elements(
                        By.XPATH, '//following-sibling::span[@id="lblPRONI"][@title]')
                    if sibSpan:
                        vMsg = sibSpan[0].get_attribute('title')
                    else:
                        vMsg = 'UNKNOWN REASON'
                    vMsg = f'Failed to scrape - "{vMsg}"'
                    cStatus = f'Skipping {r+1} of {len(val_new)} [{row_images["ProniReference"]}]'
                    probRefVals.append(row_images['ProniReference'])
                    row_images['total_images'] = vMsg
                    row_images['scrapeTime'] = (
                        datetime.now() - viStart).total_seconds()
                    valImgs.append(row_images)
                    print(f'\r{cStatus} - {vMsg}')
                    continue
                driver.execute_script(scrollElToTop, val_toScrape[0])
                val_toScrape[0].click()
                try:
                    row_images['total_images'] = int(driver.find_element(
                        By.CSS_SELECTOR, '.navRow3:has(input[id$="tBtn"]) > span'
                    ).get_attribute('innerText').strip()[1:-1].split(' of ')[1])
                except:
                    row_images['total_images'] = 'FAILED_TO_SCRAPE'
                cStatus = f'Scraping {r+1} of {len(val_new)} [{row_images["ProniReference"]}]'
                imgsCt_c = f'{row_images["total_images"]} val imgs'
                print('', end=f'\r{cStatus} [{imgsCt_c}]')
                for i in range(max_images):
                    imgsCt_c = f'{i+1} of min({row_images["total_images"]}, {max_images})'
                    print('', end=f'\r{cStatus} [{imgsCt_c} val imgs]{" "*10}')
                    imgCol = 'cover_image' if i == 0 else f'image_{i}'
                    row_images[imgCol] = driver.find_element(
                        By.ID, 'ImgCtrlLarge').get_attribute('src')
                    nextBtn = driver.find_elements(By.ID, 'NextBtn')
                    if nextBtn:
                        nextBtn[0].click()
                    else:
                        break
                print('')
                specialImages = [('index_image', 'IndexBtn'),
                                 ('last_image', 'LastBtn')]
                for si in specialImages:
                    try:
                        driver.find_element(By.ID, si[1]).click()
                        row_images[si[0]] = driver.find_element(
                            By.ID, 'ImgCtrlLarge').get_attribute('src')
                    except:
                        row_images[si[0]] = 'FAILED_TO_SCRAPE'
                val_scraped.append(row_images['ProniReference'])
                row_images['scrapeTime'] = (
                    datetime.now() - viStart).total_seconds()
                valImgs.append(row_images)
                driver.find_element(By.ID, 'backButton').click()
            try:
                total_res = int(pgSoup.select_one(
                    '.resultNavButtons .display > span').get_text(
                    strip=True)[1:-1].split(' of ')[1])
            except:
                pass
            nextPage = driver.find_elements(By.ID, 'NextBtn')
            if not nextPage:
                print('No More Next Page')
                break
            else:
                driver.execute_script(scrollElToTop, nextPage[0])
                pageStart = datetime.now()
                nextPage[0].click()
        driver.quit()
        total_refImages = sum([
            d['total_images'] for d in valImgs
            if type(d['total_images']) == int
        ])
        total_scrapedImages = sum([len([
            1 for k, v in d.items() if v is not None and
            (k.replace('image_', '').isdigit() or k == 'cover_image')
        ]) for d in valImgs])
        # imgdl_logpath / imgdl_time were initialised above, before the try block
        if 'downloadImages' in conf and conf['downloadImages']:
            if max_images > 0:
                imgdl_logpath, imgdl_time = downloadRefImgs(valImgs, fp)
    except Exception as err:
        errorMsg = getErrorMssage(err, tuple(sys.exc_info()))
        print('\n', errorMsg)
        errorLog.append({
            'conf': prepForJson(copy.deepcopy(conf), omitIter=['errorLog', 'valImgs']),
            'errorMsg': errorMsg,
            # webdriver.Chrome() itself may have failed, so check that driver exists first
            'driverJson': prepForJson(getObjAttrs(driver)) if 'driver' in dir() else 'driver was not created'
        })
        if remRetries > 0:
            try:
                driver.quit()
                del driver
            except Exception as e:
                print(f'!unable to quit+del driver',
                      getErrorMssage(e, tuple(sys.exc_info())))
            conf['remRetries'] = remRetries - 1
            conf['valImgs'] = valImgs
            conf['val_scraped'] = val_scraped
            conf['probRefVals'] = probRefVals
            conf['errorLog'] = copy.deepcopy(errorLog)
            conf['logSearch'] = logSearch
            # sInd is passed along so retries keep the same per-search output filename
            return proni_scraper(
                searchFor=searchFor, max_pages=max_pages,
                max_images=max_images, conf=conf, sInd=sInd
            )
    for i, el in enumerate(errorLog):
        try:
            html_fn = get_opFilename('error_pgSrc', fp, '.html')
            pgSrc = el['driverJson']['page_source']
            pgSrc = pgSrc[0] if type(pgSrc) == list and pgSrc else pgSrc
            with open(html_fn, 'wb') as f:
                f.write(str(pgSrc).encode('utf-8'))
            errorLog[i]['driverJson']['page_source'] = html_fn
        except Exception as err:
            errorLog[i]['html_save_error'] = getErrorMssage(
                err, tuple(sys.exc_info()), 'html_save_error')
    errLg_fn = None
    try:
        if errorLog:
            errLg_fn = get_opFilename('proni_errorLog', fp, '.json')
            errorLog = prepForJson(errorLog, omitIter=['errorLog', 'valImgs'])
            with open(errLg_fn, 'w') as f:
                json.dump(errorLog, f, indent=4)
    except Exception as err:
        errLg_fn = getErrorMssage(err, tuple(sys.exc_info()), 'save_errLg')
        print('\n', errLg_fn)
    secRow = (sum(pageTimes)/len(resDets)) if resDets and pageTimes else 'N/A'
    secPage = (sum(pageTimes)/len(pageTimes)) if pageTimes else 'N/A'
    secVI = [vi['scrapeTime'] for vi in valImgs if 'scrapeTime' in vi]
    secRef = (sum(secVI)/len(valImgs)) if secVI and valImgs else 'N/A'
    secVI = (sum(secVI)/(
        total_scrapedImages if total_scrapedImages else 1
    )) if secVI else 'N/A'
    totalTime = datetime.now() - startTime
    # input('Enter')
    dfRefs = [
        {
            'data': [
                ('Search Keywords', searchFor),
                ('Maximum Pages [param]', max_pages),
                ('Maximum Images [param]', max_images),
                ('', ''),
                ('Total Results Available', total_res),
                ('Total Results Scraped', len(resDets)),
                ('', ''),
                ('Total References Scraped', len(valImgs)),
                ('Total Reference Images', total_refImages),
                ('Total Images Scraped', total_scrapedImages),
                ('Image-Download Log [filename]', imgdl_logpath),
                ('Problematic Refs', probRefVals),
                ('', ''),
                ('Start Time', startTime.isoformat()),
                ('Time Taken [tdeltaStr]', str(totalTime)),
                ('Time Taken [seconds]', totalTime.total_seconds()),
                ('Average Time per Page', secPage),
                ('Average Time per Row', secRow),
                ('Average Time per Reference', secRef),
                ('Average Time per Image', secVI),
                ('Total Image Download Time', imgdl_time),
                ('', ''),
                ('Tries Taken', tryCt),
                ('Failed Tries', len(errorLog)),
                ('Last Error Message', errorMsg),
                ('Error Log [filename]', errLg_fn)
            ], 'name': 'Search Summary', 'h': False
        },
        {'name': 'Search Results', 'data': resDets, 'h': True},
        {'name': 'Reference Images', 'data': valImgs, 'h': True}
    ]
    for k, v in dfRefs[0]['data']:
        skipKeys = ['Search Keywords', 'Maximum Pages [param]']
        skipKeys += ['Maximum Images [param]', 'Start Time']
        if k and k not in skipKeys:
            for_sl[str(k).replace(' ', '_')] = v
    sInd = sInd if type(sInd) == int and sInd > -1 else None
    if type(fnr) == str:
        try:
            op_fn = get_opFilename(fnr, fp)
            if sInd is not None:
                op_fn = op_fn.replace('.xlsx', f'_op-{sInd}.xlsx')
                print(f'[filename changed to {op_fn}]')
            with pandas.ExcelWriter(op_fn) as w:
                for r in dfRefs:
                    pandas.DataFrame(r['data']).to_excel(
                        w, sheet_name=r['name'], index=False, header=r['h'])
            for_sl['op_excel'] = op_fn
        except Exception as e:
            errorMsg = getErrorMssage(e, tuple(sys.exc_info()), 'ExcelWriter')
            print(errorMsg)
            op_fn = errorMsg
            for_sl['error_ExcelWriter'] = errorMsg
    if logSearch:
        try:
            searchLog = prepForJson(
                searchLog, omitIter=['errorLog', 'valImgs'])
            with open(logSearch, 'w') as f:
                json.dump(searchLog + [for_sl], f, indent=4)
        except Exception as e:
            print(getErrorMssage(e, tuple(sys.exc_info())))
    if type(fnr) != str:
        return dfRefs
    return op_fn if sInd is None else (op_fn, dfRefs)
def proniList_scraper(searchList, max_pages=10, max_images=5, conf=None):
    # a fresh dict per call - a shared mutable default would leak state between calls
    conf = {} if conf is None else conf
    if searchList == "allVals":
        conf['[orig] searchList'] = "allVals"
        searchList = [f'VAL/12/B/{i}' for i in range(1, 10)]
        return proniList_scraper(searchList, max_pages, max_images, conf)
    startTime = datetime.now()
    logSearch = conf['logSearch'] if 'logSearch' in conf else 'proniScraper_logs.json'
    if not (type(logSearch) == str and logSearch.endswith('.json')):
        logSearch = False
    for_sl = {
        '[inp] searchList': searchList, '[inp] max_pages': max_pages,
        '[inp] max_images': max_images, '[inp] conf': conf
    }
    # rConf must exist before the 'indivLogs' option can be copied into it
    resCombo, sik, rConf = [], 'search_index', dict(conf.items())
    if 'indivLogs' in conf:
        rConf['logSearch'] = conf['indivLogs']
        del rConf['indivLogs']
    fnr = conf['fnr'] if 'fnr' in conf else 'proni_scraped'
    fp = conf['fp'] if 'fp' in conf else ''
    for_sl['startTime'] = startTime.isoformat()
    resList = [proni_scraper(
        s, max_pages=max_pages, max_images=max_images, conf=rConf, sInd=i
    ) for i, s in enumerate(searchList) if s and type(s) == str]
    for_sl['individual_outputs'] = []
    for i, res in enumerate(resList):
        if not res:
            continue  # a search with no results returns None (see proni_scraper)
        rfn, r = res
        for_sl['individual_outputs'].append(rfn)
        for dfRef in r:
            if 'data' not in dfRef or 'name' not in dfRef:
                continue
            if dfRef['name'] == 'Search Summary':
                dfRef['data'] = [
                    {k: v for k, v in ([(sik, i)] + dfRef['data']) if k}]
            else:
                for dri, drr in enumerate(dfRef['data']):
                    dfRef['data'][dri] = {
                        k: v for k, v in ([(sik, i)] + list(drr.items()))}
            if not [rc for rc in resCombo if rc['name'] == dfRef['name']]:
                resCombo.append(dfRef)
                continue
            for rci, rc in enumerate(resCombo):
                if rc['name'] == dfRef['name']:
                    resCombo[rci]['data'] += dfRef['data']
    if type(fnr) == str:
        opfn = get_opFilename(fnr, fp)
        with pandas.ExcelWriter(opfn) as w:
            for r in resCombo:
                pandas.DataFrame(r['data']).to_excel(
                    w, sheet_name=r['name'], index=False, header=True)
        finalOp = opfn
        for_sl['op_excel'] = opfn
    else:
        finalOp = resCombo
    totalTime = datetime.now() - startTime
    for_sl['Total Time (stringified)'] = str(totalTime)
    for_sl['Total Time (seconds)'] = totalTime.total_seconds()
    if logSearch:
        try:
            searchLog = json.load(open(logSearch, 'r'))
            searchLog = searchLog if type(
                searchLog) == list else [copy.deepcopy(searchLog)]
            print(f'found {len(searchLog)} logs at "{logSearch}"')
        except Exception as err:
            slEm = getErrorMssage(err, tuple(sys.exc_info()))
            print(f'Could not retrieve searchLog from "{logSearch}" -', slEm)
            searchLog = []
        try:
            searchLog = prepForJson(
                searchLog, omitIter=['errorLog', 'valImgs'])
            with open(logSearch, 'w') as f:
                json.dump(searchLog + [for_sl], f, indent=4)
        except Exception as e:
            print(getErrorMssage(e, tuple(sys.exc_info())))
    return finalOp
'''
## [to convert logs to csv] ##
sl = json.load(open('proniScraper_logs.json', 'r'))
print(len(sl), 'logs')
pandas.DataFrame(sl).to_csv('_proniScraper_logs.csv', index=False)
# '''

# proni_scraper('Great Victoria', conf={'downloadImages': True})
# proni_scraper('Rossconor', 10, 10, conf={'downloadImages': True})
# proniList_scraper(['Camus', 'Downing', 'Kilmore'], 1, 1)

# to scrape all listings, but without ANY reference images
# proniList_scraper('allVals', 100000, 0)
# proniList_scraper(['Camus', 'Downing', 'Kilmore'], 1, 0, conf={'indivLogs': False})

# proni_scraper('Cavan', 100, 1000)

# to scrape all references and download all images
# proniList_scraper('allVals', 100000, 1000, conf={'downloadImages': True})
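# a couple of extra usage sketches (the search term and filename below are only placeholders):
# - passing a non-string 'fnr' via conf skips the excel output, so the three sheets
#   ('Search Summary', 'Search Results', 'Reference Images') are returned in-memory instead
# dfRefs = proni_scraper('Lisburn', 1, 0, conf={'fnr': None})
# - downloadRefImgs also accepts a previously saved output file and reads its
#   "Reference Images" sheet, so images can be (re)downloaded later
# downloadRefImgs('proni_scraped_0.xlsx')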