Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import pymysql

# Undo list: ids of transactions that must be rolled back during recovery.
# Seeded from checkpoint records and maintained by parse_log_1().
undo_trans = []
# Primary-key column for each table, used to build the WHERE clause of the
# recovery UPDATE statements in connect_db().
pkdict = {'course' :'course_id', 'department' : 'dept_id', 'employee' : 'ename', 'professor' : 'prof_id', 'student' :'stu_id'}
def connect_db(Table, Column, after, Primary_Key, Pri_Value):
    """Apply one recovery write: set ``Table.Column = after`` for the row
    whose primary key ``Primary_Key`` equals ``Pri_Value``.

    Called by parse_log_1() for both redo (after-image) and undo
    (before-image) writes.
    """
    connection = pymysql.connect(
        host='127.0.0.1',
        user='root',
        password='비밀번호',
        db='python_test',
        charset='utf8',
        cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            print(Table, Column, after, Primary_Key, Pri_Value)
            # Identifiers (table/column names) cannot be bound as DB-API
            # parameters, but the VALUES can and must be — binding them
            # avoids SQL injection and broken quoting from log contents.
            sql = "UPDATE %s SET %s = %%s WHERE %s = %%s" % (Table, Column, Primary_Key)
            cursor.execute(sql, (after, Pri_Value))
            result = cursor.fetchall()  # empty for UPDATE; kept for the trace print
            print(result)
        # PyMySQL defaults to autocommit=False: without an explicit commit the
        # UPDATE is rolled back when the connection closes and recovery is lost.
        connection.commit()
    finally:
        connection.close()
def parse_log_1(log_path="D:\\workspace(pycharm)\\Engineering2\\recovery.txt"):
    """Run checkpoint-based crash recovery over a write-ahead log file.

    Phases:
      1. Load the log records from *log_path* (one record per line).
      2. Seed the global undo list from every checkpoint record; remember the
         index of the LAST checkpoint.
      3. Redo: replay every write recorded after the last checkpoint
         (after-image), tracking start/commit/abort to maintain the undo list.
      4. Undo: walk the whole log backwards and restore the before-image of
         every write belonging to a transaction still in the undo list,
         appending compensation records to ``abort.txt``.

    Args:
        log_path: path of the recovery log. Parameterized for reuse; the
            default keeps the original hard-coded behavior.
    """
    global undo_trans
    chkpt_idx = 0
    # NOTE(review): this list was referenced without ever being defined in the
    # original code (NameError on any 2-token record); defining it locally
    # makes that branch work as apparently intended.
    opers = []

    with open(log_path, "r", encoding="utf-8") as f:
        # Strip a single trailing newline from each record.
        logs = [line[:-1] if line.endswith("\n") else line for line in f]

    for i, record in enumerate(logs):
        if record.startswith("checkpoint"):
            chkpt_idx = i  # only the LAST checkpoint index is kept
            # 'checkpoint ' is 11 chars; the remainder is a ", "-separated
            # list of transactions live at checkpoint time -> seed undo list.
            undo_trans.extend(record[11:].split(", "))

    def _replay(record, value_index):
        """Apply one logged write via connect_db.

        value_index selects the image within the comma-separated record:
        -1 = after-image (redo), -2 = before-image (undo).
        """
        fields = record.split(',')
        head = fields[0].split('.')
        # head[0][5:] drops a fixed-width '<Tid> write' style prefix
        # (5 chars) — assumes fixed-length transaction ids; TODO confirm.
        table = head[0][5:]
        pri_value = head[1]
        column = head[2]
        value = fields[value_index][1:]  # drop the leading space after ','
        connect_db(table, column, value, pkdict[table], pri_value)

    # --- Redo phase: records strictly after the last checkpoint ---
    for i in range(chkpt_idx + 1, len(logs)):
        tokens = logs[i].split()
        if tokens[1] in ('commit', 'abort'):
            undo_trans.remove(tokens[0])   # finished transaction: no undo needed
        elif tokens[1] == 'start':
            undo_trans.append(tokens[0])   # started after checkpoint: candidate for undo
        elif len(tokens) < 3:
            opers.append(tokens[1])
        elif len(tokens) in (3, 4):
            # Reapply the after-image. (The original had two byte-identical
            # branches for the 3- and 4-token cases; merged here.)
            _replay(logs[i], -1)

    # --- Undo phase: whole log, backwards ---
    for i in range(len(logs) - 1, -1, -1):
        tokens = logs[i].split()
        if tokens[0] in undo_trans and tokens[1] == 'start':
            # Reached the start of an unfinished transaction: record its abort
            # and drop it from the undo list.
            with open("abort.txt", "a") as newlog:
                newlog.write(tokens[0] + ' ' + 'abort' + '\n')
            undo_trans.remove(tokens[0])
        elif tokens[0] in undo_trans and len(tokens) == 4:
            _replay(logs[i], -2)  # restore the before-image
            # Append a compensation record: '<Tid> <table>.<pk>.<col> <before>'.
            fields = logs[i].split(',')
            head = fields[0].split('.')
            with open("abort.txt", "a") as newlog:
                newlog.write(tokens[0] + ' '
                             + head[0][5:] + '.'      # table
                             + head[1] + '.'          # primary-key value
                             + head[2] + ' '          # column
                             + fields[-2][1:]         # before-image
                             + '\n')
    print('abort')
- parse_log_1()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement