import requests
import re
from bs4 import BeautifulSoup


def scraping():
    p = re.compile(r"\xa0")
    url = "https://www.global-rates.com/en/interest-rates/libor/american-dollar/usd-libor-interest-rate-3-months.aspx"
    resp = requests.get(url)
    soup = BeautifulSoup(resp.text, 'html5lib')

    # Scrape the interest rates
    selector = "form table tbody tr td table tbody tr td table tbody tr:nth-child(1) td:nth-child(1) table tbody tr td:nth-child(2)"
    result = soup.select(selector)
    # Strip whitespace, drop the trailing character, and remove non-breaking spaces
    value = [p.sub('', x.text.strip()[:-1]) for x in result[1:]]

    # Scrape the dates
    selector = "form table tbody tr td table tbody tr td table tbody tr:nth-child(1) td:nth-child(1) table tbody tr td:nth-child(1)"
    result = soup.select(selector)
    date = [p.sub('', x.text.replace(' ', '-')) for x in result[4:14]]

    # Store as a dictionary keyed by date
    data = {}
    for i in range(len(date)):
        data[date[i]] = value[i]
    return data
def read_database(filename):
    try:
        with open(filename, "r") as f:
            lines = f.readlines()
        # Convert the list of lines into a dictionary
        data = {}
        for line in lines:
            k, v = line.split(' ')
            data[k] = v.replace('\n', '')
        return data
    except FileNotFoundError:
        # Return an empty dict so the caller can still call .update() on it
        return {}
def write_database(filename, data):
    with open(filename, "w") as f:
        for k, v in data.items():
            f.write(f"{k} {v}\n")
# ------------------------------------------------------------------
data = read_database("result.txt")
newcomer = scraping()
data.update(newcomer)
write_database("result.txt", data)
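
# --- Optional sanity check (not part of the original paste) ----------------
# A minimal sketch for inspecting what was saved: it re-reads result.txt and
# prints each stored date/rate pair. It assumes the script above ran without
# errors and that result.txt was written in the "<date> <value>" line format
# produced by write_database().
saved = read_database("result.txt")
print(f"{len(saved)} rates stored")
for d, v in saved.items():
    print(d, v)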