Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
import csv
import os
import shutil
import sqlite3
from datetime import datetime, timedelta, timezone
from urllib.parse import parse_qs, unquote, urlparse
# === Configuration ===
extension_id = "fiabciakcmgepblmdkmemdbbkilneeeh"  # Tab Suspender extension ID
history_path = os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome\User Data\Default\History")
output_file = r"C:\Temp\TabSuspender_Recovered.csv"
hours_back = 72  # how far back in history to look

# === Calculate Chrome timestamp ===
# Chrome stores last_visit_time as microseconds since 1601-01-01 (the
# Windows FILETIME epoch), so the cutoff must be expressed the same way.
epoch_start = datetime(1601, 1, 1)
# datetime.utcnow() is deprecated (Python 3.12+); take an aware UTC "now"
# and drop tzinfo so subtraction against the naive epoch_start stays valid.
since_time = datetime.now(timezone.utc).replace(tzinfo=None) - timedelta(hours=hours_back)
threshold = int((since_time - epoch_start).total_seconds() * 1_000_000)

# === Copy History file to avoid lock ===
# Chrome keeps the History SQLite database locked while running; work on
# a private copy instead of the live file.
temp_history_path = os.path.join(os.environ['TEMP'], 'Chrome_History_Temp')
shutil.copy2(history_path, temp_history_path)
# === Connect to SQLite and fetch recent history ===
# try/finally guarantees the connection is closed even if the query fails
# (the original leaked it on error). The query is parameterized, so the
# former f-string prefix was unnecessary.
conn = sqlite3.connect(temp_history_path)
try:
    cursor = conn.cursor()
    cursor.execute(
        """
        SELECT url, title, last_visit_time
        FROM urls
        WHERE last_visit_time > ?
        ORDER BY last_visit_time DESC
        """,
        (threshold,),
    )
    rows = cursor.fetchall()
finally:
    conn.close()
# === Conversion helper ===
def chrome_time_to_datetime(microseconds):
    """Convert a Chrome/WebKit timestamp (microseconds since 1601-01-01) to a datetime."""
    offset = timedelta(microseconds=microseconds)
    return epoch_start + offset
# === Extract real URLs from Tab Suspender park.html ===
def extract_real_url(park_url):
    """Return the original URL wrapped by a Tab Suspender park.html link.

    Returns None when *park_url* is not a park page belonging to our
    extension, or when it carries no ``url`` query parameter.
    """
    parsed = urlparse(park_url)
    if parsed.netloc == extension_id and parsed.path == "/park.html":
        query = parse_qs(parsed.query)
        if "url" in query:
            # parse_qs already percent-decodes values; calling unquote()
            # on the result (as before) double-decoded URLs that contain
            # escaped percent signs (e.g. "%2520" became a space).
            return query["url"][0]
    return None
# === Filter, process, and deduplicate ===
# Keep only park-page entries, unwrap them, and drop repeats; rows are
# ordered newest-first, so the first occurrence of a URL wins.
seen = set()
recovered = []
for url, title, visit_time in rows:
    real_url = extract_real_url(url)
    if not real_url or real_url in seen:
        continue
    seen.add(real_url)
    stamp = chrome_time_to_datetime(visit_time).strftime("%Y-%m-%d %H:%M:%S")
    recovered.append([real_url, title, stamp])
# === Save results to CSV ===
# Create the destination folder first — C:\Temp is not guaranteed to
# exist, and open() would otherwise fail with FileNotFoundError.
os.makedirs(os.path.dirname(output_file), exist_ok=True)
with open(output_file, "w", encoding="utf-8", newline="") as f:
    writer = csv.writer(f)
    writer.writerow(["RecoveredURL", "Title", "LastVisit"])
    writer.writerows(recovered)

print(f"✅ Recovered {len(recovered)} unique suspended tabs from the last {hours_back} hours.")
print(f"📄 Output saved to: {output_file}")
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement