Advertisement
Guest User

Untitled

a guest
Apr 29th, 2025
37
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 2.26 KB | Source Code | 0 0
  1. import sqlite3
  2. import csv
  3. import os
  4. from datetime import datetime, timedelta
  5. from urllib.parse import unquote, urlparse, parse_qs
  6. import shutil
  7.  
# === Configuration ===
# Tab Suspender's Chrome extension ID — suspended tabs appear in history as
# chrome-extension://<extension_id>/park.html?...&url=<original-url>
extension_id = "fiabciakcmgepblmdkmemdbbkilneeeh"
history_path = os.path.expandvars(r"%LOCALAPPDATA%\Google\Chrome\User Data\Default\History")
output_file = r"C:\Temp\TabSuspender_Recovered.csv"
hours_back = 72  # Changed to 72 hours

# === Calculate Chrome timestamp ===
# Chrome stores urls.last_visit_time as microseconds since 1601-01-01
# (the Windows FILETIME epoch), so convert the cutoff into that scale.
epoch_start = datetime(1601, 1, 1)
since_time = datetime.utcnow() - timedelta(hours=hours_back)
threshold = int((since_time - epoch_start).total_seconds() * 1_000_000)

# === Copy History file to avoid lock ===
# Chrome holds the History SQLite database locked while it is running;
# copy2 preserves metadata and lets us query the snapshot safely.
temp_history_path = os.path.join(os.environ['TEMP'], 'Chrome_History_Temp')
shutil.copy2(history_path, temp_history_path)
  22.  
  23. # === Connect to SQLite and fetch recent history ===
  24. conn = sqlite3.connect(temp_history_path)
  25. cursor = conn.cursor()
  26.  
  27. cursor.execute(f"""
  28.    SELECT url, title, last_visit_time
  29.    FROM urls
  30.    WHERE last_visit_time > ?
  31.    ORDER BY last_visit_time DESC
  32. """, (threshold,))
  33.  
  34. rows = cursor.fetchall()
  35. conn.close()
  36.  
  37. # === Conversion helper ===
  38. def chrome_time_to_datetime(microseconds):
  39.     return epoch_start + timedelta(microseconds=microseconds)
  40.  
  41. # === Extract real URLs from Tab Suspender park.html ===
  42. def extract_real_url(park_url):
  43.     parsed = urlparse(park_url)
  44.     if parsed.netloc == extension_id and parsed.path == "/park.html":
  45.         query = parse_qs(parsed.query)
  46.         if "url" in query:
  47.             return unquote(query["url"][0])
  48.     return None
  49.  
  50. # === Filter, process, and deduplicate ===
  51. seen = set()
  52. recovered = []
  53.  
  54. for url, title, visit_time in rows:
  55.     real_url = extract_real_url(url)
  56.     if real_url and real_url not in seen:
  57.         visit_dt = chrome_time_to_datetime(visit_time).strftime("%Y-%m-%d %H:%M:%S")
  58.         recovered.append([real_url, title, visit_dt])
  59.         seen.add(real_url)
  60.  
  61. # === Save results to CSV ===
  62. with open(output_file, "w", encoding="utf-8", newline="") as f:
  63.     writer = csv.writer(f)
  64.     writer.writerow(["RecoveredURL", "Title", "LastVisit"])
  65.     writer.writerows(recovered)
  66.  
  67. print(f"✅ Recovered {len(recovered)} unique suspended tabs from the last {hours_back} hours.")
  68. print(f"📄 Output saved to: {output_file}")
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement