import os
import random
import string
import hashlib
import time

# Directory containing the files
directory = r'C:\Path\to\your\wallpapers'
- """
- This script renames files in a specified directory to random 6-character alphanumeric filenames consisting
- of uppercase letters and digits. It includes the following features:
- 1. Scans the directory and identifies all the files in it.
- 2. Calculates hashes for old files (files that already have 6-character names).
- 3. Calculates hashes for new files (files with filenames not exactly 6 characters long).
- 4. Compares hashes of old files and prompts the user to delete duplicates if conflicts are detected.
- 5. Deletes new files with hashes that match old files, ensuring no duplicate content.
- 6. Deletes new files with hashes that match other new files.
- 7. Renames remaining new files to unique 6-character alphanumeric filenames.
- 8. Prints detailed messages for each file, indicating actions taken (skipping, renaming, deleting).
- 9. Provides a summary at the end of the script, indicating how many files it scanned, skipped, renamed, deleted,
- and the count of each file type.
- 10. Displays the total time the script took to run.
- """
# Function to generate a random filename
def generate_random_filename(length=6):
    characters = string.ascii_uppercase + string.digits
    return ''.join(random.choice(characters) for _ in range(length))
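
# Note: with 26 uppercase letters and 10 digits there are 36**6 (about 2.18 billion)
# possible 6-character names, so random collisions are unlikely; the renaming step
# below still re-rolls a name if it already exists in the directory.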

# Function to calculate the BLAKE2b hash of a file
def calculate_hash(file_path):
    hasher = hashlib.blake2b()
    try:
        with open(file_path, 'rb') as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hasher.update(chunk)
        return hasher.hexdigest()
    except Exception as e:
        print(f"Error calculating hash for {file_path}: {e}")
        return None
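
# Reading the file in 4 KiB chunks keeps memory usage constant even for large images,
# and BLAKE2b from the standard hashlib module is a fast cryptographic hash, making
# accidental collisions between different files practically impossible.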

# Function to rename files in the directory
def rename_files(directory):
    start_time = time.time()
    scanned_count = 0
    skipped_count = 0
    renamed_count = 0
    duplicate_count = 0
    file_type_counts = {}
    old_files_hashes = {}
    new_files_hashes = {}
    new_files = []

    # Step 1: Scan the directory and identify all the files in it
    print("Scanning directory and identifying files...")
    for filename in os.listdir(directory):
        file_path = os.path.join(directory, filename)
        if os.path.isfile(file_path):
            scanned_count += 1
            file_name, file_extension = os.path.splitext(filename)

            # Count file types
            file_extension = file_extension.lower()
            if file_extension not in file_type_counts:
                file_type_counts[file_extension] = 0
            file_type_counts[file_extension] += 1

            # Categorize files as old or new
            if len(file_name) == 6:
                # Step 2: Calculate hashes for old files
                file_hash = calculate_hash(file_path)
                if file_hash:
                    if file_hash in old_files_hashes:
                        old_files_hashes[file_hash].append(filename)
                    else:
                        old_files_hashes[file_hash] = [filename]
                skipped_count += 1
                print(f'Skipping "{filename}" (already 6 characters long) Hash: {file_hash}')
            else:
                # Step 3: Calculate hashes for new files
                file_hash = calculate_hash(file_path)
                if file_hash:
                    new_files.append((filename, file_path, file_hash))
                    if file_hash in new_files_hashes:
                        new_files_hashes[file_hash].append(filename)
                    else:
                        new_files_hashes[file_hash] = [filename]

    # Check for conflicts among old files
    conflict_files = [(file_hash, files) for file_hash, files in old_files_hashes.items() if len(files) > 1]
    if conflict_files:
        print(f"\nDetected {len(conflict_files)} conflicts among old files:")
        for file_hash, files in conflict_files:
            print(f"Hash: {file_hash}")
            for file in files:
                print(f" - {file}")
        user_input = input("Do you want to delete these duplicates? (y/n): ").strip().lower()
        if user_input == 'y':
            for file_hash, files in conflict_files:
                for file in files[1:]:  # Keep the first file, delete the rest
                    os.remove(os.path.join(directory, file))
                    duplicate_count += 1
                    print(f'Deleting "{file}" (duplicate content found among old files)')

    # Step 4: Delete new files with hashes that match old files
    print("Checking for duplicates and deleting if found...")
    remaining_new_files = []
    for filename, file_path, file_hash in new_files:
        if file_hash in old_files_hashes:
            duplicate_count += 1
            os.remove(file_path)
            print(f'Deleting "{filename}" (duplicate content found with existing 6-character filename)')
        elif len(new_files_hashes[file_hash]) > 1:
            duplicate_count += 1
            new_files_hashes[file_hash].remove(filename)
            os.remove(file_path)
            print(f'Deleting "{filename}" (duplicate content found with another new file)')
        else:
            remaining_new_files.append((filename, file_path, file_hash))

    # Step 5: Rename remaining new files
    print("Renaming remaining new files...")
    for filename, file_path, file_hash in remaining_new_files:
        file_name, file_extension = os.path.splitext(filename)

        # Generate a unique filename, re-rolling until it does not collide with an existing file
        new_filename = generate_random_filename() + file_extension
        new_file_path = os.path.join(directory, new_filename)
        while os.path.exists(new_file_path):
            new_filename = generate_random_filename() + file_extension
            new_file_path = os.path.join(directory, new_filename)

        os.rename(file_path, new_file_path)
        renamed_count += 1
        print(f'Renaming "{filename}" to "{new_filename}"')

    end_time = time.time()
    total_time = end_time - start_time

    # Summary
    print("\nSummary:")
    print(f"Total files scanned: {scanned_count}")
    print(f"Files skipped: {skipped_count}")
    print(f"Files renamed: {renamed_count}")
    print(f"Duplicates deleted: {duplicate_count}")
    for file_type, count in file_type_counts.items():
        print(f"{file_type.upper()} files scanned: {count}")
    print(f"Total time taken: {total_time:.2f} seconds")

    # Pause at the end
    input("Press Enter to exit...")


# Run the renaming process
rename_files(directory)
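
# Optional usage sketch (illustrative only, not part of the original script): the target
# folder could be passed on the command line instead of editing the hard-coded `directory`
# above. If used, it would replace the unconditional rename_files(directory) call:
#
#   import sys
#   if __name__ == "__main__":
#       rename_files(sys.argv[1] if len(sys.argv) > 1 else directory)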