import csv
import time
from pathlib import Path

import torch
import torch.nn as nn
import torch.nn.functional as F
from safetensors.torch import load_file
from tqdm import tqdm

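# Overview: for every pair of checkpoints listed in file_paths below, this
# script pushes the same random input through the self-attention (attn1)
# Q/K/V projections of U-Net output blocks 3-10 of both models and averages
# the cosine similarity of the resulting outputs. Each pair's score (in
# percent) is written to checkpoint_similarity.csv as Source, Target, Weight.
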
def cal_cross_attn(to_q, to_k, to_v, rand_input):
    hidden_dim, embed_dim = to_q.shape
    attn_to_q = nn.Linear(hidden_dim, embed_dim, bias=False)
    attn_to_k = nn.Linear(hidden_dim, embed_dim, bias=False)
    attn_to_v = nn.Linear(hidden_dim, embed_dim, bias=False)
    attn_to_q.load_state_dict({"weight": to_q})
    attn_to_k.load_state_dict({"weight": to_k})
    attn_to_v.load_state_dict({"weight": to_v})

    return torch.einsum(
        "ik, jk -> ik",
        F.softmax(torch.einsum("ij, kj -> ik", attn_to_q(rand_input), attn_to_k(rand_input)), dim=-1),
        attn_to_v(rand_input),
    )

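# Note on cal_cross_attn: in SD 1.x/2.x-style checkpoints the attn1 Q/K/V
# weight matrices are square (hidden_dim == embed_dim), which is why a
# [hidden_dim, embed_dim] tensor loads into nn.Linear(hidden_dim, embed_dim)
# without a size mismatch. The returned [embed_dim, embed_dim] tensor is used
# only as a per-layer fingerprint for the cosine-similarity comparison below.
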
def model_hash(filename):
    try:
        with open(filename, "rb") as file:
            import hashlib
            m = hashlib.sha256()
            file.seek(0x100000)
            m.update(file.read(0x10000))
            return m.hexdigest()[0:8]
    except FileNotFoundError:
        return 'NOFILE'

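# Note: model_hash() is not called anywhere in this script. It appears to
# match the legacy "short hash" scheme used by the AUTOMATIC1111 web UI
# (SHA-256 of a 0x10000-byte slice read at offset 0x100000, truncated to
# eight hex characters).
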
def load_model(path):
    if path.suffix == ".safetensors":
        return load_file(path, device="cpu")
    else:
        ckpt = torch.load(path, map_location="cpu")
        return ckpt["state_dict"] if "state_dict" in ckpt else ckpt

def eval(model, n, input):
    qk = f"model.diffusion_model.output_blocks.{n}.1.transformer_blocks.0.attn1.to_q.weight"
    uk = f"model.diffusion_model.output_blocks.{n}.1.transformer_blocks.0.attn1.to_k.weight"
    vk = f"model.diffusion_model.output_blocks.{n}.1.transformer_blocks.0.attn1.to_v.weight"
    atoq, atok, atov = model[qk], model[uk], model[vk]

    attn = cal_cross_attn(atoq, atok, atov, input)
    return attn

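# Note: these keys follow the original ldm-style state_dict layout
# ("model.diffusion_model....") used by SD 1.x/2.x checkpoints. A checkpoint
# whose U-Net keys are named differently would raise a KeyError here.
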
def compare_checkpoints(file1, file2, csv_writer):
    model_a = load_model(file1)
    model_b = load_model(file2)

    map_attn_a = {}
    map_attn_b = {}
    map_rand_input = {}

    # Build one random probe per block and record each model's response to it.
    for n in range(3, 11):
        hidden_dim, embed_dim = model_a[
            f"model.diffusion_model.output_blocks.{n}.1.transformer_blocks.0.attn1.to_q.weight"
        ].shape
        rand_input = torch.randn([embed_dim, hidden_dim])

        map_attn_a[n] = eval(model_a, n, rand_input)
        map_attn_b[n] = eval(model_b, n, rand_input)
        map_rand_input[n] = rand_input

    # Average the cosine similarity of the two responses across blocks 3-10.
    sims = []
    for n in range(3, 11):
        attn_a = map_attn_a[n]
        attn_b = map_attn_b[n]

        sim = torch.mean(torch.cosine_similarity(attn_a, attn_b))
        sims.append(sim)

    similarity = torch.mean(torch.stack(sims)) * 100
    csv_writer.writerow([file1.name, file2.name, similarity.item()])

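# Note: the same rand_input is reused for both checkpoints in each block, so
# two files with identical attn1 weights score exactly 100.0; the reported
# value is the mean over output blocks 3-10, scaled to percent.
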
# List of file paths to compare
file_paths = [
    "Basil mix.safetensors",
    "CLR+Izumi+BRAv5.safetensors",
    "CLRL_IzumiBarV5.safetensors",
    "ChillLofiRealistcv2.safetensors",
    "DreamLikeNovelInkF222VisionRealism.safetensors",
    "HenxxmixReal.safetensors",
    "V08_V08.safetensors",
    "anything-v4.0.ckpt",
    "asianRole_v10.safetensors",
    "babes_11.safetensors",
    "bra_v5.safetensors",
    "chilloutmix_NiPrunedFp32Fix.safetensors",
    "chineseDigitalArt_10.ckpt",
    "clarity_19.safetensors",
    "clarity_2.safetensors",
    "deliberate_v2.ckpt",
    "dreamlike-photoreal-2.0.ckpt",
    "dreamshaper_4BakedVae.safetensors",
    "dungeonsNWaifusNew_dungeonsNWaifus22.safetensors",
    "dvarchDreamlikePhotReal.safetensors",
    "dvarchDreamlikePhotRealAsianRoleAOM3A1B.safetensors",
    "f222_v1.ckpt",
    "fotoAssisted_v0.safetensors",
    "hassanblend1512And_hassanblend1512.ckpt",
    "icbinpICantBelieveIts_afterburn.safetensors",
    "izumi_01Safetensors.safetensors",
    "koreanstyle25D_koreanstyle25DBaked.safetensors",
    "lofi_V2pre.safetensors",
    "majicmixFantasy_v20.safetensors",
    "photon_v1.safetensors",
    "juggernaut_final.safetensors",
    "realisticVisionV20_v20.safetensors",
    "uberRealisticPornMerge_urpmv13.safetensors",
    "v1-5-pruned.ckpt",
    "v2-1_768-ema-pruned.safetensors",
    "xxmix9realistic_v26.safetensors",
]

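# Optional alternative (an assumption, not part of the original script):
# discover checkpoints in the working directory instead of listing them by hand.
# file_paths = sorted(
#     str(p) for p in Path(".").iterdir()
#     if p.suffix in (".safetensors", ".ckpt")
# )
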
# Create and open the CSV file
with open('checkpoint_similarity.csv', mode='w', newline='') as file:
    writer = csv.writer(file)
    writer.writerow(['Source', 'Target', 'Weight'])

    # Total number of pairwise comparisons (each unordered pair once)
    total_comparisons = (len(file_paths) * (len(file_paths) - 1)) // 2

    # Progress bar initialization
    progress_bar = tqdm(total=total_comparisons, ncols=80)

    # Compare each file against every later file in the list
    for i, file1 in enumerate(file_paths):
        for file2 in file_paths[i + 1:]:
            compare_checkpoints(Path(file1), Path(file2), writer)
            time.sleep(0.01)  # brief pause between comparisons
            progress_bar.update(1)

    progress_bar.close()

# Completion message
print("It is done!")
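# Quick way to inspect the output (an assumption: pandas is installed; it is
# not required by the script itself):
# import pandas as pd
# df = pd.read_csv("checkpoint_similarity.csv")
# print(df.sort_values("Weight", ascending=False).head(10))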