Norod78

stylegan2-ada_ffhq_gen_pairs

Nov 5th, 2021 (edited)
  1. """Generate images using pretrained network pickle."""
  2.  
  3. import argparse
  4. import sys
  5. import os
  6. import subprocess
  7. import pickle
  8. import re
  9.  
  10. import scipy
  11. import numpy as np
  12. import PIL.Image
  13.  
  14. import dnnlib
  15. import dnnlib.tflib as tflib
  16.  
  17. os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
  18. import moviepy.editor
  19. from opensimplex import OpenSimplex
  20.  
  21. import warnings # mostly numpy warnings for me
  22. warnings.filterwarnings('ignore', category=FutureWarning)
  23. warnings.filterwarnings('ignore', category=DeprecationWarning)
  24. tflib.init_tf()
  25.  
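# Note: dnnlib / dnnlib.tflib here come from the TensorFlow implementation of
# StyleGAN2 / StyleGAN2-ADA, which expects TensorFlow 1.14/1.15 (not TF2).
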
# Load pre-trained networks.
# See here: https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/
# and here: https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/
url_1 = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-ffhq-config-f.pkl'
url_2 = 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada/pretrained/metfaces.pkl'    # Replace with your own FFHQ fine-tuned model
# Here you can find my MEGA pkl collection with a whole lot of pre-trained ones: https://mega.nz/folder/OtllzJwa#C947mCCdEfMCRTWnDcs4qw

with dnnlib.util.open_url(url_1, cache_dir='/Models/') as f_1:
    _G_1, _D_1, Gs_1 = pickle.load(f_1)
    # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
    # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
    # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.
with dnnlib.util.open_url(url_2, cache_dir='/Models/') as f_2:
    _G_2, _D_2, Gs_2 = pickle.load(f_2)
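
# Optional sanity check (a sketch, not part of the original script): the same latent
# vectors are fed to both generators below, so the two models must share a latent size.
# print(Gs_1.input_shape, Gs_1.output_shape)   # e.g. [None, 512] and [None, 3, 1024, 1024]
assert Gs_1.input_shape[1] == Gs_2.input_shape[1]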

out_path = './out'
os.makedirs(out_path, exist_ok=True)

from PIL import Image

model_1 = 'ffhq'
model_2 = 'metfaces'

random_seed = 1042

rnd = np.random.RandomState(random_seed)
seed_path = out_path + '/' + str(random_seed)
os.makedirs(seed_path, exist_ok=True)

model_1_path = seed_path + '/' + model_1
os.makedirs(model_1_path, exist_ok=True)

model_2_path = seed_path + '/' + model_2
os.makedirs(model_2_path, exist_ok=True)

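# Resulting directory layout (for random_seed = 1042):
#   ./out/1042/ffhq/       - images from the base FFHQ generator (Gs_1)
#   ./out/1042/metfaces/   - paired images from the fine-tuned generator (Gs_2), same latents
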
# Generate images
import skimage
import skimage.io
import skimage.io._plugins.pil_plugin as pp
import skimage.util

number_of_gen_cycles = 1750             # 1750 * 4 = 7K image pairs, good for p2s2p, e4e. For Pix2PixHD use a higher number like 6500 (*4 = 26000)
number_of_images_per_gen = 4
number_of_gen_cycles_for_noise = 3      # 3: every 12 images, generate one with a blurry resize artifact and one with dirty Poisson noise
image_output_size = (512, 512)          # Output images are resized to this size; change it to the size you need
#image_output_size = (1024, 1024)
psi = 0.7                               # Truncation psi
fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)

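# With output_transform=fmt, Gs.run() applies tflib.convert_images_to_uint8 and an
# NCHW->NHWC transpose, so each returned batch is a uint8 array of shape
# [minibatch, height, width, 3] that PIL.Image.fromarray() accepts directly.
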
for gen_cycle in range(number_of_gen_cycles):
    latents = rnd.randn(number_of_images_per_gen, Gs_1.input_shape[1])
    images = Gs_1.run(latents, None, minibatch_size=1, truncation_psi=psi, randomize_noise=True, output_transform=fmt)
    for i in range(number_of_images_per_gen):
        im = Image.fromarray(images[i]).resize(image_output_size)
        if (number_of_gen_cycles_for_noise > 0) and ((gen_cycle % number_of_gen_cycles_for_noise) == 0):
            if i == 0:
                # Downscale and upscale again to simulate a blurry, low-resolution source image
                im_small = Image.fromarray(images[i]).resize((96, 96))
                im = im_small.resize(image_output_size)
            if i == 1:
                # Add Poisson noise to simulate a noisy source image
                im = pp.ndarray_to_pil(skimage.util.random_noise(pp.pil_to_ndarray(im), mode="poisson"))
        im.save(model_1_path + '/StyleGanImg_' + str(random_seed) + '_' + str(gen_cycle*number_of_images_per_gen + i) + '_' + str(i) + '.jpg')
    # Re-use the same latents with the second model so the outputs form aligned image pairs
    images = Gs_2.run(latents, None, minibatch_size=1, truncation_psi=psi, randomize_noise=True, output_transform=fmt)
    for i in range(number_of_images_per_gen):
        im = Image.fromarray(images[i]).resize(image_output_size)
        im.save(model_2_path + '/StyleGanImg_' + str(random_seed) + '_' + str(gen_cycle*number_of_images_per_gen + i) + '_' + str(i) + '.jpg')
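
# The two folders now contain aligned pairs: for each index, model_1_path and model_2_path
# hold images generated from the same latent vector, which is the source/target pairing
# expected by the paired image-to-image setups mentioned in the config comment above.
# Optional count check (a sketch; glob is not used elsewhere in this script):
# import glob
# assert len(glob.glob(model_1_path + '/*.jpg')) == len(glob.glob(model_2_path + '/*.jpg'))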