Loading VAE weights from user metadata: D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\animagine-xl-2.0.vae.safetensors
Traceback (most recent call last):
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\gradio\routes.py", line 488, in run_predict
    output = await app.get_blocks().process_api(
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1431, in process_api
    result = await self.call_function(
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\gradio\blocks.py", line 1103, in call_function
    prediction = await anyio.to_thread.run_sync(
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\anyio\to_thread.py", line 33, in run_sync
    return await get_asynclib().run_sync_in_worker_thread(
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 877, in run_sync_in_worker_thread
    return await future
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\anyio\_backends\_asyncio.py", line 807, in run
    result = context.run(func, *args)
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\gradio\utils.py", line 707, in wrapper
    response = f(*args, **kwargs)
  File "D:\NovelAI\stable-diffusion-webui\modules\ui_extra_networks_checkpoints_user_metadata.py", line 23, in update_vae
    sd_vae.reload_vae_weights()
  File "D:\NovelAI\stable-diffusion-webui\modules\sd_vae.py", line 273, in reload_vae_weights
    load_vae(sd_model, vae_file, vae_source)
  File "D:\NovelAI\stable-diffusion-webui\modules\sd_vae.py", line 212, in load_vae
    _load_vae_dict(model, vae_dict_1)
  File "D:\NovelAI\stable-diffusion-webui\modules\sd_vae.py", line 239, in _load_vae_dict
    model.first_stage_model.load_state_dict(vae_dict_1)
  File "D:\NovelAI\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 2041, in load_state_dict
    raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for AutoencoderKLInferenceWrapper:
- Missing key(s) in state_dict: "encoder.down.0.block.0.norm1.weight", "encoder.down.0.block.0.norm1.bias", "encoder.down.0.block.0.conv1.weight", "encoder.down.0.block.0.conv1.bias", "encoder.down.0.block.0.norm2.weight", "encoder.down.0.block.0.norm2.bias", "encoder.down.0.block.0.conv2.weight", "encoder.down.0.block.0.conv2.bias", "encoder.down.0.block.1.norm1.weight", "encoder.down.0.block.1.norm1.bias", "encoder.down.0.block.1.conv1.weight", "encoder.down.0.block.1.conv1.bias", "encoder.down.0.block.1.norm2.weight", "encoder.down.0.block.1.norm2.bias", "encoder.down.0.block.1.conv2.weight", "encoder.down.0.block.1.conv2.bias", "encoder.down.0.downsample.conv.weight", "encoder.down.0.downsample.conv.bias", "encoder.down.1.block.0.norm1.weight", "encoder.down.1.block.0.norm1.bias", "encoder.down.1.block.0.conv1.weight", "encoder.down.1.block.0.conv1.bias", "encoder.down.1.block.0.norm2.weight", "encoder.down.1.block.0.norm2.bias", "encoder.down.1.block.0.conv2.weight", "encoder.down.1.block.0.conv2.bias", "encoder.down.1.block.0.nin_shortcut.weight", "encoder.down.1.block.0.nin_shortcut.bias", "encoder.down.1.block.1.norm1.weight", "encoder.down.1.block.1.norm1.bias", "encoder.down.1.block.1.conv1.weight", "encoder.down.1.block.1.conv1.bias", "encoder.down.1.block.1.norm2.weight", "encoder.down.1.block.1.norm2.bias", "encoder.down.1.block.1.conv2.weight", "encoder.down.1.block.1.conv2.bias", "encoder.down.1.downsample.conv.weight", "encoder.down.1.downsample.conv.bias", "encoder.down.2.block.0.norm1.weight", "encoder.down.2.block.0.norm1.bias", "encoder.down.2.block.0.conv1.weight", "encoder.down.2.block.0.conv1.bias", "encoder.down.2.block.0.norm2.weight", "encoder.down.2.block.0.norm2.bias", "encoder.down.2.block.0.conv2.weight", "encoder.down.2.block.0.conv2.bias", "encoder.down.2.block.0.nin_shortcut.weight", "encoder.down.2.block.0.nin_shortcut.bias", "encoder.down.2.block.1.norm1.weight", "encoder.down.2.block.1.norm1.bias", "encoder.down.2.block.1.conv1.weight", "encoder.down.2.block.1.conv1.bias", "encoder.down.2.block.1.norm2.weight", "encoder.down.2.block.1.norm2.bias", "encoder.down.2.block.1.conv2.weight", "encoder.down.2.block.1.conv2.bias", "encoder.down.2.downsample.conv.weight", "encoder.down.2.downsample.conv.bias", "encoder.down.3.block.0.norm1.weight", "encoder.down.3.block.0.norm1.bias", "encoder.down.3.block.0.conv1.weight", "encoder.down.3.block.0.conv1.bias", "encoder.down.3.block.0.norm2.weight", "encoder.down.3.block.0.norm2.bias", "encoder.down.3.block.0.conv2.weight", "encoder.down.3.block.0.conv2.bias", "encoder.down.3.block.1.norm1.weight", "encoder.down.3.block.1.norm1.bias", "encoder.down.3.block.1.conv1.weight", "encoder.down.3.block.1.conv1.bias", "encoder.down.3.block.1.norm2.weight", "encoder.down.3.block.1.norm2.bias", "encoder.down.3.block.1.conv2.weight", "encoder.down.3.block.1.conv2.bias", "encoder.mid.block_1.norm1.weight", "encoder.mid.block_1.norm1.bias", "encoder.mid.block_1.conv1.weight", "encoder.mid.block_1.conv1.bias", "encoder.mid.block_1.norm2.weight", "encoder.mid.block_1.norm2.bias", "encoder.mid.block_1.conv2.weight", "encoder.mid.block_1.conv2.bias", "encoder.mid.attn_1.norm.weight", "encoder.mid.attn_1.norm.bias", "encoder.mid.attn_1.q.weight", "encoder.mid.attn_1.q.bias", "encoder.mid.attn_1.k.weight", "encoder.mid.attn_1.k.bias", "encoder.mid.attn_1.v.weight", "encoder.mid.attn_1.v.bias", "encoder.mid.attn_1.proj_out.weight", "encoder.mid.attn_1.proj_out.bias", "encoder.mid.block_2.norm1.weight", "encoder.mid.block_2.norm1.bias", 
"encoder.mid.block_2.conv1.weight", "encoder.mid.block_2.conv1.bias", "encoder.mid.block_2.norm2.weight", "encoder.mid.block_2.norm2.bias", "encoder.mid.block_2.conv2.weight", "encoder.mid.block_2.conv2.bias", "encoder.norm_out.weight", "encoder.norm_out.bias", "decoder.mid.block_1.norm1.weight", "decoder.mid.block_1.norm1.bias", "decoder.mid.block_1.conv1.weight", "decoder.mid.block_1.conv1.bias", "decoder.mid.block_1.norm2.weight", "decoder.mid.block_1.norm2.bias", "decoder.mid.block_1.conv2.weight", "decoder.mid.block_1.conv2.bias", "decoder.mid.attn_1.norm.weight", "decoder.mid.attn_1.norm.bias", "decoder.mid.attn_1.q.weight", "decoder.mid.attn_1.q.bias", "decoder.mid.attn_1.k.weight", "decoder.mid.attn_1.k.bias", "decoder.mid.attn_1.v.weight", "decoder.mid.attn_1.v.bias", "decoder.mid.attn_1.proj_out.weight", "decoder.mid.attn_1.proj_out.bias", "decoder.mid.block_2.norm1.weight", "decoder.mid.block_2.norm1.bias", "decoder.mid.block_2.conv1.weight", "decoder.mid.block_2.conv1.bias", "decoder.mid.block_2.norm2.weight", "decoder.mid.block_2.norm2.bias", "decoder.mid.block_2.conv2.weight", "decoder.mid.block_2.conv2.bias", "decoder.up.0.block.0.norm1.weight", "decoder.up.0.block.0.norm1.bias", "decoder.up.0.block.0.conv1.weight", "decoder.up.0.block.0.conv1.bias", "decoder.up.0.block.0.norm2.weight", "decoder.up.0.block.0.norm2.bias", "decoder.up.0.block.0.conv2.weight", "decoder.up.0.block.0.conv2.bias", "decoder.up.0.block.0.nin_shortcut.weight", "decoder.up.0.block.0.nin_shortcut.bias", "decoder.up.0.block.1.norm1.weight", "decoder.up.0.block.1.norm1.bias", "decoder.up.0.block.1.conv1.weight", "decoder.up.0.block.1.conv1.bias", "decoder.up.0.block.1.norm2.weight", "decoder.up.0.block.1.norm2.bias", "decoder.up.0.block.1.conv2.weight", "decoder.up.0.block.1.conv2.bias", "decoder.up.0.block.2.norm1.weight", "decoder.up.0.block.2.norm1.bias", "decoder.up.0.block.2.conv1.weight", "decoder.up.0.block.2.conv1.bias", "decoder.up.0.block.2.norm2.weight", "decoder.up.0.block.2.norm2.bias", "decoder.up.0.block.2.conv2.weight", "decoder.up.0.block.2.conv2.bias", "decoder.up.1.block.0.norm1.weight", "decoder.up.1.block.0.norm1.bias", "decoder.up.1.block.0.conv1.weight", "decoder.up.1.block.0.conv1.bias", "decoder.up.1.block.0.norm2.weight", "decoder.up.1.block.0.norm2.bias", "decoder.up.1.block.0.conv2.weight", "decoder.up.1.block.0.conv2.bias", "decoder.up.1.block.0.nin_shortcut.weight", "decoder.up.1.block.0.nin_shortcut.bias", "decoder.up.1.block.1.norm1.weight", "decoder.up.1.block.1.norm1.bias", "decoder.up.1.block.1.conv1.weight", "decoder.up.1.block.1.conv1.bias", "decoder.up.1.block.1.norm2.weight", "decoder.up.1.block.1.norm2.bias", "decoder.up.1.block.1.conv2.weight", "decoder.up.1.block.1.conv2.bias", "decoder.up.1.block.2.norm1.weight", "decoder.up.1.block.2.norm1.bias", "decoder.up.1.block.2.conv1.weight", "decoder.up.1.block.2.conv1.bias", "decoder.up.1.block.2.norm2.weight", "decoder.up.1.block.2.norm2.bias", "decoder.up.1.block.2.conv2.weight", "decoder.up.1.block.2.conv2.bias", "decoder.up.1.upsample.conv.weight", "decoder.up.1.upsample.conv.bias", "decoder.up.2.block.0.norm1.weight", "decoder.up.2.block.0.norm1.bias", "decoder.up.2.block.0.conv1.weight", "decoder.up.2.block.0.conv1.bias", "decoder.up.2.block.0.norm2.weight", "decoder.up.2.block.0.norm2.bias", "decoder.up.2.block.0.conv2.weight", "decoder.up.2.block.0.conv2.bias", "decoder.up.2.block.1.norm1.weight", "decoder.up.2.block.1.norm1.bias", "decoder.up.2.block.1.conv1.weight", "decoder.up.2.block.1.conv1.bias", 
"decoder.up.2.block.1.norm2.weight", "decoder.up.2.block.1.norm2.bias", "decoder.up.2.block.1.conv2.weight", "decoder.up.2.block.1.conv2.bias", "decoder.up.2.block.2.norm1.weight", "decoder.up.2.block.2.norm1.bias", "decoder.up.2.block.2.conv1.weight", "decoder.up.2.block.2.conv1.bias", "decoder.up.2.block.2.norm2.weight", "decoder.up.2.block.2.norm2.bias", "decoder.up.2.block.2.conv2.weight", "decoder.up.2.block.2.conv2.bias", "decoder.up.2.upsample.conv.weight", "decoder.up.2.upsample.conv.bias", "decoder.up.3.block.0.norm1.weight", "decoder.up.3.block.0.norm1.bias", "decoder.up.3.block.0.conv1.weight", "decoder.up.3.block.0.conv1.bias", "decoder.up.3.block.0.norm2.weight", "decoder.up.3.block.0.norm2.bias", "decoder.up.3.block.0.conv2.weight", "decoder.up.3.block.0.conv2.bias", "decoder.up.3.block.1.norm1.weight", "decoder.up.3.block.1.norm1.bias", "decoder.up.3.block.1.conv1.weight", "decoder.up.3.block.1.conv1.bias", "decoder.up.3.block.1.norm2.weight", "decoder.up.3.block.1.norm2.bias", "decoder.up.3.block.1.conv2.weight", "decoder.up.3.block.1.conv2.bias", "decoder.up.3.block.2.norm1.weight", "decoder.up.3.block.2.norm1.bias", "decoder.up.3.block.2.conv1.weight", "decoder.up.3.block.2.conv1.bias", "decoder.up.3.block.2.norm2.weight", "decoder.up.3.block.2.norm2.bias", "decoder.up.3.block.2.conv2.weight", "decoder.up.3.block.2.conv2.bias", "decoder.up.3.upsample.conv.weight", "decoder.up.3.upsample.conv.bias", "decoder.norm_out.weight", "decoder.norm_out.bias".
- Unexpected key(s) in state_dict: "encoder.conv_norm_out.bias", "encoder.conv_norm_out.weight", "encoder.down_blocks.0.downsamplers.0.conv.bias", "encoder.down_blocks.0.downsamplers.0.conv.weight", "encoder.down_blocks.0.resnets.0.conv1.bias", "encoder.down_blocks.0.resnets.0.conv1.weight", "encoder.down_blocks.0.resnets.0.conv2.bias", "encoder.down_blocks.0.resnets.0.conv2.weight", "encoder.down_blocks.0.resnets.0.norm1.bias", "encoder.down_blocks.0.resnets.0.norm1.weight", "encoder.down_blocks.0.resnets.0.norm2.bias", "encoder.down_blocks.0.resnets.0.norm2.weight", "encoder.down_blocks.0.resnets.1.conv1.bias", "encoder.down_blocks.0.resnets.1.conv1.weight", "encoder.down_blocks.0.resnets.1.conv2.bias", "encoder.down_blocks.0.resnets.1.conv2.weight", "encoder.down_blocks.0.resnets.1.norm1.bias", "encoder.down_blocks.0.resnets.1.norm1.weight", "encoder.down_blocks.0.resnets.1.norm2.bias", "encoder.down_blocks.0.resnets.1.norm2.weight", "encoder.down_blocks.1.downsamplers.0.conv.bias", "encoder.down_blocks.1.downsamplers.0.conv.weight", "encoder.down_blocks.1.resnets.0.conv1.bias", "encoder.down_blocks.1.resnets.0.conv1.weight", "encoder.down_blocks.1.resnets.0.conv2.bias", "encoder.down_blocks.1.resnets.0.conv2.weight", "encoder.down_blocks.1.resnets.0.conv_shortcut.bias", "encoder.down_blocks.1.resnets.0.conv_shortcut.weight", "encoder.down_blocks.1.resnets.0.norm1.bias", "encoder.down_blocks.1.resnets.0.norm1.weight", "encoder.down_blocks.1.resnets.0.norm2.bias", "encoder.down_blocks.1.resnets.0.norm2.weight", "encoder.down_blocks.1.resnets.1.conv1.bias", "encoder.down_blocks.1.resnets.1.conv1.weight", "encoder.down_blocks.1.resnets.1.conv2.bias", "encoder.down_blocks.1.resnets.1.conv2.weight", "encoder.down_blocks.1.resnets.1.norm1.bias", "encoder.down_blocks.1.resnets.1.norm1.weight", "encoder.down_blocks.1.resnets.1.norm2.bias", "encoder.down_blocks.1.resnets.1.norm2.weight", "encoder.down_blocks.2.downsamplers.0.conv.bias", "encoder.down_blocks.2.downsamplers.0.conv.weight", "encoder.down_blocks.2.resnets.0.conv1.bias", "encoder.down_blocks.2.resnets.0.conv1.weight", "encoder.down_blocks.2.resnets.0.conv2.bias", "encoder.down_blocks.2.resnets.0.conv2.weight", "encoder.down_blocks.2.resnets.0.conv_shortcut.bias", "encoder.down_blocks.2.resnets.0.conv_shortcut.weight", "encoder.down_blocks.2.resnets.0.norm1.bias", "encoder.down_blocks.2.resnets.0.norm1.weight", "encoder.down_blocks.2.resnets.0.norm2.bias", "encoder.down_blocks.2.resnets.0.norm2.weight", "encoder.down_blocks.2.resnets.1.conv1.bias", "encoder.down_blocks.2.resnets.1.conv1.weight", "encoder.down_blocks.2.resnets.1.conv2.bias", "encoder.down_blocks.2.resnets.1.conv2.weight", "encoder.down_blocks.2.resnets.1.norm1.bias", "encoder.down_blocks.2.resnets.1.norm1.weight", "encoder.down_blocks.2.resnets.1.norm2.bias", "encoder.down_blocks.2.resnets.1.norm2.weight", "encoder.down_blocks.3.resnets.0.conv1.bias", "encoder.down_blocks.3.resnets.0.conv1.weight", "encoder.down_blocks.3.resnets.0.conv2.bias", "encoder.down_blocks.3.resnets.0.conv2.weight", "encoder.down_blocks.3.resnets.0.norm1.bias", "encoder.down_blocks.3.resnets.0.norm1.weight", "encoder.down_blocks.3.resnets.0.norm2.bias", "encoder.down_blocks.3.resnets.0.norm2.weight", "encoder.down_blocks.3.resnets.1.conv1.bias", "encoder.down_blocks.3.resnets.1.conv1.weight", "encoder.down_blocks.3.resnets.1.conv2.bias", "encoder.down_blocks.3.resnets.1.conv2.weight", "encoder.down_blocks.3.resnets.1.norm1.bias", "encoder.down_blocks.3.resnets.1.norm1.weight", 
"encoder.down_blocks.3.resnets.1.norm2.bias", "encoder.down_blocks.3.resnets.1.norm2.weight", "encoder.mid_block.attentions.0.group_norm.bias", "encoder.mid_block.attentions.0.group_norm.weight", "encoder.mid_block.attentions.0.to_k.bias", "encoder.mid_block.attentions.0.to_k.weight", "encoder.mid_block.attentions.0.to_out.0.bias", "encoder.mid_block.attentions.0.to_out.0.weight", "encoder.mid_block.attentions.0.to_q.bias", "encoder.mid_block.attentions.0.to_q.weight", "encoder.mid_block.attentions.0.to_v.bias", "encoder.mid_block.attentions.0.to_v.weight", "encoder.mid_block.resnets.0.conv1.bias", "encoder.mid_block.resnets.0.conv1.weight", "encoder.mid_block.resnets.0.conv2.bias", "encoder.mid_block.resnets.0.conv2.weight", "encoder.mid_block.resnets.0.norm1.bias", "encoder.mid_block.resnets.0.norm1.weight", "encoder.mid_block.resnets.0.norm2.bias", "encoder.mid_block.resnets.0.norm2.weight", "encoder.mid_block.resnets.1.conv1.bias", "encoder.mid_block.resnets.1.conv1.weight", "encoder.mid_block.resnets.1.conv2.bias", "encoder.mid_block.resnets.1.conv2.weight", "encoder.mid_block.resnets.1.norm1.bias", "encoder.mid_block.resnets.1.norm1.weight", "encoder.mid_block.resnets.1.norm2.bias", "encoder.mid_block.resnets.1.norm2.weight", "decoder.conv_norm_out.bias", "decoder.conv_norm_out.weight", "decoder.mid_block.attentions.0.group_norm.bias", "decoder.mid_block.attentions.0.group_norm.weight", "decoder.mid_block.attentions.0.to_k.bias", "decoder.mid_block.attentions.0.to_k.weight", "decoder.mid_block.attentions.0.to_out.0.bias", "decoder.mid_block.attentions.0.to_out.0.weight", "decoder.mid_block.attentions.0.to_q.bias", "decoder.mid_block.attentions.0.to_q.weight", "decoder.mid_block.attentions.0.to_v.bias", "decoder.mid_block.attentions.0.to_v.weight", "decoder.mid_block.resnets.0.conv1.bias", "decoder.mid_block.resnets.0.conv1.weight", "decoder.mid_block.resnets.0.conv2.bias", "decoder.mid_block.resnets.0.conv2.weight", "decoder.mid_block.resnets.0.norm1.bias", "decoder.mid_block.resnets.0.norm1.weight", "decoder.mid_block.resnets.0.norm2.bias", "decoder.mid_block.resnets.0.norm2.weight", "decoder.mid_block.resnets.1.conv1.bias", "decoder.mid_block.resnets.1.conv1.weight", "decoder.mid_block.resnets.1.conv2.bias", "decoder.mid_block.resnets.1.conv2.weight", "decoder.mid_block.resnets.1.norm1.bias", "decoder.mid_block.resnets.1.norm1.weight", "decoder.mid_block.resnets.1.norm2.bias", "decoder.mid_block.resnets.1.norm2.weight", "decoder.up_blocks.0.resnets.0.conv1.bias", "decoder.up_blocks.0.resnets.0.conv1.weight", "decoder.up_blocks.0.resnets.0.conv2.bias", "decoder.up_blocks.0.resnets.0.conv2.weight", "decoder.up_blocks.0.resnets.0.norm1.bias", "decoder.up_blocks.0.resnets.0.norm1.weight", "decoder.up_blocks.0.resnets.0.norm2.bias", "decoder.up_blocks.0.resnets.0.norm2.weight", "decoder.up_blocks.0.resnets.1.conv1.bias", "decoder.up_blocks.0.resnets.1.conv1.weight", "decoder.up_blocks.0.resnets.1.conv2.bias", "decoder.up_blocks.0.resnets.1.conv2.weight", "decoder.up_blocks.0.resnets.1.norm1.bias", "decoder.up_blocks.0.resnets.1.norm1.weight", "decoder.up_blocks.0.resnets.1.norm2.bias", "decoder.up_blocks.0.resnets.1.norm2.weight", "decoder.up_blocks.0.resnets.2.conv1.bias", "decoder.up_blocks.0.resnets.2.conv1.weight", "decoder.up_blocks.0.resnets.2.conv2.bias", "decoder.up_blocks.0.resnets.2.conv2.weight", "decoder.up_blocks.0.resnets.2.norm1.bias", "decoder.up_blocks.0.resnets.2.norm1.weight", "decoder.up_blocks.0.resnets.2.norm2.bias", "decoder.up_blocks.0.resnets.2.norm2.weight", 
"decoder.up_blocks.0.upsamplers.0.conv.bias", "decoder.up_blocks.0.upsamplers.0.conv.weight", "decoder.up_blocks.1.resnets.0.conv1.bias", "decoder.up_blocks.1.resnets.0.conv1.weight", "decoder.up_blocks.1.resnets.0.conv2.bias", "decoder.up_blocks.1.resnets.0.conv2.weight", "decoder.up_blocks.1.resnets.0.norm1.bias", "decoder.up_blocks.1.resnets.0.norm1.weight", "decoder.up_blocks.1.resnets.0.norm2.bias", "decoder.up_blocks.1.resnets.0.norm2.weight", "decoder.up_blocks.1.resnets.1.conv1.bias", "decoder.up_blocks.1.resnets.1.conv1.weight", "decoder.up_blocks.1.resnets.1.conv2.bias", "decoder.up_blocks.1.resnets.1.conv2.weight", "decoder.up_blocks.1.resnets.1.norm1.bias", "decoder.up_blocks.1.resnets.1.norm1.weight", "decoder.up_blocks.1.resnets.1.norm2.bias", "decoder.up_blocks.1.resnets.1.norm2.weight", "decoder.up_blocks.1.resnets.2.conv1.bias", "decoder.up_blocks.1.resnets.2.conv1.weight", "decoder.up_blocks.1.resnets.2.conv2.bias", "decoder.up_blocks.1.resnets.2.conv2.weight", "decoder.up_blocks.1.resnets.2.norm1.bias", "decoder.up_blocks.1.resnets.2.norm1.weight", "decoder.up_blocks.1.resnets.2.norm2.bias", "decoder.up_blocks.1.resnets.2.norm2.weight", "decoder.up_blocks.1.upsamplers.0.conv.bias", "decoder.up_blocks.1.upsamplers.0.conv.weight", "decoder.up_blocks.2.resnets.0.conv1.bias", "decoder.up_blocks.2.resnets.0.conv1.weight", "decoder.up_blocks.2.resnets.0.conv2.bias", "decoder.up_blocks.2.resnets.0.conv2.weight", "decoder.up_blocks.2.resnets.0.conv_shortcut.bias", "decoder.up_blocks.2.resnets.0.conv_shortcut.weight", "decoder.up_blocks.2.resnets.0.norm1.bias", "decoder.up_blocks.2.resnets.0.norm1.weight", "decoder.up_blocks.2.resnets.0.norm2.bias", "decoder.up_blocks.2.resnets.0.norm2.weight", "decoder.up_blocks.2.resnets.1.conv1.bias", "decoder.up_blocks.2.resnets.1.conv1.weight", "decoder.up_blocks.2.resnets.1.conv2.bias", "decoder.up_blocks.2.resnets.1.conv2.weight", "decoder.up_blocks.2.resnets.1.norm1.bias", "decoder.up_blocks.2.resnets.1.norm1.weight", "decoder.up_blocks.2.resnets.1.norm2.bias", "decoder.up_blocks.2.resnets.1.norm2.weight", "decoder.up_blocks.2.resnets.2.conv1.bias", "decoder.up_blocks.2.resnets.2.conv1.weight", "decoder.up_blocks.2.resnets.2.conv2.bias", "decoder.up_blocks.2.resnets.2.conv2.weight", "decoder.up_blocks.2.resnets.2.norm1.bias", "decoder.up_blocks.2.resnets.2.norm1.weight", "decoder.up_blocks.2.resnets.2.norm2.bias", "decoder.up_blocks.2.resnets.2.norm2.weight", "decoder.up_blocks.2.upsamplers.0.conv.bias", "decoder.up_blocks.2.upsamplers.0.conv.weight", "decoder.up_blocks.3.resnets.0.conv1.bias", "decoder.up_blocks.3.resnets.0.conv1.weight", "decoder.up_blocks.3.resnets.0.conv2.bias", "decoder.up_blocks.3.resnets.0.conv2.weight", "decoder.up_blocks.3.resnets.0.conv_shortcut.bias", "decoder.up_blocks.3.resnets.0.conv_shortcut.weight", "decoder.up_blocks.3.resnets.0.norm1.bias", "decoder.up_blocks.3.resnets.0.norm1.weight", "decoder.up_blocks.3.resnets.0.norm2.bias", "decoder.up_blocks.3.resnets.0.norm2.weight", "decoder.up_blocks.3.resnets.1.conv1.bias", "decoder.up_blocks.3.resnets.1.conv1.weight", "decoder.up_blocks.3.resnets.1.conv2.bias", "decoder.up_blocks.3.resnets.1.conv2.weight", "decoder.up_blocks.3.resnets.1.norm1.bias", "decoder.up_blocks.3.resnets.1.norm1.weight", "decoder.up_blocks.3.resnets.1.norm2.bias", "decoder.up_blocks.3.resnets.1.norm2.weight", "decoder.up_blocks.3.resnets.2.conv1.bias", "decoder.up_blocks.3.resnets.2.conv1.weight", "decoder.up_blocks.3.resnets.2.conv2.bias", "decoder.up_blocks.3.resnets.2.conv2.weight", 
"decoder.up_blocks.3.resnets.2.norm1.bias", "decoder.up_blocks.3.resnets.2.norm1.weight", "decoder.up_blocks.3.resnets.2.norm2.bias", "decoder.up_blocks.3.resnets.2.norm2.weight".
Loading VAE weights from user metadata: D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\animagine-xl-2.0.vae.safetensors
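
Diagnosis (from the key names above): the missing keys follow the original CompVis/ldm VAE layout that the webui's AutoencoderKL wrapper expects (encoder.down.*.block.*, mid.block_1/attn_1, norm_out), while the unexpected keys follow the Diffusers layout (encoder.down_blocks.*.resnets.*, mid_block.attentions.0, conv_norm_out). In other words, animagine-xl-2.0.vae.safetensors is a Diffusers-format VAE, which sd_vae.load_vae() cannot load directly. The usual fix is to use a VAE exported in the original single-file layout, or to remap the keys yourself. Below is a minimal sketch of such a remapping, inferred only from the key names in this traceback; it is not the webui's or diffusers' own converter, the output filename is a placeholder, and the result should be verified before use.

# Sketch: remap a Diffusers-format VAE state dict to the original CompVis/ldm
# key layout implied by the "Missing key(s)" list above. Mapping rules are
# inferred from this traceback, not taken from webui or diffusers source.
import re
from safetensors.torch import load_file, save_file

def diffusers_vae_to_ldm(sd):
    out = {}
    for key, tensor in sd.items():
        k = key
        # encoder/decoder output norm
        k = k.replace("conv_norm_out", "norm_out")
        # mid block: two resnets and one attention block
        k = k.replace("mid_block.resnets.0", "mid.block_1")
        k = k.replace("mid_block.resnets.1", "mid.block_2")
        k = k.replace("mid_block.attentions.0", "mid.attn_1")
        k = k.replace("attn_1.group_norm", "attn_1.norm")
        k = k.replace("attn_1.to_out.0", "attn_1.proj_out")
        k = k.replace("attn_1.to_q", "attn_1.q")
        k = k.replace("attn_1.to_k", "attn_1.k")
        k = k.replace("attn_1.to_v", "attn_1.v")
        # encoder down blocks
        k = re.sub(r"down_blocks\.(\d+)\.resnets\.(\d+)\.", r"down.\1.block.\2.", k)
        k = re.sub(r"down_blocks\.(\d+)\.downsamplers\.0\.", r"down.\1.downsample.", k)
        # decoder up blocks: indices run in opposite order in the two layouts
        m = re.search(r"up_blocks\.(\d+)", k)
        if m:
            i = 3 - int(m.group(1))
            k = re.sub(r"up_blocks\.\d+\.resnets\.(\d+)\.", rf"up.{i}.block.\1.", k)
            k = re.sub(r"up_blocks\.\d+\.upsamplers\.0\.", rf"up.{i}.upsample.", k)
        k = k.replace("conv_shortcut", "nin_shortcut")
        # ldm attention q/k/v/proj_out are 1x1 convs; Diffusers stores them as
        # linear layers, so 2-D weights get two trailing singleton dimensions
        if ".attn_1." in k and k.endswith(".weight") and tensor.ndim == 2:
            tensor = tensor.unsqueeze(-1).unsqueeze(-1)
        out[k] = tensor
    return out

src = r"D:\NovelAI\stable-diffusion-webui\models\Stable-diffusion\animagine-xl-2.0.vae.safetensors"
dst = r"D:\NovelAI\stable-diffusion-webui\models\VAE\animagine-xl-2.0.ldm.vae.safetensors"  # placeholder name
save_file(diffusers_vae_to_ldm(load_file(src)), dst)

After saving, the converted file can be placed in models\VAE and selected from the SD VAE dropdown as usual. If load_state_dict still reports mismatches, compare the remaining key names against the missing/unexpected lists above and adjust the mapping accordingly.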