Not a member of Pastebin yet?
Sign Up —
it unlocks many cool features!
- from diffusers import DiffusionPipeline
- import gradio as gr
- import numpy as np
- import imageio
- from PIL import Image
- import torch
# Run on the GPU when one is available; otherwise fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE(review): the original paste had the model id stripped out
# ("/*udaleno*/" is Russian for "removed"), which is a syntax error in
# Python. Fill in the Hugging Face Hub repo id of the checkpoint to load.
MODEL_ID = "TODO/fill-in-model-id"

# Load the diffusion pipeline in float32 and move it to the chosen device.
pipe = DiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float32)
pipe.to(device)
def resize(height, img):
    """Open *img* (a path or file-like object accepted by ``Image.open``)
    and scale it to ``height`` pixels tall, preserving the aspect ratio.

    Returns the resized PIL image (LANCZOS resampling).
    """
    source = Image.open(img)
    scale = height / float(source.size[1])
    new_width = int(float(source.size[0]) * scale)
    return source.resize((new_width, height), Image.Resampling.LANCZOS)
def predict(prompt, negative_prompt):
    """Generate one 768x768 image from a prompt / negative-prompt pair.

    Runs the module-level diffusion pipeline for 25 inference steps and
    returns the first image of the result batch.
    """
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=25,
        width=768,
        height=768,
    )
    return result.images[0]
- title="i 768x768"
- description="test"
- gr.Interface(fn=predict, inputs=[
- #gr.Image(source="upload", type="numpy", tool="sketch", elem_id="source_container"),
- gr.Textbox(label='What you want the AI to Generate, 77 Token limit'),
- gr.Textbox(label='What you Do Not want the AI to generate')],
- outputs='image',
- title=title,
- description=description,
- article = " "
- ).launch(max_threads=True, debug=True)
Advertisement
Add Comment
Please, Sign In to add comment