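"""Inference worker for a Stable Diffusion image-generation service.

Loads text-to-image, image-to-image, and ControlNet (canny / openpose)
pipelines plus optional LoRA styles, then dispatches queued tasks
(GENERATE_AI_IMAGE, IMAGE_TO_IMAGE, POSE, CANNY) via predict_fn.
"""
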
import argparse
import asyncio
import glob
import io
import json
import multiprocessing
import os
import pprint
import random
import re
import time
import urllib.request
import uuid
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union

import aioredis
import boto3
import cv2
import numpy as np
import PIL
import redis
import requests
import torch
from PIL import Image
from torch.optim.adam import Adam

from diffusers import (
    ControlNetModel,
    DiffusionPipeline,
    StableDiffusionControlNetPipeline,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionPipeline,
    UniPCMultistepScheduler,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    GPT2LMHeadModel,
    GPT2Tokenizer,
)
from controlnet_aux import OpenposeDetector
from lora_diffusion import patch_pipe, tune_lora_scale

from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact

from data.dataAccessor import saveGeneratedImages, update_db, updateSource
from data.task import Task
from prompt_modifier import PromptModifier
from twoStepPipeline import two_step_pipeline
from utility import (
    add_code_names,
    contains_avatar_name,
    create_canny_image,
    custom_edit_prompts,
    download_image,
    list_files,
    pickPoses,
    read_url,
    replace_all,
    sendSlackAlert,
    slackAlert,
    upload_images,
)

s3 = boto3.client("s3")

torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True

num_return_sequences = 4  # number of images / prompt variants per request
auto_mode = False

prompt_modifier = PromptModifier(num_of_sequences=num_return_sequences)

@update_db
def controlnet_canny_task(task, prompt, imageUrl, taskId, isPromptEngineering: bool,
                          userId, email, seed, style, modelId, width, height,
                          steps, negative_prompt):
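    """Generate four images guided by a Canny edge map of the input image.

    Downloads imageUrl, extracts Canny edges, optionally expands the prompt
    with PromptModifier and applies a LoRA style, runs the ControlNet canny
    pipeline, uploads the results, and posts a Slack summary.
    """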
    modified_prompts = []
    images = []
    type = "_canny"
    auto_mode = False

    negativePrompt = [f"monochrome, neon, x-ray, negative image, oversaturated, {negative_prompt}"] * 4

    if style in lora_styles:
        lora_path = lora_styles[style]["path"]
        lora_weight = lora_styles[style]["weight"]
        patch_pipe(
            controlnet_canny_pipe,
            lora_path,
            patch_text=True,
            patch_ti=True,
            patch_unet=True,
        )

        tune_lora_scale(controlnet_canny_pipe.unet, lora_weight)
        tune_lora_scale(controlnet_canny_pipe.text_encoder, lora_weight)

    if isPromptEngineering:
        auto_mode = isPromptEngineering
    prompt = add_code_names(prompt)
    if auto_mode:
        modified_prompts = prompt_modifier.modify(prompt)
        print(modified_prompts)
        print("\nInput:\n" + 100 * "-")
        print("\033[96m" + prompt + "\033[0m")
        print("\nOutput:\n" + 100 * "-")
        # Keep only the last four variants
        modified_prompts = modified_prompts[-4:]
        print(modified_prompts)
    else:
        modified_prompts = [prompt] * num_return_sequences

    new_string = "{} style ".format(style)

    # Prepend the style name to each prompt
    if style:
        for i in range(len(modified_prompts)):
            modified_prompts[i] = new_string + modified_prompts[i]
            print(modified_prompts[i])
    if seed is not None:
        torch.manual_seed(seed)
    init_image = download_image(imageUrl).resize((width, height))

    canny_image = create_canny_image(init_image)
    images = controlnet_canny_pipe(prompt=modified_prompts, image=canny_image,
                                   guidance_scale=9, num_images_per_prompt=1,
                                   negative_prompt=negativePrompt,
                                   num_inference_steps=steps, height=height, width=width).images

    # Reset the LoRA scale so the style does not leak into later tasks
    if style in lora_styles:
        tune_lora_scale(controlnet_canny_pipe.unet, 0.00)
        tune_lora_scale(controlnet_canny_pipe.text_encoder, 0.00)

    imageUrls = upload_images(images, type, taskId)
    message = ("userId: {}, \n email: {}, \n seed: {}, \n type: CANNY, \n prompt: {},\n"
               " modifiedprompt: {},\n modelId: {}, \n negativePrompt: {},\n"
               " InputImageurls: {}, \n imageurls: {}").format(
        userId, email, seed, prompt, modified_prompts, modelId,
        negativePrompt[0], imageUrl, ", ".join(imageUrls))
    sendSlackAlert(message)
    return

@update_db
def controlnet_openpose_task(
    task,
    prompt,
    imageUrl,
    taskId,
    isPromptEngineering: bool,
    userId,
    email,
    seed,
    style,
    modelId,
    width,
    height,
    steps,
    negative_prompt,
):
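    """Generate four images guided by an OpenPose skeleton of the input image.

    For "character sheet" prompts the input image itself is used as the pose
    map and the images are returned directly; otherwise poses are detected
    with OpenposeDetector, the results are uploaded, and a Slack summary is
    sent.
    """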
    modified_prompts = []
    images = []
    type = "_pose"
    auto_mode = False

    if isPromptEngineering:
        auto_mode = isPromptEngineering
    prompt = add_code_names(prompt)
    if style in lora_styles:
        lora_path = lora_styles[style]["path"]
        lora_weight = lora_styles[style]["weight"]
        patch_pipe(
            controlnet_openpose_pipe,
            lora_path,
            patch_text=True,
            patch_ti=True,
            patch_unet=True,
        )

        tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
        tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)

    if auto_mode:
        modified_prompts = prompt_modifier.modify(prompt)
        print(modified_prompts)
        print("\nInput:\n" + 100 * "-")
        print("\033[96m" + prompt + "\033[0m")
        print("\nOutput:\n" + 100 * "-")
        # Keep only the last four variants
        modified_prompts = modified_prompts[-4:]
        print(modified_prompts)
    else:
        modified_prompts = [prompt] * num_return_sequences
    new_string = "{} style ".format(style)

    # Prepend the style name to each prompt
    if style:
        for i in range(len(modified_prompts)):
            modified_prompts[i] = new_string + modified_prompts[i]
            print(modified_prompts[i])

    init_image = download_image(imageUrl).resize((width, height))
    if seed is not None:
        torch.manual_seed(seed)

    if "character sheet" in prompt.lower():
        # Use the input image itself as the pose map for character sheets
        imageUrlPose = imageUrl
        input_image_bytes = read_url(imageUrlPose)
        init_image = Image.open(io.BytesIO(input_image_bytes)).convert("RGB")
        init_image = init_image.resize((512, 512))

        poses = [init_image] * 4

        images = controlnet_openpose_pipe(
            prompt=modified_prompts,
            image=poses,
            num_images_per_prompt=1,
            num_inference_steps=steps,
            width=width,
            height=height,
            negative_prompt=[negative_prompt] * 4,
        ).images
    else:
        # Detect the pose from the input image
        poses = [pose_detector(init_image)] * 4

        images = controlnet_openpose_pipe(
            prompt=modified_prompts,
            image=poses,
            num_images_per_prompt=1,
            num_inference_steps=steps,
            negative_prompt=[negative_prompt] * 4,
            height=height,
            width=width,
        ).images
    # Reset the LoRA scale so the style does not leak into later tasks
    if style in lora_styles:
        tune_lora_scale(controlnet_openpose_pipe.unet, 0.00)
        tune_lora_scale(controlnet_openpose_pipe.text_encoder, 0.00)

    if "character sheet" in prompt.lower():
        return images
    imageUrls = upload_images(images, type, taskId)
    message = ("userId: {}, \n email: {}, \n seed: {}, \n type: pose, \n prompt: {},\n"
               " modifiedprompt: {},\n negative_prompt: {}, modelId: {}\n"
               " InputImageurls: {}, \n imageurls: {}").format(
        userId,
        email,
        seed,
        prompt,
        modified_prompts,
        negative_prompt,
        modelId,
        imageUrl,
        ", ".join(imageUrls),
    )
    sendSlackAlert(message)
    return


def generateImage(
    prompt: str,
    taskId: str,
    isPromptEngineering: bool,
    userId,
    email,
    seed,
    style,
    modelId,
    width,
    height,
    sourceId,
    steps,
    iteration,
    negative_prompt,
):
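    """Text-to-image entry point.

    Expands the prompt (optionally via PromptModifier), applies a LoRA style
    when configured, and renders either a character sheet through the
    OpenPose ControlNet pipeline or a regular batch through the two-step
    text-to-image pipeline, then uploads the results.
    """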
    try:
        # if sourceId is not None:
        #     updateSource(sourceId, userId, "INPROGRESS")
        #     sendSlackAlert("all model generate fn triggered with source id")

        if style in lora_styles:
            lora_path = lora_styles[style]["path"]
            lora_weight = lora_styles[style]["weight"]
            lora_type = lora_styles[style]["type"]
        modified_prompts = []
        images = []
        type = ""
        auto_mode = False
        if isPromptEngineering:
            auto_mode = isPromptEngineering

        if auto_mode:
            modified_prompts = prompt_modifier.modify(prompt)
            print(modified_prompts)
            print("\nInput:\n" + 100 * "-")
            print("\033[96m" + prompt + "\033[0m")
            print("\nOutput:\n" + 100 * "-")
            print(modified_prompts)
        else:
            modified_prompts = [prompt] * num_return_sequences

        new_string = "{} style ".format(style)

        # Prepend the style name to each prompt
        if style:
            for i in range(len(modified_prompts)):
                modified_prompts[i] = new_string + modified_prompts[i]
        # Substitute avatar code names into each prompt
        for i in range(len(modified_prompts)):
            modified_prompts[i] = add_code_names(modified_prompts[i])
            print(modified_prompts[i])

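        # "character sheet" prompts are rendered over preset poses with the
        # OpenPose ControlNet pipeline; all other prompts go through the
        # two-step text-to-image pipeline in the else branch below.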
        if "character sheet" in prompt.lower():
            if style in lora_styles:
                lora_path = lora_styles[style]["path"]
                lora_weight = lora_styles[style]["weight"]
                lora_type = lora_styles[style]["type"]
                patch_pipe(
                    controlnet_openpose_pipe,
                    lora_path,
                    patch_text=True,
                    patch_ti=True,
                    patch_unet=True,
                )

                tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
                tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)

            poses = pickPoses()

            print("prompt and modified prompt", prompt, modified_prompts)
            if style in lora_styles:
                if lora_type == "custom":
                    patch_pipe(
                        controlnet_openpose_pipe,
                        lora_path,
                        patch_text=True,
                        patch_ti=True,
                        patch_unet=True,
                    )

                    tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
                    tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)

                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = controlnet_openpose_pipe(
                            prompt=modified_prompts,
                            num_inference_steps=steps,
                            num_images_per_prompt=1,
                            image=poses,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            negative_prompt=[negative_prompt] * 4,
                            generator=generator,
                        ).images

                    tune_lora_scale(controlnet_openpose_pipe.unet, 0.00)
                    tune_lora_scale(controlnet_openpose_pipe.text_encoder, 0.00)

                elif lora_type == "diffusers":
                    text2img.unet.load_attn_procs(lora_path)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = controlnet_openpose_pipe(
                            prompt=modified_prompts,
                            num_inference_steps=steps,
                            image=poses,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            negative_prompt=[negative_prompt] * 4,
                            num_images_per_prompt=1,
                        ).images
            else:
                with torch.inference_mode():
                    print(seed)
                    generator = [
                        torch.Generator(device="cuda").manual_seed(seed)
                    ] * num_return_sequences

                    images = controlnet_openpose_pipe(
                        prompt=modified_prompts,
                        image=poses,
                        num_inference_steps=steps,
                        guidance_scale=7.5,
                        height=height,
                        width=width,
                        generator=generator,
                        negative_prompt=[negative_prompt] * 4,
                        num_images_per_prompt=1,
                    ).images

        else:
            prompt = [prompt] * 4

            if style:
                for i in range(len(prompt)):
                    prompt[i] = new_string + prompt[i]

            for i in range(len(prompt)):
                prompt[i] = add_code_names(prompt[i])
                print(prompt[i])

            if seed is not None:
                seed = int(seed)

            print("prompt and modified prompt", prompt, modified_prompts)
            if style in lora_styles:
                if lora_type == "custom":
                    patch_pipe(
                        text2img,
                        lora_path,
                        patch_text=True,
                        patch_ti=True,
                        patch_unet=True,
                    )

                    tune_lora_scale(text2img.unet, lora_weight)
                    tune_lora_scale(text2img.text_encoder, lora_weight)

                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = text2img.two_step_pipeline(
                            prompt=prompt,
                            modified_prompts=modified_prompts,
                            num_inference_steps=steps,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            negative_prompt=[negative_prompt] * 4,
                            iteration=iteration,
                        ).images

                    tune_lora_scale(text2img.unet, 0.00)
                    tune_lora_scale(text2img.text_encoder, 0.00)

                elif lora_type == "diffusers":
                    text2img.unet.load_attn_procs(lora_path)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = text2img.two_step_pipeline(
                            prompt=prompt,
                            modified_prompts=modified_prompts,
                            num_inference_steps=steps,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            negative_prompt=[negative_prompt] * 4,
                            iteration=iteration,
                        ).images
            else:
                with torch.inference_mode():
                    generator = [
                        torch.Generator(device="cuda").manual_seed(seed)
                    ] * num_return_sequences
                    images = text2img.two_step_pipeline(
                        prompt=prompt,
                        modified_prompts=modified_prompts,
                        num_inference_steps=steps,
                        guidance_scale=7.5,
                        height=height,
                        width=width,
                        generator=generator,
                        negative_prompt=[negative_prompt] * 4,
                        iteration=iteration,
                    ).images

            # Reset the LoRA scale so the style does not leak into later tasks
            if style in lora_styles:
                tune_lora_scale(text2img.unet, 0.00)
                tune_lora_scale(text2img.text_encoder, 0.00)

        imageUrls = upload_images(images, type, taskId)
        print(imageUrls)
        message = ("userId: {}, \n email: {}, \n seed: {}, \n type: TEXT TO IMAGE, \n"
                   "  prompt: {},\n modifiedprompt: {},\n negativePrompt: {},\n"
                   " modelId: {} \n imageurls: {}").format(
            userId, email, seed, prompt, modified_prompts, negative_prompt,
            modelId, ", ".join(imageUrls)
        )
        print(imageUrls)
        # sendSlackAlert(message)
        # saveGeneratedImages(sourceId, "TEXT_TO_IMAGE", imageUrls, userId)
        # updateSource(sourceId, userId, "COMPLETED")
        return
    except Exception as e:
        updateSource(sourceId, userId, "FAILED")
        print(f"Error: {e}")
        return


def getImageToImange(prompt: str, imageUrl: str, taskId: str, sourceId: int,
                     isPromptEngineering: bool, userId, email, seed, style,
                     modelId, width, height, steps, negative_prompt):
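    """Image-to-image entry point.

    Marks the source record INPROGRESS, optionally expands the prompt and
    applies a LoRA style, runs StableDiffusionImg2ImgPipeline at
    strength=0.75, uploads the results, and records COMPLETED or FAILED.
    """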
    try:
        print("height and width", height, width)
        updateSource(sourceId, userId, "INPROGRESS")
        images = []
        modified_prompts = []
        type = "_imgtoimg"
        auto_mode = False
        if isPromptEngineering:
            auto_mode = isPromptEngineering

        if style in lora_styles:
            lora_path = lora_styles[style]["path"]
            lora_weight = lora_styles[style]["weight"]
            patch_pipe(
                img2img,
                lora_path,
                patch_text=True,
                patch_ti=True,
                patch_unet=True,
            )

            tune_lora_scale(img2img.unet, lora_weight)
            tune_lora_scale(img2img.text_encoder, lora_weight)

        if auto_mode:
            modified_prompts = prompt_modifier.modify(prompt)
            print(modified_prompts)

            print("\nInput:\n" + 100 * "-")
            print("\033[96m" + prompt + "\033[0m")
            print("\nOutput:\n" + 100 * "-")

            print(modified_prompts)
        else:
            modified_prompts = [prompt] * num_return_sequences

        # Keep only the last four variants
        modified_prompts = modified_prompts[-4:]

        new_string = "{} style ".format(style)
        for i in range(len(modified_prompts)):
            modified_prompts[i] = add_code_names(modified_prompts[i])
        # Prepend the style name to each prompt
        if style:
            for i in range(len(modified_prompts)):
                modified_prompts[i] = new_string + modified_prompts[i]

        init_image = download_image(imageUrl).resize((width, height))
        if seed is not None:
            torch.manual_seed(seed)

        with torch.inference_mode():
            images = img2img(prompt=modified_prompts, image=init_image, strength=0.75,
                             negative_prompt=[negative_prompt] * 4,
                             guidance_scale=7.5, num_images_per_prompt=1,
                             num_inference_steps=steps).images
        # Reset the LoRA scale so the style does not leak into later tasks
        if style in lora_styles:
            tune_lora_scale(img2img.unet, 0.00)
            tune_lora_scale(img2img.text_encoder, 0.00)
        imageUrls = upload_images(images, type, taskId)
        message = ("userId: {}, \n email: {}, \n seed: {}, \n type: IMAGE TO IMAGE, \n"
                   " prompt: {},\n modifiedprompt: {},\n negative_prompt: {}\n"
                   " modelId: {} \n InputImageurls: {}, \n imageurls: {}").format(
            userId, email, seed, prompt, modified_prompts, negative_prompt,
            modelId, imageUrl, ", ".join(imageUrls))
        sendSlackAlert(message)
        updateSource(sourceId, userId, "COMPLETED")
        saveGeneratedImages(sourceId, "IMAGE_TO_IMAGE", imageUrls, userId)
        return
    except Exception as e:
        updateSource(sourceId, userId, "FAILED")
        print(f"Error: {e}")


def model_fn(model_dir):
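    """SageMaker-style model loader.

    Loads the two-step text-to-image pipeline, an img2img pipeline sharing
    its components, both ControlNet pipelines, and the LoRA style registry
    into module globals, and returns the pipelines as a tuple.
    """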
    print("Logs: model loaded .... starts")
    # Load stable diffusion pipelines and move them to the GPU
    global sd_model, controlnet_canny, controlnet_canny_pipe, pose_detector, \
        controlnet_openpose, lora_styles, text2img, compel_proc, pipe, \
        controlnet_openpose_pipe, img2img

    # Load the prompt modifier
    prompt_modifier.load()

    sd_model = model_dir
    text2img = two_step_pipeline.from_pretrained(sd_model, torch_dtype=torch.float16).to("cuda")
    # text2img.enable_xformers_memory_efficient_attention()
    img2img = StableDiffusionImg2ImgPipeline(**text2img.components).to("cuda")

    # LoRA styles: weight file, scale, extra negative prompt, and whether the
    # file is a lora_diffusion ("custom") or diffusers attention-procs checkpoint
    lora_styles = {
        "nq6akX1CIp": {
            "path": model_dir + "/laur_style/nq6akX1CIp/final_lora.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "ghibli": {
            "path": model_dir + "/laur_style/nq6akX1CIp/ghibli_style_offset.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "eQAmnK2kB2": {
            "path": model_dir + "/laur_style/eQAmnK2kB2/final_lora.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "to8contrast": {
            "path": model_dir + "/laur_style/rpjgusOgqD/final_lora.bin",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "jim lee": {
            "path": model_dir + "/laur_style/e2j9mz0jqj/final_lora.bin",
            "weight": 0.8,
            "negativePrompt": [""],
            "type": "custom",
        },
    }

    # ControlNet canny pipeline
    controlnet_canny = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)
    controlnet_canny_pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_model, controlnet=controlnet_canny, torch_dtype=torch.float16).to("cuda")
    controlnet_canny_pipe.scheduler = UniPCMultistepScheduler.from_config(
        controlnet_canny_pipe.scheduler.config)
    controlnet_canny_pipe.enable_model_cpu_offload()
    # controlnet_canny_pipe.enable_xformers_memory_efficient_attention()

    # ControlNet openpose pipeline
    pose_detector = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
    controlnet_openpose = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
    controlnet_openpose_pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_model, controlnet=controlnet_openpose, torch_dtype=torch.float16).to("cuda")
    controlnet_openpose_pipe.scheduler = UniPCMultistepScheduler.from_config(
        controlnet_openpose_pipe.scheduler.config)
    controlnet_openpose_pipe.enable_model_cpu_offload()
    # controlnet_openpose_pipe.enable_xformers_memory_efficient_attention()

    print("Logs: model loaded ....")
    return text2img, img2img, controlnet_openpose_pipe, controlnet_canny_pipe


def predict_fn(data, pipe):
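    """SageMaker-style predict entry point.

    Reads task fields (user, seed, style, size, steps, negative prompt) from
    the incoming dict and dispatches on task_type to the generation
    functions above. Returns {"generated_images": result}.
    """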
    print("Logs: predictor loaded ....")
    print("lora_styles")
    print(lora_styles)
    task = data
    task2 = Task(data)
    # text2img, img2img, controlnet_openpose_pipe, controlnet_canny_pipe = pipe
    print("task is", task)
    result = ""

    try:
        # task = json.loads(task[1].decode())
        print("task begins")
        userId = task.get("userId", "")
        print("userId", userId)
        email = task.get("email", "")
        print("email", email)
        modelId = task.get("modelId", "")
        print("modelId", modelId)

        seed = task.get("seed", None)
        if seed is None:
            seed = random.randint(0, 100)
        else:
            seed = int(seed)
        print("seed", seed)

        style = task.get("style", None)
        print("style", style)
        steps = int(task.get("steps", "75"))
        height = int(task.get("height", "512"))
        width = int(task.get("width", "512"))
        iteration = float(task.get("iteration", 3.0))
        negative_prompt = task.get("negative_prompt", "")
        print("height and width", height, width, negative_prompt)

        if task["task_type"] == "GENERATE_AI_IMAGE":
            result = generateImage(task["prompt"], task["task_id"], task["auto_mode"],
                                   userId, email, seed, style, modelId, width, height,
                                   task["source_id"], steps, iteration, negative_prompt)
        elif task["task_type"] == "IMAGE_TO_IMAGE":
            result = getImageToImange(task["prompt"], task["imageUrl"], task["task_id"],
                                      task["source_id"], task["auto_mode"], userId, email,
                                      seed, style, modelId, width, height, steps,
                                      negative_prompt)
        elif task["task_type"] == "GENERATE_VARIATION":
            # result = await generateImageVariation(task["task_id"], task["imageUrl"])
            print("generate variations")
        elif task["task_type"] == "POSE":
            result = controlnet_openpose_task(task2, task["prompt"], task["imageUrl"],
                                              task["task_id"], task["auto_mode"], userId,
                                              email, seed, style, modelId, width, height,
                                              steps, negative_prompt)
        elif task["task_type"] == "CANNY":
            result = controlnet_canny_task(task2, task["prompt"], task["imageUrl"],
                                           task["task_id"], task["auto_mode"], userId,
                                           email, seed, style, modelId, width, height,
                                           steps, negative_prompt)
        else:
            print("Invalid TYPE")
            return result
        # If the task succeeds, exit the retry loop
    except Exception as e:
        print(f"Error: {e}")
    # create response
    return {"generated_images": result}

print("Loaded")

model_fn("/home/ec2-user/SageMaker/model_v5.2")

# Smoke-test task
task = {
    "task_type": "GENERATE_AI_IMAGE",
    "task_id": 123,
    "auto_mode": True,
    "prompt": "tesing ,",
    "timestamp": 1683694533,
    "attempt": 0,
    "seed": None,
    "style": "ghibli",
    "modelId": "10000",
    "source_id": 123,
    "userId": "6755388444249759",
    "width": "512",
    "height": "512",
    "negative_prompt": "hdjdhhd,djddjd",
    "steps": "50",
    "queue_name": "gamma_task_queue10000",
}
predict_fn(task, None)