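"""SageMaker inference worker for an avatar/image-generation service.

Loads a Stable Diffusion checkpoint plus ControlNet (canny and openpose)
pipelines with optional per-style LoRA weights, and dispatches incoming
tasks: GENERATE_AI_IMAGE, IMAGE_TO_IMAGE, POSE, and CANNY.
"""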
import argparse
import asyncio
import glob
import io
import json
import multiprocessing
import os
import pprint
import random
import re
import time
import urllib.request
import uuid
from io import BytesIO
from typing import Any, Callable, Dict, List, Optional, Union

import aioredis
import boto3
import cv2
import numpy as np
import PIL
import redis
import requests
import torch
from PIL import Image
from torch.optim.adam import Adam

from basicsr.archs.rrdbnet_arch import RRDBNet
from basicsr.utils.download_util import load_file_from_url
from controlnet_aux import OpenposeDetector
from diffusers import (
    ControlNetModel,
    DiffusionPipeline,
    StableDiffusionControlNetPipeline,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionPipeline,
    UniPCMultistepScheduler,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from lora_diffusion import patch_pipe, tune_lora_scale
from realesrgan import RealESRGANer
from realesrgan.archs.srvgg_arch import SRVGGNetCompact
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    GPT2LMHeadModel,
    GPT2Tokenizer,
)

from data.dataAccessor import saveGeneratedImages, update_db, updateSource
from data.task import Task
from prompt_modifier import PromptModifier
from twoStepPipeline import two_step_pipeline
from utility import (
    add_code_names,
    contains_avatar_name,
    create_canny_image,
    custom_edit_prompts,
    download_image,
    list_files,
    pickPoses,
    read_url,
    replace_all,
    sendSlackAlert,
    slackAlert,
    upload_images,
)

s3 = boto3.client("s3")

# Enable cuDNN autotuning and TF32 matmuls for faster inference on Ampere+ GPUs.
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
num_return_sequences = 4  # the number of results to generate per task
auto_mode = False
prompt_modifier = PromptModifier(num_of_sequences=num_return_sequences)
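# `update_db` (imported from data.dataAccessor) is applied as a decorator to the
# ControlNet tasks below. Its implementation is not part of this file; a minimal
# sketch of the shape such a decorator typically has (an assumption, for
# illustration only -- names and status values are hypothetical):
#
#     def update_db(fn):
#         def wrapper(task, *args, **kwargs):
#             try:
#                 result = fn(task, *args, **kwargs)
#                 # mark `task` COMPLETED in the database
#                 return result
#             except Exception:
#                 # mark `task` FAILED in the database
#                 raise
#         return wrapper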
# Canny ControlNet task: condition generation on the edge map of the input image.
@update_db
def controlnet_canny_task(task, prompt, imageUrl, taskId, isPromptEngineering: bool,
                          userId, email, seed, style, modelId, width, height, steps,
                          negative_prompt):
    modified_prompts = []
    images = []
    type = "_canny"
    auto_mode = False
    negativePrompt = [f"monochrome, neon, x-ray, negative image, oversaturated, {negative_prompt}"] * 4
    if style in lora_styles:
        lora_path = lora_styles[style]["path"]
        lora_weight = lora_styles[style]["weight"]
        patch_pipe(
            controlnet_canny_pipe,
            lora_path,
            patch_text=True,
            patch_ti=True,
            patch_unet=True,
        )
        tune_lora_scale(controlnet_canny_pipe.unet, lora_weight)
        tune_lora_scale(controlnet_canny_pipe.text_encoder, lora_weight)
    if isPromptEngineering:
        auto_mode = isPromptEngineering
        prompt = add_code_names(prompt)
    if auto_mode:
        modified_prompts = prompt_modifier.modify(prompt)
        print(modified_prompts)
        print("\nInput:\n" + 100 * "-")
        print("\033[96m" + prompt + "\033[0m")
        print("\nOutput:\n" + 100 * "-")
        modified_prompts = modified_prompts[-4:]
        print(modified_prompts)
    else:
        modified_prompts = [prompt] * num_return_sequences
    new_string = "{} style ".format(style)
    # Prefix each prompt with the selected style name.
    if style:
        for i in range(len(modified_prompts)):
            modified_prompts[i] = new_string + modified_prompts[i]
            print(modified_prompts[i])
    if seed is not None:
        torch.manual_seed(seed)
    init_image = download_image(imageUrl).resize((width, height))
    canny_image = create_canny_image(init_image)
    images = controlnet_canny_pipe(prompt=modified_prompts, image=canny_image,
                                   guidance_scale=9, num_images_per_prompt=1,
                                   negative_prompt=negativePrompt,
                                   num_inference_steps=steps, height=height,
                                   width=width).images
    if style in lora_styles:
        # Reset LoRA influence so the next task starts from the base weights.
        tune_lora_scale(controlnet_canny_pipe.unet, 0.00)
        tune_lora_scale(controlnet_canny_pipe.text_encoder, 0.00)
    imageUrls = upload_images(images, type, taskId)
    message = ("userId: {}, \n email: {}, \n seed: {}, \n type: CANNY, \n prompt: {},\n "
               "modifiedprompt: {},\n modelId: {}, \n negativePrompt: {},\n "
               "InputImageurls: {}, \n imageurls: {}").format(
        userId, email, seed, prompt, modified_prompts, modelId,
        negativePrompt[0], imageUrl, ", ".join(imageUrls))
    sendSlackAlert(message)
    return
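# `create_canny_image` is imported from utility. For reference, the standard
# ControlNet canny preprocessing looks roughly like the sketch below (an
# assumption -- the project's actual implementation may differ):
def _create_canny_image_sketch(image, low_threshold=100, high_threshold=200):
    arr = np.array(image)
    edges = cv2.Canny(arr, low_threshold, high_threshold)  # single-channel edge map
    edges = np.stack([edges] * 3, axis=2)  # replicate to 3 channels for the pipeline
    return Image.fromarray(edges)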
# OpenPose ControlNet task: condition generation on a pose skeleton extracted
# from the input image (or on the input image itself for character sheets).
@update_db
def controlnet_openpose_task(
    task,
    prompt,
    imageUrl,
    taskId,
    isPromptEngineering: bool,
    userId,
    email,
    seed,
    style,
    modelId,
    width,
    height,
    steps,
    negative_prompt,
):
    modified_prompts = []
    images = []
    type = "_pose"
    auto_mode = False
    if isPromptEngineering:
        auto_mode = isPromptEngineering
        prompt = add_code_names(prompt)
    if style in lora_styles:
        lora_path = lora_styles[style]["path"]
        lora_weight = lora_styles[style]["weight"]
        patch_pipe(
            controlnet_openpose_pipe,
            lora_path,
            patch_text=True,
            patch_ti=True,
            patch_unet=True,
        )
        tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
        tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)
    if auto_mode:
        modified_prompts = prompt_modifier.modify(prompt)
        print(modified_prompts)
        print("\nInput:\n" + 100 * "-")
        print("\033[96m" + prompt + "\033[0m")
        print("\nOutput:\n" + 100 * "-")
        modified_prompts = modified_prompts[-4:]
        print(modified_prompts)
    else:
        modified_prompts = [prompt] * num_return_sequences
    new_string = "{} style ".format(style)
    # Prefix each prompt with the selected style name.
    if style:
        for i in range(len(modified_prompts)):
            modified_prompts[i] = new_string + modified_prompts[i]
            print(modified_prompts[i])
    init_image = download_image(imageUrl).resize((width, height))
    if seed is not None:
        torch.manual_seed(seed)
    if "character sheet" in prompt.lower():
        # Character sheets: use the input image itself as the pose map.
        imageUrlPose = imageUrl
        input_image_bytes = read_url(imageUrlPose)
        init_image = Image.open(io.BytesIO(input_image_bytes)).convert("RGB")
        init_image = init_image.resize((512, 512))
        poses = [init_image] * 4
        images = controlnet_openpose_pipe(
            prompt=modified_prompts,
            image=poses,
            num_images_per_prompt=1,
            num_inference_steps=steps,
            width=width,
            height=height,
            negative_prompt=[negative_prompt] * 4,
        ).images
    else:
        # Extract an OpenPose skeleton from the input image.
        poses = [pose_detector(init_image)] * 4
        images = controlnet_openpose_pipe(
            prompt=modified_prompts,
            image=poses,
            num_images_per_prompt=1,
            num_inference_steps=steps,
            negative_prompt=[negative_prompt] * 4,
            height=height,
            width=width,
        ).images
    if style in lora_styles:
        # Reset LoRA influence so the next task starts from the base weights.
        tune_lora_scale(controlnet_openpose_pipe.unet, 0.00)
        tune_lora_scale(controlnet_openpose_pipe.text_encoder, 0.00)
    if "character sheet" in prompt.lower():
        return images
    imageUrls = upload_images(images, type, taskId)
    message = ("userId: {}, \n email: {}, \n seed: {}, \n type: pose, \n prompt: {},\n "
               "modifiedprompt: {},\n negative_prompt: {}, modelId: {}\n "
               "InputImageurls: {}, \n imageurls: {}").format(
        userId,
        email,
        seed,
        prompt,
        modified_prompts,
        negative_prompt,
        modelId,
        imageUrl,
        ", ".join(imageUrls),
    )
    sendSlackAlert(message)
    return
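# Text-to-image entry point. "character sheet" prompts are routed through the
# openpose ControlNet pipeline with preset poses; everything else goes through
# the custom two-step text2img pipeline, with optional LoRA styling either way.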
def generateImage(
    prompt: str,
    taskId: str,
    isPromptEngineering: bool,
    userId,
    email,
    seed,
    style,
    modelId,
    width,
    height,
    sourceId,
    steps,
    iteration,
    negative_prompt,
):
    try:
        # if sourceId is not None:
        #     updateSource(sourceId, userId, "INPROGRESS")
        if style in lora_styles:
            lora_path = lora_styles[style]["path"]
            lora_weight = lora_styles[style]["weight"]
            lora_type = lora_styles[style]["type"]
        modified_prompts = []
        images = []
        type = ""
        auto_mode = False
        if isPromptEngineering:
            auto_mode = isPromptEngineering
            # prompt = add_code_names(prompt)
        if auto_mode:
            modified_prompts = prompt_modifier.modify(prompt)
            print(modified_prompts)
            print("\nInput:\n" + 100 * "-")
            print("\033[96m" + prompt + "\033[0m")
            print("\nOutput:\n" + 100 * "-")
            # modified_prompts = modified_prompts[-4:]
            print(modified_prompts)
        else:
            modified_prompts = [prompt] * num_return_sequences
        new_string = "{} style ".format(style)
        # Prefix each prompt with the selected style name.
        if style:
            for i in range(len(modified_prompts)):
                modified_prompts[i] = new_string + modified_prompts[i]
        # Apply code-name substitution to each prompt.
        for i in range(len(modified_prompts)):
            modified_prompts[i] = add_code_names(modified_prompts[i])
            print(modified_prompts[i])
        if "character sheet" in prompt.lower():
            if style in lora_styles:
                lora_path = lora_styles[style]["path"]
                lora_weight = lora_styles[style]["weight"]
                lora_type = lora_styles[style]["type"]
                patch_pipe(
                    controlnet_openpose_pipe,
                    lora_path,
                    patch_text=True,
                    patch_ti=True,
                    patch_unet=True,
                )
                tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
                tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)
            poses = pickPoses()
            print("prompt and modified prompt", prompt, modified_prompts)
            if style in lora_styles:
                if lora_type == "custom":
                    patch_pipe(
                        controlnet_openpose_pipe,
                        lora_path,
                        patch_text=True,
                        patch_ti=True,
                        patch_unet=True,
                    )
                    tune_lora_scale(controlnet_openpose_pipe.unet, lora_weight)
                    tune_lora_scale(controlnet_openpose_pipe.text_encoder, lora_weight)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = controlnet_openpose_pipe(
                            prompt=modified_prompts,
                            num_inference_steps=steps,
                            num_images_per_prompt=1,
                            image=poses,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            negative_prompt=[negative_prompt] * 4,
                            generator=generator,
                        ).images
                    tune_lora_scale(controlnet_openpose_pipe.unet, 0.00)
                    tune_lora_scale(controlnet_openpose_pipe.text_encoder, 0.00)
                elif lora_type == "diffusers":
                    text2img.unet.load_attn_procs(lora_path)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = controlnet_openpose_pipe(
                            prompt=modified_prompts,
                            num_inference_steps=steps,
                            image=poses,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            negative_prompt=[negative_prompt] * 4,
                            num_images_per_prompt=1,
                            # cross_attention_kwargs={"scale": lora_weight},
                        ).images
            else:
                with torch.inference_mode():
                    print(seed)
                    generator = [
                        torch.Generator(device="cuda").manual_seed(seed)
                    ] * num_return_sequences
                    images = controlnet_openpose_pipe(
                        prompt=modified_prompts,
                        image=poses,
                        num_inference_steps=steps,
                        guidance_scale=7.5,
                        height=height,
                        width=width,
                        generator=generator,
                        negative_prompt=[negative_prompt] * 4,
                        num_images_per_prompt=1,
                        # cross_attention_kwargs={"scale": 0},
                    ).images
        else:
            prompt = [prompt] * 4
            if style:
                for i in range(len(prompt)):
                    prompt[i] = new_string + prompt[i]
            for i in range(len(prompt)):
                prompt[i] = add_code_names(prompt[i])
                print(prompt[i])
            if seed is not None:
                seed = int(seed)
            print("prompt and modified prompt", prompt, modified_prompts)
            if style in lora_styles:
                if lora_type == "custom":
                    patch_pipe(
                        text2img,
                        lora_path,
                        patch_text=True,
                        patch_ti=True,
                        patch_unet=True,
                    )
                    tune_lora_scale(text2img.unet, lora_weight)
                    tune_lora_scale(text2img.text_encoder, lora_weight)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = text2img.two_step_pipeline(
                            prompt=prompt,
                            modified_prompts=modified_prompts,
                            num_inference_steps=steps,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            negative_prompt=[negative_prompt] * 4,
                            iteration=iteration,
                        ).images
                    tune_lora_scale(text2img.unet, 0.00)
                    tune_lora_scale(text2img.text_encoder, 0.00)
                elif lora_type == "diffusers":
                    text2img.unet.load_attn_procs(lora_path)
                    with torch.inference_mode():
                        generator = [
                            torch.Generator(device="cuda").manual_seed(seed)
                        ] * num_return_sequences
                        images = text2img.two_step_pipeline(
                            prompt=prompt,
                            modified_prompts=modified_prompts,
                            num_inference_steps=steps,
                            guidance_scale=7.5,
                            height=height,
                            width=width,
                            generator=generator,
                            # cross_attention_kwargs={"scale": lora_weight},
                            negative_prompt=[negative_prompt] * 4,
                            iteration=iteration,
                        ).images
            else:
                with torch.inference_mode():
                    generator = [
                        torch.Generator(device="cuda").manual_seed(seed)
                    ] * num_return_sequences
                    images = text2img.two_step_pipeline(
                        prompt=prompt,
                        modified_prompts=modified_prompts,
                        num_inference_steps=steps,
                        guidance_scale=7.5,
                        height=height,
                        width=width,
                        generator=generator,
                        negative_prompt=[negative_prompt] * 4,
                        iteration=iteration,
                    ).images
            if style in lora_styles:
                tune_lora_scale(text2img.unet, 0.00)
                tune_lora_scale(text2img.text_encoder, 0.00)
        imageUrls = upload_images(images, type, taskId)
        print(imageUrls)
        message = ("userId: {}, \n email: {}, \n seed: {}, \n type: TEXT TO IMAGE, \n "
                   "prompt: {},\n modifiedprompt: {},\n negativePrompt: {},\n "
                   "modelId: {} \n imageurls: {}").format(
            userId, email, seed, prompt, modified_prompts, negative_prompt, modelId,
            ", ".join(imageUrls)
        )
        print(imageUrls)
        # sendSlackAlert(message)
        # saveGeneratedImages(sourceId, "TEXT_TO_IMAGE", imageUrls, userId)
        # updateSource(sourceId, userId, "COMPLETED")
        return
    except Exception as e:
        updateSource(sourceId, userId, "FAILED")
        print(f"Error: {e}")
        return
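# Image-to-image entry point: runs the shared img2img pipeline at strength 0.75
# over the downloaded input image, with optional LoRA styling.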
def getImageToImange(prompt: str, imageUrl: str, taskId: str, sourceId: int,
                     isPromptEngineering: bool, userId, email, seed, style, modelId,
                     width, height, steps, negative_prompt):
    try:
        print("height and width", height, width)
        updateSource(sourceId, userId, "INPROGRESS")
        images = []
        modified_prompts = []
        type = "_imgtoimg"
        auto_mode = False
        if isPromptEngineering:
            auto_mode = isPromptEngineering
        if style in lora_styles:
            lora_path = lora_styles[style]["path"]
            lora_weight = lora_styles[style]["weight"]
            patch_pipe(
                img2img,
                lora_path,
                patch_text=True,
                patch_ti=True,
                patch_unet=True,
            )
            tune_lora_scale(img2img.unet, lora_weight)
            tune_lora_scale(img2img.text_encoder, lora_weight)
        if auto_mode:
            modified_prompts = prompt_modifier.modify(prompt)
            print(modified_prompts)
            print("\nInput:\n" + 100 * "-")
            print("\033[96m" + prompt + "\033[0m")
            print("\nOutput:\n" + 100 * "-")
            print(modified_prompts)
        else:
            modified_prompts = [prompt] * num_return_sequences
        modified_prompts = modified_prompts[-4:]
        new_string = "{} style ".format(style)
        # Apply code-name substitution to each prompt.
        for i in range(len(modified_prompts)):
            modified_prompts[i] = add_code_names(modified_prompts[i])
        # Prefix each prompt with the selected style name.
        if style:
            for i in range(len(modified_prompts)):
                modified_prompts[i] = new_string + modified_prompts[i]
        init_image = download_image(imageUrl).resize((width, height))
        if seed is not None:
            torch.manual_seed(seed)
        with torch.inference_mode():
            images = img2img(prompt=modified_prompts, image=init_image, strength=0.75,
                             negative_prompt=[negative_prompt] * 4, guidance_scale=7.5,
                             num_images_per_prompt=1, num_inference_steps=steps).images
        if style in lora_styles:
            # Reset LoRA influence so the next task starts from the base weights.
            tune_lora_scale(img2img.unet, 0.00)
            tune_lora_scale(img2img.text_encoder, 0.00)
        imageUrls = upload_images(images, type, taskId)
        message = ("userId: {}, \n email: {}, \n seed: {}, \n type: IMAGE TO IMAGE, \n "
                   "prompt: {},\n modifiedprompt: {},\n negative_prompt: {}\n "
                   "modelId: {} \n InputImageurls: {}, \n imageurls: {}").format(
            userId, email, seed, prompt, modified_prompts, negative_prompt, modelId,
            imageUrl, ", ".join(imageUrls))
        sendSlackAlert(message)
        updateSource(sourceId, userId, "COMPLETED")
        saveGeneratedImages(sourceId, "IMAGE_TO_IMAGE", imageUrls, userId)
        return
    except Exception as e:
        updateSource(sourceId, userId, "FAILED")
        print(f"Error: {e}")
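# SageMaker model-loading hook: builds all pipelines once per container and
# exposes them as module globals so the task functions above can reach them.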
def model_fn(model_dir):
    print("Logs: model loaded .... starts")
    # Load Stable Diffusion and move it to the GPU.
    global sd_model, controlnet_canny, controlnet_canny_pipe, pose_detector, \
        controlnet_openpose, lora_styles, text2img, compel_proc, pipe, \
        controlnet_openpose_pipe, img2img
    # Load the prompt-modifier model.
    prompt_modifier.load()
    sd_model = model_dir
    text2img = two_step_pipeline.from_pretrained(sd_model, torch_dtype=torch.float16).to("cuda")
    # text2img.enable_xformers_memory_efficient_attention()
    # Share components with the img2img pipeline to avoid loading weights twice.
    img2img = StableDiffusionImg2ImgPipeline(**text2img.components).to("cuda")
    # Per-style LoRA weights bundled with the model artifact.
    lora_styles = {
        "nq6akX1CIp": {
            "path": model_dir + "/laur_style/nq6akX1CIp/final_lora.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "ghibli": {
            "path": model_dir + "/laur_style/nq6akX1CIp/ghibli_style_offset.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "eQAmnK2kB2": {
            "path": model_dir + "/laur_style/eQAmnK2kB2/final_lora.safetensors",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "to8contrast": {
            "path": model_dir + "/laur_style/rpjgusOgqD/final_lora.bin",
            "weight": 0.5,
            "negativePrompt": [""],
            "type": "custom",
        },
        "jim lee": {
            "path": model_dir + "/laur_style/e2j9mz0jqj/final_lora.bin",
            "weight": 0.8,
            "negativePrompt": [""],
            "type": "custom",
        },
    }
    # ControlNet canny pipeline.
    controlnet_canny = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16)
    controlnet_canny_pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_model, controlnet=controlnet_canny, torch_dtype=torch.float16).to("cuda")
    controlnet_canny_pipe.scheduler = UniPCMultistepScheduler.from_config(
        controlnet_canny_pipe.scheduler.config)
    controlnet_canny_pipe.enable_model_cpu_offload()
    # controlnet_canny_pipe.enable_xformers_memory_efficient_attention()
    # ControlNet openpose pipeline.
    pose_detector = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
    controlnet_openpose = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
    controlnet_openpose_pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_model, controlnet=controlnet_openpose, torch_dtype=torch.float16).to("cuda")
    controlnet_openpose_pipe.scheduler = UniPCMultistepScheduler.from_config(
        controlnet_openpose_pipe.scheduler.config)
    controlnet_openpose_pipe.enable_model_cpu_offload()
    # controlnet_openpose_pipe.enable_xformers_memory_efficient_attention()
    print("Logs: model loaded ....")
    return text2img, img2img, controlnet_openpose_pipe, controlnet_canny_pipe
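# SageMaker inference hook: parses the task payload and dispatches to the
# matching generation function. `pipe` (the tuple returned by model_fn) is
# accepted but unused; the globals set in model_fn are used instead.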
def predict_fn(data, pipe):
    print("Logs: predictor loaded ....")
    print("lora_styles ")
    print(lora_styles)
    task = data
    task2 = Task(data)
    # text2img, img2img, controlnet_openpose_pipe, controlnet_canny_pipe = pipe
    print("task is ", task)
    result = ""
    try:
        print("task begins")
        userId = task.get("userId", "")
        print("userId ", userId)
        email = task.get("email", "")
        print("email ", email)
        seed = task.get("seed", None)
        print("seed ", seed)
        modelId = task.get("modelId", "")
        print("modelId ", modelId)
        style = task.get("style", None)
        print("style ", style)
        if seed is None:
            seed = random.randint(0, 100)
        else:
            seed = int(seed)
        steps = int(task.get("steps", "75"))
        height = int(task.get("height", "512"))
        width = int(task.get("width", "512"))
        iteration = float(task.get("iteration", 3.0))
        negative_prompt = task.get("negative_prompt", "")
        print("height and width", height, width, negative_prompt)
        if task["task_type"] == "GENERATE_AI_IMAGE":
            result = generateImage(task["prompt"], task["task_id"], task["auto_mode"],
                                   userId, email, seed, style, modelId, width, height,
                                   task["source_id"], steps, iteration, negative_prompt)
        elif task["task_type"] == "IMAGE_TO_IMAGE":
            result = getImageToImange(task["prompt"], task["imageUrl"], task["task_id"],
                                      task["source_id"], task["auto_mode"], userId,
                                      email, seed, style, modelId, width, height,
                                      steps, negative_prompt)
        elif task["task_type"] == "GENERATE_VARIATION":
            # result = await generateImageVariation(task["task_id"], task["imageUrl"])
            print("generate variations")
        elif task["task_type"] == "POSE":
            result = controlnet_openpose_task(task2, task["prompt"], task["imageUrl"],
                                              task["task_id"], task["auto_mode"],
                                              userId, email, seed, style, modelId,
                                              width, height, steps, negative_prompt)
        elif task["task_type"] == "CANNY":
            result = controlnet_canny_task(task2, task["prompt"], task["imageUrl"],
                                           task["task_id"], task["auto_mode"], userId,
                                           email, seed, style, modelId, width, height,
                                           steps, negative_prompt)
        # REMOVE_BG, INPAINT, and UPSCALE_IMAGE task types are currently disabled.
        else:
            print("Invalid TYPE")
        return result
    except Exception as e:
        print(f"Error: {e}")
    # create response
    return {"generated_images": result}
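# --- Local smoke test: load the model bundle and run one text-to-image task ---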
print("Loaded")
model_fn("/home/ec2-user/SageMaker/model_v5.2")
task = {
    "task_type": "GENERATE_AI_IMAGE",
    "task_id": 123,
    "auto_mode": True,
    "prompt": "tesing ,",
    "timestamp": 1683694533,
    "attempt": 0,
    "seed": None,
    "style": "ghibli",
    "modelId": "10000",
    "source_id": 123,
    "userId": "6755388444249759",
    "width": "512",
    "height": "512",
    "negative_prompt": "hdjdhhd,djddjd",
    "steps": "50",
    "queue_name": "gamma_task_queue10000",
}
predict_fn(task, None)
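# Hedged examples of payloads for the other task types (values illustrative;
# "https://example.com/input.png" is a placeholder URL, not a real asset):
# pose_task = dict(task, task_type="POSE", imageUrl="https://example.com/input.png")
# predict_fn(pose_task, None)
# canny_task = dict(task, task_type="CANNY", imageUrl="https://example.com/input.png")
# predict_fn(canny_task, None)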