import os


class JanusModelLoader:
    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(s):
        return {
            "required": {
                "model_name": (["deepseek-ai/Janus-Pro-1B", "deepseek-ai/Janus-Pro-7B", "Janus-Pro-1B", "Janus-Pro-7B"],),
                "device": (["cuda", "cpu"],),
            },
        }

    RETURN_TYPES = ("JANUS_MODEL", "JANUS_PROCESSOR")
    RETURN_NAMES = ("model", "processor")
    FUNCTION = "load_model"
    CATEGORY = "Janus-Pro"

    def load_model(self, model_name, device):
        try:
            from janus.models import MultiModalityCausalLM, VLChatProcessor
            from transformers import AutoModelForCausalLM
            import torch
        except ImportError:
            raise ImportError("Please install Janus using 'pip install -r requirements.txt'")

        # Fall back to CPU if CUDA was requested but is not available.
        if device == "cuda" and not torch.cuda.is_available():
            device = "cpu"

        # Prefer bfloat16; fall back to float16 if the device does not support it.
        try:
            dtype = torch.bfloat16
            torch.zeros(1, dtype=dtype, device=device)
        except RuntimeError:
            dtype = torch.float16

        # Get the ComfyUI root directory
        comfy_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))

        # Build the local model path
        model_dir = os.path.join(comfy_path,
                                 "models",
                                 "Janus-Pro",
                                 os.path.basename(model_name))

        if not os.path.exists(model_dir):
            raise ValueError(f"Local model not found at {model_dir}. Please download the model and place it in the ComfyUI/models/Janus-Pro folder.")

        vl_chat_processor = VLChatProcessor.from_pretrained(model_dir)
        vl_gpt = AutoModelForCausalLM.from_pretrained(
            model_dir,
            trust_remote_code=True
        )
        vl_gpt = vl_gpt.to(dtype).to(device).eval()

        return (vl_gpt, vl_chat_processor)
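

# A minimal sketch of how this node would typically be exposed to ComfyUI,
# assuming the surrounding package does not already register it in its
# __init__.py. The mapping key and display name below are illustrative,
# not taken from the original source.
NODE_CLASS_MAPPINGS = {
    "JanusModelLoader": JanusModelLoader,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "JanusModelLoader": "Janus Model Loader",
}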