Support loading directly to vram with CLIPLoader node.
comfyanonymous committed Aug 12, 2024
1 parent 9829b01 commit 9acfe4d
Showing 1 changed file with 7 additions and 1 deletion.
8 changes: 7 additions & 1 deletion comfy/sd.py
@@ -82,6 +82,8 @@ def __init__(self, target=None, embedding_directory=None, no_init=False, tokeniz

         self.tokenizer = tokenizer(embedding_directory=embedding_directory, tokenizer_data=tokenizer_data)
         self.patcher = comfy.model_patcher.ModelPatcher(self.cond_stage_model, load_device=load_device, offload_device=offload_device)
+        if params['device'] == load_device:
+            model_management.load_model_gpu(self.patcher)
         self.layer_idx = None
         logging.debug("CLIP model load device: {}, offload device: {}, current: {}".format(load_device, offload_device, params['device']))

@@ -455,7 +457,11 @@ class EmptyClass:
         clip_target.clip = comfy.text_encoders.sd3_clip.SD3ClipModel
         clip_target.tokenizer = comfy.text_encoders.sd3_clip.SD3Tokenizer

-    clip = CLIP(clip_target, embedding_directory=embedding_directory)
+    parameters = 0
+    for c in clip_data:
+        parameters += comfy.utils.calculate_parameters(c)
+
+    clip = CLIP(clip_target, embedding_directory=embedding_directory, parameters=parameters)
     for c in clip_data:
         m, u = clip.load_sd(c)
         if len(m) > 0:
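Taken together, the change counts the text encoder's parameters before constructing the CLIP object and, when the resolved device already equals the load device, pushes the patched model to the GPU immediately rather than waiting for the first prompt. The following is a minimal, self-contained sketch of that size-then-eagerly-load pattern; the helper names and the VRAM threshold are illustrative assumptions, not the ComfyUI implementation.

import torch

def calculate_parameters(state_dict):
    # Illustrative stand-in for comfy.utils.calculate_parameters:
    # sum the element counts of every tensor in a checkpoint's state dict.
    return sum(t.numel() for t in state_dict.values() if isinstance(t, torch.Tensor))

def pick_text_encoder_device(parameters, dtype=torch.float16, free_vram_bytes=8 * 1024**3):
    # Hypothetical sizing check: if the weights fit comfortably within the free
    # VRAM budget, place (and eagerly load) the text encoder on the GPU;
    # otherwise keep it on the offload device and stream it in on demand.
    bytes_per_param = torch.tensor([], dtype=dtype).element_size()
    if torch.cuda.is_available() and parameters * bytes_per_param < 0.8 * free_vram_bytes:
        return torch.device("cuda")
    return torch.device("cpu")

# Usage sketch mirroring the diff: count parameters across all loaded
# state dicts, then decide where the encoder should live before building it.
# clip_data = [torch.load(p, map_location="cpu") for p in ckpt_paths]
# parameters = sum(calculate_parameters(c) for c in clip_data)
# device = pick_text_encoder_device(parameters)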
