
SD XL Inpainting - 'NoneType' object has no attribute 'repeat' #83

Open
WaterKnight1998 opened this issue Feb 12, 2024 · 3 comments

@WaterKnight1998

Example code:

from diffusers import AutoPipelineForInpainting, DPMSolverMultistepScheduler
import torch
from diffusers.utils import load_image, make_image_grid
from compel import Compel, ReturnedEmbeddingsType

pipeline = AutoPipelineForInpainting.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16).to("cuda")

compel_proc = Compel(tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2] , text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2], returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED, requires_pooled=[False, True],device="cuda" )

prompt_embeds, pooled_prompt_embeds = compel_proc("whatever you want")
negative_prompt_embeds, pooled_negative_prompt_embeds = compel_proc("whatever you don't want")
image = pipeline(
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_negative_prompt_embeds=pooled_negative_prompt_embeds,
    image=img_original,  # img_original, mask and generator are defined elsewhere
    mask_image=mask,
    generator=generator,
    num_inference_steps=50,
    strength=1,
).images[0]

Error:

File /databricks/python/lib/python3.10/site-packages/torch/utils/_contextlib.py:115, in context_decorator.<locals>.decorate_context(*args, **kwargs)
    112 @functools.wraps(func)
    113 def decorate_context(*args, **kwargs):
    114     with ctx_factory():
--> 115         return func(*args, **kwargs)

File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py:1547, in StableDiffusionXLInpaintPipeline.__call__(self, prompt, prompt_2, image, mask_image, masked_image_latents, height, width, padding_mask_crop, strength, num_inference_steps, timesteps, denoising_start, denoising_end, guidance_scale, negative_prompt, negative_prompt_2, num_images_per_prompt, eta, generator, latents, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, output_type, return_dict, cross_attention_kwargs, guidance_rescale, original_size, crops_coords_top_left, target_size, negative_original_size, negative_crops_coords_top_left, negative_target_size, aesthetic_score, negative_aesthetic_score, clip_skip, callback_on_step_end, callback_on_step_end_tensor_inputs, **kwargs)
   1537 # 3. Encode input prompt
   1538 text_encoder_lora_scale = (
   1539     self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
   1540 )
   1542 (
   1543     prompt_embeds,
   1544     negative_prompt_embeds,
   1545     pooled_prompt_embeds,
   1546     negative_pooled_prompt_embeds,
-> 1547 ) = self.encode_prompt(
   1548     prompt=prompt,
   1549     prompt_2=prompt_2,
   1550     device=device,
   1551     num_images_per_prompt=num_images_per_prompt,
   1552     do_classifier_free_guidance=self.do_classifier_free_guidance,
   1553     negative_prompt=negative_prompt,
   1554     negative_prompt_2=negative_prompt_2,
   1555     prompt_embeds=prompt_embeds,
   1556     negative_prompt_embeds=negative_prompt_embeds,
   1557     pooled_prompt_embeds=pooled_prompt_embeds,
   1558     negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
   1559     lora_scale=text_encoder_lora_scale,
   1560     clip_skip=self.clip_skip,
   1561 )
   1563 # 4. set timesteps
   1564 def denoising_value_valid(dnv):

File /local_disk0/.ephemeral_nfs/cluster_libraries/python/lib/python3.10/site-packages/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_inpaint.py:734, in StableDiffusionXLInpaintPipeline.encode_prompt(self, prompt, prompt_2, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, negative_prompt_2, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, lora_scale, clip_skip)
    731     negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
    732     negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
--> 734 pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
    735     bs_embed * num_images_per_prompt, -1
    736 )
    737 if do_classifier_free_guidance:
    738     negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
    739         bs_embed * num_images_per_prompt, -1
    740     )

AttributeError: 'NoneType' object has no attribute 'repeat'

Thanks in advance!
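One mismatch worth noting against the traceback above: the StableDiffusionXLInpaintPipeline.__call__ signature listed there accepts negative_pooled_prompt_embeds, while the example passes pooled_negative_prompt_embeds. Whether or not that explains the None value, a minimal sketch of the call using the kwarg name from that signature would look like this (img_original, mask and generator stand in for the reporter's own objects):

# Sketch only: kwarg name taken from the __call__ signature in the traceback above.
image = pipeline(
    prompt_embeds=prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    negative_pooled_prompt_embeds=pooled_negative_prompt_embeds,
    image=img_original,
    mask_image=mask,
    generator=generator,
    num_inference_steps=50,
    strength=1,
).images[0]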

@Woodyet

Woodyet commented May 17, 2024

Still getting this error

@damian0815
Owner

what happens when you run this:

prompt_embeds, pooled_prompt_embeds = compel_proc("whatever you want")
print('pooled positive:', pooled_prompt_embeds)
negative_prompt_embeds, pooled_negative_prompt_embeds = compel_proc("whatever you don't want")
print('pooled negative:', pooled_negative_prompt_embeds)

?
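If either print shows None, the failure can be surfaced before the pipeline call instead of as the AttributeError deep inside encode_prompt. A minimal sketch, assuming the same compel_proc configured with requires_pooled=[False, True] as in the report:

prompt_embeds, pooled_prompt_embeds = compel_proc("whatever you want")
negative_prompt_embeds, pooled_negative_prompt_embeds = compel_proc("whatever you don't want")

# Raise a clear error instead of letting encode_prompt hit None.repeat(...).
for name, pooled in [("pooled positive", pooled_prompt_embeds),
                     ("pooled negative", pooled_negative_prompt_embeds)]:
    if pooled is None:
        raise ValueError(f"{name} embeddings are None; check requires_pooled on the Compel instance")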

@Woodyet

Woodyet commented May 28, 2024
