RuntimeError: expected scalar type Float but found Half in Stable Diffusion (Python)


Post by Anonymous »

I am currently using a Kaggle kernel with Python version 3.7, and it keeps failing with: RuntimeError: expected scalar type Float but found Half.
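
As far as I understand, this error generally means a float16 (Half) tensor and a float32 (Float) tensor ended up in the same operation. A toy snippet like this (not my actual code, just my assumption about the mechanism) reproduces the same kind of mismatch on CUDA:

Code:

import torch

layer = torch.nn.Linear(8, 8).half().cuda()   # weights are float16 (Half)
x = torch.randn(1, 8, device="cuda")          # inputs are float32 (Float) by default
layer(x)  # raises a RuntimeError complaining about mismatched Float/Half scalar types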
Installed packages:
accelerate 0.12.0
torch 1.9.1+cu111
torchaudio 0.9.1
torchmetrics 0.11.0
torchtext 0.12.0
torchvision 0.10.1+cu111
transformers 4.26.0
Here is my code:

Code:

from diffusers import StableDiffusionPipeline
import torch
import poetpy
import random
import os
import datetime
import gradio as gr
import time

model_id = "prompthero/openjourney-v2"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

def gen_image(requested_poem):
    count = 1

    # Look up the poem by "Title;Author"
    poem = poetpy.get_poetry("title,author", requested_poem)

    for j in range(0, count):
        try:
            print("In Process")
            req_time = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
            print(poem)
            title = poem[0]["title"]
            lines = poem[0]["lines"]
            author = poem[0]["author"]

            # Join the poem lines into a single prompt string
            prompt = ""
            for line in lines:
                prompt += line + " "

            print(title)
            # ai

            image = pipe(prompt).images[0]

            if os.path.exists("./gen_imgs/" + author) == False:
                os.mkdir("./gen_imgs/" + author)
            else:
                pass

            # Strip characters that are not allowed in file names
            file_path = "./gen_imgs/" + author + "/" + str(title.replace("?","").replace(".","").replace('"',"").replace("/","").replace("*","").replace("\\","").replace("", "").replace("|", "")) + " " + req_time + ".png"

            image.save(file_path)

        except IndexError:
            j = 0
            break
        except KeyError:
            j = 0
            break

    print("Session Terminated")

    return image, (author + " - " + title)

requested_poem = "the Raven;Edgar allan poe" # "Title;Author"

gen_image(requested_poem)
'''

app = gr.Interface(
    gen_image,
    title = "PoemJourney by coding_ms",
    inputs = [
        gr.Textbox(label = '그림으로 바꾸고 싶은 시(영문 반드시) Example) The Raven;Edgar Allan Poe')
    ],
    outputs = [
        gr.Image(),
        gr.Text()
    ]
)

app.launch(share=True, debug=True)
'''
Error:
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_24/1234688772.py in <module>
60 requested_poem = "the Raven;Edgar allan poe" # "Title;Author"
61
---> 62 gen_image(requested_poem)
63 '''
64

/tmp/ipykernel_24/1234688772.py in gen_image(requested_poem)
33             # ai
34
---> 35             image = pipe(prompt).images[0]
36
37             if os.path.exists("./gen_imgs/"+author) == False:

/opt/conda/lib/python3.7/site-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)
26         def decorate_context(*args, **kwargs):
27             with self.__class__():
---> 28                 return func(*args, **kwargs)
29         return cast(F, decorate_context)
30

/opt/conda/lib/python3.7/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py in __call__(self, prompt, height, width, num_inference_steps, guidance_scale, negative_prompt, num_images_per_prompt, eta, generator, latents, prompt_embeds, negative_prompt_embeds, output_type, return_dict, callback, callback_steps)
572             negative_prompt,
573             prompt_embeds=prompt_embeds,
-->  574             negative_prompt_embeds=negative_prompt_embeds,
575         )
576

/opt/conda/lib/python3.7/site-packages/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py in _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt, prompt_embeds, negative_prompt_embeds)
288             prompt_embeds = self.text_encoder(
289                 text_input_ids.to(device),
--> 290                 attention_mask=attention_mask,
291             )
292             prompt_embeds = prompt_embeds[0]

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
1052         # Do not call functions when jit is used
1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/clip/modeling_clip.py in forward(self, input_ids, attention_mask, position_ids, output_attentions, output_hidden_states, return_dict)
730             output_hidden_states=output_hidden_states,
731             return_dict=return_dict,
--> 732         )
733
734         last_hidden_state = encoder_outputs[0]

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
1052         # Do not call functions when jit is used
1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/clip/modeling_clip.py in forward(self, input_ids, attention_mask, position_ids, output_attentions, output_hidden_states, return_dict)
651                     causal_attention_mask,
652                 )
--> 653             else:
654                 layer_outputs = encoder_layer(
655                     hidden_states,

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
1052         # Do not call functions when jit is used
1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/clip/modeling_clip.py in forward(self, inputs_embeds, attention_mask, causal_attention_mask, output_attentions, output_hidden_states, return_dict)
580     """
581
--> 582     def __init__(self, config: CLIPConfig):
583         super().__init__()
584         self.config = config

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050                 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1051             return forward_call(*input, **kwargs)
1052         # Do not call functions when jit is used
1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/clip/modeling_clip.py in forward(self, hidden_states, attention_mask, causal_attention_mask, output_attentions)
323
324         if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
--> 325             raise ValueError(
326                 f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
327                 f" {attn_output.size()}"

/opt/conda/lib/python3.7/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)
1049         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1050                 or _global_forward_hooks or _global_forward_pre_hooks):
->  1051             return forward_call(*input, **kwargs)
1052         # Do not call functions when jit is used
1053         full_backward_hooks, non_full_backward_hooks = [], []

/opt/conda/lib/python3.7/site-packages/transformers/models/clip/modeling_clip.py in forward(self, hidden_states, attention_mask, causal_attention_mask, output_attentions)
258         return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
259
--> 260     def forward(
261         self,
262         hidden_states: torch.Tensor,

RuntimeError: expected scalar type Float but found Half
I tried installing newer, updated versions of transformers and torch, but that did not work either.
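
For reference, a plain float32 load (without torch_dtype=torch.float16) would look like the sketch below; as far as I understand that avoids the Half/Float mix entirely, but at the cost of roughly double the VRAM:

Code:

from diffusers import StableDiffusionPipeline

model_id = "prompthero/openjourney-v2"
# No torch_dtype=torch.float16 here, so every module stays in float32 (Float)
# and the text encoder never receives a Half/Float mix.
pipe = StableDiffusionPipeline.from_pretrained(model_id)
pipe = pipe.to("cuda")

image = pipe("test prompt").images[0]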

More details here: https://stackoverflow.com/questions/753 ... -diffusion