В настоящее время я работаю над личным пользовательским узлом, который на самом деле представляет собой просто сеть Apply Control, KSampler и VAE Decode, в режиме с намерением слегка изменить изображение, например добавить к персонажу куртку. Я работал нормально и выводил изображение, которое отправлял в узел «Сохранить изображение». Однако когда я пытаюсь вернуть что-то большее, чем просто изображение, я получаю ошибку ниже. Я сравнил его с другими узлами и попробовал каждый из новых выходов с изображением индивидуально, но изображение не работало. Я не знаю, где ошибка, поскольку единственное, что я изменил, — это возвращаемые значения.
Final Return Types:
Final Image: No shape
Model:
Out0:
Out1:
VAE:
!!! Exception during processing !!! Cannot handle this data type: (1, 1, 1024, 3), |u1
Traceback (most recent call last):
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\python_embeded\Lib\site-packages\PIL\Image.py", line 3315, in fromarray
mode, rawmode = _fromarray_typemap[typekey]
~~~~~~~~~~~~~~~~~~^^^^^^^^^
KeyError: ((1, 1, 1024, 3), '|u1')
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\ComfyUI\execution.py", line 323, in execute
output_data, output_ui, has_subgraph = get_output_data(obj, input_data_all, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\ComfyUI\execution.py", line 198, in get_output_data
return_values = _map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True, execution_block_cb=execution_block_cb, pre_execute_cb=pre_execute_cb)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\ComfyUI\execution.py", line 169, in _map_node_over_list
process_inputs(input_dict, i)
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\ComfyUI\execution.py", line 158, in process_inputs
results.append(getattr(obj, func)(**inputs))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\ComfyUI\nodes.py", line 1511, in save_images
img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\Users\batki\Documents\ComfyUI_windows_portable_nvidia\ComfyUI_windows_portable\python_embeded\Lib\site-packages\PIL\Image.py", line 3319, in fromarray
raise TypeError(msg) from e
TypeError: Cannot handle this data type: (1, 1, 1024, 3), |u1
Вот код, в котором прокомментирован исходный возврат, и эта версия работала нормально.
class ChangePipeOUT:
@classmethod
def INPUT_TYPES(s):
return {
"required": {
"pipe": ("PIPE_LINE",),
"image": ("IMAGE", ),
"positive": ("STRING", {"multiline": True, "default": ""}), # Editable positive text box
"negative": ("STRING", {"multiline": True, "default": ""}), # Editable negative text box
"strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 10.0, "step": 0.01}),
"start_percent": ("FLOAT", {"default": 0.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"end_percent": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.001}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "tooltip": "The random seed used for creating the noise."}),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000, "tooltip": "The number of steps used in the denoising process."}),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0, "step":0.1, "round": 0.01, "tooltip": "The Classifier-Free Guidance scale balances creativity and adherence to the prompt. Higher values result in images more closely matching the prompt however too high values will negatively impact quality."}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS, {"tooltip": "The algorithm used when sampling, this can affect the quality, speed, and style of the generated output."}),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS, {"tooltip": "The scheduler controls how noise is gradually removed to form the image."}),
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01, "tooltip": "The amount of denoising applied, lower values will maintain the structure of the initial image allowing for image to image sampling."}),
}
}
RETURN_TYPES = (
"MODEL","CONDITIONING","CONDITIONING","VAE","IMAGE"
)
RETURN_NAMES = (
"model","positive","negative","vae","image"
)
#RETURN_TYPES = ("IMAGE",)
#RETURN_NAMES = (
# "new_image"
#)
FUNCTION = "extract_pipe"
CATEGORY = "sampling"
def extract_pipe(self, pipe, image, positive, negative, strength, start_percent, end_percent, seed, steps, cfg, sampler_name, scheduler, denoise):
# Extract all components from the pipe object
vae = pipe["vae"]
model = pipe["model"]
clip = pipe["clip"]
control_net = pipe["control_net"]
latent = pipe["latent_image"]
full_positive = pipe["base_positive"] + positive
full_negative = pipe["base_negative"] + negative
positive_tokens = clip.tokenize(full_positive)
positive_output = clip.encode_from_tokens(positive_tokens, return_pooled=True, return_dict=True)
positive_cond = positive_output.pop("cond")
negative_tokens = clip.tokenize(full_negative)
negative_output = clip.encode_from_tokens(negative_tokens, return_pooled=True, return_dict=True)
negative_cond = negative_output.pop("cond")
if strength == 0:
print('strength = 0')
img, = self.sample(model, seed, steps, cfg, sampler_name, scheduler, [[positive_cond, positive_output]], [[negative_cond, negative_output]], latent, denoise)
final_image = self.decode(vae, img)
return (model, [[positive_cond, positive_output]], [[negative_cond, negative_output]], vae, final_image)
#return final_image
out0, out1 = self.apply_controlnet(
[[positive_cond, positive_output]], [[negative_cond, negative_output]],
control_net, image, strength, start_percent, end_percent, vae=vae, extra_concat=[]
)
img, = self.sample(model, seed, steps, cfg, sampler_name, scheduler, out0, out1, latent, denoise)
final_image = self.decode(vae, img)
pos = [[positive_cond, positive_output]]
neg = [[negative_cond, negative_output]]
print("Final Return Types:")
print("Final Image:", type(final_image), final_image.shape if hasattr(final_image, 'shape') else "No shape")
print("Model:", type(model))
print("Out0:", type(out0))
print("Out1:", type(out1))
print("VAE:", type(vae))
return model, pos, neg, vae, final_image
#return final_image
def apply_controlnet(self, positive, negative, control_net, image, strength, start_percent, end_percent, vae=None, extra_concat=[]):
if strength == 0:
return (positive, negative)
control_hint = image.movedim(-1,1)
cnets = {}
out = []
for conditioning in [positive, negative]:
c = []
for t in conditioning:
d = t[1].copy()
prev_cnet = d.get('control', None)
if prev_cnet in cnets:
c_net = cnets[prev_cnet]
else:
c_net = control_net.copy().set_cond_hint(control_hint, strength, (start_percent, end_percent), vae=vae, extra_concat=extra_concat)
c_net.set_previous_controlnet(prev_cnet)
cnets[prev_cnet] = c_net
d['control'] = c_net
d['control_apply_to_uncond'] = False
n = [t[0], d]
c.append(n)
out.append(c)
return (out[0], out[1])
def sample(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise):
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=denoise)
def decode(self, vae, samples):
images = vae.decode(samples["samples"])
if len(images.shape) == 5: #Combine batches
images = images.reshape(-1, images.shape[-3], images.shape[-2], images.shape[-1])
return (images, )
Подробнее здесь: https://stackoverflow.com/questions/791 ... just-an-im
Ошибка пользовательского узла Comfy UI с выводом IMAGE при возврате более чем просто IMAGE ⇐ Python
-
- Похожие темы
- Ответы
- Просмотры
- Последнее сообщение
-
-
Раздувать от узла до нескольких экземпляров функции узла в Langchain's Langgraph?
Anonymous » » в форуме Python - 0 Ответы
- 2 Просмотры
-
Последнее сообщение Anonymous
-