Как преобразовать .pth (формат предварительно обученной голосовой библиотеки, которую я использую) в .pt (формат, читаемый программой C#, с которой я ее запускаю)? Я нашел информацию только о torchvision (для преобразования .pth визуальных моделей).
Информации о PyTorch.audio мало, поэтому я немного запутался.(Я также знаю, что .pth и .pt по сути одинаковы, но Python мешает форматированию .pth).
Это было самое близкое, что мне удалось получить на примерах torchvision:
pth_file_path = 'FILE/PATH/model.pth'
pt_file_path = 'FILE/PATH/model.pt'
import torch
import torchaudio
from unet import UNet
from zmq import device
model = UNet(3, 2)
model.load_state_dict(torch.load(pth_file_path, map_location=torch.device('cpu')))
Но выдает ошибку времени выполнения:
model.load_state_dict(torch.load(pth_file_path, map_location=torch.device('cpu')))
Traceback (most recent call last):
File ".\IMPORT.py", line 11, in
model.load_state_dict(torch.load(pth_file_path, map_location=torch.device('cpu')))
File "C:\Users\user\AppData\Local\Programs\Python\Python38\lib\site-packages\torch\nn\modules\module.py", line 2215, in load_state_dict
raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
RuntimeError: Error(s) in loading state_dict for UNet:
Missing key(s) in state_dict: "encoder.encoding_blocks.0.conv1.conv_layer.weight", "encoder.encoding_blocks.0.conv1.conv_layer.bias", "encoder.encoding_blocks.0.conv1.block.0.weight", "encoder.encoding_blocks.0.conv1.block.0.bias", "encoder.encoding_blocks.0.conv2.conv_layer.weight", "encoder.encoding_blocks.0.conv2.conv_layer.bias", "encoder.encoding_blocks.0.conv2.block.0.weight", "encoder.encoding_blocks.0.conv2.block.0.bias", "encoder.encoding_blocks.1.conv1.conv_layer.weight", "encoder.encoding_blocks.1.conv1.conv_layer.bias", "encoder.encoding_blocks.1.conv1.block.0.weight", "encoder.encoding_blocks.1.conv1.block.0.bias", "encoder.encoding_blocks.1.conv2.conv_layer.weight", "encoder.encoding_blocks.1.conv2.conv_layer.bias", "encoder.encoding_blocks.1.conv2.block.0.weight", "encoder.encoding_blocks.1.conv2.block.0.bias", "encoder.encoding_blocks.2.conv1.conv_layer.weight", "encoder.encoding_blocks.2.conv1.conv_layer.bias", "encoder.encoding_blocks.2.conv1.block.0.weight", "encoder.encoding_blocks.2.conv1.block.0.bias", "encoder.encoding_blocks.2.conv2.conv_layer.weight", "encoder.encoding_blocks.2.conv2.conv_layer.bias", "encoder.encoding_blocks.2.conv2.block.0.weight", "encoder.encoding_blocks.2.conv2.block.0.bias", "encoder.encoding_blocks.3.conv1.conv_layer.weight", "encoder.encoding_blocks.3.conv1.conv_layer.bias", "encoder.encoding_blocks.3.conv1.block.0.weight", "encoder.encoding_blocks.3.conv1.block.0.bias", "encoder.encoding_blocks.3.conv2.conv_layer.weight", "encoder.encoding_blocks.3.conv2.conv_layer.bias", "encoder.encoding_blocks.3.conv2.block.0.weight", "encoder.encoding_blocks.3.conv2.block.0.bias", "bottom_block.conv1.conv_layer.weight", "bottom_block.conv1.conv_layer.bias", "bottom_block.conv1.block.0.weight", "bottom_block.conv1.block.0.bias", "bottom_block.conv2.conv_layer.weight", "bottom_block.conv2.conv_layer.bias", "bottom_block.conv2.block.0.weight", "bottom_block.conv2.block.0.bias", "decoder.decoding_blocks.0.upsample.weight", "decoder.decoding_blocks.0.upsample.bias", "decoder.decoding_blocks.0.conv1.conv_layer.weight", "decoder.decoding_blocks.0.conv1.conv_layer.bias", "decoder.decoding_blocks.0.conv1.block.0.weight", "decoder.decoding_blocks.0.conv1.block.0.bias", "decoder.decoding_blocks.0.conv2.conv_layer.weight", "decoder.decoding_blocks.0.conv2.conv_layer.bias", "decoder.decoding_blocks.0.conv2.block.0.weight", "decoder.decoding_blocks.0.conv2.block.0.bias", "decoder.decoding_blocks.1.upsample.weight", "decoder.decoding_blocks.1.upsample.bias", "decoder.decoding_blocks.1.conv1.conv_layer.weight", "decoder.decoding_blocks.1.conv1.conv_layer.bias", "decoder.decoding_blocks.1.conv1.block.0.weight", "decoder.decoding_blocks.1.conv1.block.0.bias", "decoder.decoding_blocks.1.conv2.conv_layer.weight", "decoder.decoding_blocks.1.conv2.conv_layer.bias", "decoder.decoding_blocks.1.conv2.block.0.weight", "decoder.decoding_blocks.1.conv2.block.0.bias", "decoder.decoding_blocks.2.upsample.weight", "decoder.decoding_blocks.2.upsample.bias", "decoder.decoding_blocks.2.conv1.conv_layer.weight", "decoder.decoding_blocks.2.conv1.conv_layer.bias", "decoder.decoding_blocks.2.conv1.block.0.weight", "decoder.decoding_blocks.2.conv1.block.0.bias", "decoder.decoding_blocks.2.conv2.conv_layer.weight", "decoder.decoding_blocks.2.conv2.conv_layer.bias", "decoder.decoding_blocks.2.conv2.block.0.weight", "decoder.decoding_blocks.2.conv2.block.0.bias", "decoder.decoding_blocks.3.upsample.weight", "decoder.decoding_blocks.3.upsample.bias", "decoder.decoding_blocks.3.conv1.conv_layer.weight", "decoder.decoding_blocks.3.conv1.conv_layer.bias", "decoder.decoding_blocks.3.conv1.block.0.weight", "decoder.decoding_blocks.3.conv1.block.0.bias", "decoder.decoding_blocks.3.conv2.conv_layer.weight", "decoder.decoding_blocks.3.conv2.conv_layer.bias", "decoder.decoding_blocks.3.conv2.block.0.weight", "decoder.decoding_blocks.3.conv2.block.0.bias", "classifier.conv_layer.weight", "classifier.conv_layer.bias", "classifier.block.0.weight", "classifier.block.0.bias".
Unexpected key(s) in state_dict: "weight", "config", "info", "sr", "f0", "version".
Подробнее здесь: https://stackoverflow.com/questions/791 ... th-to-a-pt
Как преобразовать .pth в .pt? ⇐ Python
-
- Похожие темы
- Ответы
- Просмотры
- Последнее сообщение