I found a sleep-stage classification model in a paper that works from the ECG signal, and the model is publicly available on GitHub. It expects an input window of 270 seconds sampled at 200 Hz, so the input size is (1, 54000), and that works fine. I want to see how it performs when the signal is downsampled to 64 Hz, which gives an input window of 64 * 270 = (1, 17280); a resampling sketch follows the questions below. I have two questions.
1. Is it appropriate to change only the input, leaving the kernel size alone, or should the kernel size be reduced as well?
2. How should the model be modified so that it can work with 64 Hz input?
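For context, this is roughly how I produce the 64 Hz signal (a minimal sketch, assuming scipy is available; resample_poly with up=8, down=25 maps 200 Hz to 64 Hz, since 200 * 8/25 = 64):

import numpy as np
from scipy.signal import resample_poly

fs_in, fs_out = 200, 64
ecg_200hz = np.random.randn(270 * fs_in)      # 270 s window at 200 Hz -> 54000 samples
# 64/200 reduces to 8/25: upsample by 8, downsample by 25
ecg_64hz = resample_poly(ecg_200hz, up=8, down=25)
print(ecg_64hz.shape)                         # (17280,) = 64 * 270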
Here is example code to run the model:
import torch as th
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

class ResBlock(nn.Module):
    def __init__(self, Lin, Lout, filter_len, dropout, subsampling, momentum, maxpool_padding=0):
        assert filter_len % 2 == 1
        super(ResBlock, self).__init__()
        self.Lin = Lin
        self.Lout = Lout
        self.filter_len = filter_len
        self.dropout = dropout
        self.subsampling = subsampling
        self.momentum = momentum
        self.maxpool_padding = maxpool_padding
        self.bn1 = nn.BatchNorm1d(self.Lin, momentum=self.momentum, affine=True)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(self.dropout)
        self.conv1 = nn.Conv1d(self.Lin, self.Lin, self.filter_len, stride=self.subsampling, padding=self.filter_len//2, bias=False)
        self.bn2 = nn.BatchNorm1d(self.Lin, momentum=self.momentum, affine=True)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(self.dropout)
        self.conv2 = nn.Conv1d(self.Lin, self.Lout, self.filter_len, stride=1, padding=self.filter_len//2, bias=False)
        #self.bn3 = nn.BatchNorm1d(self.Lout, momentum=self.momentum, affine=True)
        if self.Lin == self.Lout and self.subsampling > 1:
            self.maxpool = nn.MaxPool1d(self.subsampling, padding=self.maxpool_padding)

    def forward(self, x):
        if self.Lin == self.Lout:
            res = x
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.dropout1(x)
        x = self.conv1(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        x = self.conv2(x)
        if self.Lin == self.Lout:
            if self.subsampling > 1:
                x = x + self.maxpool(res)
            else:
                x = x + res
        #x = self.bn3(x)
        return x
class ECGSleepNet(nn.Module):
    def __init__(self, to_combine=False, nb_classes=5, n_timestep=54000):#, filter_len):
        super(ECGSleepNet, self).__init__()
        self.filter_len = 17#33
        self.filter_num = 64#16
        self.padding = self.filter_len//2
        self.dropout = 0.5
        self.momentum = 0.1
        self.subsampling = 4
        self.n_channel = 1
        self.n_timestep = n_timestep#54000#//2
        #self.n_output = 5
        self.n_output = nb_classes
        self.to_combine = to_combine

        # input convolutional block
        # 1 x 54000
        self.conv1 = nn.Conv1d(1, self.filter_num, self.filter_len, stride=1, padding=self.padding, bias=False)
        self.bn1 = nn.BatchNorm1d(self.filter_num, momentum=self.momentum, affine=True)
        self.relu1 = nn.ReLU()

        # 64 x 54000
        self.conv2_1 = nn.Conv1d(self.filter_num, self.filter_num, self.filter_len, stride=self.subsampling, padding=self.padding, bias=False)
        self.bn2 = nn.BatchNorm1d(self.filter_num, momentum=self.momentum, affine=True)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(self.dropout)
        self.conv2_2 = nn.Conv1d(self.filter_num, self.filter_num, self.filter_len, stride=1, padding=self.padding, bias=False)
        self.maxpool2 = nn.MaxPool1d(self.subsampling)
        #self.bn_input = nn.BatchNorm1d(self.filter_num, momentum=self.momentum, affine=True)

        # 64 x 13500
        self.resblock1 = ResBlock(self.filter_num, self.filter_num, self.filter_len,
                                  self.dropout, 1, self.momentum)
        self.resblock2 = ResBlock(self.filter_num, self.filter_num, self.filter_len,
                                  self.dropout, self.subsampling, self.momentum)
        self.resblock3 = ResBlock(self.filter_num, self.filter_num*2, self.filter_len,
                                  self.dropout, 1, self.momentum)
        self.resblock4 = ResBlock(self.filter_num*2, self.filter_num*2, self.filter_len,
                                  self.dropout, self.subsampling, self.momentum, maxpool_padding=1)

        # 128 x 844
        self.resblock5 = ResBlock(self.filter_num*2, self.filter_num*2, self.filter_len,
                                  self.dropout, 1, self.momentum)
        self.resblock6 = ResBlock(self.filter_num*2, self.filter_num*2, self.filter_len,
                                  self.dropout, self.subsampling, self.momentum)
        self.resblock7 = ResBlock(self.filter_num*2, self.filter_num*3, self.filter_len,
                                  self.dropout, 1, self.momentum)
        self.resblock8 = ResBlock(self.filter_num*3, self.filter_num*3, self.filter_len,
                                  self.dropout, self.subsampling, self.momentum, maxpool_padding=1)

        # 192 x 53
        self.resblock9 = ResBlock(self.filter_num*3, self.filter_num*3, self.filter_len,
                                  self.dropout, 1, self.momentum)
        self.resblock10 = ResBlock(self.filter_num*3, self.filter_num*3, self.filter_len,
                                   self.dropout, self.subsampling, self.momentum, maxpool_padding=2)
        self.resblock11 = ResBlock(self.filter_num*3, self.filter_num*4, self.filter_len,
                                   self.dropout, 1, self.momentum)
        self.resblock12 = ResBlock(self.filter_num*4, self.filter_num*4, self.filter_len,
                                   self.dropout, self.subsampling, self.momentum, maxpool_padding=2)

        # 256 x 4
        self.resblock13 = ResBlock(self.filter_num*4, self.filter_num*5, self.filter_len,
                                   self.dropout, 1, self.momentum)

        # 320 x 4
        self.bn_output = nn.BatchNorm1d(self.filter_num*5, momentum=self.momentum, affine=True)
        self.relu_output = nn.ReLU()

        #if not self.to_combine:
        dummy = self._forward(Variable(th.ones(1, self.n_channel, self.n_timestep)))
        self.fc_output = nn.Linear(dummy.size(1), self.n_output)
    def _forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        res = x
        x = self.conv2_1(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.dropout2(x)
        x = self.conv2_2(x)
        x = x + self.maxpool2(res)
        #x = self.bn_input(x)
        x = self.resblock1(x)
        x = self.resblock2(x)
        x = self.resblock3(x)
        x = self.resblock4(x)
        x = self.resblock5(x)
        x = self.resblock6(x)
        x = self.resblock7(x)
        x = self.resblock8(x)
        if hasattr(self, 'to_combine') and self.to_combine:
            return x
        x = self.resblock9(x)
        x = self.resblock10(x)
        x = self.resblock11(x)
        x = self.resblock12(x)
        x = self.resblock13(x)
        x = self.bn_output(x)
        x = self.relu_output(x)
        x = x.view(x.size(0), -1)
        return x

    def forward(self, x):
        h = self._forward(x)
        if not hasattr(self, 'to_combine') or not self.to_combine:
            x = self.fc_output(h)
        return x, h

    def load_param(self, model_path):
        model = th.load(model_path)
        if type(model) == nn.DataParallel and hasattr(model, 'module'):
            model = model.module
        if hasattr(model, 'state_dict'):
            model = model.state_dict()
        self.load_state_dict(model)

    def fix_param(self):
        for param in self.parameters():
            param.requires_grad = False

    def unfix_param(self):
        for param in self.parameters():
            param.requires_grad = True

    def init(self, method='orth'):
        pass
if __name__ == '__main__':
    Hz200_input = th.rand(1, 1, 54000)
    Hz64_input = th.rand(1, 1, 64*270)
    ECGPaper = ECGSleepNet(nb_classes=5)
    output = ECGPaper(Hz200_input)
    output = ECGPaper(Hz64_input)
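The per-block output sizes quoted further down can be printed with forward hooks; this is a minimal sketch I added, not part of the original script:

def add_shape_hooks(model):
    # print each ResBlock's output shape as data flows through the network
    for name, module in model.named_modules():
        if isinstance(module, ResBlock):
            module.register_forward_hook(
                lambda m, inp, out, name=name: print('Output after %s:' % name, out.shape))

add_shape_hooks(ECGPaper)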
This works fine for the 200 Hz input, but with the 64 Hz input it throws an error:
in forward
x = x+self.maxpool(res)
RuntimeError: The size of tensor a (68) must match the size of tensor b (67) at non-singleton dimension 2
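The two mismatched sizes come straight from the standard Conv1d/MaxPool1d output-length formula, floor((L + 2p - k)/s) + 1. A quick check I added (not in the original code), using the length 270 that reaches the failing block at 64 Hz (see the printout below):

def out_len(L, k, s, p):
    # PyTorch Conv1d / MaxPool1d length formula (dilation=1, floor rounding)
    return (L + 2*p - k) // s + 1

L = 270
print(out_len(L, k=17, s=4, p=8))  # conv path:               68
print(out_len(L, k=4,  s=4, p=0))  # maxpool shortcut, pad=0: 67 -> mismatch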
So the failure is in the sixth residual block, x = self.resblock6(x). Obviously the layer sizes change as the input size changes, but how do I account for that properly? Printing the residual blocks' output sizes gives the following for the first blocks at 200 Hz and at 64 Hz:
output = ECGPaper(Hz200_input)
Output after resblock1: torch.Size([1, 64, 13500])
Output after resblock2: torch.Size([1, 64, 3375])
Output after resblock3: torch.Size([1, 128, 3375])
Output after resblock4: torch.Size([1, 128, 844])
Output after resblock5: torch.Size([1, 128, 844])
Output after resblock6: torch.Size([1, 128, 211])
Output after resblock7: torch.Size([1, 192, 211])
Output after resblock8: torch.Size([1, 192, 53])
output = ECGPaper(Hz64_input)
Output after resblock1: torch.Size([1, 64, 4320])
Output after resblock2: torch.Size([1, 64, 1080])
Output after resblock3: torch.Size([1, 128, 1080])
Output after resblock4: torch.Size([1, 128, 270])
Output after resblock5: torch.Size([1, 128, 270])
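One workaround I can see (my sketch, not from the original repo) is to derive each strided block's maxpool_padding from the actual length reaching it, so the pooled shortcut matches the conv path; at 64 Hz, resblock6 receives 270 samples and would need maxpool_padding=1:

# sketch: compute the maxpool_padding a strided ResBlock needs for input length L
def conv_path_len(L, filter_len=17, stride=4):
    return (L + 2*(filter_len//2) - filter_len) // stride + 1

def pool_path_len(L, pad, kernel=4, stride=4):
    return (L + 2*pad - kernel) // stride + 1

def needed_maxpool_padding(L, filter_len=17, subsampling=4):
    target = conv_path_len(L, filter_len, subsampling)
    for pad in range(subsampling//2 + 1):  # MaxPool1d requires pad <= kernel_size/2
        if pool_path_len(L, pad, subsampling, subsampling) == target:
            return pad
    raise ValueError('no valid maxpool_padding for length %d' % L)

print(needed_maxpool_padding(270))  # -> 1, i.e. resblock6 needs maxpool_padding=1 at 64 Hz

Alternatively, building the shortcut MaxPool1d layers with ceil_mode=True rounds the pooled length up, which matches the "same"-padded strided conv here, though I have not verified it for every block and input length.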