I am currently working on a project where I want to use the SwinUNETR model and save its output the same way DL+DiReCT does. The goal is to perform brain segmentation.
However, when I pass different files as input, the output shown in the Spyder plot pane stays the same. I suspect this is because the code never actually saves the model's output.
import os
import glob
import nibabel as nib
import subprocess
import uuid
import matplotlib.pyplot as plt
from pathlib import Path
# Define folder path
folder_path = r'F:\yplai\research-contributions-main\SwinUNETR\BRATS21\dataset\101524'
# Path and filename
script_path = r'F:\yplai\DL-DiReCT-main\src\DeepSCAN_Anatomy_Newnet_apply.py'
model_path = r'F:\yplai\research-contributions-main\SwinUNETR\BRATS21\pretrained_models\model.pt' # Replace with the actual model file path
lowmem = "True" # or "False", depending on your needs
destination_dir = r'F:\yplai\destination'
check_files = [
    'softmax_seg.nii.gz',
    'seg_Left-Amygdala.nii.gz',
    'seg_Left-Cerebral-Cortex.nii.gz',
    'seg_Left-Cerebral-White-Matter.nii.gz',
    'seg_Left-Hippocampus.nii.gz',
    'seg_Right-Amygdala.nii.gz',
    'seg_Right-Cerebral-Cortex.nii.gz',
    'seg_Right-Cerebral-White-Matter.nii.gz',
    'seg_Right-Hippocampus.nii.gz',
    'seg_WM-hypointensities.nii.gz',
]
#unet.load_state_dict(checkpoint['state_dict'], strict=False)
def process_nii_folder(folder_path):
    # Refresh check_files with whatever already exists in the destination directory,
    # so newly produced output files are also checked below.
    updated_check_files = os.listdir(destination_dir)
    for file in updated_check_files:
        if file not in check_files:
            check_files.append(file)
    # Iterate over the input folder (use a separate variable so the global
    # check_files list is not shadowed inside the function).
    input_files = os.listdir(folder_path)
    for input_file in input_files:
        input_file_path = os.path.join(folder_path, input_file)
        # Check whether the file is in NIfTI format
        if input_file_path.endswith('.nii.gz'):
            # Load the image and display its slices
            img = nib.load(input_file_path)
            img_data = img.get_fdata()
            # Iterate over the slices of the volume
            for z_slice in range(img_data.shape[2]):
                plt.imshow(img_data[:, :, z_slice], cmap='gray')
                plt.title(f"{os.path.basename(input_file_path)} - Slice {z_slice}")
                plt.axis('off')
                plt.show()
        else:
            print(f"Ignore non-NIfTI file: {input_file_path}")
    # Display the middle slice of each expected output file in the destination directory
    for check_file in check_files:
        check_file_path = os.path.join(destination_dir, check_file)
        if os.path.exists(check_file_path):
            img = nib.load(check_file_path)
            img_data = img.get_fdata()
            plt.imshow(img_data[:, :, img_data.shape[2] // 2], cmap='gray')
            plt.title(os.path.basename(check_file_path))
            plt.axis('off')
            plt.show()
        else:
            print(f"File not found: {check_file_path}")

# Ensure the destination directory exists
os.makedirs(destination_dir, exist_ok=True)
# Process the folder
process_nii_folder(folder_path)
This is the code I wrote. It can display the input data, but it cannot display the output.
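For reference, DL+DiReCT writes one binary seg_<structure>.nii.gz per region, which is what the check_files list above expects. A minimal sketch of splitting a saved label map into such files could look like the following; the label_names mapping and the file paths are hypothetical placeholders, not DL+DiReCT's actual label table:

import os
import nibabel as nib
import numpy as np

# Hypothetical label-id -> structure-name mapping; replace it with the label
# table your model was actually trained with.
label_names = {1: 'Left-Hippocampus', 2: 'Right-Hippocampus'}

def save_per_structure_masks(pred_path, out_dir):
    # Split a label volume into one binary seg_<structure>.nii.gz per label
    img = nib.load(pred_path)
    labels = img.get_fdata().astype(np.int32)
    os.makedirs(out_dir, exist_ok=True)
    for label_id, name in label_names.items():
        mask = (labels == label_id).astype(np.uint8)
        out_file = os.path.join(out_dir, f'seg_{name}.nii.gz')
        nib.save(nib.Nifti1Image(mask, img.affine), out_file)
        print('Saved', out_file)

# Example call with placeholder paths:
# save_per_structure_masks(r'F:\yplai\destination\prediction.nii.gz', r'F:\yplai\destination')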
I am trying to compute the accuracy and the Dice score this way and to save the output, but I cannot get any results.
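Independently of MONAI, the Dice score and voxel-wise accuracy for one pair of label volumes can be checked with plain NumPy. A minimal sketch, assuming pred.nii.gz and gt.nii.gz are placeholder file names for a prediction and its ground truth:

import nibabel as nib
import numpy as np

def dice_and_accuracy(pred_path, gt_path, label=1):
    # Dice = 2*|P ∩ G| / (|P| + |G|) for one label; accuracy over all voxels
    pred = nib.load(pred_path).get_fdata().astype(np.int32)
    gt = nib.load(gt_path).get_fdata().astype(np.int32)
    assert pred.shape == gt.shape, "prediction and ground truth must have the same shape"
    p = (pred == label)
    g = (gt == label)
    denom = p.sum() + g.sum()
    dice = 2.0 * np.logical_and(p, g).sum() / denom if denom > 0 else 1.0
    accuracy = (pred == gt).mean()
    return dice, accuracy

# Example with placeholder paths:
# dice, acc = dice_and_accuracy('pred.nii.gz', 'gt.nii.gz', label=1)
# print(f"Dice: {dice:.4f}, accuracy: {acc * 100:.2f}%")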
import os
import glob
import nibabel as nib
import subprocess
import uuid
import matplotlib.pyplot as plt
import torch
import numpy as np
from pathlib import Path
from monai.networks.nets import SwinUNETR
from monai.data import Dataset, DataLoader
import torch.nn.functional as F
from sklearn.metrics import accuracy_score
from monai.metrics import DiceMetric
#from torchvision.transforms import Resize
from monai.transforms import Resize
# Load the model and the checkpoint
model = SwinUNETR(img_size=(128, 128, 128), in_channels=1, out_channels=2, feature_size=48)  # instantiate with explicit sizes
checkpoint = torch.load('path_to_checkpoint.pth')['state_dict']  # placeholder path; the weights live under 'state_dict'
# Copy the pretrained weights into the model, slicing them so that the
# 4-channel input / 3-channel output BRATS weights fit this 1-in / 2-out model
model.swinViT.patch_embed.proj.weight.data[:] = checkpoint['swinViT.patch_embed.proj.weight'][:, :1, ...]
model.encoder1.layer.conv1.conv.weight.data[:] = checkpoint['encoder1.layer.conv1.conv.weight'][:, :1, ...]
model.encoder1.layer.conv3.conv.weight.data[:] = checkpoint['encoder1.layer.conv3.conv.weight'][:, :1, ...]
model.out.conv.conv.weight.data[:] = checkpoint['out.conv.conv.weight'][:2, ...]
model.out.conv.conv.bias.data[:] = checkpoint['out.conv.conv.bias'][:2]
# The parameters were modified in place above, so no extra load_state_dict call is needed here
# Continue with your model training or evaluation
# Define folder paths
folder_path = r'F:\yplai\research-contributions-main\SwinUNETR\BRATS21\dataset\101524'
destination_dir = r'F:\yplai\research-contributions-main\SwinUNETR\BRATS21\outputs'
# Check the input files
if os.path.exists(folder_path):
    files = os.listdir(folder_path)
    if files:
        print("Files in the folder:")
        for file in files:
            print(file)
    else:
        print("The folder is empty")
else:
    print("The folder does not exist")
# Load the SwinUNETR model
model_path = r'F:\yplai\research-contributions-main\SwinUNETR\BRATS21\pretrained_models\fold0_f48_ep300_4gpu_dice0_8854\fold0_f48_ep300_4gpu_dice0_8854\model.pt' # Replace with the actual model file path
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SwinUNETR(
    img_size=(128, 128, 128),
    in_channels=1,
    out_channels=2,
    feature_size=48,
    use_checkpoint=False,
).to(device)
# Load the pretrained model weights
checkpoint = torch.load(model_path, map_location=device)
pretrained_dict = checkpoint['state_dict']
# Load the state_dict with filtering out unexpected keys
model_dict = model.state_dict()
filtered_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(filtered_dict)
model.load_state_dict(model_dict)
# NOTE: do not redefine SwinUNETR here. A second, empty SwinUNETR class would shadow
# the MONAI import, and creating a new instance would discard the pretrained weights
# that were just loaded, so every input would produce the same (untrained) output.
# To inspect intermediate tensor shapes, register forward hooks on the existing model:
def print_shape_hook(name):
    def hook(module, inputs, output):
        if isinstance(output, torch.Tensor):
            print(f"{name} output shape: {output.shape}")
    return hook

for name, module in model.named_children():
    module.register_forward_hook(print_shape_hook(name))

print(model)
# Manually adjust the weight shapes to match the current model:
# average the 4-channel input layer down to 1 channel
if 'swinViT.patch_embed.proj.weight' in pretrained_dict:
    pretrained_dict['swinViT.patch_embed.proj.weight'] = pretrained_dict['swinViT.patch_embed.proj.weight'].mean(1, keepdim=True)
if 'encoder1.layer.conv1.conv.weight' in pretrained_dict:
    pretrained_dict['encoder1.layer.conv1.conv.weight'] = pretrained_dict['encoder1.layer.conv1.conv.weight'].mean(1, keepdim=True)
if 'encoder1.layer.conv3.conv.weight' in pretrained_dict:
    pretrained_dict['encoder1.layer.conv3.conv.weight'] = pretrained_dict['encoder1.layer.conv3.conv.weight'].mean(1, keepdim=True)
# Adjust the output layer: crop the 3-channel output down to 2 channels
if 'out.conv.conv.weight' in pretrained_dict:
    pretrained_dict['out.conv.conv.weight'] = pretrained_dict['out.conv.conv.weight'][:2, :]
if 'out.conv.conv.bias' in pretrained_dict:
    pretrained_dict['out.conv.conv.bias'] = pretrained_dict['out.conv.conv.bias'][:2]
# Update the model's state dict with the adjusted weights and load them once
# (strict=False tolerates any remaining key mismatches)
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict, strict=False)
# Switch the model to inference mode
model.eval()
dice_metric = DiceMetric(include_background=False, reduction="mean")
def process_nii_folder(folder_path, destination_dir):
    # List all NIfTI files in the input folder
    nii_files = glob.glob(os.path.join(folder_path, '*.nii.gz'))
    accuracy_list = []  # List to store accuracy values
    # MONAI's Resize works on channel-first tensors (C, H, W, D)
    resize_transform = Resize(spatial_size=(128, 128, 128))  # resize to the model's input size
    for file_path in nii_files:
        # Load the image file and add a channel dimension -> (1, H, W, D)
        img = nib.load(file_path)
        img_data = img.get_fdata()
        img_data = np.expand_dims(img_data, axis=0)
        # Convert to tensor and move to device
        img_tensor = torch.tensor(img_data, dtype=torch.float32).to(device)
        # Resize the image tensor to match the model's required input size
        img_tensor_resized = resize_transform(img_tensor)
        # Add a batch dimension -> (1, 1, 128, 128, 128); SwinUNETR expects 5D input
        input_tensor = img_tensor_resized.unsqueeze(0)
        print(f"Input shape before model: {input_tensor.shape}")
        with torch.no_grad():
            # Run inference
            output = model(input_tensor)
            probabilities = F.softmax(output, dim=1)  # Apply softmax to get probabilities
            pred = torch.argmax(probabilities, dim=1).cpu().numpy()  # Predicted labels, shape (1, 128, 128, 128)
        # Placeholder accuracy: there is no ground truth in this function
        accuracy = 0.0
        accuracy_list.append(accuracy)
        # Generate a unique filename and save the prediction as a NIfTI image
        unique_filename = str(uuid.uuid4()) + ".nii.gz"
        processed_file_path = os.path.join(destination_dir, unique_filename)
        pred_nifti = nib.Nifti1Image(pred[0].astype(np.uint8), img.affine)  # reuse the affine of the input image
        nib.save(pred_nifti, processed_file_path)
        print("Processed file path:", processed_file_path)
        # Optional: visualize the middle slice of the predicted segmentation
        plt.imshow(pred[0, :, :, pred.shape[-1] // 2], cmap='gray')
        plt.title(f'Predicted segmentation for {os.path.basename(file_path)}')
        plt.axis('off')
        plt.show()
    return accuracy_list

# Ensure the destination directory exists
os.makedirs(destination_dir, exist_ok=True)
accuracy_list = process_nii_folder(folder_path, destination_dir)
# Print overall accuracy
avg_accuracy = sum(accuracy_list) / len(accuracy_list) if accuracy_list else 0
print(f"Average accuracy over all files: {avg_accuracy * 100:.2f}%")
def process_nii_folder_with_accuracy(folder_path, ground_truth_path):
    # List all NIfTI files in the input and ground-truth folders
    nii_files = glob.glob(os.path.join(folder_path, '*.nii.gz'))
    ground_truth_files = glob.glob(os.path.join(ground_truth_path, '*.nii.gz'))
    accuracy_list = []  # List to store accuracy values
    dice_scores = []  # List to store Dice scores
    for file_path, gt_path in zip(nii_files, ground_truth_files):
        # Load the input image and add a channel dimension -> (1, H, W, D)
        img = nib.load(file_path)
        img_data = img.get_fdata()
        img_data = np.expand_dims(img_data, axis=0)
        # Convert to tensor and move to device
        img_tensor = torch.tensor(img_data, dtype=torch.float32).to(device)
        # Load ground truth segmentation (integer labels)
        gt_img = nib.load(gt_path)
        gt_data = gt_img.get_fdata().astype(np.int32)
        # Make sure the ground truth matches the input image shape
        assert gt_data.shape == img_data.shape[1:], "Ground truth size does not match the input image size."
        gt_tensor = torch.tensor(gt_data, dtype=torch.int64).unsqueeze(0).to(device)  # (1, H, W, D)
        with torch.no_grad():
            # Run inference; the model expects a 5D tensor (B, C, H, W, D) and a
            # spatial size compatible with SwinUNETR (divisible by 32)
            output = model(img_tensor.unsqueeze(0))
            probabilities = F.softmax(output, dim=1)  # Apply softmax to get probabilities
            pred_tensor = torch.argmax(probabilities, dim=1)  # Predicted labels, (1, H, W, D)
            pred = pred_tensor.cpu().numpy()
        # Calculate Dice score: DiceMetric expects one-hot inputs of shape (B, C, H, W, D)
        num_classes = probabilities.shape[1]
        pred_onehot = F.one_hot(pred_tensor, num_classes).permute(0, 4, 1, 2, 3).float()
        gt_onehot = F.one_hot(gt_tensor, num_classes).permute(0, 4, 1, 2, 3).float()
        dice_score = dice_metric(y_pred=pred_onehot, y=gt_onehot)
        # Calculate voxel-level accuracy
        pred_flat = pred.flatten()
        gt_flat = gt_data.flatten()
        accuracy = accuracy_score(gt_flat, pred_flat)
        accuracy_list.append(accuracy)
        dice_scores.append(dice_score.item())
        # Prediction check
        print(f"Prediction shape: {pred.shape}")
        plt.imshow(pred[0, :, :, pred.shape[-1] // 2], cmap='gray')
        plt.title('Middle slice of the prediction')
        plt.show()
        # Ground truth check
        print(f"Ground truth shape: {gt_data.shape}")
        plt.imshow(gt_data[:, :, gt_data.shape[2] // 2], cmap='gray')
        plt.title('Middle slice of the ground truth')
        plt.show()
        print(f"Processed file: {file_path}")
        print(f"Accuracy: {accuracy * 100:.2f}%")
        print(f"Dice Score: {dice_score.item():.4f}")
        # Save the prediction as a NIfTI image
        unique_filename = str(uuid.uuid4()) + ".nii.gz"
        processed_file_path = os.path.join(destination_dir, unique_filename)
        pred_nifti = nib.Nifti1Image(pred[0].astype(np.uint8), img.affine)
        nib.save(pred_nifti, processed_file_path)
        print("Processed file saved at:", processed_file_path)
        # Visualize the middle slice of the predicted segmentation
        plt.imshow(pred[0, :, :, pred.shape[-1] // 2], cmap='gray')
        plt.title(f'Predicted segmentation for {os.path.basename(file_path)}')
        plt.axis('off')
        plt.show()
    # Print overall accuracy and Dice score
    avg_accuracy = sum(accuracy_list) / len(accuracy_list) if accuracy_list else 0
    avg_dice_score = sum(dice_scores) / len(dice_scores) if dice_scores else 0
    print(f"Average accuracy over all files: {avg_accuracy * 100:.2f}%")
    print(f"Average Dice Score over all files: {avg_dice_score:.4f}")

# Provide the ground truth folder path
ground_truth_path = r'path_to_ground_truth_files'  # Replace with actual path
# Process the folder with accuracy and Dice score
process_nii_folder_with_accuracy(folder_path, ground_truth_path)
More details here: https://stackoverflow.com/questions/791 ... -in-just-a
I want to write the computed voxel values into my own CSV file, but I cannot do it the same way the DL+DiReCT code does.
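DL+DiReCT reports its volumes in result tables; without relying on its exact format, a minimal sketch of writing per-structure voxel counts and volumes from a saved prediction to a CSV could look like this (the label_names mapping and all paths are hypothetical placeholders):

import csv
import nibabel as nib
import numpy as np

# Hypothetical label-id -> structure-name mapping; adjust it to your model's labels.
label_names = {1: 'Left-Hippocampus', 2: 'Right-Hippocampus'}

def write_volume_csv(pred_path, csv_path):
    # Write voxel count and volume (mm^3) per structure to a CSV file
    img = nib.load(pred_path)
    labels = img.get_fdata().astype(np.int32)
    voxel_volume = float(np.prod(img.header.get_zooms()[:3]))  # mm^3 per voxel
    with open(csv_path, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerow(['structure', 'voxel_count', 'volume_mm3'])
        for label_id, name in label_names.items():
            count = int((labels == label_id).sum())
            writer.writerow([name, count, count * voxel_volume])

# Example with placeholder paths:
# write_volume_csv(r'F:\yplai\destination\prediction.nii.gz', r'F:\yplai\destination\volumes.csv')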