-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathensemble.py
108 lines (84 loc) · 3.26 KB
/
ensemble.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import os
import shutil
import sys
import cv2
import numpy as np
import segmentation_models_pytorch as smp
import torch.nn as nn
# create test dataset
import torch
from sklearn.metrics import roc_auc_score, confusion_matrix
from torch.utils.data import DataLoader
from torch.nn import functional as F
import config
from datasets import DRACDataset
from preprocess import get_validation_augmentation, get_preprocessing
from utils import visualize
from tqdm import tqdm
# Pull run configuration from the shared config module so this script
# stays consistent with the training settings.
ENCODER = config.ENCODER
ENCODER_WEIGHTS = config.ENCODER_WEIGHTS
CLASSES = config.CLASSES
ACTIVATION = config.ACTIVATION
DEVICE = config.DEVICE
DATA_DIR = config.DATA_DIR
# Map short task codes to the DRAC challenge ground-truth folder names.
# NOTE(review): 'NE' and 'NE_nohealth' deliberately share one folder — confirm.
classes = {'IMA':'1. Intraretinal Microvascular Abnormalities', 'NA':'2. Nonperfusion Areas', 'NE':'3. Neovascularization', 'NE_nohealth':'3. Neovascularization'}
# The testing set ships without ground truth, so the image directory is
# reused as the "mask" directory; alternative (validation) layouts below.
x_test_dir = os.path.join(DATA_DIR, '1. Original Images', 'b. Testing Set')
y_test_dir = os.path.join(DATA_DIR, '1. Original Images', 'b. Testing Set')
# x_test_dir = os.path.join(DATA_DIR, '1. Original Images', 'val')
# y_test_dir = os.path.join(DATA_DIR, '2. Groundtruths', 'val', classes[CLASSES[0]])
# y_test_dir = os.path.join(DATA_DIR, 'test', 'anno')
# Encoder-specific input normalization matching the pretrained weights.
preprocessing_fn = smp.encoders.get_preprocessing_fn(ENCODER, ENCODER_WEIGHTS)
# Fully preprocessed dataset used for model inference.
test_dataset = DRACDataset(
    x_test_dir,
    y_test_dir,
    augmentation=get_validation_augmentation(),
    preprocessing=get_preprocessing(preprocessing_fn),
)

# Untransformed copy of the same split, kept only so raw images and
# filenames can be retrieved for visualization / output naming.
test_dataset_vis = DRACDataset(x_test_dir, y_test_dir)
# Per-task lists of checkpoint basenames (under ./models/) to ensemble.
model_paths = {
    'IMA': ['MODEL_PATH1', 'MODEL_PATH2'],
    'NA': ['MODEL_PATH1', 'MODEL_PATH2'],
    # 'NE': ['MODEL_PATH1', 'MODEL_PATH2'],
    'NE': ['NE_UNetpp_DICE_best_model', 'NE_UNetpp_DICE_best_model'],
    'NE_nohealth': ['MODEL_PATH1', 'MODEL_PATH2'],
}

# Load every checkpoint of the selected task's ensemble.
# map_location lets GPU-saved checkpoints load on CPU-only hosts too.
# NOTE(review): torch.load unpickles whole model objects — only load
# trusted checkpoint files.
best_models = []
for model_path in model_paths[CLASSES[0]]:
    best_models.append(torch.load('./models/' + model_path + '.pth', map_location=DEVICE))
print('ensemble', model_paths[CLASSES[0]])

# Start each run from an empty ./results/<task> output directory.
results_dir = './results/' + CLASSES[0]
os.makedirs('./results', exist_ok=True)
if os.path.exists(results_dir):
    shutil.rmtree(results_dir)
os.makedirs(results_dir)
print("product", CLASSES[0])
# Run the ensemble over every test image and write the averaged,
# thresholded binary mask to ./results/<task>/<original filename>.
num_models = len(best_models)  # loop-invariant; hoisted out of the loop
for n in tqdm(range(len(test_dataset))):
    # Fetch the raw item ONCE; index 2 carries the image filename.
    # (The original indexed test_dataset_vis twice per iteration,
    # re-loading the image from disk.)
    vis_item = test_dataset_vis[n]
    image, _gt_mask = test_dataset[n]  # ground truth unused on the test set
    x_tensor = torch.from_numpy(image).to(DEVICE).unsqueeze(0)

    # Average the per-model probability maps; round() thresholds at 0.5.
    all_pred = None
    for best_model in best_models:
        pr_mask = best_model.predict(x_tensor)
        pr_mask = pr_mask.squeeze().cpu().numpy()
        all_pred = pr_mask if all_pred is None else all_pred + pr_mask
    pr_mask = (all_pred / num_models).round()
    # pr_mask[pr_mask>0] = 255
    cv2.imwrite('./results/' + CLASSES[0] + '/' + vis_item[2], pr_mask)
# The 'NE_nohealth' variant is submitted under the standard 'NE' folder:
# replace any existing ./results/NE with the freshly written masks, then
# drop the intermediate NE_nohealth directory.
if CLASSES[0] == 'NE_nohealth':
    ne_dir = './results/NE'
    if os.path.exists(ne_dir):
        shutil.rmtree(ne_dir)
    shutil.copytree('./results/NE_nohealth', ne_dir)
    shutil.rmtree('./results/NE_nohealth')