@@ -56,19 +56,19 @@ def __init__(
         )
 
     def forward_backward(self, data):
-        imgs, pids = self._parse_data_for_train(data)
+        imgs, pids = self.parse_data_for_train(data)
 
         if self.use_gpu:
             imgs = imgs.cuda()
             pids = pids.cuda()
 
         outputs1, features1 = self.model1(imgs)
-        loss1_x = self._compute_loss(self.criterion_x, outputs1, pids)
-        loss1_t = self._compute_loss(self.criterion_t, features1, pids)
+        loss1_x = self.compute_loss(self.criterion_x, outputs1, pids)
+        loss1_t = self.compute_loss(self.criterion_t, features1, pids)
 
         outputs2, features2 = self.model2(imgs)
-        loss2_x = self._compute_loss(self.criterion_x, outputs2, pids)
-        loss2_t = self._compute_loss(self.criterion_t, features2, pids)
+        loss2_x = self.compute_loss(self.criterion_x, outputs2, pids)
+        loss2_t = self.compute_loss(self.criterion_t, features2, pids)
 
         loss1_ml = self.compute_kl_div(
             outputs2.detach(), outputs1, is_logit=True
@@ -113,7 +113,7 @@ def compute_kl_div(p, q, is_logit=True):
             q = F.softmax(q, dim=1)
         return -(p * torch.log(q + 1e-8)).sum(1).mean()
 
-    def _two_stepped_transfer_learning(
+    def two_stepped_transfer_learning(
         self, epoch, fixbase_epoch, open_layers, model=None
     ):
         """Two stepped transfer learning.
@@ -138,7 +138,7 @@ def _two_stepped_transfer_learning(
             open_all_layers(model1)
             open_all_layers(model2)
 
-    def _extract_features(self, input):
+    def extract_features(self, input):
         if self.deploy == 'model1':
             return self.model1(input)
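For context, a minimal standalone sketch of the mutual-learning term that forward_backward calls as self.compute_kl_div(outputs2.detach(), outputs1, is_logit=True). Only the softmax over q and the return line appear in the second hunk; the is_logit branch for p and the tensor shapes below are assumptions for illustration.

import torch
import torch.nn.functional as F

def compute_kl_div(p, q, is_logit=True):
    # p: detached outputs of the peer model (the soft target),
    # q: outputs of the model currently being updated.
    if is_logit:
        p = F.softmax(p, dim=1)  # assumed branch; not shown in the hunk
        q = F.softmax(q, dim=1)
    # Cross-entropy H(p, q); with p detached, its gradient w.r.t. q equals
    # that of KL(p || q), so it serves as the mutual-learning loss.
    return -(p * torch.log(q + 1e-8)).sum(1).mean()

# Usage mirroring the diff: model2's detached logits supervise model1.
outputs1 = torch.randn(8, 751)  # hypothetical batch of logits from model1
outputs2 = torch.randn(8, 751)  # hypothetical batch of logits from model2
loss1_ml = compute_kl_div(outputs2.detach(), outputs1, is_logit=True)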