# loss_functions.py (forked from JuanLuisGonzalez/PLADE-net)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models


# Define VGG19
class Vgg19_pc(torch.nn.Module):
    def __init__(self, requires_grad=False):
        super(Vgg19_pc, self).__init__()
        vgg_pretrained_features = models.vgg19(pretrained=True).features
        vgg_pretrained_features = nn.DataParallel(vgg_pretrained_features.cuda())
        # VGG19 is torchvision config E; the partial-convolution paper uses features up to pool3.
        # Slice boundaries below follow torchvision's layer indices:
        #   slice1: layers 0-4   (conv1_1 .. pool1)
        #   slice2: layers 5-9   (conv2_1 .. pool2)
        #   slice3: layers 10-18 (conv3_1 .. pool3)
        #   slice4: layers 19-27 (conv4_1 .. pool4)
        self.slice1 = torch.nn.Sequential()
        self.slice2 = torch.nn.Sequential()
        self.slice3 = torch.nn.Sequential()
        self.slice4 = torch.nn.Sequential()
        n_new = 0
        for x in range(5):  # through pool1
            self.slice1.add_module(str(n_new), vgg_pretrained_features.module[x])
            n_new += 1
        for x in range(5, 10):  # through pool2
            self.slice2.add_module(str(n_new), vgg_pretrained_features.module[x])
            n_new += 1
        for x in range(10, 19):  # through pool3
            self.slice3.add_module(str(n_new), vgg_pretrained_features.module[x])
            n_new += 1
        for x in range(19, 28):  # through pool4
            self.slice4.add_module(str(n_new), vgg_pretrained_features.module[x])
            n_new += 1
        for param in self.parameters():
            param.requires_grad = requires_grad

    def forward(self, x, full=False):
        h_relu1_2 = self.slice1(x)
        h_relu2_2 = self.slice2(h_relu1_2)
        h_relu3_4 = self.slice3(h_relu2_2)
        if full:
            h_relu4_4 = self.slice4(h_relu3_4)
            return h_relu1_2, h_relu2_2, h_relu3_4, h_relu4_4
        else:
            return h_relu1_2, h_relu2_2, h_relu3_4
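

# A minimal usage sketch (not part of the original file): extracting the three
# perceptual feature maps from a dummy batch. With a 256x256 input, each slice
# halves the spatial resolution (pool1, pool2, pool3).
def _example_vgg_features():
    net = Vgg19_pc()
    x = torch.rand(2, 3, 256, 256).cuda()
    f1, f2, f3 = net(x)
    # f1: (2, 64, 128, 128), f2: (2, 128, 64, 64), f3: (2, 256, 32, 32)
    return f1.shape, f2.shape, f3.shape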


# Module-level instance of the pre-trained VGG19, shared by the loss functions below
vgg = Vgg19_pc()


# Our loss functions
def rec_loss_fnc(mask, synth, label, vgg_label, a_p):
    # Masked L1 reconstruction loss plus an optional VGG perceptual term weighted
    # by a_p; occluded pixels (mask == 0) are replaced by the label before the
    # VGG pass so only visible regions contribute to the perceptual loss.
    loss = torch.mean(mask * torch.abs(synth - label))
    if a_p > 0 and vgg_label is not None:
        loss = loss + a_p * perceptual_loss(vgg(mask * synth + (1 - mask) * label), vgg_label)
    return loss
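

# A hedged usage sketch (illustrative only; the tensor shapes and the a_p value
# are assumptions, not values from the paper): reconstruction loss for a
# synthesized view against its target with a small perceptual weight.
def _example_rec_loss():
    synth = torch.rand(1, 3, 64, 64).cuda()  # synthesized view
    label = torch.rand(1, 3, 64, 64).cuda()  # target view
    mask = torch.ones_like(label)            # all pixels visible
    vgg_label = vgg(label)                   # precomputed target features
    return rec_loss_fnc(mask, synth, label, vgg_label, a_p=0.01)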


def perceptual_loss(out_vgg, label_vgg, layer=None):
    # MSE between VGG feature maps, either at a single layer or summed over the
    # first three scales.
    if layer is not None:
        l_p = torch.mean((out_vgg[layer] - label_vgg[layer]) ** 2)
    else:
        l_p = 0
        for i in range(3):
            l_p += torch.mean((out_vgg[i] - label_vgg[i]) ** 2)
    return l_p


def smoothness(img, disp, gamma=1):
    # Edge-aware disparity smoothness: disparity gradients are penalized less
    # where the grayscale image has strong gradients.
    # Add back the approximate per-channel mean before converting to grayscale
    # (the network input is assumed to be mean-subtracted).
    m_rgb = torch.tensor([0.411, 0.432, 0.45], device=img.device).view(1, 3, 1, 1)
    gray_img = getGrayscale(img + m_rgb)

    # 3x3 filters: second derivatives for the image, paired left/right and
    # up/down first derivatives for the disparity.
    def _filt(k):
        return torch.tensor(k, device=img.device).view(1, 1, 3, 3)
    sx_filter = _filt([[0., 0., 0.], [-1., 2., -1.], [0., 0., 0.]])
    sy_filter = _filt([[0., -1., 0.], [0., 2., 0.], [0., -1., 0.]])
    dx_filter = _filt([[0., 0., 0.], [0., 1., -1.], [0., 0., 0.]])
    dy_filter = _filt([[0., -1., 0.], [0., 1., 0.], [0., 0., 0.]])
    dx1_filter = _filt([[0., 0., 0.], [-1., 1., 0.], [0., 0., 0.]])
    dy1_filter = _filt([[0., 0., 0.], [0., 1., 0.], [0., -1., 0.]])

    dx_img = F.conv2d(gray_img, sx_filter, padding=1, stride=1)
    dy_img = F.conv2d(gray_img, sy_filter, padding=1, stride=1)
    dx_d = F.conv2d(disp, dx_filter, padding=1, stride=1)
    dy_d = F.conv2d(disp, dy_filter, padding=1, stride=1)
    dx1_d = F.conv2d(disp, dx1_filter, padding=1, stride=1)
    dy1_d = F.conv2d(disp, dy1_filter, padding=1, stride=1)
    Cds = torch.mean(
        (torch.abs(dx_d) + torch.abs(dx1_d)) * torch.exp(-gamma * torch.abs(dx_img)) +
        (torch.abs(dy_d) + torch.abs(dy1_d)) * torch.exp(-gamma * torch.abs(dy_img)))
    return Cds
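

# A minimal sketch (assuming a 1-channel disparity map aligned with the image)
# of the edge-aware smoothness term; a larger gamma suppresses the penalty more
# strongly at image edges.
def _example_smoothness():
    img = torch.rand(1, 3, 64, 64).cuda()
    disp = torch.rand(1, 1, 64, 64).cuda()
    return smoothness(img, disp, gamma=2)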


def getGrayscale(input):
    # Input: mini-batch N x 3 x H x W RGB in [0, 1].
    # Output: N x 1 x H x W luminance using the ITU-R BT.601 weights.
    return (0.299 * input[:, 0, :, :] + 0.587 * input[:, 1, :, :]
            + 0.114 * input[:, 2, :, :]).unsqueeze(1)


def corrL1Loss(out_vgg, label_vgg, layer=None, nei=(3, 3)):
    # L1 distance between the local self-correlation volumes of the output and
    # label VGG features, summed over the first three scales unless a single
    # layer index is given.
    if layer is not None:
        l_p = torch.mean(torch.abs(get_corr(out_vgg[layer], nei) - get_corr(label_vgg[layer], nei)))
    else:
        l_p = 0
        for i in range(3):
            l_p += torch.mean(torch.abs(get_corr(out_vgg[i], nei) - get_corr(label_vgg[i], nei)))
    return l_p


def get_corr(feats, neig=(3, 3)):
    # Absolute cosine similarity between each feature vector and every location
    # in its neig[0] x neig[1] neighborhood (reflection-padded at the borders).
    eps = 1e-7
    _, _, H, W = feats.shape
    padding = nn.ReflectionPad2d([(neig[1] - 1) // 2, (neig[1] - 1) // 2,
                                  (neig[0] - 1) // 2, (neig[0] - 1) // 2])
    pad_feats = padding(feats)
    feat_l2 = torch.norm(feats, dim=1).unsqueeze(1)
    corrs = []
    for i in range(neig[0]):
        for j in range(neig[1]):
            shifted = pad_feats[:, :, i:(H + i), j:(W + j)]
            pfeat_l2 = torch.norm(shifted, dim=1).unsqueeze(1)
            corrs.append(torch.sum(feats * shifted, dim=1).unsqueeze(1) / (feat_l2 * pfeat_l2 + eps))
    return torch.abs(torch.cat(corrs, 1))
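

# A hedged sketch of the correlation volume: for a (3, 3) neighborhood the output
# has 9 channels of absolute cosine similarities in [0, 1]; the center-offset
# channel is ~1 everywhere (self-similarity).
def _example_corr_volume():
    feats = torch.rand(1, 16, 32, 32)
    vol = get_corr(feats, neig=(3, 3))
    return vol.shape  # (1, 9, 32, 32)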


###################################################################################################################
# From the FlowNet code
###################################################################################################################


class MultiscaleEPE(nn.Module):
    def __init__(self, multiscale_weights):
        super(MultiscaleEPE, self).__init__()
        self.w_m = multiscale_weights

    def forward(self, output_disparity, label_disparity):
        return multiscaleEPE(output_disparity, label_disparity, self.w_m, False)


def EPE(net_out, target, sparse=False, disp=True, mean=True):
    EPE_map = torch.norm(target - net_out, p=2, dim=1)  # per-pixel L2 endpoint error
    batch_size = EPE_map.size(0)
    if sparse:
        if disp:
            # invalid disparity is defined as exactly 0
            mask = target[:, 0] == 0
        else:
            # invalid flow is defined as both flow coordinates being exactly 0
            mask = (target[:, 0] == 0) & (target[:, 1] == 0)
        EPE_map = EPE_map[~mask]
    if mean:
        return EPE_map.mean()
    else:
        return EPE_map.sum() / batch_size
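

# A minimal sketch of the endpoint error on dense ground truth; with sparse=True
# and disp=True, pixels whose first target channel is exactly 0 would be
# excluded as invalid.
def _example_epe():
    pred = torch.rand(2, 2, 16, 16)
    gt = torch.rand(2, 2, 16, 16)
    return EPE(pred, gt, sparse=False, mean=True)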


def sparse_max_pool(input, size):
    # Downsample a sparse map without diluting valid values with zeros:
    # max-pool the positive and negative parts separately, then recombine.
    positive = (input > 0).float()
    negative = (input < 0).float()
    output = nn.functional.adaptive_max_pool2d(input * positive, size) - nn.functional.adaptive_max_pool2d(
        -input * negative, size)
    return output


def multiscaleEPE(network_output, target_flow, weights=None, sparse=False):
    def one_scale(output, target, sparse):
        _, _, h, w = output.size()
        if sparse:
            target_scaled = sparse_max_pool(target, (h, w))
        else:
            target_scaled = nn.functional.adaptive_avg_pool2d(target, (h, w))
        return EPE(output, target_scaled, sparse, mean=False)

    if not isinstance(network_output, (tuple, list)):
        network_output = [network_output]  # if the output is not a tuple, make it a tuple of one element
    if weights is None:
        weights = [0.001, 0.005, 0.01, 0.02, 0.08, 0.32]  # as in the original article
    assert len(weights) == len(network_output)
    loss = 0
    for output, weight in zip(network_output, weights):
        loss += weight * one_scale(output, target_flow, sparse)  # weighted sum over scales
    return loss
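

# A hedged sketch of the multi-scale EPE: six predictions at decreasing
# resolution, paired in order with the default FlowNet weights. The pyramid
# resolutions here are illustrative, not the network's actual output sizes.
def _example_multiscale_epe():
    gt = torch.rand(1, 2, 64, 64)
    outputs = [torch.rand(1, 2, 64 // 2 ** k, 64 // 2 ** k) for k in range(6)]
    return multiscaleEPE(outputs, gt, weights=None, sparse=False)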


def realEPE(output, target, sparse=False):
    _, _, h, w = target.size()
    upsampled_output = nn.functional.interpolate(output, size=(h, w), mode='bilinear', align_corners=True)
    return EPE(upsampled_output, target, sparse, mean=True)
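

# A minimal sketch of realEPE: the low-resolution prediction is bilinearly
# upsampled to the ground-truth resolution before the error is measured.
def _example_real_epe():
    pred = torch.rand(1, 2, 16, 16)
    gt = torch.rand(1, 2, 64, 64)
    return realEPE(pred, gt, sparse=False)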