import numpy as np
from scipy.optimize import curve_fit
from scipy import stats
import torch
import torch.nn.functional as F
import torch.nn as nn

def logistic_func(X, bayta1, bayta2, bayta3, bayta4):
    # 4-parameter logistic function used to map objective scores onto the
    # subjective score scale before computing PLCC/RMSE.
    logisticPart = 1 + np.exp(np.negative(np.divide(X - bayta3, np.abs(bayta4))))
    yhat = bayta2 + np.divide(bayta1 - bayta2, logisticPart)
    return yhat

def fit_function(y_label, y_output):
    # Fit the logistic mapping from model outputs to subjective labels and
    # return the remapped outputs.
    beta = [np.max(y_label), np.min(y_label), np.mean(y_output), 0.5]
    popt, _ = curve_fit(logistic_func, y_output, y_label, p0=beta, maxfev=100000000)
    y_output_logistic = logistic_func(y_output, *popt)
    return y_output_logistic

def fit_function_regression_values(y_label, y_output):
    # Same as fit_function, but also return the fitted logistic parameters.
    beta = [np.max(y_label), np.min(y_label), np.mean(y_output), 0.5]
    popt, _ = curve_fit(logistic_func, y_output, y_label, p0=beta, maxfev=100000000)
    y_output_logistic = logistic_func(y_output, *popt)
    return y_output_logistic, popt

def performance_fit(y_label, y_output):
    # PLCC and RMSE are computed on the logistic-mapped outputs; SRCC and KRCC
    # are rank-based and therefore use the raw outputs.
    y_output_logistic = fit_function(y_label, y_output)
    PLCC = stats.pearsonr(y_output_logistic, y_label)[0]
    SRCC = stats.spearmanr(y_output, y_label)[0]
    KRCC = stats.kendalltau(y_output, y_label)[0]
    RMSE = np.sqrt(((y_output_logistic - y_label) ** 2).mean())
    return PLCC, SRCC, KRCC, RMSE

def performance_fit_regression_values(y_label, y_output):
    # Same as performance_fit, but also return the fitted logistic parameters.
    y_output_logistic, popt = fit_function_regression_values(y_label, y_output)
    PLCC = stats.pearsonr(y_output_logistic, y_label)[0]
    SRCC = stats.spearmanr(y_output, y_label)[0]
    KRCC = stats.kendalltau(y_output, y_label)[0]
    RMSE = np.sqrt(((y_output_logistic - y_label) ** 2).mean())
    return PLCC, SRCC, KRCC, RMSE, popt
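
# Illustrative check of the evaluation pipeline. A minimal sketch: the helper
# name and the synthetic scores below are assumptions for demonstration, not
# data from this repository.
def _demo_performance_fit():
    rng = np.random.default_rng(0)
    y_label = rng.uniform(1.0, 5.0, size=50)                   # e.g. mean opinion scores
    y_output = 0.5 * y_label + rng.normal(0.0, 0.2, size=50)   # noisy model predictions
    plcc, srcc, krcc, rmse = performance_fit(y_label, y_output)
    print(f"PLCC={plcc:.3f} SRCC={srcc:.3f} KRCC={krcc:.3f} RMSE={rmse:.3f}")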

def plcc_loss(y_pred, y):
    # Differentiable PLCC-based loss: standardize predictions and targets,
    # then penalize their mismatch with scaled MSE terms.
    sigma_hat, m_hat = torch.std_mean(y_pred, unbiased=False)
    y_pred = (y_pred - m_hat) / (sigma_hat + 1e-8)
    sigma, m = torch.std_mean(y, unbiased=False)
    y = (y - m) / (sigma + 1e-8)
    loss0 = F.mse_loss(y_pred, y) / 4
    rho = torch.mean(y_pred * y)  # Pearson correlation of the standardized vectors
    loss1 = F.mse_loss(rho * y_pred, y) / 4
    return ((loss0 + loss1) / 2).float()
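
# Minimal sketch of plcc_loss behavior (hypothetical tensors): a perfectly
# linearly correlated pair drives the loss to 0, an anti-correlated pair does not.
def _demo_plcc_loss():
    y = torch.tensor([1.0, 2.0, 3.0, 4.0])
    print(plcc_loss(2.0 * y + 1.0, y).item())  # ~0.0 (perfect linear correlation)
    print(plcc_loss(-y, y).item())             # 0.5 (perfect anti-correlation)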

EPS = 1e-2  # unused in this module
esp = 1e-8  # numerical stabilizer inside the square roots below

class Fidelity_Loss(torch.nn.Module):
    """Fidelity loss for preference probabilities: 1 minus the Bhattacharyya
    coefficient between Bernoulli(p) and Bernoulli(g)."""

    def __init__(self):
        super(Fidelity_Loss, self).__init__()

    def forward(self, p, g):
        g = g.view(-1, 1)
        p = p.view(-1, 1)
        loss = 1 - (torch.sqrt(p * g + esp) + torch.sqrt((1 - p) * (1 - g) + esp))
        return torch.mean(loss)
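
# Minimal sketch of Fidelity_Loss usage (hypothetical probabilities): the loss
# approaches 0 as the predicted preferences p match the ground truth g.
def _demo_fidelity_loss():
    criterion = Fidelity_Loss()
    p = torch.tensor([0.9, 0.2, 0.5])  # predicted preference probabilities
    g = torch.tensor([1.0, 0.0, 0.5])  # ground-truth preference probabilities
    print(criterion(p, g).item())      # small, since p is close to g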

def read_float_with_comma(num):
    # Parse a float written with a decimal comma, e.g. "3,14" -> 3.14.
    return float(num.replace(",", "."))

eps = 1e-12  # unused in this module

def loss_m4(y_pred, y):
    """Prediction-monotonicity loss: fidelity loss over all score pairs."""
    # Requires at least two samples; y_pred and y are expected as (N, 1) column
    # vectors so the subtractions below broadcast into N x N difference matrices.
    preds = y_pred - y_pred.t()
    gts = y - y.t()
    # Keep each unordered pair exactly once (strict upper triangle).
    triu_indices = torch.triu_indices(y_pred.size(0), y_pred.size(0), offset=1)
    preds = preds[triu_indices[0], triu_indices[1]]
    gts = gts[triu_indices[0], triu_indices[1]]
    g = 0.5 * (torch.sign(gts) + 1)  # ground-truth preference: 1, 0.5, or 0
    constant = torch.sqrt(torch.Tensor([2.])).to(preds.device)
    p = 0.5 * (1 + torch.erf(preds / constant))  # predicted preference probability
    g = g.view(-1, 1)
    p = p.view(-1, 1)
    loss = torch.mean(1 - (torch.sqrt(p * g + esp) + torch.sqrt((1 - p) * (1 - g) + esp)))
    return loss
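
# Minimal sketch of loss_m4 usage (hypothetical column vectors): inputs must be
# shaped (N, 1) for the pairwise difference matrices to broadcast correctly.
def _demo_loss_m4():
    y = torch.tensor([[1.0], [2.0], [3.0]])
    y_pred = torch.tensor([[0.1], [0.5], [0.9]])  # same ranking as y
    print(loss_m4(y_pred, y).item())  # lower than for a mis-ranked prediction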