fix lint issues in log score of truncated distributions

sallen12 committed Sep 10, 2024
1 parent a4236db commit f929266

Showing 5 changed files with 276 additions and 1 deletion.
6 changes: 6 additions & 0 deletions docs/api/logarithmic.md
@@ -1,3 +1,9 @@
# Logarithmic Score

::: scoringrules.logs_tlogistic

::: scoringrules.logs_tnormal

::: scoringrules.logs_tt

::: scoringrules.logs_normal
129 changes: 129 additions & 0 deletions scoringrules/_logs.py
@@ -6,6 +6,135 @@
from scoringrules.core.typing import Array, ArrayLike, Backend


def logs_tlogistic(
    observation: "ArrayLike",
    location: "ArrayLike",
    scale: "ArrayLike",
    /,
    lower: "ArrayLike" = float("-inf"),
    upper: "ArrayLike" = float("inf"),
    *,
    backend: "Backend" = None,
) -> "ArrayLike":
    r"""Compute the logarithmic score (LS) for the truncated logistic distribution.

    This score is equivalent to the negative log likelihood of the truncated logistic distribution.

    Parameters
    ----------
    observation: ArrayLike
        The observed values.
    location: ArrayLike
        Location parameter of the forecast distribution.
    scale: ArrayLike
        Scale parameter of the forecast distribution.
    lower: ArrayLike
        Lower boundary of the truncated forecast distribution.
    upper: ArrayLike
        Upper boundary of the truncated forecast distribution.

    Returns
    -------
    score:
        The LS between tLogistic(location, scale, lower, upper) and obs.

    Examples
    --------
    >>> import scoringrules as sr
    >>> sr.logs_tlogistic(0.0, 0.1, 0.4, -1.0, 1.0)
    """
    return logarithmic.tlogistic(
        observation, location, scale, lower, upper, backend=backend
    )


def logs_tnormal(
    observation: "ArrayLike",
    location: "ArrayLike",
    scale: "ArrayLike",
    /,
    lower: "ArrayLike" = float("-inf"),
    upper: "ArrayLike" = float("inf"),
    *,
    backend: "Backend" = None,
) -> "ArrayLike":
    r"""Compute the logarithmic score (LS) for the truncated normal distribution.

    This score is equivalent to the negative log likelihood of the truncated normal distribution.

    Parameters
    ----------
    observation: ArrayLike
        The observed values.
    location: ArrayLike
        Location parameter of the forecast distribution.
    scale: ArrayLike
        Scale parameter of the forecast distribution.
    lower: ArrayLike
        Lower boundary of the truncated forecast distribution.
    upper: ArrayLike
        Upper boundary of the truncated forecast distribution.

    Returns
    -------
    score:
        The LS between tNormal(location, scale, lower, upper) and obs.

    Examples
    --------
    >>> import scoringrules as sr
    >>> sr.logs_tnormal(0.0, 0.1, 0.4, -1.0, 1.0)
    """
    return logarithmic.tnormal(
        observation, location, scale, lower, upper, backend=backend
    )


def logs_tt(
    observation: "ArrayLike",
    df: "ArrayLike",
    /,
    location: "ArrayLike" = 0.0,
    scale: "ArrayLike" = 1.0,
    lower: "ArrayLike" = float("-inf"),
    upper: "ArrayLike" = float("inf"),
    *,
    backend: "Backend" = None,
) -> "ArrayLike":
    r"""Compute the logarithmic score (LS) for the truncated Student's t distribution.

    This score is equivalent to the negative log likelihood of the truncated t distribution.

    Parameters
    ----------
    observation: ArrayLike
        The observed values.
    df: ArrayLike
        Degrees of freedom parameter of the forecast distribution.
    location: ArrayLike
        Location parameter of the forecast distribution.
    scale: ArrayLike
        Scale parameter of the forecast distribution.
    lower: ArrayLike
        Lower boundary of the truncated forecast distribution.
    upper: ArrayLike
        Upper boundary of the truncated forecast distribution.

    Returns
    -------
    score:
        The LS between tt(df, location, scale, lower, upper) and obs.

    Examples
    --------
    >>> import scoringrules as sr
    >>> sr.logs_tt(0.0, 2.0, 0.1, 0.4, -1.0, 1.0)
    """
    return logarithmic.tt(
        observation, df, location, scale, lower, upper, backend=backend
    )


def logs_normal(
observation: "ArrayLike",
mu: "ArrayLike",
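For orientation, a minimal usage sketch of the three new wrappers (illustrative values only, assuming the default NumPy backend; not part of the diff):

    import numpy as np
    import scoringrules as sr

    obs = np.array([0.3, -0.2, 1.4])
    # truncated logistic / normal / t forecasts, all truncated to [-1, 2]
    print(sr.logs_tlogistic(obs, 0.1, 0.4, -1.0, 2.0))
    print(sr.logs_tnormal(obs, 0.1, 0.4, -1.0, 2.0))
    print(sr.logs_tt(obs, 2.0, 0.1, 0.4, -1.0, 2.0))

Observations outside the truncation bounds receive an infinite score, mirroring the `float("inf")` branch in the core implementations below.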
84 changes: 83 additions & 1 deletion scoringrules/core/logarithmic.py
@@ -1,12 +1,94 @@
import typing as tp

from scoringrules.backend import backends
from scoringrules.core.stats import _norm_pdf
from scoringrules.core.stats import (
    _logis_pdf,
    _logis_cdf,
    _norm_pdf,
    _norm_cdf,
    _t_pdf,
    _t_cdf,
)

if tp.TYPE_CHECKING:
    from scoringrules.core.typing import Array, ArrayLike, Backend


def tlogistic(
    obs: "ArrayLike",
    location: "ArrayLike",
    scale: "ArrayLike",
    lower: "ArrayLike",
    upper: "ArrayLike",
    backend: "Backend" = None,
) -> "Array":
    """Compute the logarithmic score for the truncated logistic distribution."""
    B = backends.active if backend is None else backends[backend]
    obs, mu, sigma, lower, upper = map(B.asarray, (obs, location, scale, lower, upper))
    ω = (obs - mu) / sigma
    u = (upper - mu) / sigma
    l = (lower - mu) / sigma
    F_u = _logis_cdf(u, backend=backend)
    F_l = _logis_cdf(l, backend=backend)
    denom = F_u - F_l

    ind_out = (ω < l) | (ω > u)
    prob = _logis_pdf(ω) / sigma
    s = B.where(ind_out, float("inf"), -B.log(prob / denom))
    return s


def tnormal(
    obs: "ArrayLike",
    location: "ArrayLike",
    scale: "ArrayLike",
    lower: "ArrayLike",
    upper: "ArrayLike",
    backend: "Backend" = None,
) -> "Array":
    """Compute the logarithmic score for the truncated normal distribution."""
    B = backends.active if backend is None else backends[backend]
    obs, mu, sigma, lower, upper = map(B.asarray, (obs, location, scale, lower, upper))
    ω = (obs - mu) / sigma
    u = (upper - mu) / sigma
    l = (lower - mu) / sigma
    F_u = _norm_cdf(u, backend=backend)
    F_l = _norm_cdf(l, backend=backend)
    denom = F_u - F_l

    ind_out = (ω < l) | (ω > u)
    prob = _norm_pdf(ω) / sigma
    s = B.where(ind_out, float("inf"), -B.log(prob / denom))
    return s


def tt(
    obs: "ArrayLike",
    df: "ArrayLike",
    location: "ArrayLike",
    scale: "ArrayLike",
    lower: "ArrayLike",
    upper: "ArrayLike",
    backend: "Backend" = None,
) -> "Array":
    """Compute the logarithmic score for the truncated t distribution."""
    B = backends.active if backend is None else backends[backend]
    obs, df, mu, sigma, lower, upper = map(
        B.asarray, (obs, df, location, scale, lower, upper)
    )
    ω = (obs - mu) / sigma
    u = (upper - mu) / sigma
    l = (lower - mu) / sigma
    F_u = _t_cdf(u, df, backend=backend)
    F_l = _t_cdf(l, df, backend=backend)
    denom = F_u - F_l

    ind_out = (ω < l) | (ω > u)
    prob = _t_pdf(ω, df) / sigma
    s = B.where(ind_out, float("inf"), -B.log(prob / denom))
    return s


def normal(
obs: "ArrayLike",
mu: "ArrayLike",
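For reference, all three core helpers above evaluate the same quantity. With μ and σ the location and scale, ω = (y − μ)/σ the standardised observation, and l, u the standardised truncation bounds, the score is (a sketch in LaTeX, with f and F the pdf and cdf of the corresponding standard, untruncated distribution):

    \mathrm{LS}(y) = -\log \frac{f(\omega) / \sigma}{F(u) - F(l)}, \qquad l \le \omega \le u,

and LS(y) = ∞ whenever the observation falls outside the truncation interval, matching the `B.where(ind_out, float("inf"), ...)` branch.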
6 changes: 6 additions & 0 deletions scoringrules/core/stats.py
@@ -18,6 +18,12 @@ def _norm_pdf(x: "ArrayLike", backend: "Backend" = None) -> "Array":
    return (1.0 / B.sqrt(2.0 * B.pi)) * B.exp(-(x**2) / 2)


def _logis_pdf(x: "ArrayLike", backend: "Backend" = None) -> "Array":
    """Probability density function for the standard logistic distribution."""
    B = backends.active if backend is None else backends[backend]
    return B.exp(-x) / (1 + B.exp(-x)) ** 2


def _logis_cdf(x: "ArrayLike", backend: "Backend" = None) -> "Array":
    """Cumulative distribution function for the standard logistic distribution."""
    B = backends.active if backend is None else backends[backend]
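The new `_logis_pdf` helper implements the standard logistic density exp(-x) / (1 + exp(-x))^2. A quick sanity check one could run against scipy (a sketch, not part of the commit; assumes scipy is installed and the NumPy backend is active):

    import numpy as np
    from scipy.stats import logistic
    from scoringrules.core.stats import _logis_pdf

    x = np.linspace(-5.0, 5.0, 11)
    # the helper should agree with scipy's standard logistic pdf
    assert np.allclose(_logis_pdf(x), logistic.pdf(x))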
52 changes: 52 additions & 0 deletions tests/test_logs.py
@@ -8,6 +8,58 @@
N = 100


@pytest.mark.parametrize("backend", BACKENDS)
def test_tlogis(backend):
    obs, location, scale, lower, upper = 4.9, 3.5, 2.3, 0.0, 20.0
    res = _logs.logs_tlogistic(obs, location, scale, lower, upper, backend=backend)
    expected = 2.11202
    assert np.isclose(res, expected)

    # aligns with logs_logistic
    # res0 = _logs.logs_logistic(obs, location, scale, backend=backend)
    # res = _logs.logs_tlogistic(obs, location, scale, backend=backend)
    # assert np.isclose(res, res0)
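The reference value 2.11202 used above can be reproduced from the truncated-density formula, for example with scipy (a sketch, not part of the test file):

    import numpy as np
    from scipy.stats import logistic

    obs, location, scale, lower, upper = 4.9, 3.5, 2.3, 0.0, 20.0
    denom = logistic.cdf(upper, location, scale) - logistic.cdf(lower, location, scale)
    score = -np.log(logistic.pdf(obs, location, scale) / denom)
    # score is approximately 2.11202, the value asserted in test_tlogis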


@pytest.mark.parametrize("backend", BACKENDS)
def test_tnormal(backend):
    obs, location, scale, lower, upper = 4.2, 2.9, 2.2, 1.5, 17.3
    res = _logs.logs_tnormal(obs, location, scale, lower, upper, backend=backend)
    expected = 1.577806
    assert np.isclose(res, expected)

    obs, location, scale, lower, upper = -1.0, 2.9, 2.2, 1.5, 17.3
    res = _logs.logs_tnormal(obs, location, scale, lower, upper, backend=backend)
    expected = float("inf")
    assert np.isclose(res, expected)

    # aligns with logs_normal
    res0 = _logs.logs_normal(obs, location, scale, backend=backend)
    res = _logs.logs_tnormal(obs, location, scale, backend=backend)
    assert np.isclose(res, res0)


@pytest.mark.parametrize("backend", BACKENDS)
def test_tt(backend):
    if backend in ["jax", "torch", "tensorflow"]:
        pytest.skip("Not implemented in jax, torch or tensorflow backends")

    obs, df, location, scale, lower, upper = 1.9, 2.9, 3.1, 4.2, 1.5, 17.3
    res = _logs.logs_tt(obs, df, location, scale, lower, upper, backend=backend)
    expected = 2.002856
    assert np.isclose(res, expected)

    obs, df, location, scale, lower, upper = -1.0, 2.9, 3.1, 4.2, 1.5, 17.3
    res = _logs.logs_tt(obs, df, location, scale, lower, upper, backend=backend)
    expected = float("inf")
    assert np.isclose(res, expected)

    # aligns with logs_t
    # res0 = _logs.logs_t(obs, df, location, scale, backend=backend)
    # res = _logs.logs_tt(obs, df, location, scale, backend=backend)
    # assert np.isclose(res, res0)


@pytest.mark.parametrize("backend", BACKENDS)
def test_normal(backend):
    res = _logs.logs_normal(0.0, 0.1, 0.1, backend=backend)
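To run only the tests touched by this commit, one option (a hypothetical invocation; `-k` filters tests by name) is:

    python -m pytest tests/test_logs.py -k "tlogis or tnormal or tt"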
