diff --git a/petab/v1/distributions.py b/petab/v1/distributions.py
index de7a638c..f8e807da 100644
--- a/petab/v1/distributions.py
+++ b/petab/v1/distributions.py
@@ -36,6 +36,7 @@
     "Normal",
     "Rayleigh",
     "Uniform",
+    "LogUniform",
 ]
 
 
@@ -382,6 +383,10 @@ class Uniform(Distribution):
         If ``False``, no transformation is applied.
         If a transformation is applied, the lower and upper bounds are the
         lower and upper bounds of the underlying uniform distribution.
+        Note that this differs from the usual definition of a log-uniform
+        distribution, where the logarithm of the variable is uniformly
+        distributed between the logarithms of the bounds (see also
+        :class:`LogUniform`).
     """
 
     def __init__(
@@ -411,6 +416,45 @@ def _ppf_untransformed_untruncated(self, q) -> np.ndarray | float:
         return uniform.ppf(q, loc=self._low, scale=self._high - self._low)
 
 
+class LogUniform(Distribution):
+    """A log-uniform or reciprocal distribution.
+
+    A random variable is log-uniformly distributed between ``low`` and
+    ``high`` if its logarithm is uniformly distributed between ``log(low)``
+    and ``log(high)``.
+
+    :param low: The lower bound of the distribution.
+    :param high: The upper bound of the distribution.
+    :param trunc: The truncation limits of the distribution.
+    """
+
+    def __init__(
+        self,
+        low: float,
+        high: float,
+        trunc: tuple[float, float] | None = None,
+    ):
+        self._logbase = np.exp(1)
+        self._low = self._log(low)
+        self._high = self._log(high)
+        super().__init__(log=self._logbase, trunc=trunc)
+
+    def __repr__(self):
+        return self._repr({"low": self._low, "high": self._high})
+
+    def _sample(self, shape=None) -> np.ndarray | float:
+        return np.random.uniform(low=self._low, high=self._high, size=shape)
+
+    def _pdf_untransformed_untruncated(self, x) -> np.ndarray | float:
+        return uniform.pdf(x, loc=self._low, scale=self._high - self._low)
+
+    def _cdf_untransformed_untruncated(self, x) -> np.ndarray | float:
+        return uniform.cdf(x, loc=self._low, scale=self._high - self._low)
+
+    def _ppf_untransformed_untruncated(self, q) -> np.ndarray | float:
+        return uniform.ppf(q, loc=self._low, scale=self._high - self._low)
+
+
 class Laplace(Distribution):
     """A (log-)Laplace distribution.
 
diff --git a/petab/v2/core.py b/petab/v2/core.py
index 9727b21d..22453878 100644
--- a/petab/v2/core.py
+++ b/petab/v2/core.py
@@ -201,7 +201,7 @@ class PriorDistribution(str, Enum):
     PriorDistribution.LAPLACE: Laplace,
     PriorDistribution.LOG_LAPLACE: Laplace,
     PriorDistribution.LOG_NORMAL: Normal,
-    PriorDistribution.LOG_UNIFORM: Uniform,
+    PriorDistribution.LOG_UNIFORM: LogUniform,
     PriorDistribution.NORMAL: Normal,
     PriorDistribution.RAYLEIGH: Rayleigh,
     PriorDistribution.UNIFORM: Uniform,
@@ -1060,7 +1060,12 @@ def prior_dist(self) -> Distribution:
             # `Uniform.__init__` does not accept the `trunc` parameter
             low = max(self.prior_parameters[0], self.lb)
             high = min(self.prior_parameters[1], self.ub)
-            return cls(low, high, log=log)
+            return cls(low, high)
+
+        if cls == LogUniform:
+            # Mind the different interpretation of distribution parameters
+            # for Uniform(..., log=True) and LogUniform!!
+            return cls(*self.prior_parameters, trunc=[self.lb, self.ub])
 
         return cls(*self.prior_parameters, log=log, trunc=[self.lb, self.ub])
 
diff --git a/tests/v1/test_distributions.py b/tests/v1/test_distributions.py
index 7b7cd4aa..f4b3e3fe 100644
--- a/tests/v1/test_distributions.py
+++ b/tests/v1/test_distributions.py
@@ -1,4 +1,5 @@
 import sys
+from math import exp
 
 import numpy as np
 import pytest
@@ -115,3 +116,20 @@ def cdf(x):
     assert_allclose(
         distribution.pdf(sample), reference_pdf, rtol=1e-10, atol=1e-14
     )
+
+
+def test_log_uniform():
+    """Test Uniform(a, b, log=True) vs LogUniform(a, b)."""
+    # support between exp(1) and exp(2)
+    dist = Uniform(1, 2, log=True)
+    assert dist.pdf(exp(0)) == 0
+    assert dist.pdf(exp(1)) > 0
+    assert dist.pdf(exp(2)) > 0
+    assert dist.pdf(exp(3)) == 0
+
+    # support between 1 and 2
+    dist = LogUniform(1, 2)
+    assert dist.pdf(0) == 0
+    assert dist.pdf(1) > 0
+    assert dist.pdf(2) > 0
+    assert dist.pdf(3) == 0
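The parameter-interpretation difference this change is about, illustrated with the classes from the diff above (a minimal sketch; only the pdf method exercised by the new test is used, and the commented values are reasoned from the definitions rather than captured output):

from petab.v1.distributions import LogUniform, Uniform

# Uniform(1, 2, log=True): `low`/`high` bound the *underlying* uniform
# distribution; samples are then exponentiated, so the support of the
# resulting variable is [e^1, e^2], roughly [2.72, 7.39].
u_log = Uniform(1, 2, log=True)
print(u_log.pdf(2.0))  # 0   -- below e^1, outside the support
print(u_log.pdf(5.0))  # > 0 -- inside [e^1, e^2]

# LogUniform(1, 2): `low`/`high` bound the variable itself; its logarithm
# is uniform on [log(1), log(2)], so the support is [1, 2] and the density
# is proportional to 1/x there.
log_u = LogUniform(1, 2)
print(log_u.pdf(1.5))  # > 0 -- inside [1, 2]
print(log_u.pdf(5.0))  # 0   -- outside the support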