
Commit 749168c

Fix test_priors_to_measurements
Fixes an issue in `test_priors_to_measurements` that led to evaluating the prior at the wrong parameter values: the location parameter of the prior was used instead of the actually estimated parameter values. The problem was in the test code, not in the tested code.
1 parent 9baf981 commit 749168c
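
Why the bug was easy to miss: a normal log-density evaluated at its own location parameter always returns its maximum, independent of the parameter estimate, so the erroneous test could not flag a wrong prior contribution. A minimal sketch of the failure mode (illustrative only; the values and variable names below are made up, not from the commit):

    from scipy.stats import norm

    loc, scale = 0.5, 0.1  # hypothetical prior location and scale
    x_hat = 0.8            # hypothetical estimated (nominal) parameter value

    # Buggy pattern: evaluating the prior at its own location parameter
    # always yields the maximal log-density, whatever x_hat is.
    at_loc = norm.logpdf(loc, loc=loc, scale=scale)

    # Fixed pattern: evaluate the prior at the estimated parameter value.
    at_estimate = norm.logpdf(x_hat, loc=loc, scale=scale)

    assert at_loc >= at_estimate  # the buggy value can never expose a mismatch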

File tree

1 file changed: +50, -21 lines

tests/v1/test_priors.py

@@ -8,7 +8,15 @@
 from scipy.stats import norm
 
 import petab.v1
-from petab.v1 import get_simulation_conditions
+from petab.v1 import (
+    ESTIMATE,
+    MEASUREMENT,
+    OBJECTIVE_PRIOR_TYPE,
+    OBSERVABLE_ID,
+    SIMULATION,
+    get_simulation_conditions,
+    get_simulation_df,
+)
 from petab.v1.priors import priors_to_measurements
 
 
@@ -17,20 +25,29 @@
 )
 def test_priors_to_measurements(problem_id):
     """Test the conversion of priors to measurements."""
+    # setup
     petab_problem_priors: petab.v1.Problem = (
         benchmark_models_petab.get_problem(problem_id)
     )
     petab_problem_priors.visualization_df = None
     assert petab.v1.lint_problem(petab_problem_priors) is False
-
     if problem_id == "Isensee_JCB2018":
         # required to match the stored simulation results below
         petab.v1.flatten_timepoint_specific_output_overrides(
             petab_problem_priors
         )
         assert petab.v1.lint_problem(petab_problem_priors) is False
+
     original_problem = deepcopy(petab_problem_priors)
+    x_scaled_dict = dict(
+        zip(
+            original_problem.x_free_ids,
+            original_problem.x_nominal_free_scaled,
+            strict=True,
+        )
+    )
 
+    # convert priors to measurements
     petab_problem_measurements = priors_to_measurements(petab_problem_priors)
 
     # check that the original problem is not modified
@@ -45,6 +62,7 @@ def test_priors_to_measurements(problem_id):
             getattr(original_problem, attr)
         )
     ).empty, diff
+
     # check that measurements and observables were added
     assert petab.v1.lint_problem(petab_problem_measurements) is False
     assert (
@@ -59,6 +77,7 @@ def test_priors_to_measurements(problem_id):
         petab_problem_measurements.measurement_df.shape[0]
         > petab_problem_priors.measurement_df.shape[0]
     )
+
     # ensure we didn't introduce any new conditions
     assert len(
         get_simulation_conditions(petab_problem_measurements.measurement_df)
@@ -67,26 +86,40 @@ def test_priors_to_measurements(problem_id):
     # verify that the objective function value is the same
 
     # load/construct the simulation results
-    simulation_df_priors = petab.v1.get_simulation_df(
+    simulation_df_priors = get_simulation_df(
         Path(
             benchmark_models_petab.MODELS_DIR,
             problem_id,
             f"simulatedData_{problem_id}.tsv",
         )
     )
-    simulation_df_measurements = pd.concat(
-        [
-            petab_problem_measurements.measurement_df.rename(
-                columns={petab.v1.MEASUREMENT: petab.v1.SIMULATION}
-            )[
-                petab_problem_measurements.measurement_df[
-                    petab.v1.C.OBSERVABLE_ID
-                ].str.startswith("prior_")
-            ],
-            simulation_df_priors,
+    # for the prior observables, we need to "simulate" the model with the
+    # nominal parameter values
+    simulated_prior_observables = (
+        petab_problem_measurements.measurement_df.rename(
+            columns={MEASUREMENT: SIMULATION}
+        )[
+            petab_problem_measurements.measurement_df[
+                OBSERVABLE_ID
+            ].str.startswith("prior_")
         ]
     )
 
+    def apply_parameter_values(row):
+        # apply the parameter values to the observable formula for the prior
+        if row[OBSERVABLE_ID].startswith("prior_"):
+            row[SIMULATION] = x_scaled_dict[
+                row[OBSERVABLE_ID].removeprefix("prior_")
+            ]
+        return row
+
+    simulated_prior_observables = simulated_prior_observables.apply(
+        apply_parameter_values, axis=1
+    )
+    simulation_df_measurements = pd.concat(
+        [simulation_df_priors, simulated_prior_observables]
+    )
+
     llh_priors = petab.v1.calculate_llh_for_table(
         petab_problem_priors.measurement_df,
         simulation_df_priors,
@@ -102,10 +135,8 @@ def test_priors_to_measurements(problem_id):
 
     # get prior objective function contribution
     parameter_ids = petab_problem_priors.parameter_df.index.values[
-        (petab_problem_priors.parameter_df[petab.v1.ESTIMATE] == 1)
-        & petab_problem_priors.parameter_df[
-            petab.v1.OBJECTIVE_PRIOR_TYPE
-        ].notna()
+        (petab_problem_priors.parameter_df[ESTIMATE] == 1)
+        & petab_problem_priors.parameter_df[OBJECTIVE_PRIOR_TYPE].notna()
     ]
     priors = petab.v1.get_priors_from_df(
         petab_problem_priors.parameter_df,
@@ -117,9 +148,7 @@ def test_priors_to_measurements(problem_id):
         prior_type, prior_pars, par_scale, par_bounds = prior
         if prior_type == petab.v1.PARAMETER_SCALE_NORMAL:
             prior_contrib += norm.logpdf(
-                petab_problem_priors.x_nominal_free_scaled[
-                    petab_problem_priors.x_free_ids.index(parameter_id)
-                ],
+                x_scaled_dict[parameter_id],
                 loc=prior_pars[0],
                 scale=prior_pars[1],
             )
@@ -134,4 +163,4 @@ def test_priors_to_measurements(problem_id):
         llh_priors + prior_contrib, llh_measurements, rtol=1e-3, atol=1e-16
     ), (llh_priors + prior_contrib, llh_measurements)
     # check that the tolerance is not too high
-    assert np.abs(prior_contrib) > 1e-3 * np.abs(llh_priors)
+    assert np.abs(prior_contrib) > 1e-8 * np.abs(llh_priors)
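
As an aside (an observation about the underlying math, not part of the commit): the equality this test checks, llh_priors + prior_contrib == llh_measurements (within tolerance), rests on the normal log-density being symmetric in its argument and location. Treating the prior location as a "measurement" of a parameter and the parameter value as the "simulation" therefore reproduces a parameterScaleNormal prior term exactly. A quick self-contained check with hypothetical values:

    from scipy.stats import norm

    x, loc, scale = 0.8, 0.5, 0.1  # hypothetical parameter value and prior

    prior_term = norm.logpdf(x, loc=loc, scale=scale)
    # prior location as "measurement", parameter value as "simulation":
    measurement_term = norm.logpdf(loc, loc=x, scale=scale)

    assert abs(prior_term - measurement_term) < 1e-12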
