fixes to code and tests adjusting for additional fit in np_shift rankings
MoAly98 committed Mar 8, 2025
1 parent bd956bb commit a467b7a
Showing 2 changed files with 43 additions and 28 deletions.
src/cabinetry/fit/__init__.py: 24 changes (12 additions & 12 deletions)
@@ -454,10 +454,10 @@ def _goodness_of_fit(
     return p_val


-def _get_impacts_summary(impacts_by_modifier_type, method, total_error=None):
+def _get_impacts_summary(impacts_by_modifier_type):

     non_syst_modifiers = ["normfactor", "shapefactor", "staterror"]  # Lumi ?
-    impact_totals = defaultdict(lambda: defaultdict(float))
+    impacts_summary = defaultdict(lambda: defaultdict(float))
     # Dictionary to store the merged values after removing certain modifiers
     syst_impacts_map = defaultdict(list)
     # Iterate through each modifier and its corresponding data
@@ -467,19 +467,19 @@ def _get_impacts_summary(impacts_by_modifier_type, method, total_error=None):
         syst_impacts_map[impact_type].extend(impact_values)

     for impact_type in ["impact_up", "impact_down"]:
-        impact_totals["syst"][impact_type] = np.sqrt(
+        impacts_summary["syst"][impact_type] = np.sqrt(
             sum(np.power(syst_impacts_map[impact_type], 2))
         )
         for non_syst_modifier in non_syst_modifiers:
-            impact_totals[non_syst_modifier][impact_type] = np.sqrt(
+            impacts_summary[non_syst_modifier][impact_type] = np.sqrt(
                 sum(
                     np.power(
                         impacts_by_modifier_type[non_syst_modifier][impact_type], 2
                     )
                 )
             )

-    return impact_totals
+    return impacts_summary


 def _get_datastat_impacts_np_shift(
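For context on the renamed helper: it combines per-parameter impacts in quadrature, keeping one merged "syst" entry for everything that is not a normalization, shape, or MC-statistics modifier, plus one entry per non-systematic modifier type. A minimal standalone sketch of that kind of combination (the input dictionary and its numbers are invented for illustration; this is not the exact cabinetry implementation):

```python
from collections import defaultdict

import numpy as np

# toy per-modifier-type impacts on the POI (invented numbers)
impacts_by_modifier_type = {
    "normsys": {"impact_up": [0.02, 0.01], "impact_down": [0.02, 0.01]},
    "histosys": {"impact_up": [0.03], "impact_down": [0.04]},
    "normfactor": {"impact_up": [0.05], "impact_down": [0.05]},
}
non_syst_modifiers = ["normfactor", "shapefactor", "staterror"]

impacts_summary = defaultdict(lambda: defaultdict(float))
for impact_type in ["impact_up", "impact_down"]:
    # everything that is not a normalization / shape / MC-stat modifier is
    # treated as a systematic and merged into a single quadrature sum
    syst_values = [
        value
        for modifier, impacts in impacts_by_modifier_type.items()
        if modifier not in non_syst_modifiers
        for value in impacts[impact_type]
    ]
    impacts_summary["syst"][impact_type] = np.sqrt(np.sum(np.power(syst_values, 2)))
    # non-systematic modifier types keep their own quadrature sums
    for modifier in non_syst_modifiers:
        values = impacts_by_modifier_type.get(modifier, {}).get(impact_type, [])
        impacts_summary[modifier][impact_type] = np.sqrt(np.sum(np.power(values, 2)))

assert np.isclose(
    impacts_summary["syst"]["impact_up"], np.sqrt(0.02**2 + 0.01**2 + 0.03**2)
)
```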
@@ -490,19 +490,19 @@ def _get_datastat_impacts_np_shift(
     # Get statistical uncertainty
     fix_pars_datastat = fit_kwargs["fix_pars"].copy()
     fix_pars_datastat = [
-        True for i_par in range(len(fix_pars_datastat)) if i_par != poi_index
+        True if i_par != poi_index else False
+        for i_par in range(len(model.config.par_names))
     ]
     init_pars_datastat = fit_kwargs["init_pars"].copy()
     init_pars_datastat = [
-        fit_results.bestfit[i_par]
-        for i_par in range(len(init_pars_datastat))
-        if i_par != poi_index
+        fit_results.bestfit[i_par] if i_par != poi_index else init_pars_datastat[i_par]
+        for i_par in range(len(model.config.par_names))
     ]
     fit_results_datastat = _fit_model(
         model,
         data,
-        init_pars=fix_pars_datastat,
-        fix_pars=init_pars_datastat,
+        init_pars=init_pars_datastat,
+        fix_pars=fix_pars_datastat,
         **{k: v for k, v in fit_kwargs.items() if k not in ["init_pars", "fix_pars"]},
     )
     datastat_poi_val = fit_results_datastat.bestfit[poi_index]
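The substance of this hunk is the stat-only fit configuration: every parameter except the POI is fixed and initialized at its best-fit value, and the previously swapped init_pars/fix_pars keywords are now passed correctly to _fit_model. A small sketch of the corrected list construction, with placeholder inputs standing in for the model and fit settings:

```python
# placeholder inputs standing in for the model configuration and fit settings
bestfit = [1.2, 0.9, 1.05]        # best-fit values from the nominal fit
init_pars = [1.0, 1.0, 1.0]       # initial parameter values from fit_kwargs
poi_index = 0
n_pars = len(bestfit)             # stands in for len(model.config.par_names)

# fix every parameter except the POI ...
fix_pars_datastat = [i_par != poi_index for i_par in range(n_pars)]
# ... and start each fixed parameter at its best-fit value, keeping the
# original initial value only for the POI itself
init_pars_datastat = [
    bestfit[i_par] if i_par != poi_index else init_pars[i_par]
    for i_par in range(n_pars)
]

assert fix_pars_datastat == [False, True, True]
assert init_pars_datastat == [1.0, 0.9, 1.05]
# the corrected call then uses init_pars=init_pars_datastat and
# fix_pars=fix_pars_datastat; the previous version had the keywords swapped
```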
@@ -748,7 +748,7 @@ def _np_impacts(
         # update combined parameters index (e.g. staterrors)
         i_global_par += model.config.param_set(parameter).n_parameters
     impacts_summary = _get_impacts_summary(impacts_by_modifier_type)
-    impacts_summary = _get_datastat_impacts_quadruture(
+    impacts_summary = _get_datastat_impacts_np_shift(
         impacts_summary, model, data, poi_index, fit_results, fit_kwargs
     )

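_np_impacts now adds the data-statistical contribution via _get_datastat_impacts_np_shift instead of the quadrature-based helper. Based on the summing code above, the summary it extends is a nested mapping from a modifier-type label to its up/down impact; a rough illustration of that shape (all keys and numbers are invented, and the exact label used for the data-statistics entry is an assumption):

```python
# illustrative only: the nested shape of the summary the helpers above fill in,
# with one merged "syst" entry, one entry per non-systematic modifier type, and
# (after _get_datastat_impacts_np_shift) an entry for the data statistics; the
# "datastat" key name and all numbers are made up for this sketch
impacts_summary = {
    "syst": {"impact_up": 0.040, "impact_down": 0.045},
    "normfactor": {"impact_up": 0.020, "impact_down": 0.020},
    "shapefactor": {"impact_up": 0.000, "impact_down": 0.000},
    "staterror": {"impact_up": 0.012, "impact_down": 0.012},
    "datastat": {"impact_up": 0.030, "impact_down": 0.030},  # assumed key name
}

# one possible way a consumer could quote a combined upward impact (illustration)
combined_up = sum(entry["impact_up"] ** 2 for entry in impacts_summary.values()) ** 0.5
print(f"combined upward impact: {combined_up:.3f}")
```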
tests/fit/test_fit.py: 47 changes (31 additions & 16 deletions)
@@ -522,13 +522,21 @@ def test_fit(mock_fit, mock_print, mock_gof):
         fit.FitResults(
             np.asarray([0.8, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
         ),
+        # !datastats: nominal fit results because everything is fixed except POI
+        fit.FitResults(
+            np.asarray([1.0, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
+        ),
         # for second ranking call with fixed parameter
         fit.FitResults(
             np.asarray([1.2, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
         ),
         fit.FitResults(
             np.asarray([0.8, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
         ),
+        # !datastats: nominal fit results because everything is fixed except POI
+        fit.FitResults(
+            np.asarray([1.0, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
+        ),
         # for third ranking call without reference results
         fit.FitResults(
             np.asarray([1.0, 0.9]), np.asarray([0.3, 0.3]), ["a", "b"], np.empty(0), 0.0
@@ -539,6 +547,11 @@ def test_fit(mock_fit, mock_print, mock_gof):
         fit.FitResults(
             np.asarray([0.7, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
         ),
+        # !datastats: nominal fit results because everything is fixed except POI
+        fit.FitResults(
+            np.asarray([1.0, 0.9]), np.asarray([0.1, 0.1]), ["a", "b"], np.empty(0), 0.0
+        ),
+        # covariance-based method
         fit.FitResults(
             np.asarray([0.7, 0.9]),
             np.asarray([0.1, 0.1]),
@@ -564,7 +577,7 @@ def test_ranking(mock_fit, example_spec):
     # correct call to fit
     expected_fix = [False, True]
     expected_inits = [[1.0, 0.95019305], [1.0, 0.84980695], [1.0, 0.92], [1.0, 0.88]]
-    assert mock_fit.call_count == 4
+    assert mock_fit.call_count == 5  # four fits + datastat fit
     for i in range(4):
         assert mock_fit.call_args_list[i][0] == (model, data)
         assert np.allclose(
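The updated call counts follow from how unittest.mock consumes side_effect: every call to the patched _fit_model returns the next prepared FitResults, so the extra stat-only fit in the np_shift ranking needs one more entry in the list and raises the expected call count from 4 to 5. A stripped-down illustration (not the cabinetry test itself):

```python
from unittest import mock

# stand-ins for the FitResults objects prepared via side_effect in the test
mock_fit = mock.Mock(side_effect=["nominal", "up-shift", "down-shift", "datastat"])

# every call consumes the next prepared result, in order
results = [mock_fit("model", "data") for _ in range(4)]
assert results == ["nominal", "up-shift", "down-shift", "datastat"]

# the test's call-count assertions simply track how many fits ran;
# one additional prepared result corresponds to one additional expected call
assert mock_fit.call_count == 4
```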
@@ -593,7 +606,7 @@ def test_ranking(mock_fit, example_spec):
         model, data, fit_results=fit_results, impacts_method="covariance"
     )
     # no further calls to mock fit
-    assert mock_fit.call_count == 4
+    assert mock_fit.call_count == 5
     # POI removed from fit results
     assert np.allclose(ranking_results.bestfit, [0.9])
     assert np.allclose(ranking_results.uncertainty, [0.02])
@@ -620,9 +633,9 @@ def test_ranking(mock_fit, example_spec):
         custom_fit=True,
         impacts_method="np_shift",
     )
-    # expect two calls in this ranking (and had 4 before, so 6 total): pre-fit
+    # expect three calls in this ranking (and had 5 before, so 8 total): pre-fit
     # uncertainty is 0 since parameter is fixed, mock post-fit uncertainty is not 0
-    assert mock_fit.call_count == 6
+    assert mock_fit.call_count == 8
     assert mock_fit.call_args[1]["custom_fit"] is True
     assert np.allclose(ranking_results.prefit_up, [0.0])
     assert np.allclose(ranking_results.prefit_down, [0.0])
Expand All @@ -631,6 +644,7 @@ def test_ranking(mock_fit, example_spec):

     # no reference results, init/fixed pars, par bounds, strategy/maxiter/tolerance
     # NP shifting method
+    # !is this test complete? parameter is still considered fixed with prefit_unc = 0.0
     ranking_results = fit.ranking(
         model,
         data,
@@ -644,9 +658,9 @@ def test_ranking(mock_fit, example_spec):
         custom_fit=True,
         impacts_method="np_shift",
     )
-    assert mock_fit.call_count == 9
+    assert mock_fit.call_count == 12
    # reference fit
-    assert mock_fit.call_args_list[-3] == (
+    assert mock_fit.call_args_list[-4] == (
         (model, data),
         {
             "init_pars": [1.5, 1.0],
@@ -659,22 +673,23 @@ def test_ranking(mock_fit, example_spec):
         },
     )
     # fits for impact (comparing each option separately since init_pars needs allclose)
+    assert mock_fit.call_args_list[-3][0] == (model, data)
+    assert np.allclose(mock_fit.call_args_list[-3][1]["init_pars"], [1.5, 1.2])
+    assert mock_fit.call_args_list[-3][1]["fix_pars"] == [False, True]
+    assert mock_fit.call_args_list[-3][1]["par_bounds"] == [(0, 5), (0.1, 10)]
+    assert mock_fit.call_args_list[-3][1]["strategy"] == 2
+    assert mock_fit.call_args_list[-3][1]["maxiter"] == 100
+    assert mock_fit.call_args_list[-3][1]["tolerance"] == 0.01
+    assert mock_fit.call_args_list[-3][1]["custom_fit"] is True
     assert mock_fit.call_args_list[-2][0] == (model, data)
-    assert np.allclose(mock_fit.call_args_list[-2][1]["init_pars"], [1.5, 1.2])
+    assert np.allclose(mock_fit.call_args_list[-2][1]["init_pars"], [1.5, 0.6])
     assert mock_fit.call_args_list[-2][1]["fix_pars"] == [False, True]
     assert mock_fit.call_args_list[-2][1]["par_bounds"] == [(0, 5), (0.1, 10)]
     assert mock_fit.call_args_list[-2][1]["strategy"] == 2
     assert mock_fit.call_args_list[-2][1]["maxiter"] == 100
     assert mock_fit.call_args_list[-2][1]["tolerance"] == 0.01
     assert mock_fit.call_args_list[-2][1]["custom_fit"] is True
-    assert mock_fit.call_args_list[-1][0] == (model, data)
-    assert np.allclose(mock_fit.call_args_list[-1][1]["init_pars"], [1.5, 0.6])
-    assert mock_fit.call_args_list[-1][1]["fix_pars"] == [False, True]
-    assert mock_fit.call_args_list[-1][1]["par_bounds"] == [(0, 5), (0.1, 10)]
-    assert mock_fit.call_args_list[-1][1]["strategy"] == 2
-    assert mock_fit.call_args_list[-1][1]["maxiter"] == 100
-    assert mock_fit.call_args_list[-1][1]["tolerance"] == 0.01
-    assert mock_fit.call_args_list[-1][1]["custom_fit"] is True
+    # [-1] is datastat
     # ranking results
     assert np.allclose(ranking_results.prefit_up, [0.0])
     assert np.allclose(ranking_results.prefit_down, [0.0])
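With the stat-only fit appended as the last call, every earlier fit sits one step further from the end of call_args_list: the reference fit moves from [-3] to [-4], the up and down impact fits to [-3] and [-2], and [-1] is now the datastat fit. A tiny sketch of that index shift, using labels in place of real fit arguments:

```python
from unittest import mock

mock_fit = mock.Mock()
for label in ["reference", "impact up", "impact down"]:
    mock_fit(label)
# before the change the reference fit was the third-to-last call
assert mock_fit.call_args_list[-3] == mock.call("reference")

# appending the datastat fit pushes all earlier calls back by one position
mock_fit("datastat")
assert mock_fit.call_args_list[-4] == mock.call("reference")
assert mock_fit.call_args_list[-1] == mock.call("datastat")
```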
@@ -704,7 +719,7 @@ def test_ranking(mock_fit, example_spec):
         impacts_method="covariance",
     )
     # reference fit
-    assert mock_fit.call_count == 10
+    assert mock_fit.call_count == 13
     assert mock_fit.call_args_list[-1] == (
         (model, data),
         {
