BibTeX
@article{2506.03037v1,
Author = {Shubhendu Trivedi and Brian D. Nord},
Title = {On the Need to Align Intent and Implementation in Uncertainty
Quantification for Machine Learning},
Eprint = {2506.03037v1},
ArchivePrefix = {arXiv},
PrimaryClass = {cs.LG},
Abstract = {Quantifying uncertainties for machine learning (ML) models is a foundational
challenge in modern data analysis. This challenge is compounded by at least two
key aspects of the field: (a) inconsistent terminology surrounding uncertainty
and estimation across disciplines, and (b) the varying technical requirements
for establishing trustworthy uncertainties in diverse problem contexts. In this
position paper, we aim to clarify the depth of these challenges by identifying
these inconsistencies and articulating how different contexts impose distinct
epistemic demands. We examine the current landscape of estimation targets
(e.g., prediction, inference, simulation-based inference), uncertainty
constructs (e.g., frequentist, Bayesian, fiducial), and the approaches used to
map between them. Drawing on the literature, we highlight and explain examples
of problematic mappings. To help address these issues, we advocate for
standards that promote alignment between the \textit{intent} and
\textit{implementation} of uncertainty quantification (UQ) approaches. We
discuss several axes of trustworthiness that are necessary (if not sufficient)
for reliable UQ in ML models, and show how these axes can inform the design and
evaluation of uncertainty-aware ML systems. Our practical recommendations focus
on scientific ML, offering illustrative cases and use scenarios, particularly
in the context of simulation-based inference (SBI).},
Year = {2025},
Month = jun,
Url = {http://arxiv.org/abs/2506.03037v1},
File = {2506.03037v1.pdf}
}
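
As a concrete illustration of the abstract's central claim about aligning the intent and implementation of UQ (this is not an example from the paper itself), the following minimal Python sketch checks whether prediction intervals intended to have 90% coverage actually achieve it empirically. All data and model quantities are synthetic, hypothetical stand-ins.

import numpy as np

# Illustrative sketch (not from the paper): compare a model's *intended*
# coverage (nominal 90% intervals) against its *implemented* coverage
# (empirical frequency with which intervals contain the truth).

rng = np.random.default_rng(0)

# Synthetic ground truth and an imperfect model's predictive means/scales,
# assuming a Gaussian predictive distribution.
y_true = rng.normal(loc=0.0, scale=1.0, size=10_000)
pred_mean = y_true + rng.normal(scale=0.3, size=y_true.shape)  # true error scale 0.3
pred_scale = np.full_like(y_true, 0.2)  # overconfident: claims error scale 0.2

# Intent: central 90% intervals under the model's own Gaussian assumption.
z = 1.6449  # ~95th percentile of the standard normal
lo = pred_mean - z * pred_scale
hi = pred_mean + z * pred_scale

# Implementation check: empirical coverage of the nominal 90% intervals.
covered = (y_true >= lo) & (y_true <= hi)
print(f"nominal coverage: 0.90, empirical coverage: {covered.mean():.3f}")

With the overconfident scale used above, the empirical coverage falls well below the nominal 0.90 (around 0.73 in expectation), the kind of intent/implementation gap the paper argues UQ standards should surface.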