BibTeX
@article{2502.04949v2,
Author = {Lasse Elsemüller and Valentin Pratz and Mischa von Krause and Andreas Voss and Paul-Christian Bürkner and Stefan T. Radev},
Title = {Does Unsupervised Domain Adaptation Improve the Robustness of Amortized
Bayesian Inference? A Systematic Evaluation},
Eprint = {2502.04949v2},
ArchivePrefix = {arXiv},
PrimaryClass = {stat.ML},
Abstract = {Neural networks are fragile when confronted with data that significantly
deviates from their training distribution. This is particularly true for
simulation-based inference methods, such as neural amortized Bayesian inference
(ABI), where models trained on simulated data are deployed on noisy real-world
observations. Recent robust approaches employ unsupervised domain adaptation
(UDA) to match the embedding spaces of simulated and observed data. However,
the lack of comprehensive evaluations across different domain mismatches raises
concerns about their reliability in high-stakes applications. We address this gap
by systematically testing UDA approaches across a wide range of
misspecification scenarios, both in silico and in practice. We demonstrate that aligning
summary spaces between domains effectively mitigates the impact of unmodeled
phenomena or noise. However, the same alignment mechanism can lead to failures
under prior misspecifications - a critical finding with practical consequences.
Our results underscore the need for careful consideration of misspecification
types when using UDA to increase the robustness of ABI.},
Year = {2025},
Month = {Feb},
Url = {http://arxiv.org/abs/2502.04949v2},
File = {2502.04949v2.pdf}
}