BibTeX
@article{2405.02488v3,
Author = {Ali Al Kadhim and Harrison B. Prosper},
Title = {Modeling Sampling Distributions of Test Statistics with {Autograd}},
Eprint = {2405.02488v3},
ArchivePrefix = {arXiv},
PrimaryClass = {stat.ML},
Abstract = {Simulation-based inference methods that feature correct conditional coverage
of confidence sets based on observations that have been compressed to a scalar
test statistic require accurate modeling of either the p-value function or the
cumulative distribution function (cdf) of the test statistic. If the model of
the cdf, which is typically a deep neural network, is a function of the test
statistic, then the derivative of the neural network with respect to the test
statistic furnishes an approximation of the sampling distribution of the test
statistic. We explore whether this approach to modeling conditional
1-dimensional sampling distributions is a viable alternative to the probability
density-ratio method, also known as the likelihood-ratio trick. Relatively
simple, yet effective, neural network models are used whose predictive
uncertainty is quantified through a variety of methods.},
Year = {2024},
Month = {May},
Url = {http://arxiv.org/abs/2405.02488v3},
File = {2405.02488v3.pdf}
}
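
The central step described in the abstract, differentiating a neural-network model of the cdf F(lambda | theta) with respect to the test statistic lambda to obtain the sampling density f(lambda | theta), can be sketched with automatic differentiation. The following minimal sketch uses PyTorch's autograd; the network architecture, function names, and evaluation grid are illustrative assumptions, not the authors' implementation.

import torch
import torch.nn as nn

class CdfNet(nn.Module):
    # Surrogate for the conditional cdf F(lambda | theta); the sigmoid output
    # keeps predictions in [0, 1]. Architecture is an illustrative assumption.
    def __init__(self, n_theta=1, n_hidden=32):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(1 + n_theta, n_hidden), nn.Tanh(),
            nn.Linear(n_hidden, n_hidden), nn.Tanh(),
            nn.Linear(n_hidden, 1), nn.Sigmoid(),
        )

    def forward(self, lam, theta):
        return self.net(torch.cat([lam, theta], dim=-1))

def pdf_from_cdf(model, lam, theta):
    # dF/dlambda approximates the sampling density f(lambda | theta).
    lam = lam.clone().requires_grad_(True)
    F = model(lam, theta)
    # Summing the cdf outputs yields one derivative per input row.
    (dF_dlam,) = torch.autograd.grad(F.sum(), lam)
    return dF_dlam

# Evaluate the approximate density on a grid of test-statistic values
# for a fixed (hypothetical) parameter value theta = 1.
model = CdfNet()
lam = torch.linspace(0.0, 5.0, 100).unsqueeze(-1)
theta = torch.full_like(lam, 1.0)
f_hat = pdf_from_cdf(model, lam, theta)

In practice the cdf model would first be fitted to simulated (test statistic, parameter) pairs before its derivative is taken; that training step is omitted here.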