BibTeX
@article{2102.06522v2,
Author = {Samuel Wiqvist and Jes Frellsen and Umberto Picchini},
Title = {Sequential Neural Posterior and Likelihood Approximation},
Eprint = {2102.06522v2},
ArchivePrefix = {arXiv},
PrimaryClass = {stat.ML},
Abstract = {We introduce the sequential neural posterior and likelihood approximation
(SNPLA) algorithm. SNPLA is a normalizing flows-based algorithm for inference
in implicit models, and therefore is a simulation-based inference method that
only requires simulations from a generative model. SNPLA avoids Markov chain
Monte Carlo sampling and the correction steps for the parameter proposal
function that similar methods introduce, which can be numerically unstable or
restrictive. By utilizing the reverse KL divergence, SNPLA learns
both the likelihood and the posterior in a sequential manner. Over four
experiments, we show that SNPLA performs competitively when utilizing the same
number of model simulations as used in other methods, even though the inference
problem for SNPLA is more complex due to the joint learning of posterior and
likelihood function. Because it uses normalizing flows, SNPLA generates
posterior draws much faster (by four orders of magnitude) than MCMC-based
methods.},
Year = {2021},
Month = {Feb},
Url = {http://arxiv.org/abs/2102.06522v2},
File = {2102.06522v2.pdf}
}
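
For orientation, below is a minimal toy sketch of the scheme the abstract describes: per round, simulate from the model, fit a surrogate likelihood by maximum likelihood (forward KL), then fit a posterior approximation by minimizing the reverse KL against prior times surrogate likelihood, with no MCMC anywhere. The simulator, the conditional diagonal Gaussians standing in for the paper's full normalizing flows, and all hyperparameters are illustrative assumptions, not the authors' implementation.

```python
# Toy sketch of the SNPLA idea (assumptions throughout; not the authors' code).
# Conditional diagonal Gaussians stand in for conditional normalizing flows.
import torch
import torch.nn as nn

class CondGaussian(nn.Module):
    """Conditional diagonal Gaussian: a minimal stand-in for a conditional flow."""
    def __init__(self, cond_dim, out_dim, hidden=64):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(cond_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, 2 * out_dim),
        )

    def params(self, cond):
        mu, log_sigma = self.net(cond).chunk(2, dim=-1)
        return mu, log_sigma.clamp(-5.0, 5.0)

    def log_prob(self, x, cond):
        mu, log_sigma = self.params(cond)
        return torch.distributions.Normal(mu, log_sigma.exp()).log_prob(x).sum(-1)

    def rsample(self, cond):
        mu, log_sigma = self.params(cond)
        return mu + log_sigma.exp() * torch.randn_like(mu)  # reparameterized draw

# Hypothetical toy model: x | theta ~ N(theta, 0.5^2), theta ~ N(0, 1).
def simulator(theta):
    return theta + 0.5 * torch.randn_like(theta)

prior = torch.distributions.Normal(0.0, 1.0)
x_obs = torch.tensor([[1.5]])

q_lik = CondGaussian(cond_dim=1, out_dim=1)   # surrogate likelihood q(x | theta)
q_post = CondGaussian(cond_dim=1, out_dim=1)  # posterior approx. q(theta | x_obs)
opt_lik = torch.optim.Adam(q_lik.parameters(), lr=1e-2)
opt_post = torch.optim.Adam(q_post.parameters(), lr=1e-2)

for rnd in range(4):  # sequential rounds
    # Propose parameters: prior in round 0, current posterior approx. afterwards.
    with torch.no_grad():
        theta = prior.sample((500, 1)) if rnd == 0 else q_post.rsample(x_obs.expand(500, 1))
        x = simulator(theta)

    # 1) Fit the likelihood model by maximum likelihood (forward KL).
    for _ in range(300):
        opt_lik.zero_grad()
        (-q_lik.log_prob(x, theta).mean()).backward()
        opt_lik.step()

    # 2) Fit the posterior model by reverse KL against prior * surrogate likelihood:
    #    E_{theta ~ q_post}[ log q_post(theta) - log p(theta) - log q_lik(x_obs | theta) ].
    #    Only q_post is updated here; stale grads on q_lik are cleared in step 1.
    for _ in range(300):
        opt_post.zero_grad()
        cond = x_obs.expand(500, 1)
        theta_s = q_post.rsample(cond)
        kl = (q_post.log_prob(theta_s, cond)
              - prior.log_prob(theta_s).sum(-1)
              - q_lik.log_prob(cond, theta_s)).mean()
        kl.backward()
        opt_post.step()

# Posterior sampling is a single forward pass, with no MCMC.
with torch.no_grad():
    draws = q_post.rsample(x_obs.expand(1000, 1))
print(draws.mean().item())  # should land near the analytic posterior mean 1.2
```

The design point mirrored here is the one the abstract highlights: once trained, the posterior model is sampled in a single pass, which is why SNPLA avoids MCMC (and its speed penalty) at inference time.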