BibTeX
@article{2407.10877v2,
Author = {Denise Lanzieri and Justine Zeghal and T. Lucas Makinen and Alexandre Boucaud and Jean-Luc Starck and François Lanusse},
Title = {Optimal Neural Summarisation for Full-Field Weak Lensing Cosmological
Implicit Inference},
Eprint = {2407.10877v2},
DOI = {10.1051/0004-6361/202451535},
ArchivePrefix = {arXiv},
PrimaryClass = {astro-ph.CO},
Abstract = {Traditionally, weak lensing cosmological surveys have been analyzed using
summary statistics motivated by their analytically tractable likelihoods, or by
their ability to access higher-order information, at the cost of requiring
Simulation-Based Inference (SBI) approaches. While informative, these
statistics are neither designed nor guaranteed to be statistically sufficient.
With the rise of deep learning, it has become possible to create summary
statistics optimized to extract the full information content of the data. We
compare different neural summarization strategies proposed in the weak lensing
literature to assess which loss functions lead to theoretically optimal summary
statistics for full-field inference. In doing so, we aim to provide the community
with guidelines and insights to help guide future neural-based inference analyses.
We design an experimental setup to isolate the impact of the loss function used
to train neural networks. We have developed the sbi_lens JAX package, which
implements an automatically differentiable lognormal wCDM LSST-Y10 weak lensing
simulator. The explicit full-field posterior obtained with a Hamiltonian
Monte Carlo sampler gives us a ground truth against which to compare different
compression strategies. We provide theoretical insight into the loss functions
used in the literature and show that some do not necessarily lead to sufficient
statistics (e.g. Mean Square Error (MSE)), while those motivated by information
theory (e.g. Variational Mutual Information Maximization (VMIM)) can. Our
numerical experiments confirm these insights and show, in our simulated wCDM
scenario, that the Figure of Merit (FoM) of an analysis using neural summaries
optimized under VMIM achieves 100\% of the reference $\Omega_c - \sigma_8$
full-field FoM, while an analysis using neural summaries trained under MSE
achieves only 81\% of the same reference FoM.},
Year = {2024},
Month = {Jul},
Note = {A\&A 697, A162 (2025)},
Url = {http://arxiv.org/abs/2407.10877v2},
File = {2407.10877v2.pdf}
}
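
As a companion to the abstract's contrast between MSE and VMIM compression losses, here is a minimal JAX sketch of the two training objectives. It is not the sbi_lens API: the MLP compressor, the toy data shapes, and the diagonal-Gaussian conditional density standing in for the paper's normalizing flow are all illustrative assumptions.

import jax
import jax.numpy as jnp

# Toy dimensions (illustrative assumptions, not LSST-Y10 convergence maps).
n_pix, n_theta = 64, 2          # flattened "map" size; (Omega_c, sigma_8)

def init_mlp(key, sizes):
    # Initialise a small fully connected network as a list of (W, b) pairs.
    params = []
    for d_in, d_out in zip(sizes[:-1], sizes[1:]):
        key, sub = jax.random.split(key)
        params.append((jax.random.normal(sub, (d_in, d_out)) / jnp.sqrt(d_in),
                       jnp.zeros(d_out)))
    return params

def mlp(params, x):
    for w, b in params[:-1]:
        x = jax.nn.relu(x @ w + b)
    w, b = params[-1]
    return x @ w + b

def mse_loss(comp_params, x, theta):
    # MSE objective: regress theta from the neural summary t = f(x).
    # As the abstract notes, this is not guaranteed to yield a sufficient statistic.
    t = mlp(comp_params, x)
    return jnp.mean((t - theta) ** 2)

def vmim_loss(comp_params, dens_params, x, theta):
    # VMIM-style objective: minimise -E[log q(theta | t)], a variational lower
    # bound on the mutual information I(theta; t). Here q is a diagonal Gaussian
    # for brevity; the paper's setup uses a normalizing flow instead.
    t = mlp(comp_params, x)
    out = mlp(dens_params, t)
    mean, log_std = out[..., :n_theta], out[..., n_theta:]
    log_q = -0.5 * jnp.sum(((theta - mean) / jnp.exp(log_std)) ** 2
                           + 2.0 * log_std + jnp.log(2.0 * jnp.pi), axis=-1)
    return -jnp.mean(log_q)

# One gradient step on fake (x, theta) pairs, just to show the shape of training.
key = jax.random.PRNGKey(0)
k1, k2, k3, k4 = jax.random.split(key, 4)
comp_params = init_mlp(k1, [n_pix, 128, n_theta])
dens_params = init_mlp(k2, [n_theta, 64, 2 * n_theta])
theta = jax.random.uniform(k3, (32, n_theta))
x = jax.random.normal(k4, (32, n_pix)) + theta.sum(axis=1, keepdims=True)
loss, grads = jax.value_and_grad(vmim_loss, argnums=(0, 1))(
    comp_params, dens_params, x, theta)

In the paper itself the compressor is a convolutional network acting on simulated convergence maps and q(theta | t) is a normalizing flow; the Gaussian density above is only to keep the sketch self-contained and runnable.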