@article{ECP1037,
author = {Antar Bandyopadhyay and David Aldous},
title = {How to Combine Fast Heuristic Markov Chain Monte Carlo with Slow Exact Sampling},
journal = {Electron. Commun. Probab.},
fjournal = {Electronic Communications in Probability},
volume = {6},
year = {2001},
keywords = {Confidence interval, Exact sampling, Markov chain Monte Carlo.},
abstract = {Given a probability law $\pi$ on a set $S$ and a function $g : S \rightarrow \mathbb{R}$, suppose one wants to estimate the mean $\bar{g} = \int g \, d\pi$. The Markov Chain Monte Carlo method consists of inventing and simulating a Markov chain with stationary distribution $\pi$. Typically one has no a priori bounds on the chain's mixing time, so even if simulations suggest rapid mixing one cannot infer rigorous confidence intervals for $\bar{g}$. But suppose there is also a separate method which (slowly) gives samples exactly from $\pi$. Using $n$ exact samples, one could immediately get a confidence interval of length $O(n^{-1/2})$. But one can do better. Use each exact sample as the initial state of a Markov chain, and run each of these $n$ chains for $m$ steps. We show how to construct confidence intervals which are always valid, and which, if the (unknown) relaxation time of the chain is sufficiently small relative to $m/n$, have length $O(n^{-1} \log n)$ with high probability.},
pages = {no. 8, 79--89},
issn = {1083-589X},
doi = {10.1214/ECP.v6-1037},
url = {http://ecp.ejpecp.org/article/view/1037}}
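
The following Python sketch illustrates only the simulation layout described in the abstract: $n$ chains, each started from a slow exact draw of $\pi$ and advanced $m$ steps of the fast heuristic kernel. All names (chains_from_exact_starts, the toy lazy walk, the choice n=50, m=200) are invented for illustration. The interval it returns is the naive pooled normal-theory interval over the $n$ independent trajectory averages, which is valid because the chains start in stationarity but only attains the $O(n^{-1/2})$ rate; it is not the paper's refined construction achieving length $O(n^{-1} \log n)$.

import numpy as np
from statistics import NormalDist

def chains_from_exact_starts(exact_sampler, step, g, n, m, rng, alpha=0.05):
    # Start n independent chains from exact draws of pi (the slow sampler),
    # run each for m steps of the fast heuristic kernel, and pool the n
    # i.i.d. trajectory averages of g into a normal-theory interval.
    means = np.empty(n)
    for i in range(n):
        x = exact_sampler(rng)           # slow but exact sample from pi
        total = 0.0
        for _ in range(m):
            x = step(x, rng)             # fast MCMC transition preserving pi
            total += g(x)
        means[i] = total / m             # unbiased: the chain starts in stationarity
    z = NormalDist().inv_cdf(1 - alpha / 2)
    centre = float(means.mean())
    half = z * float(means.std(ddof=1)) / np.sqrt(n)
    return centre - half, centre + half

# Toy usage (hypothetical): pi is uniform on {0,...,9}, which is stationary for
# the symmetric lazy walk below; g is the identity, so the true mean is 4.5.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    K = 10
    exact_sampler = lambda r: int(r.integers(K))
    def lazy_walk(x, r):
        return min(max(x + int(r.choice([-1, 0, 1])), 0), K - 1)
    lo, hi = chains_from_exact_starts(exact_sampler, lazy_walk, float, n=50, m=200, rng=rng)
    print(f"95% interval for the mean: [{lo:.3f}, {hi:.3f}]")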