This famous theorem (the Poisson limit theorem, also known as the law of rare events) states that when you have:

- A large number of independent trials ($n \to \infty$)
- A small probability of success on each trial ($p \to 0$)
- An expected number of successes $\lambda = np$ that remains constant
then the binomial distribution with parameters $n$ and $p$ converges to the Poisson distribution with mean $\lambda$.
Using the substitution $p = \lambda / n$, the binomial probability mass function can be rewritten entirely in terms of $\lambda$ and $n$.
Through algebraic manipulation and using the limit as $n \to \infty$, one obtains
$$\lim_{n \to \infty} \binom{n}{k} p^k (1 - p)^{n - k} = \frac{\lambda^k e^{-\lambda}}{k!}.$$
# Fix the RNG state so the simulation is reproducible.
set.seed(20250216)

# Poisson intensity parameter (expected number of successes).
lambda <- 2

# Many trials, each with a tiny success probability, chosen so that
# the expected count n * p equals lambda.
n <- 100000
p <- lambda / n

# Number of simulated draws taken from each distribution.
B <- 500000

# B draws of "successes out of n Bernoulli(p) trials" ...
draws_binom <- rbinom(B, size = n, prob = p)
# equivalent, but much slower:
# draws_binom <- replicate(B, { sum(runif(n) <= p) })

# ... and B draws from the limiting Poisson(lambda) distribution.
draws_poisson <- rpois(B, lambda = lambda)
# Empirical PMFs: the relative frequency of each observed count.
# prop.table(table(x)) divides each cell by the total count B,
# so this matches table(x) / B exactly.
prop.table(table(draws_binom))
#> 0 1 2 3 4 5 6 7 8 9 10 11
#> 0.135834 0.269874 0.270208 0.180854 0.090558 0.036096 0.012078 0.003396 0.000870 0.000192 0.000038 0.000002
prop.table(table(draws_poisson))
#> 0 1 2 3 4 5 6 7 8 9 10 11
#> 0.135438 0.270674 0.269854 0.181228 0.090568 0.035796 0.011954 0.003410 0.000848 0.000198 0.000026 0.000006
library(tinyplot)
tinytheme("clean2")

# Stack both samples into one long-format data frame so the plot can
# be faceted by distribution. Equivalent to rbind()-ing two frames.
res <- data.frame(
  type = rep(
    c("Binomial (large n, small p)", "Poisson"),
    times = c(length(draws_binom), length(draws_poisson))
  ),
  draws = c(draws_binom, draws_poisson)
)

# Density-scale histograms with integer-centered bins: each break sits
# at k - 0.5 so every bar covers exactly one count value k.
tinyplot(
  ~ draws, facet = ~type,
  data = res,
  type = "histogram",
  freq = FALSE,
  breaks = seq(-0.5, max(res$draws) + 0.5, by = 1),
  xlab = ""
)