--- /dev/null
+from math import sqrt
+from statistics import mean, variance
+
+from numpy.random import normal, seed
+
+# seed the random number generator so the figures quoted below are reproducible;
+# comment this out to run a new experiment
+seed(1)
+
+def cohens_d(X, Y):
+    # difference in means divided by the pooled standard deviation
+    return (
+        (mean(X) - mean(Y)) /
+        sqrt(
+            (len(X)*variance(X) + len(Y)*variance(Y)) /
+            (len(X) + len(Y))
+        )
+    )
+
+def population_with_error(μ, σ, n):
+    def trait():
+        return normal(μ, 1)
+    def measurement_error():
+        return normal(0, σ)
+    return [trait() + measurement_error() for _ in range(n)]
+
+
+# trait differs by 1 standard deviation
+adjusted_f = population_with_error(1, 0, 10000)
+adjusted_m = population_with_error(0, 0, 10000)
+
+# as above, but with 0.5 standard units of measurement error
+measured_f = population_with_error(1, 0.5, 10000)
+measured_m = population_with_error(0, 0.5, 10000)
+
+smart_d = cohens_d(adjusted_f, adjusted_m)
+print(smart_d) # 1.0193773432617055 — d≈1.0, as expected!
+
+naïve_d = cohens_d(measured_f, measured_m)
+print(naïve_d) # 0.8953395386313235
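+
+# quick check against theory: with independent measurement error, the observed
+# effect is attenuated by a factor of σ_trait / sqrt(σ_trait² + σ_error²);
+# here that's 1/sqrt(1 + 0.5²) ≈ 0.894, which the simulated 0.895 tracks closely
+predicted_naïve_d = 1 / sqrt(1 + 0.5**2)
+print(predicted_naïve_d)  # ≈ 0.894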
+
+
+def performance(g, σ_g, s, n):
+    def general_ability():
+        return normal(g, σ_g)
+    def special_ability():
+        return normal(s, 1)
+    return [general_ability() + special_ability() for _ in range(n)]
+
+# ♀ one standard deviation better than ♂ at the special factor
+population_f = performance(0, 1, 1, 10000)
+population_m = performance(0, 1, 0, 10000)
+
+# ... but suppose we control/match for general intelligence
+matched_f = performance(0, 0, 1, 10000)
+matched_m = performance(0, 0, 0, 10000)
+
+population_d = cohens_d(population_f, population_m)
+print(population_d) # 0.7287587808164793
+
+matched_d = cohens_d(matched_f, matched_m)
+print(matched_d) # 1.018362581243161
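+
+# closed-form comparison: in the full population, the 1-standard-deviation gap
+# on the special factor is diluted by the general factor's variance, giving
+# d = 1/sqrt(1² + 1²) ≈ 0.707; with the general factor held constant, d = 1
+predicted_population_d = 1 / sqrt(1**2 + 1**2)
+print(predicted_population_d)  # ≈ 0.707, vs. the simulated 0.729 above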