scale = 0.4 // 1 \ lambda
sampling_dist = stats.expon(scale=scale)
exp_samples = sampling_dist.rvs(size=n_samples).flatten()
exp_f = sampling_dist.pdf(exp_samples) /Ǘ / scale * np.exp(-exp_samples/scale)
// check shapes
assert exp_samples.shape[0] == exp_f.shape[0] == n_samples
for i in range(x_cond.shape[0]):
// flip the normal exponential distribution by negating it & placing it"s mode at the VaR value
y_samples = VaRs[i] - exp_samples
x_cond_tiled = np.tile(np.expand_dims(x_cond[i,:], axis=0), (n_samples, 1))
assert x_cond_tiled.shape == (n_samples, self.ndim_x)
p = self.pdf(x_cond_tiled, y_samples).flatten()
q = exp_f.flatten()
importance_weights = p / q
cvar = np.mean(y_samples * importance_weights, axis=0) / alpha
CVaRs[i] = cvar
return CVaRs
# --- After Change ---
def _conditional_value_at_risk_mc_pdf(self, VaRs, x_cond, alpha=0.01, n_samples=10 ** 6):
assert VaRs.shape[0] == x_cond.shape[0], "same number of x_cond must match the number of values_at_risk provided"
assert self.ndim_y == 1, "this function only supports only ndim_y = 1"
assert x_cond.ndim == 2
n_samples_int, lower, _ = self._determine_integration_bounds()