y, u, theta, xo, yo, ro, zo = inputs
self.map[:, :] = y
self.map[:] = u
ro_ = tt.switch(tt.lt(zo, 0.0), ro, 0.0).eval()
outputs[0][0] = self.map.flux(theta=theta, xo=xo, yo=yo, ro=ro_)
def grad(self, inputs, gradients):
After Change
# HACK: nudge at least one ylm away from zero
# to force starry to compute all derivatives
if (len(y) > 2) and (y[2] == 0):
    self.map[1, 0] = 1.e-15
outputs[0][0] = self.map.flux(theta=theta, xo=xo, yo=yo, zo=zo, ro=ro)
def grad(self, inputs, gradients):
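For context, a minimal sketch of how this perform() body might sit inside a custom Theano Op wrapping a starry 0.x Map (the [:, :] and [:] setters used above). The class name FluxOp, the constructor, and make_node are hypothetical boilerplate added for illustration; only the perform() body comes from the change above, and the gradient machinery behind grad() is omitted.

import theano.tensor as tt
from theano import gof
import starry


class FluxOp(gof.Op):
    # Hypothetical wrapper Op: build one starry Map and reuse it on each call.
    def __init__(self, lmax=2):
        self.map = starry.Map(lmax=lmax)

    def make_node(self, y, u, theta, xo, yo, ro, zo):
        inputs = [tt.as_tensor_variable(arg)
                  for arg in (y, u, theta, xo, yo, ro, zo)]
        # Assume the output flux has the same type as theta (one value per phase).
        return gof.Apply(self, inputs, [inputs[2].type()])

    def perform(self, node, inputs, outputs):
        y, u, theta, xo, yo, ro, zo = inputs
        self.map[:, :] = y
        self.map[:] = u
        # HACK: nudge at least one ylm away from zero
        # to force starry to compute all derivatives
        if (len(y) > 2) and (y[2] == 0):
            self.map[1, 0] = 1.e-15
        # Passing zo lets starry decide whether the occultor is in front of
        # or behind the body, replacing the manual switch on ro.
        outputs[0][0] = self.map.flux(theta=theta, xo=xo, yo=yo, zo=zo, ro=ro)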