#pylint: disable=W0401,W0614,W0622
#All pylint codes: http://pylint.pycqa.org/en/latest/technical_reference/features.html

# This script finds the median values of the radius (cylindrical) and height of
# a Miyamoto-Nagai disk as a function of the b parameter by numerical
# integration. After obtaining the results for a large number of points, we find
# analytical fitting formulae.
from pylab import *
import numpy as np
import scipy.integrate, scipy.interpolate, scipy.optimize

write_data = False  # Generating the data file takes a long time; do it just once
file_name = 'miyamoto-nagai-properties.dat'

# Miyamoto-Nagai density as a function of cylindrical radius d and height z,
# for total mass M and scale lengths a and b.
rho_mn = lambda d, z, M, a, b: \
    (b**2 * M / (4*pi)) * \
    (a*d**2 + (a + 3*sqrt(z**2+b**2))*(a+sqrt(z**2+b**2))**2) / \
    ((d**2 + (a+sqrt(z**2+b**2))**2)**2.5 * (z**2+b**2)**1.5)

if write_data:
    file_obj = open(file_name, 'w')
    n = 4096*2
    a = 1
    f = 1000
    b_array = logspace(-log10(64), log10(64), 31)
    for i, b in enumerate(b_array):
        # Mass per unit height: integrate the density over the cylindrical radius
        # (the overall normalization is irrelevant; the cumulative profile is
        # normalized below).
        dist_lambda = lambda z: scipy.integrate.quad(lambda d: 4*pi*d*rho_mn(d, z, 1, a, b), 0, inf)[0]
        z = logspace(log10(b/f), log10(b*f), n-1)
        z = insert(z, 0, 0)
        dist = array([dist_lambda(z_) for z_ in z])
        # Note: cumtrapz was renamed cumulative_trapezoid in SciPy 1.6 and the
        # old name was removed in SciPy 1.14.
        cumulative = scipy.integrate.cumtrapz(dist, z, initial=0)
        cumulative /= cumulative[-1]
        interp = scipy.interpolate.interp1d(z, cumulative, fill_value=(cumulative[0], cumulative[-1]), bounds_error=False)
        # Median height: where the normalized cumulative profile crosses 0.5.
        m_z = scipy.optimize.brentq(lambda x: interp(x)-0.5, b/f, b*f)
        # Mass per unit cylindrical radius: integrate the density over the height.
        dist_lambda = lambda d: 2*scipy.integrate.quad(lambda z: 4*pi*d*rho_mn(d, z, 1, a, b), 0, inf)[0]
        d = logspace(-log10(f), log10(f), n-1)
        d = insert(d, 0, 0)
        dist = array([dist_lambda(d_) for d_ in d])
        cumulative = scipy.integrate.cumtrapz(dist, d, initial=0)
        cumulative /= cumulative[-1]
        interp = scipy.interpolate.interp1d(d, cumulative, fill_value=(cumulative[0], cumulative[-1]), bounds_error=False)
        # Median cylindrical radius.
        m_d = scipy.optimize.brentq(lambda x: interp(x)-0.5, 1/f, f)
        file_obj.write(f'{i:02d} {b:.8E} {m_d:.8E} {m_z:.8E}\n')
    file_obj.close()

i, b, m_d, m_z = np.loadtxt(file_name, unpack=True)

# m_z appears to be proportional to b; we just need to find the slope.
# polyfit returns [slope, intercept]; assuming the log-log slope is 1, the
# exponentiated intercept is the proportionality constant m_z/b.
_, slope = polyfit(log(b), log(m_z), 1)
slope = exp(slope)
m_z_pred = slope*b
scatter = std((m_z - m_z_pred)/m_z)
max_err = abs((m_z - m_z_pred)/m_z).max()
print(f'Slope: {slope:.5E}')
print(f'Scatter: {scatter:.5E} Maximum error: {max_err:.5E}')
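
# Illustrative sanity check (an added sketch, not part of the original
# pipeline): the proportionality m_z ∝ b assumed above implies a log-log
# slope of 1, which can be confirmed by also keeping the fitted slope.
# slope_loglog is a name introduced here for illustration only.
slope_loglog, _ = polyfit(log(b), log(m_z), 1)
print(f'log-log slope of m_z vs b: {slope_loglog:.5f} (expected ~1)')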

# m_d appears to be constant for low b and rises linearly for high b.
# We use a simple model that blends these two linear functions (of log b),
# switching smoothly between them with an arctan-based weight.
def func(x, p1, p2, p3, p4):
    blend_func = lambda x: np.arctan(x)/np.pi + 0.5
    return p1 + (p2*x+p3)*blend_func(x/p4)

cost = lambda p: sum((log(m_d) - func(log(b), *p))**2)
p = scipy.optimize.minimize(cost, [1, 1, -1, 1]).x
m_d_pred = exp(func(log(b), *p))
scatter = std((m_d - m_d_pred)/m_d)
max_err = abs((m_d - m_d_pred)/m_d).max()
print(f'Parameters: {p}')
print(f'Scatter: {scatter:.5E} Maximum error: {max_err:.5E}')

def generate_particles(n, a, b, d_max=5, z_max=5, batch_mult=1):
    """Draw n particle positions from the Miyamoto-Nagai density by rejection
    sampling, truncating at cylindrical radius d_max and height z_max."""
    rho_max = rho_mn(0, 0, 1, a, b)
    n_batch = int(2**ceil(log(n)/log(2)))*batch_mult
    x_new = empty(0)
    y_new = empty(0)
    z_new = empty(0)
    while len(x_new) < n:
        # Uniform (x, y) proposals in a square, trimmed to the disk d < d_max;
        # z proposals uniform in [0, z_max).
        x_ = (rand(n_batch)*2-1)*d_max*sqrt(2)
        y_ = (rand(n_batch)*2-1)*d_max*sqrt(2)
        d_ = sqrt(x_**2 + y_**2)
        mask = d_ < d_max
        x_ = x_[mask]
        y_ = y_[mask]
        d_ = d_[mask]
        z_ = rand(len(d_))*z_max
        # Accept each proposal with probability rho/rho_max.
        p = rho_mn(d_, z_, 1, a, b)/rho_max
        random_number = rand(len(d_))
        mask = random_number < p
        x_ = x_[mask]
        y_ = y_[mask]
        z_ = z_[mask]
        # The density is symmetric in z; assign a random sign to each height.
        z_sign = np.round(rand(len(z_)))*2 - 1
        z_ *= z_sign
        x_new = append(x_new, x_)
        y_new = append(y_new, y_)
        z_new = append(z_new, z_)
        print(len(x_new))  # progress: accepted particles so far
    x_new = x_new[:n]
    y_new = y_new[:n]
    z_new = z_new[:n]
    return x_new, y_new, z_new

x, y, z = generate_particles(43807, 2.98, 1.61, d_max=50, z_max=50, batch_mult=16)
figure()
plot(x, z, ',')
xlim(-40, 40); ylim(-20, 20); gca().set_aspect('equal')
show()
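
# Illustrative cross-check (an added sketch, not part of the original script).
# The Miyamoto-Nagai profile is self-similar under rescaling all lengths
# (d, z, a, b) together, so a median computed at a=1 for a given b/a can be
# multiplied by a; under that assumption the fits above predict the medians of
# the realization generated with a=2.98, b=1.61. The names a_gen, b_gen,
# m_z_expected and m_d_expected are introduced here for illustration only.
a_gen, b_gen = 2.98, 1.61
m_z_expected = slope*b_gen                            # m_z ∝ b, independent of a
m_d_expected = a_gen*exp(func(log(b_gen/a_gen), *p))  # rescaled m_d fit
print(f'median |z|: sampled {np.median(abs(z)):.3f}, fitted {m_z_expected:.3f}')
print(f'median R:   sampled {np.median(sqrt(x**2+y**2)):.3f}, fitted {m_d_expected:.3f}')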