import numpy as np
import matplotlib.pyplot as plt


def setPlotParams():
    params = {'legend.fontsize': 'x-large',
              'axes.labelsize': 20,
              'axes.titlesize': 20,
              'xtick.labelsize': 20,
              'ytick.labelsize': 20,
              'text.usetex': True,
              'text.latex.preamble': r'\usepackage{bm}'}
    return params


def prob1(p = 0.6, alpha = 0.01):
    #Given $p = 0.6$, solve for $N$ in the inequality $p^{N-1}(N - (N-1)p) < \alpha$, with $\alpha = 0.01$.
    #Taking log10 of both sides and writing $x$ for $N$ gives
    #$f(x) = x\log_{10}p + \log_{10}(1 + (1-p)x/p) - \log_{10}\alpha < 0$.
    #First, solve $f(x) = 0$ using a root finder.
    from scipy.optimize import root_scalar
    def func(x):
        return x*np.log10(p) + np.log10(1 + (1-p)*x/p) - np.log10(alpha)
    soln = root_scalar(func, bracket = [0, 20], method = 'bisect')
    n_soln_eq = np.ceil(soln.root)
    #Second, find the minimum of the absolute value of the function on a grid.
    x = np.linspace(0, 40, 100)
    y = func(x)
    kmin = np.argmin(abs(y))
    n_soln_minabsval = np.ceil(x[kmin])
    #Third, visual estimation from a plot (guided by the solution from the second method).
    plt.figure(figsize = (8, 8))
    plt.rcParams.update(setPlotParams())
    plt.plot(x, y, 'k-', lw = 5)
    plt.plot(np.array([n_soln_minabsval, n_soln_minabsval]), np.array([-100, 100]), 'k--', lw = 2)
    plt.plot(np.array([-100, 100]), np.array([0, 0]))
    plt.title(r'$\bm{f(x) = x\log{p} + \log{\Big(2x/3 + 1\Big)} - \log{\alpha}}$')
    #plt.title('Plot of ' + r'$p^{N-1}(N - (N-1)p) - \alpha = 0$')
    plt.xlabel(r'$\bm{x}$')
    plt.ylabel(r'$\bm{f(x)}$')
    plt.xlim(0, np.ceil(1.2*n_soln_minabsval))
    plt.ylim(-1.05*np.max(y), 1.05*np.max(y))
    plt.savefig('hw2_prob1.png')
    plt.close()
    print('From root-finding method, N = {}. From minimum absolute value method, N = {}.'.format(n_soln_eq.astype(int), n_soln_minabsval.astype(int)))


def prob2():
    pass


def prob3(Ntrials = 1000, Ntosses = 100):
    lasttail = []
    #Part (a): score each toss 0 (tails) or 1 (heads); total score per trial.
    x1 = np.random.choice([0, 1], size = [Ntrials, Ntosses])
    x1score = x1.sum(axis = 1)
    #Part (b): score each toss -1 (tails) or +1 (heads); total score per trial.
    x2 = np.random.choice([-1, 1], size = [Ntrials, Ntosses])
    x2score = x2.sum(axis = 1)
    #Only select those trials in which the 100th toss resulted in heads.
    #We expect there to be about Ntrials/2 such cases.
    lasttossheads = np.array(np.where(x2[:, Ntosses-1] == 1))
    #For these cases, save the location of the most recent tail (this assumes each
    #selected trial has at least one tail, which is essentially certain for 100 tosses).
    #Remember to add 1 to the location to change it to 1-indexing.
    for i in range(lasttossheads.size):
        lasttail.append(1 + np.max(np.argwhere(x2[lasttossheads[0][i], :] == -1)))
    lasttail = np.array(lasttail)
    #Print out diagnostic results.
    x1meanprint = str(x1score.mean().round(decimals = 2))
    x1varprint = str(x1score.var().round(decimals = 2))
    x2meanprint = str(x2score.mean().round(decimals = 2))
    x2varprint = str(x2score.var().round(decimals = 2))
    lasttailmeanprint = str(lasttail.mean().round(decimals = 2))
    lasttailvarprint = str(lasttail.var().round(decimals = 2))
    print('For distribution in part (a), mean = {}, variance = {}'.format(x1meanprint, x1varprint))
    print('For distribution in part (b), mean = {}, variance = {}'.format(x2meanprint, x2varprint))
    print('For part (c), number of times 100th flip is a head = {}'.format(lasttossheads.size))
    print('For distribution in part (c), mean = {}, variance = {}'.format(lasttailmeanprint, lasttailvarprint))
    #Plotting results.
    plt.figure(figsize = (8, 8))
    plt.rcParams.update(setPlotParams())
    nn1, bins, patches = plt.hist(x1score, color = 'black', lw = 2, label = r'mean = ' + x1meanprint + '\\ var = ' + x1varprint, histtype = 'step')
    nn2, bins, patches = plt.hist(x2score, color = 'cyan', lw = 2, label = r'mean = ' + x2meanprint + '\\ var = ' + x2varprint, histtype = 'step')
    nn3, bins, patches = plt.hist(lasttail, color = 'red', lw = 2, label = r'mean = ' + lasttailmeanprint + '\\ var = ' + lasttailvarprint, histtype = 'step')
    plt.title('Simulated distributions for problem 3')
    plt.xlabel(r'$\bm{\textrm{Total score } X}$')
    plt.xlim(1.05*np.min(x2score), Ntosses)
    plt.ylim(0, 1.05*np.max([np.max(nn1), np.max(nn2), np.max(nn3)]))
    plt.legend(loc = 'best')
    plt.savefig('hw2_prob3.png', bbox_inches = 'tight')
    plt.close()


def prob4():
    pass


def prob5(radius = 4.0, Nsamples = 1000):
    #First, the wrong method: uniform in r and theta clusters points near the origin.
    r = radius * np.random.uniform(0, 1, size = Nsamples)
    th = np.pi * np.random.uniform(-1, 1, size = Nsamples)
    x = r * np.cos(th)
    y = r * np.sin(th)
    plt.figure(figsize = (8, 8))
    plt.rcParams.update(setPlotParams())
    plt.scatter(x, y, c = 'r', marker = '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('hw2_prob5_wrong.png', bbox_inches = 'tight')
    plt.close()
    #Now, one of the correct ways: sample uniformly in the bounding square and keep
    #only points inside the circle.
    #Correction for the loss of points when the square is truncated into a circle.
    #The resulting plot will only have APPROXIMATELY Nsamples points.
    area_correct = 4/np.pi
    x, y = radius * np.random.uniform(-1, 1, size = [2, np.ceil(Nsamples*area_correct).astype(int)])
    flag = x**2 + y**2 <= radius**2
    x1 = x[flag]
    y1 = y[flag]
    plt.figure(figsize = (8, 8))
    plt.rcParams.update(setPlotParams())
    plt.scatter(x1, y1, c = 'r', marker = '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('hw2_prob5_right.png', bbox_inches = 'tight')
    plt.close()
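

#The helper below is not part of the original assignment code; it is a minimal
#sanity-check sketch for prob1, assuming the inequality stated there: it evaluates
#p^(N-1) * (N - (N-1)p) directly and compares against alpha. The name check_prob1
#is hypothetical.
def check_prob1(N, p = 0.6, alpha = 0.01):
    #True if N satisfies the inequality p^(N-1)*(N - (N-1)p) < alpha.
    return p**(N - 1) * (N - (N - 1)*p) < alpha

#Example usage (assumption): for p = 0.6 and alpha = 0.01, the N reported by prob1
#should give check_prob1(N) == True while check_prob1(N - 1) == False.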
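

#Another correct approach for prob5 (not in the original code, added as a sketch):
#instead of rejection from the bounding square, draw theta uniformly and draw the
#radius as radius*sqrt(u) with u ~ Uniform(0, 1). The square root is the inverse CDF
#of the radial distribution for a uniform density on a disk, so this yields exactly
#Nsamples points. The function name prob5_sqrt and the output filename are hypothetical.
def prob5_sqrt(radius = 4.0, Nsamples = 1000):
    u = np.random.uniform(0, 1, size = Nsamples)
    th = np.pi * np.random.uniform(-1, 1, size = Nsamples)
    r = radius * np.sqrt(u)  #inverse-CDF sampling of the radius
    x = r * np.cos(th)
    y = r * np.sin(th)
    plt.figure(figsize = (8, 8))
    plt.rcParams.update(setPlotParams())
    plt.scatter(x, y, c = 'r', marker = '.')
    plt.xlabel('x')
    plt.ylabel('y')
    plt.savefig('hw2_prob5_sqrt.png', bbox_inches = 'tight')
    plt.close()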
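

#Usage sketch (assumption: the original script may have been driven from an
#interactive session or a driver not shown here). Running the module directly
#produces the printed diagnostics and the hw2_*.png figures for the implemented problems.
if __name__ == '__main__':
    prob1()
    prob3()
    prob5()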