From dce489c1542efbc4c329f3be5ee4e3728e43690e Mon Sep 17 00:00:00 2001 From: Jakob van Santen Date: Wed, 25 Sep 2024 14:00:28 +0200 Subject: [PATCH] numpy -> np see https://github.com/icecube/toise/pull/46 --- .gitignore | 1 + toise/angular_resolution.py | 58 ++--- toise/effective_areas.py | 239 +++++++++--------- toise/energy_resolution.py | 32 +-- .../externals/AtmosphericSelfVeto/__init__.py | 52 ++-- .../externals/AtmosphericSelfVeto/selfveto.py | 118 ++++----- toise/factory.py | 38 +-- toise/fictive.py | 29 ++- toise/figures/diffuse/dnnc.py | 3 +- toise/figures/diffuse/spectrum.py | 8 +- toise/multillh.py | 44 ++-- toise/plotting.py | 28 +- toise/surface_veto.py | 78 +++--- toise/surfaces.py | 176 ++++++------- 14 files changed, 453 insertions(+), 451 deletions(-) diff --git a/.gitignore b/.gitignore index 563212d..a09ab5d 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,4 @@ notebooks/**/*.pdf notebooks/**/*.fits notebooks/**/.ipynb_checkpoints .vscode +env* diff --git a/toise/angular_resolution.py b/toise/angular_resolution.py index b8f4a75..a85c3d6 100644 --- a/toise/angular_resolution.py +++ b/toise/angular_resolution.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from scipy import interpolate, stats from .util import center, data_dir @@ -13,7 +13,7 @@ def get_angular_resolution( return FictiveCascadePointSpreadFunction() elif channel == "radio": return FictiveCascadePointSpreadFunction( - lower_limit=numpy.radians(2), crossover_energy=0 + lower_limit=np.radians(2), crossover_energy=0 ) if geometry == "Fictive": return FictiveKingPointSpreadFunction() @@ -32,10 +32,10 @@ class AngularResolution(object): def __init__( self, fname=os.path.join(data_dir, "veto", "aachen_angular_resolution.npz") ): - f = numpy.load(fname) + f = np.load(fname) xd = f["log10_energy"] yd = f["cos_theta"] - x, y = numpy.meshgrid(xd, yd) + x, y = np.meshgrid(xd, yd) zd = f["median_opening_angle"] # extrapolate with a constant zd[-8:, :] = zd[-9, :] @@ -45,12 +45,12 @@ def __init__( ) def median_opening_angle(self, energy, cos_theta): - loge, ct = numpy.broadcast_arrays(numpy.log10(energy), cos_theta) + loge, ct = np.broadcast_arrays(np.log10(energy), cos_theta) mu_reco = self._spline.ev(loge.flatten(), ct.flatten()).reshape(loge.shape) # dirty hack: add the muon/neutrino opening angle in quadtrature - return numpy.radians(numpy.sqrt(mu_reco**2 + 0.7**2 / (10 ** (loge - 3)))) + return np.radians(np.sqrt(mu_reco**2 + 0.7**2 / (10 ** (loge - 3)))) class PointSpreadFunction(object): @@ -72,16 +72,16 @@ def __init__(self, fname="aachen_psf.fits", scale=1.0): self._scale = scale def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays( - numpy.degrees(psi) / self._scale, numpy.log10(energy), cos_theta + psi, loge, ct = np.broadcast_arrays( + np.degrees(psi) / self._scale, np.log10(energy), cos_theta ) - loge = numpy.clip(loge, *self._loge_extents) + loge = np.clip(loge, *self._loge_extents) if self._mirror: - ct = -numpy.abs(ct) - ct = numpy.clip(ct, *self._ct_extents) + ct = -np.abs(ct) + ct = np.clip(ct, *self._ct_extents) evaluates = self._spline.evaluate_simple([loge, ct, psi]) - return numpy.where(numpy.isfinite(evaluates), evaluates, 1.0) + return np.where(np.isfinite(evaluates), evaluates, 1.0) class _king_gen(stats.rv_continuous): @@ -122,10 +122,10 @@ class _fm_gen(stats.rv_continuous): """ def _argcheck(self, kappa): - return numpy.all(kappa >= 0) + return np.all(kappa >= 0) def _pdf(self, x, kappa): - return numpy.exp(kappa * x) * kappa / (2 * 
numpy.sinh(kappa)) + return np.exp(kappa * x) * kappa / (2 * np.sinh(kappa)) fisher = _fm_gen(name="fisher", a=-1, b=1) @@ -136,17 +136,17 @@ def __init__(self, scale=1.0): self._scale = scale def get_quantile(self, p, energy, cos_theta): - p, loge, ct = numpy.broadcast_arrays(p, numpy.log10(energy), cos_theta) + p, loge, ct = np.broadcast_arrays(p, np.log10(energy), cos_theta) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) else: scale = self._scale sigma, gamma = self.get_params(loge, ct) - return numpy.radians(king.ppf(p, sigma, gamma)) / scale + return np.radians(king.ppf(p, sigma, gamma)) / scale def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays( - numpy.degrees(psi), numpy.log10(energy), cos_theta + psi, loge, ct = np.broadcast_arrays( + np.degrees(psi), np.log10(energy), cos_theta ) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) @@ -234,14 +234,14 @@ def get_params(self, log_energy, cos_theta): """ Interpolate for sigma and gamma """ - with numpy.errstate(invalid="ignore"): - angular_resolution_scale = numpy.where( + with np.errstate(invalid="ignore"): + angular_resolution_scale = np.where( log_energy < 6, 0.05 * (6 - log_energy) ** 2.5, 0 ) # dip at the horizon, improvement with energy up to 1e6 sigma = 10 ** ( - numpy.where( - numpy.abs(cos_theta) < 0.15, + np.where( + np.abs(cos_theta) < 0.15, (cos_theta * 3) ** 2 - 1.2, (cos_theta / 1.05) ** 2 - 1.0, ) @@ -253,17 +253,17 @@ def get_params(self, log_energy, cos_theta): class FictiveCascadePointSpreadFunction(object): - def __init__(self, lower_limit=numpy.radians(5), crossover_energy=1e6): + def __init__(self, lower_limit=np.radians(5), crossover_energy=1e6): self._b = lower_limit - self._a = self._b * numpy.sqrt(crossover_energy) + self._a = self._b * np.sqrt(crossover_energy) def get_params(self, loge, cos_theta): - return self._a / numpy.sqrt(10**loge) + self._b + return self._a / np.sqrt(10**loge) + self._b def __call__(self, psi, energy, cos_theta): - psi, energy, cos_theta = numpy.broadcast_arrays(psi, energy, cos_theta) - sigma = self._a / numpy.sqrt(energy) + self._b + psi, energy, cos_theta = np.broadcast_arrays(psi, energy, cos_theta) + sigma = self._a / np.sqrt(energy) + self._b - evaluates = 1 - numpy.exp(-(psi**2) / (2 * sigma**2)) - return numpy.where(numpy.isfinite(evaluates), evaluates, 1.0) + evaluates = 1 - np.exp(-(psi**2) / (2 * sigma**2)) + return np.where(np.isfinite(evaluates), evaluates, 1.0) diff --git a/toise/effective_areas.py b/toise/effective_areas.py index 20b4218..8be087e 100644 --- a/toise/effective_areas.py +++ b/toise/effective_areas.py @@ -5,7 +5,6 @@ import dashi import healpy -import numpy import numpy as np import tables from scipy import interpolate @@ -36,17 +35,17 @@ def load_jvs_mese(): for j, channel in enumerate(("cascade", "track")): for interaction in "cc", "nc", "gr": try: - data = numpy.loadtxt(base.format(**locals())) + data = np.loadtxt(base.format(**locals())) except: pass if shape is None: edges = [] for k in range(4): - lo = numpy.unique(data[:, k * 2]) - hi = numpy.unique(data[:, k * 2 + 1]) - edges.append(numpy.concatenate((lo, [hi[-1]]))) + lo = np.unique(data[:, k * 2]) + hi = np.unique(data[:, k * 2 + 1]) + edges.append(np.concatenate((lo, [hi[-1]]))) shape = [len(e) - 1 for e in reversed(edges)] - aeff = numpy.zeros([6] + list(reversed(shape)) + [2]) + aeff = np.zeros([6] + list(reversed(shape)) + [2]) aeff[i, ..., j] += data[:, -2].reshape(shape).T return edges, aeff @@ -59,7 +58,7 @@ def __init__( 
if not filename.startswith("/"): filename = os.path.join(data_dir, "selection_efficiency", filename) if filename.endswith(".npz"): - f = numpy.load(filename) + f = np.load(filename) loge = f["log_energy"] eff = f["efficiency"] @@ -75,13 +74,13 @@ def __init__( detected.project([0]), generated.project([0]) ) - edges = numpy.concatenate((sp.x - sp.xerr, [sp.x[-1] + sp.xerr[-1]])) - loge = 0.5 * (numpy.log10(edges[1:]) + numpy.log10(edges[:-1])) + edges = np.concatenate((sp.x - sp.xerr, [sp.x[-1] + sp.xerr[-1]])) + loge = 0.5 * (np.log10(edges[1:]) + np.log10(edges[:-1])) - loge = numpy.concatenate((loge, loge + loge[-1] + numpy.diff(loge)[0])) - v = numpy.concatenate((sp.y, sp.y[-5:].mean() * numpy.ones(sp.y.size))) - w = 1 / numpy.concatenate((sp.yerr, 1e-2 * numpy.ones(sp.yerr.size))) - w[~numpy.isfinite(w)] = 1 + loge = np.concatenate((loge, loge + loge[-1] + np.diff(loge)[0])) + v = np.concatenate((sp.y, sp.y[-5:].mean() * np.ones(sp.y.size))) + w = 1 / np.concatenate((sp.yerr, 1e-2 * np.ones(sp.yerr.size))) + w[~np.isfinite(w)] = 1 self.interp = interpolate.UnivariateSpline(loge, v, w=w) if energy_threshold is None: @@ -90,9 +89,9 @@ def __init__( self.energy_threshold = energy_threshold def __call__(self, muon_energy, cos_theta): - return numpy.where( + return np.where( muon_energy >= self.energy_threshold, - numpy.clip(self.interp(numpy.log10(muon_energy)), 0, 1), + np.clip(self.interp(np.log10(muon_energy)), 0, 1), 0.0, ) @@ -117,16 +116,16 @@ def _eval(self, loge, cos_theta): return self._spline.eval([loge, cos_theta]) def __call__(self, muon_energy, cos_theta): - loge, cos_theta = numpy.broadcast_arrays(numpy.log10(muon_energy), cos_theta) + loge, cos_theta = np.broadcast_arrays(np.log10(muon_energy), cos_theta) if hasattr(self._scale, "__call__"): scale = self._scale(10**loge) else: scale = self._scale - return numpy.where( + return np.where( muon_energy >= self.energy_threshold, - numpy.clip( + np.clip( scale - * self._spline.evaluate_simple([numpy.log10(muon_energy), cos_theta]), + * self._spline.evaluate_simple([np.log10(muon_energy), cos_theta]), 0, 1, ), @@ -180,7 +179,7 @@ def __call__(self, deposited_energy, cos_theta): return ( 0.75 * self._efficiency - / (1 + numpy.exp(-2.5 * numpy.log(deposited_energy / self._threshold))) + / (1 + np.exp(-2.5 * np.log(deposited_energy / self._threshold))) ) @@ -225,12 +224,12 @@ class StepFunction(VetoThreshold): """ def __init__(self, threshold=0, maximum_inclination=60): - self.max_inclination = numpy.cos(numpy.radians(maximum_inclination)) + self.max_inclination = np.cos(np.radians(maximum_inclination)) self.threshold = threshold def accept(self, e_mu, cos_theta=1.0): - return numpy.where( + return np.where( cos_theta > 0.05, (e_mu > self.threshold) & (cos_theta >= self.max_inclination), True, @@ -240,7 +239,7 @@ def veto(self, e_mu, cos_theta=1.0): """ Return True if an atmospheric event would be rejected by the veto """ - return numpy.where( + return np.where( cos_theta > 0.05, (e_mu > self.threshold) & (cos_theta >= self.max_inclination), False, @@ -280,28 +279,28 @@ def _interpolate_production_efficiency( with tables.open_file(os.path.join(data_dir, "cross_sections", fname)) as hdf: for family, anti in itertools.product(flavors, ("", "_bar")): h = dashi.histload(hdf, "/nu" + family + anti) - edges = [numpy.log10(h.binedges[0]), h.binedges[1]] + list( - map(numpy.log10, h.binedges[2:]) + edges = [np.log10(h.binedges[0]), h.binedges[1]] + list( + map(np.log10, h.binedges[2:]) ) centers = list(map(center, edges)) newcenters = [ 
centers[0], - numpy.clip(cos_zenith, centers[1].min(), centers[1].max()), + np.clip(cos_zenith, centers[1].min(), centers[1].max()), ] + centers[2:] - with numpy.errstate(divide="ignore"): - y = numpy.where( - ~(h.bincontent <= 0), numpy.log10(h.bincontent), -numpy.inf + with np.errstate(divide="ignore"): + y = np.where( + ~(h.bincontent <= 0), np.log10(h.bincontent), -np.inf ) - assert not numpy.isnan(y).any() + assert not np.isnan(y).any() interpolant = interpolate.RegularGridInterpolator( - centers, y, bounds_error=True, fill_value=-numpy.inf + centers, y, bounds_error=True, fill_value=-np.inf ) - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*newcenters, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")] ).T - assert numpy.isfinite(xi).all() + assert np.isfinite(xi).all() # NB: we use nearest-neighbor interpolation here because # n-dimensional linear interpolation has the unfortunate side-effect @@ -311,15 +310,15 @@ def _interpolate_production_efficiency( # underestimates the muon flux from steeply falling neutrino spectra. v = interpolant(xi, "nearest").reshape([x.size for x in newcenters]) - v[~numpy.isfinite(v)] = -numpy.inf + v[~np.isfinite(v)] = -np.inf - assert not numpy.isnan(v).any() + assert not np.isnan(v).any() efficiencies.append(10**v) return (h.binedges[0], None,) + tuple( h.binedges[2:] - ), numpy.array(efficiencies) + ), np.array(efficiencies) def _ring_range(nside): @@ -330,8 +329,8 @@ def _ring_range(nside): # get cos(colatitude) at the center of each ring, and invert to get # cos(zenith). This assumes that the underlying map is in equatorial # coordinates. - centers = -healpy.ringinfo(nside, numpy.arange(1, 4 * nside))[2] - return numpy.concatenate(([-1], 0.5 * (centers[1:] + centers[:-1]), [1])) + centers = -healpy.ringinfo(nside, np.arange(1, 4 * nside))[2] + return np.concatenate(([-1], 0.5 * (centers[1:] + centers[:-1]), [1])) def get_muon_production_efficiency(ct_edges=None): @@ -347,7 +346,7 @@ def get_muon_production_efficiency(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -369,7 +368,7 @@ def get_starting_event_efficiency(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -393,7 +392,7 @@ def get_cascade_production_density(ct_edges=None): with the same axes. """ if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -426,7 +425,7 @@ def get_doublebang_production_density(ct_edges=None): with the same axes. 
""" if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) @@ -481,7 +480,7 @@ def restrict_energy_range(self, emin, emax): # find bins with lower edge >= emin and upper edge <= emax mask = (self.bin_edges[0][1:] <= emax) & (self.bin_edges[0][:-1] >= emin) - idx = numpy.arange(mask.size)[mask][[0, -1]] + idx = np.arange(mask.size)[mask][[0, -1]] reduced = copy.copy(self) reduced.bin_edges = list(reduced.bin_edges) @@ -496,7 +495,7 @@ def truncate_energy_range(self, emin, emax): # find bins with lower edge >= emin and upper edge <= emax mask = (self.bin_edges[0][1:] <= emax) & (self.bin_edges[0][:-1] >= emin) - idx = numpy.arange(mask.size)[mask][[0, -1]] + idx = np.arange(mask.size)[mask][[0, -1]] reduced = copy.copy(self) @@ -526,18 +525,18 @@ def nring(self): @property def ring_repeat_pattern(self): assert self.is_healpix - return healpy.ringinfo(self.nside, numpy.arange(self.nring) + 1)[1] + return healpy.ringinfo(self.nside, np.arange(self.nring) + 1)[1] def eval_psf(point_spread_function, mu_energy, ct, psi_bins): - ct, mu_energy, psi_bins = numpy.meshgrid(ct, mu_energy, psi_bins, indexing="ij") + ct, mu_energy, psi_bins = np.meshgrid(ct, mu_energy, psi_bins, indexing="ij") return point_spread_function(psi_bins, mu_energy, ct) def create_bundle_aeff( energy_resolution=defer(get_energy_resolution, "IceCube"), - veto_efficiency: VetoThreshold = StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + veto_efficiency: VetoThreshold = StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(MuonSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), cos_theta=None, @@ -584,21 +583,21 @@ def create_bundle_aeff( # Step 2: Geometric muon effective area (no selection effects yet) # NB: assumes cylindrical symmetry. 
- aeff = numpy.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[None, :] + aeff = np.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[None, :] # Step 3: apply selection efficiency - # selection_efficiency = selection_efficiency(*numpy.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T + # selection_efficiency = selection_efficiency(*np.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") ) aeff = aeff * selection_efficiency # Step 4: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_mu, e_mu) - aeff = numpy.apply_along_axis( - numpy.inner, + aeff = np.apply_along_axis( + np.inner, 2, - aeff[..., None] * numpy.eye(response.shape[0])[:, None, :], + aeff[..., None] * np.eye(response.shape[0])[:, None, :], response, ) @@ -609,7 +608,7 @@ def create_bundle_aeff( # Step 5.2: apply suppression from surface veto veto_suppression = 1 - veto_efficiency.accept( - *numpy.meshgrid(center(e_mu), center(cos_theta), indexing="ij") + *np.meshgrid(center(e_mu), center(cos_theta), indexing="ij") ) # combine into an energy- and zenith-dependent acceptance for muon bundles @@ -630,11 +629,11 @@ def create_bundle_aeff( def create_throughgoing_aeff( energy_resolution=defer(get_energy_resolution, "IceCube"), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(MuonSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(2) ** 2, 100)), + psi_bins=np.sqrt(np.linspace(0, np.radians(2) ** 2, 100)), cos_theta=None, ): """ @@ -686,37 +685,37 @@ def create_throughgoing_aeff( # Step 2: Geometric muon effective area (no selection effects yet) # NB: assumes cylindrical symmetry. 
aeff = efficiency * ( - numpy.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[ + np.vectorize(surface.average_area)(cos_theta[:-1], cos_theta[1:])[ None, None, :, None ] ) # Step 3: apply selection efficiency - # selection_efficiency = selection_efficiency(*numpy.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T + # selection_efficiency = selection_efficiency(*np.meshgrid(center(e_mu), center(cos_theta), indexing='ij')).T selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_mu[1:], center(cos_theta), indexing="ij") ).T # Explicit energy threshold disabled for now; let muon background take over # at whatever energy it drowns out the signal - # selection_efficiency *= energy_threshold.accept(*numpy.meshgrid(e_mu[1:], center(cos_theta), indexing='ij')).T + # selection_efficiency *= energy_threshold.accept(*np.meshgrid(e_mu[1:], center(cos_theta), indexing='ij')).T aeff *= selection_efficiency[None, None, :, :] # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_mu), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.zeros((6,) + aeff.shape[1:] + (psi_bins.size - 1,)) + total_aeff = np.zeros((6,) + aeff.shape[1:] + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[2:4, ..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[2:4, ..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[2:4, ..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_mu, e_mu) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) # Step 6: split the effective area in into a portion shadowed by the # surface veto (if it exists) and one that is not @@ -736,12 +735,12 @@ def create_throughgoing_aeff( def create_cascade_aeff( channel="cascade", energy_resolution=defer(get_energy_resolution, channel="cascade"), - energy_threshold=StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + energy_threshold=StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(HECascadeSelectionEfficiency), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube", channel="cascade"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), cos_theta=None, ): """ @@ -776,25 +775,25 @@ def create_cascade_aeff( # Step 3: apply selection efficiency selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") ).T aeff *= selection_efficiency[None, None, ...] 
# Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -805,15 +804,15 @@ def create_cascade_aeff( def create_starting_aeff( energy_resolution=defer(get_energy_resolution, channel="cascade"), - energy_threshold=StepFunction(numpy.inf), - veto_coverage=lambda ct: numpy.zeros(len(ct) - 1), + energy_threshold=StepFunction(np.inf), + veto_coverage=lambda ct: np.zeros(len(ct) - 1), selection_efficiency=defer(HECascadeSelectionEfficiency), classification_efficiency=defer(get_classification_efficiency, "IceCube"), surface=defer(get_fiducial_surface, "IceCube"), psf=defer(get_angular_resolution, "IceCube", channel="cascade"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), - neutrino_energy=numpy.logspace(4, 12, 81), - cos_theta=numpy.linspace(-1, 1, 21), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), + neutrino_energy=np.logspace(4, 12, 81), + cos_theta=np.linspace(-1, 1, 21), ): """ Create an effective area for neutrinos interacting inside the volume @@ -845,7 +844,7 @@ def create_starting_aeff( # Step 3: apply overall selection efficiency selection_efficiency = selection_efficiency( - *numpy.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") + *np.meshgrid(e_shower[1:], center(cos_theta), indexing="ij") ).T aeff *= selection_efficiency[None, None, ...] # Step 3.5: calculate channel selection efficiency, padding the shape for @@ -853,7 +852,7 @@ def create_starting_aeff( weights = {} e_shower_center = np.exp(center(np.log(e_shower))) for event_class in classification_efficiency.classes: - weights[event_class] = numpy.array( + weights[event_class] = np.array( [ classification_efficiency(nutype, event_class, e_shower_center)[ None, None, :, None @@ -864,19 +863,19 @@ def create_starting_aeff( # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] 
# put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -904,12 +903,12 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): from scipy import interpolate if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) # interpolate to a grid compatible with the IceCube/Gen2 effective areas - loge_edges = numpy.linspace(2, 12, 101) + loge_edges = np.linspace(2, 12, 101) fpath = os.path.join(data_dir, "aeff", "cosZenDepAeff_z{}.half.txt".format(depth)) @@ -936,23 +935,23 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): paeff.reverse() aeff.append(paeff) - aeff = numpy.asarray(aeff) * nstations + aeff = np.asarray(aeff) * nstations # convert energy from exponent to GeV - # energy = 10**edge(numpy.asarray(energy))*1e-9 + # energy = 10**edge(np.asarray(energy))*1e-9 - centers = (numpy.asarray(energy) - 9, numpy.asarray(cos_theta)) + centers = (np.asarray(energy) - 9, np.asarray(cos_theta)) - edges = numpy.array([energy, cos_theta]) + edges = np.array([energy, cos_theta]) # centers = map(center, edges) newcenters = [ center(loge_edges), - numpy.clip(center(ct_edges), centers[1].min(), centers[1].max()), + np.clip(center(ct_edges), centers[1].min(), centers[1].max()), ] - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*newcenters, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*newcenters, indexing="ij")] ).T - assert numpy.isfinite(xi).all() + assert np.isfinite(xi).all() interpolant = interpolate.RegularGridInterpolator( centers, aeff, bounds_error=False, fill_value=0 @@ -966,7 +965,7 @@ def _interpolate_ara_aeff(ct_edges=None, depth=200, nstations=37): v = interpolant(xi, method="nearest").reshape([x.size for x in newcenters]) # assume flavor-independence for ARA by extending same aeff across all flavors - return (10**loge_edges, ct_edges), numpy.repeat(v[None, ...], 6, axis=0) + return (10**loge_edges, ct_edges), np.repeat(v[None, ...], 6, axis=0) def create_ara_aeff( @@ -999,15 +998,15 @@ def create_ara_aeff( # Step 2: for now, assume no energy resolution # Note that it doesn't matter which energy distribution we use, just as # long as it's identical for all neutrino energy bins - # e_reco = numpy.copy(e_nu) - # aeff = numpy.repeat(aeff[...,None], aeff.shape[1], axis=-1) + # e_reco = np.copy(e_nu) + # aeff = np.repeat(aeff[...,None], aeff.shape[1], axis=-1) # aeff /= aeff.shape[1] - e_reco = numpy.array([e_nu[0], e_nu[-1]]) + e_reco = np.array([e_nu[0], e_nu[-1]]) aeff = aeff[..., None] # Step 3: dummy angular resolution smearing - psi_bins = numpy.asarray([0, numpy.inf]) - total_aeff = numpy.zeros(aeff.shape + (psi_bins.size - 1,)) + psi_bins = np.asarray([0, np.inf]) + total_aeff = np.zeros(aeff.shape + (psi_bins.size - 1,)) # put everything in first psi_bin for no angular resolution total_aeff[..., 0] = aeff[...] 
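[Editor's note, between hunks: the "Step 5: apply smearing for energy resolution" hunks in this file all use the same idiom — contract the true-energy axis of an effective-area array with a response matrix via np.apply_along_axis(np.inner, ...). Below is a minimal, self-contained sketch of that pattern with toy shapes and random values; the names n_true, n_reco and the axis convention R[reco, true] are illustrative assumptions, nothing here is taken from the toise repo.]

import numpy as np

rng = np.random.default_rng(0)
n_true = n_reco = 4
# toy effective area with axes (flavor, cos_theta, true energy)
aeff = rng.random((2, 3, n_true))
# toy response matrix R[reco, true] = P(reco bin | true bin);
# normalizing the columns makes smearing conserve the total effective area
response = rng.random((n_reco, n_true))
response /= response.sum(axis=0, keepdims=True)

# the idiom used in this diff: np.inner applied along the energy axis,
# i.e. smeared[f, c, r] = sum_t aeff[f, c, t] * R[r, t]
smeared = np.apply_along_axis(np.inner, 2, aeff, response)
# equivalent einsum formulation, often easier to read
assert np.allclose(smeared, np.einsum("fct,rt->fcr", aeff, response))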
@@ -1093,7 +1092,7 @@ def create_radio_aeff( nstations=305, energy_resolution=get_energy_resolution(channel="radio"), psf=get_angular_resolution(channel="radio"), - psi_bins=numpy.sqrt(numpy.linspace(0, numpy.radians(20) ** 2, 10)), + psi_bins=np.sqrt(np.linspace(0, np.radians(20) ** 2, 10)), veff_filename=dict( e="nu_e_Gen2_100m_1.5sigma.json", mu="nu_mu_Gen2_100m_1.5sigma.json" ), @@ -1127,19 +1126,19 @@ def create_radio_aeff( # Step 4: apply smearing for angular resolution # Add an overflow bin if none present - if numpy.isfinite(psi_bins[-1]): - psi_bins = numpy.concatenate((psi_bins, [numpy.inf])) + if np.isfinite(psi_bins[-1]): + psi_bins = np.concatenate((psi_bins, [np.inf])) cdf = eval_psf(psf, center(e_shower), center(cos_theta), psi_bins[:-1]) - total_aeff = numpy.empty(aeff.shape + (psi_bins.size - 1,)) + total_aeff = np.empty(aeff.shape + (psi_bins.size - 1,)) # expand differential contributions along the opening-angle axis - total_aeff[..., :-1] = aeff[..., None] * numpy.diff(cdf, axis=2)[None, ...] + total_aeff[..., :-1] = aeff[..., None] * np.diff(cdf, axis=2)[None, ...] # put the remainder in the overflow bin total_aeff[..., -1] = aeff * (1 - cdf[..., -1])[None, None, ...] # Step 5: apply smearing for energy resolution response = energy_resolution.get_response_matrix(e_shower, e_shower) - total_aeff = numpy.apply_along_axis(numpy.inner, 3, total_aeff, response) + total_aeff = np.apply_along_axis(np.inner, 3, total_aeff, response) edges = (e_nu, cos_theta, e_shower, psi_bins) @@ -1164,13 +1163,13 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): from scipy import interpolate if ct_edges is None: - ct_edges = numpy.linspace(-1, 1, 11) + ct_edges = np.linspace(-1, 1, 11) elif isinstance(ct_edges, int): nside = ct_edges ct_edges = _ring_range(nside) # interpolate to a grid compatible with the IceCube/Gen2 effective areas - loge_edges = numpy.linspace(2, 12, 101) + loge_edges = np.linspace(2, 12, 101) flavors = ["NuE", "NuMu", "NuTau"] filenames = [ @@ -1180,7 +1179,7 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): aeffs = [] for filename in filenames: - data = numpy.load(filename) + data = np.load(filename) cos_theta_bins = data["cos_theta_bins"] energy_bins = data["energy_bins"] energy_bins_at_det = data["energy_bins_at_det"] @@ -1192,18 +1191,18 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): new_centers = [ center(loge_edges), - numpy.clip(center(ct_edges), cos_theta_center.min(), cos_theta_center.max()), + np.clip(center(ct_edges), cos_theta_center.min(), cos_theta_center.max()), center(loge_edges), ] - xi = numpy.vstack( - [x.flatten() for x in numpy.meshgrid(*new_centers, indexing="ij")] + xi = np.vstack( + [x.flatten() for x in np.meshgrid(*new_centers, indexing="ij")] ) vs = [] for aeff in aeffs: interpolant = interpolate.RegularGridInterpolator( - (numpy.log10(e_center), cos_theta_center, numpy.log10(e_center_at_det)), + (np.log10(e_center), cos_theta_center, np.log10(e_center_at_det)), np.moveaxis(aeff, -1, 0), bounds_error=False, fill_value=0, @@ -1214,7 +1213,7 @@ def _interpolate_gen2_ehe_aeff(ct_edges=None): # Assume Nu/NuBar symmetry for effective areas return ( (10**loge_edges, ct_edges, 10**loge_edges), - numpy.concatenate([numpy.repeat(v[None, ...], 2, axis=0) for v in vs]), + np.concatenate([np.repeat(v[None, ...], 2, axis=0) for v in vs]), ) @@ -1244,8 +1243,8 @@ def create_gen2_ehe_aeff( aeff = aeff.sum(axis=-1, keepdims=True) # Step 3: dummy angular resolution smearing - psi_bins = numpy.array([0, numpy.inf]) - total_aeff = numpy.zeros(aeff.shape + 
(psi_bins.size - 1,)) + psi_bins = np.array([0, np.inf]) + total_aeff = np.zeros(aeff.shape + (psi_bins.size - 1,)) # put everything in first psi_bin for no angular resolution total_aeff[..., 0] = aeff[...] diff --git a/toise/energy_resolution.py b/toise/energy_resolution.py index d8d5d72..edddc91 100644 --- a/toise/energy_resolution.py +++ b/toise/energy_resolution.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from scipy import interpolate from scipy.special import erf @@ -12,7 +12,7 @@ def get_energy_resolution(geometry="Sunflower", spacing=200, channel="muon"): return FictiveCascadeEnergyResolution() elif channel == "radio": return FictiveCascadeEnergyResolution( - lower_limit=numpy.log10(1 + 0.2), crossover_energy=1e8 + lower_limit=np.log10(1 + 0.2), crossover_energy=1e8 ) if geometry == "Fictive": return FictiveMuonEnergyResolution() @@ -32,7 +32,7 @@ def __init__( self, bias=None, sigma=None, - loge_range=(-numpy.inf, numpy.inf), + loge_range=(-np.inf, np.inf), overdispersion=1.0, ): """ @@ -60,19 +60,19 @@ def get_response_matrix(self, true_energy, reco_energy): :param true_energy: edges of true muon energy bins :param reco_energy: edges of reconstructed muon energy bins """ - loge_true = numpy.log10(true_energy) - loge_center = numpy.clip( + loge_true = np.log10(true_energy) + loge_center = np.clip( 0.5 * (loge_true[:-1] + loge_true[1:]), *self._loge_range ) - loge_width = numpy.diff(loge_true) - loge_lo = numpy.log10(reco_energy[:-1]) - loge_hi = numpy.log10(reco_energy[1:]) + loge_width = np.diff(loge_true) + loge_lo = np.log10(reco_energy[:-1]) + loge_hi = np.log10(reco_energy[1:]) # evaluate at the right edge for maximum smearing on a falling spectrum loge_center = loge_true[1:] - mu, hi = numpy.meshgrid( + mu, hi = np.meshgrid( self.bias(loge_center) + loge_width, loge_hi, indexing="ij" ) - sigma, lo = numpy.meshgrid(self.sigma(loge_center), loge_lo, indexing="ij") + sigma, lo = np.meshgrid(self.sigma(loge_center), loge_lo, indexing="ij") return ((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0).T @@ -89,7 +89,7 @@ def __init__(self, fname="aachen_muon_energy_profile.npz", overdispersion=1.0): """ if not fname.startswith("/"): fname = os.path.join(data_dir, "energy_reconstruction", fname) - f = numpy.load(fname) + f = np.load(fname) loge_range = (f["loge"].min(), f["loge"].max()) bias = interpolate.UnivariateSpline(f["loge"], f["mean"], s=f["smoothing"]) sigma = interpolate.UnivariateSpline( @@ -102,20 +102,20 @@ def __init__(self, fname="aachen_muon_energy_profile.npz", overdispersion=1.0): class FictiveMuonEnergyResolution(EnergySmearingMatrix): def bias(self, loge): - return numpy.log10(10 ** (loge / 1.13) + 500) + return np.log10(10 ** (loge / 1.13) + 500) def sigma(self, loge): - return 0.22 + 0.23 * (1 - numpy.exp(-(10**loge) / 5e6)) + return 0.22 + 0.23 * (1 - np.exp(-(10**loge) / 5e6)) class FictiveCascadeEnergyResolution(EnergySmearingMatrix): - def __init__(self, lower_limit=numpy.log10(1.1), crossover_energy=1e6): + def __init__(self, lower_limit=np.log10(1.1), crossover_energy=1e6): super(FictiveCascadeEnergyResolution, self).__init__() self._b = lower_limit - self._a = self._b * numpy.sqrt(crossover_energy) + self._a = self._b * np.sqrt(crossover_energy) def bias(self, loge): return loge def sigma(self, loge): - return self._b + self._a / numpy.sqrt(10**loge) + return self._b + self._a / np.sqrt(10**loge) diff --git a/toise/externals/AtmosphericSelfVeto/__init__.py b/toise/externals/AtmosphericSelfVeto/__init__.py index c1ac92a..c27f4e9 
100644 --- a/toise/externals/AtmosphericSelfVeto/__init__.py +++ b/toise/externals/AtmosphericSelfVeto/__init__.py @@ -1,4 +1,4 @@ -import numpy +import numpy as np from ...util import PDGCode from . import selfveto @@ -34,10 +34,10 @@ def __init__(self, kind="conventional", veto_threshold=1e3, floor=1e-4): nue = numu self._eval = dict() - self._eval[PDGCode.NuMu] = numpy.vectorize( + self._eval[PDGCode.NuMu] = np.vectorize( lambda enu, ct, depth: numu.evaluate_simple([enu, ct, depth]) ) - self._eval[PDGCode.NuE] = numpy.vectorize( + self._eval[PDGCode.NuE] = np.vectorize( lambda enu, ct, depth: nue.evaluate_simple([enu, ct, depth]) ) @@ -46,20 +46,20 @@ def pad_knots(knots, order=2): """ Pad knots out for full support at the boundaries """ - pre = knots[0] - (knots[1] - knots[0]) * numpy.arange(order, 0, -1) - post = knots[-1] + (knots[-1] - knots[-2]) * numpy.arange(1, order + 1) - return numpy.concatenate((pre, knots, post)) + pre = knots[0] - (knots[1] - knots[0]) * np.arange(order, 0, -1) + post = knots[-1] + (knots[-1] - knots[-2]) * np.arange(1, order + 1) + return np.concatenate((pre, knots, post)) def edges(centers): - dx = numpy.diff(centers)[0] / 2.0 - return numpy.concatenate((centers - dx, [centers[-1] + dx])) + dx = np.diff(centers)[0] / 2.0 + return np.concatenate((centers - dx, [centers[-1] + dx])) - log_enu, ct = numpy.linspace(1, 9, 51), numpy.linspace(self.ct_min, 1, 21) - depth = numpy.linspace(1e3, 3e3, 11) + log_enu, ct = np.linspace(1, 9, 51), np.linspace(self.ct_min, 1, 21) + depth = np.linspace(1e3, 3e3, 11) depth_g = depth[None, None, :] - log_enu_g, ct_g = list(map(numpy.transpose, numpy.meshgrid(log_enu, ct))) + log_enu_g, ct_g = list(map(np.transpose, np.meshgrid(log_enu, ct))) - pr = numpy.zeros(ct_g.shape + (depth.size,)) + pr = np.zeros(ct_g.shape + (depth.size,)) for i, d in enumerate(depth): slant = selfveto.overburden(ct_g, d) emu = selfveto.minimum_muon_energy(slant, veto_threshold) @@ -79,7 +79,7 @@ def _create_spline(self, kind, veto_threshold): pr, centers, knots, order, penalty, penorder = self._eval_grid( kind, veto_threshold ) - z, w = photospline.ndsparse.from_data(pr, numpy.ones(pr.shape)) + z, w = photospline.ndsparse.from_data(pr, np.ones(pr.shape)) spline = photospline.glam_fit(z, w, centers, knots, order, penalty, penorder) return spline @@ -107,32 +107,32 @@ def __call__(self, particleType, enu, ct, depth, spline=True): directly. Otherwise, use the much faster B-spline representation. 
""" - if numpy.isscalar(ct) and not ct > self.ct_min: - return numpy.array(1.0) + if np.isscalar(ct) and not ct > self.ct_min: + return np.array(1.0) emu = selfveto.minimum_muon_energy( selfveto.overburden(ct, depth), self.veto_threshold ) # Verify that we're using a sane encoding scheme assert abs(PDGCode.NuMuBar) == PDGCode.NuMu - particleType = abs(numpy.asarray(particleType)) + particleType = abs(np.asarray(particleType)) if spline: - pr = numpy.where( + pr = np.where( particleType == PDGCode.NuMu, - self._eval[PDGCode.NuMu](numpy.log10(enu), ct, depth), - numpy.where( + self._eval[PDGCode.NuMu](np.log10(enu), ct, depth), + np.where( particleType == PDGCode.NuE, - self._eval[PDGCode.NuE](numpy.log10(enu), ct, depth), + self._eval[PDGCode.NuE](np.log10(enu), ct, depth), 1, ), ) else: - enu, ct, depth = numpy.broadcast_arrays(enu, ct, depth) + enu, ct, depth = np.broadcast_arrays(enu, ct, depth) if self.kind == "conventional": - pr = numpy.where( + pr = np.where( particleType == PDGCode.NuMu, selfveto.uncorrelated_passing_rate(enu, emu, ct, kind="numu"), - numpy.where( + np.where( particleType == PDGCode.NuE, selfveto.uncorrelated_passing_rate(enu, emu, ct, kind="nue"), 1, @@ -148,10 +148,10 @@ def __call__(self, particleType, enu, ct, depth, spline=True): # decays of pions and kaons, but is at least a conservative estimate # for 3-body decays of D mesons. direct = selfveto.correlated_passing_rate(enu, emu, ct) - pr *= numpy.where(particleType == PDGCode.NuMu, direct, 1) + pr *= np.where(particleType == PDGCode.NuMu, direct, 1) - return numpy.where( + return np.where( ct > self.ct_min, - numpy.where(pr <= 1, numpy.where(pr >= self.floor, pr, self.floor), 1), + np.where(pr <= 1, np.where(pr >= self.floor, pr, self.floor), 1), 1, ) diff --git a/toise/externals/AtmosphericSelfVeto/selfveto.py b/toise/externals/AtmosphericSelfVeto/selfveto.py index 01b7439..9410107 100755 --- a/toise/externals/AtmosphericSelfVeto/selfveto.py +++ b/toise/externals/AtmosphericSelfVeto/selfveto.py @@ -32,7 +32,7 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import numpy +import numpy as np def overburden(cos_theta, depth=1950, elevation=2400): @@ -49,7 +49,7 @@ def overburden(cos_theta, depth=1950, elevation=2400): r = 6371315 + elevation # this is secrety a translation in polar coordinates return ( - numpy.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) + np.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2) - (r - depth) * cos_theta ) @@ -66,7 +66,7 @@ def polynomial(x, coefficients): return sum(c * x**i for i, c in enumerate(coefficients)) coeffs = [[2.793, -0.476, 0.187], [2.069, -0.201, 0.023], [-2.689, 3.882]] - a, b, c = (polynomial(numpy.log10(emin), c) for c in coeffs) + a, b, c = (polynomial(np.log10(emin), c) for c in coeffs) return 10 ** polynomial(distance, (a, b / 1e4, c / 1e10)) @@ -78,7 +78,7 @@ def effective_costheta(costheta): """ x = costheta p = [0.102573, -0.068287, 0.958633, 0.0407253, 0.817285] - return numpy.sqrt( + return np.sqrt( (x**2 + p[0] ** 2 + p[1] * x ** p[2] + p[3] * x ** p[4]) / (1 + p[0] ** 2 + p[1] + p[3]) ) @@ -93,10 +93,10 @@ def __init__(self, **kwargs): self.new_kwargs = kwargs def __enter__(self): - self.old_kwargs = numpy.seterr(**self.new_kwargs) + self.old_kwargs = np.seterr(**self.new_kwargs) def __exit__(self, *args): - numpy.seterr(**self.old_kwargs) + np.seterr(**self.old_kwargs) elbert_params = { @@ -155,13 +155,13 @@ def elbert_yield( decay_prob = 1.0 / (En * effective_costheta(cos_theta)) with fpe_context(all="ignore"): - icdf = numpy.where( + icdf = np.where( x >= 1, 0.0, a * primary_mass * decay_prob * x ** (-p1) * (1 - x**p3) ** p2, ) if differential: - icdf *= (1.0 / En) * numpy.where( + icdf *= (1.0 / En) * np.where( x >= 1, 0.0, (p1 / x + p2 * p3 * x ** (p3 - 1) / (1 - x**p3)) ) @@ -209,21 +209,21 @@ def gaisser_flux(energy, ptype): rigidity = [4e6, 30e6, 2e9] return sum( - n[idx] * energy ** (-g[idx]) * numpy.exp(-energy / (r * z)) + n[idx] * energy ** (-g[idx]) * np.exp(-energy / (r * z)) for n, g, r in zip(norm, gamma, rigidity) ) def logspace(start, stop, num): """ - A version of numpy.logspace that takes array arguments + A version of np.logspace that takes array arguments """ # Add a trailing dimension to the inputs - start = numpy.asarray(start)[..., None] - stop = numpy.asarray(stop)[..., None] + start = np.asarray(start)[..., None] + stop = np.asarray(stop)[..., None] num = int(num) step = (stop - start) / float(num - 1) - y = (numpy.core.numeric.arange(0, num) * step + start).squeeze() + y = (np.core.numeric.arange(0, num) * step + start).squeeze() return 10**y @@ -243,14 +243,14 @@ def response_function(enu, emu, cos_theta, kind="numu"): :returns: a tuple (response, muonyield, energy_per_nucleon) """ # make everything an array - enu, emu, cos_theta = list(map(numpy.asarray, (enu, emu, cos_theta))) - shape = numpy.broadcast(enu, emu, cos_theta).shape + enu, emu, cos_theta = list(map(np.asarray, (enu, emu, cos_theta))) + shape = np.broadcast(enu, emu, cos_theta).shape # contributions to the differential neutrino flux from chunks of the # primary spectrum for each element - contrib = numpy.zeros(shape + (5, 100)) + contrib = np.zeros(shape + (5, 100)) # mean integral muon yield from same chunks - muyield = numpy.zeros(shape + (5, 100)) - energy_per_nucleon = logspace(numpy.log10(enu), numpy.log10(enu) + 3, 101) + muyield = np.zeros(shape + (5, 100)) + energy_per_nucleon = logspace(np.log10(enu), np.log10(enu) + 3, 101) ptypes = [ getattr(ParticleType, pt) for pt in ("PPlus", "He4Nucleus", "N14Nucleus", "Al27Nucleus", "Fe56Nucleus") @@ -260,7 +260,7 
@@ def response_function(enu, emu, cos_theta, kind="numu"): # primary energies that contribute to the neutrino flux at given energy penergy = a * energy_per_nucleon # width of energy bins - de = numpy.diff(penergy) + de = np.diff(penergy) # center of energy bins pe = penergy[..., :-1] + de / 2.0 # hobo-integrate the flux @@ -277,17 +277,17 @@ def response_function(enu, emu, cos_theta, kind="numu"): return ( contrib, muyield, - energy_per_nucleon[..., :-1] + numpy.diff(energy_per_nucleon) / 2.0, + energy_per_nucleon[..., :-1] + np.diff(energy_per_nucleon) / 2.0, ) def get_bin_edges(energy_grid): - half_width = 10 ** (numpy.diff(numpy.log10(energy_grid))[0] / 2) - return numpy.concatenate((energy_grid / half_width, [energy_grid[-1] * half_width])) + half_width = 10 ** (np.diff(np.log10(energy_grid))[0] / 2) + return np.concatenate((energy_grid / half_width, [energy_grid[-1] * half_width])) def get_bin_width(energy_grid): - return numpy.diff(get_bin_edges(energy_grid)) + return np.diff(get_bin_edges(energy_grid)) def extract_yields(yield_record, types): @@ -295,16 +295,16 @@ def extract_yields(yield_record, types): def interpolate_integral_yield(log_N, log_e, log_emin): - interp = numpy.interp(log_emin, log_e, log_N) - if numpy.isfinite(interp): - return numpy.exp(interp) + interp = np.interp(log_emin, log_e, log_N) + if np.isfinite(interp): + return np.exp(interp) else: return 0.0 def integral_yield(e_min, e_grid, diff_yield): edges = get_bin_edges(e_grid) - de = numpy.diff(edges) + de = np.diff(edges) intyield = de * diff_yield axis = 1 index = [slice(None)] * diff_yield.ndim @@ -313,12 +313,12 @@ def integral_yield(e_min, e_grid, diff_yield): # cumulative sum from the right N = intyield[index].cumsum(axis=axis)[index] - return numpy.apply_along_axis( + return np.apply_along_axis( interpolate_integral_yield, 1, - numpy.log(N), - numpy.log(edges[1:]), - numpy.log(e_min), + np.log(N), + np.log(edges[1:]), + np.log(e_min), ) @@ -333,11 +333,11 @@ def mceq_response_function(fname, emu, nu_types, prompt_muons=False): :returns: a tuple (response, muonyield, energy_per_nucleon) """ - bundle = numpy.load(fname) + bundle = np.load(fname) e_grid = bundle["e_grid"] - contrib = numpy.empty((e_grid.size, 5, e_grid.size)) - muyield = numpy.empty((e_grid.size, 5, e_grid.size)) + contrib = np.empty((e_grid.size, 5, e_grid.size)) + muyield = np.empty((e_grid.size, 5, e_grid.size)) ptypes = [ getattr(ParticleType, pt) for pt in ("PPlus", "He4Nucleus", "N14Nucleus", "Al27Nucleus", "Fe56Nucleus") @@ -380,7 +380,7 @@ def uncorrelated_passing_rate(enu, emu, cos_theta, kind="numu"): contrib /= contrib.sum(axis=(-1, -2), keepdims=True) # weight contributions by probability of having zero muons. if that # probability is always 1, then this returns 1 by construction - return (numpy.exp(-muyield) * contrib).sum(axis=(-1, -2)) + return (np.exp(-muyield) * contrib).sum(axis=(-1, -2)) def mceq_uncorrelated_passing_rate(fname, emu, nu_types, prompt_muons=False): @@ -391,7 +391,7 @@ def mceq_uncorrelated_passing_rate(fname, emu, nu_types, prompt_muons=False): contrib /= contrib.sum(axis=(-1, -2), keepdims=True) # weight contributions by probability of having zero muons. 
if that # probability is always 1, then this returns 1 by construction - return (numpy.exp(-muyield) * contrib).sum(axis=(-1, -2)) + return (np.exp(-muyield) * contrib).sum(axis=(-1, -2)) def analytic_numu_flux(enu, cos_theta, emu=None): @@ -427,16 +427,16 @@ def analytic_numu_flux(enu, cos_theta, emu=None): F = (GAMMA + 2.0) / (GAMMA + 1.0) B_PI = ( - F * (ALAM_PI - ALAM_N) / (ALAM_PI * numpy.log(ALAM_PI / ALAM_N) * (1.0 - R_PI)) + F * (ALAM_PI - ALAM_N) / (ALAM_PI * np.log(ALAM_PI / ALAM_N) * (1.0 - R_PI)) ) - B_K = F * (ALAM_K - ALAM_N) / (ALAM_K * numpy.log(ALAM_K / ALAM_N) * (1.0 - R_K)) + B_K = F * (ALAM_K - ALAM_N) / (ALAM_K * np.log(ALAM_K / ALAM_N) * (1.0 - R_K)) if emu is not None: z = 1 + emu / enu zpimin = 1.0 / (1.0 - R_PI) zkmin = 1.0 / (1.0 - R_K) - zzpi = numpy.where(z >= zpimin, z, zpimin) - zzk = numpy.where(z >= zkmin, z, zkmin) + zzpi = np.where(z >= zpimin, z, zpimin) + zzk = np.where(z >= zkmin, z, zkmin) A_PI = Z_NPI / ((1.0 - R_PI) * (GAMMA + 1) * zzpi ** (GAMMA + 1.0)) A_K = Z_NK / ((1.0 - R_K) * (GAMMA + 1) * zzk ** (GAMMA + 1.0)) @@ -484,10 +484,10 @@ def plot_passing_rate(depth): import pylab ax = pylab.gca() - enu = numpy.logspace(3, 7, 101) + enu = np.logspace(3, 7, 101) for zenith, color in zip((0, 70, 80, 85), colors): - cos_theta = numpy.cos(numpy.radians(zenith)) + cos_theta = np.cos(np.radians(zenith)) emu = minimum_muon_energy(overburden(cos_theta, depth)) correlated = correlated_passing_rate(enu, emu, cos_theta) uncorr_numu = uncorrelated_passing_rate(enu, emu, cos_theta, kind="numu") @@ -515,7 +515,7 @@ def plot_passing_rate(depth): def format_energy(fmt, energy): - places = int(numpy.log10(energy) / 3) * 3 + places = int(np.log10(energy) / 3) * 3 if places == 1: unit = "GeV" elif places == 3: @@ -547,7 +547,7 @@ def plot_response_function(enu, depth, cos_theta, kind): pylab.plot(energy_per_nucleon, response[i, :], color=color, lw=1, label=label) pylab.plot( energy_per_nucleon, - numpy.exp(-muyield[i, :]) * response[i, :], + np.exp(-muyield[i, :]) * response[i, :], color=color, ls="--", lw=1, @@ -556,7 +556,7 @@ def plot_response_function(enu, depth, cos_theta, kind): ax.plot(energy_per_nucleon, response.sum(axis=0), color="k", lw=2, label="Total") ax.plot( energy_per_nucleon, - (numpy.exp(-muyield) * response).sum(axis=-2), + (np.exp(-muyield) * response).sum(axis=-2), color="k", ls="--", lw=2, @@ -569,7 +569,7 @@ def plot_response_function(enu, depth, cos_theta, kind): ax.legend( loc="lower left", prop=dict(size="small"), title="Flux contributions", ncol=2 ) - passrate = (numpy.exp(-muyield) * response).sum(axis=(-2, -1)) / response.sum( + passrate = (np.exp(-muyield) * response).sum(axis=(-2, -1)) / response.sum( axis=(-2, -1) ) if kind == "charm": @@ -584,14 +584,14 @@ def plot_response_function(enu, depth, cos_theta, kind): % ( format_energy("%d", enu), kindlabel, - numpy.round(numpy.degrees(numpy.arccos(cos_theta))), + np.round(np.degrees(np.arccos(cos_theta))), depth, passrate * 100, ), size="medium", ) - logmax = numpy.ceil(numpy.log10(response.max())) + logmax = np.ceil(np.log10(response.max())) ax.set_ylim(10 ** (logmax - 8), 10 ** (logmax)) ax.xaxis.set_major_formatter(NullFormatter()) ax.yaxis.set_ticks(ax.yaxis.get_ticklocs()[2:-1]) @@ -600,7 +600,7 @@ def plot_response_function(enu, depth, cos_theta, kind): for i, color, label in zip(range(5), colors, elements): pylab.plot(energy_per_nucleon, muyield[i, :], color=color, lw=1, label=label) pylab.loglog() - logmax = numpy.ceil(numpy.log10(muyield.max())) + logmax = 
np.ceil(np.log10(muyield.max())) ax.set_ylim(10 ** (logmax - 4), 10 ** (logmax)) ax.xaxis.set_major_formatter(NullFormatter()) ax.set_ylabel(r"$\left 1\, \mathrm{TeV} \right>$") @@ -610,7 +610,7 @@ def plot_response_function(enu, depth, cos_theta, kind): for i, color, label in zip(range(5), colors, elements): pylab.plot( energy_per_nucleon, - numpy.exp(-muyield[i, :]), + np.exp(-muyield[i, :]), color=color, lw=1, label=label, @@ -637,7 +637,7 @@ def plot_response_function(enu, depth, cos_theta, kind): of the integrals of the solid and dashed black curves (%d%%). """ % ( format_energy("%d", enu), - numpy.round(numpy.degrees(numpy.arccos(cos_theta))), + np.round(np.degrees(np.arccos(cos_theta))), depth, passrate * 100, ) @@ -707,21 +707,21 @@ def plot_response_function(enu, depth, cos_theta, kind): plot_response_function( opts.energy, opts.depth, - numpy.cos(numpy.radians(opts.zenith)), + np.cos(np.radians(opts.zenith)), [opts.flavor, "charm"][opts.charm], ) sys.exit(0) # Calculate passing rate on a grid of energies and zenith angles - enu = numpy.logspace(3, 7, 101) - cos_theta = numpy.arange(0, 1, 0.05) + 0.05 - enu, cos_theta = numpy.meshgrid(enu, cos_theta) + enu = np.logspace(3, 7, 101) + cos_theta = np.arange(0, 1, 0.05) + 0.05 + enu, cos_theta = np.meshgrid(enu, cos_theta) emu = minimum_muon_energy(overburden(cos_theta, opts.depth)) if opts.flavor == "numu": passrate = correlated_passing_rate(enu, emu, cos_theta) else: - passrate = numpy.ones(enu.shape) + passrate = np.ones(enu.shape) if opts.charm: kind = "charm" @@ -729,17 +729,17 @@ def plot_response_function(enu, depth, cos_theta, kind): kind = opts.flavor passrate *= uncorrelated_passing_rate(enu, emu, cos_theta, kind=kind) - data = numpy.vstack( + data = np.vstack( list( map( - numpy.ndarray.flatten, + np.ndarray.flatten, (cos_theta, overburden(cos_theta, opts.depth), emu, enu, passrate), ) ) ).T fields = ["cos_theta", "overburden", "emu_min", "energy", "passrate"] - numpy.savetxt( + np.savetxt( sys.stdout, data, fmt="%12.3e", diff --git a/toise/factory.py b/toise/factory.py index 06f3f45..ce8504d 100644 --- a/toise/factory.py +++ b/toise/factory.py @@ -3,7 +3,7 @@ import pickle as pickle from functools import partial -import numpy +import numpy as np from scipy import interpolate from . 
import ( @@ -21,7 +21,7 @@ def make_key(opts, kwargs): key = dict(opts.__dict__) key.update(kwargs) for k, v in kwargs.items(): - if isinstance(v, numpy.ndarray): + if isinstance(v, np.ndarray): key[k] = (v[0], v[-1], len(v)) else: key[k] = v @@ -64,7 +64,7 @@ def create_aeff(opts, **kwargs): def selection_efficiency(emu, cos_theta): return seleff(emu, cos_theta=0) - # selection_efficiency = lambda emu, cos_theta: numpy.ones(emu.shape) + # selection_efficiency = lambda emu, cos_theta: np.ones(emu.shape) # selection_efficiency = effective_areas.get_muon_selection_efficiency("IceCube", None) else: selection_efficiency = seleff @@ -78,7 +78,7 @@ def selection_efficiency(emu, cos_theta): continue try: len(v) - kwargs[k] = numpy.asarray(v) + kwargs[k] = np.asarray(v) except TypeError: kwargs[k] = v @@ -144,9 +144,9 @@ def create_cascade_aeff(opts, **kwargs): for k in "psi_bins", "cos_theta": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) aeff = effective_areas.create_cascade_aeff( energy_resolution=effective_areas.get_energy_resolution( @@ -175,9 +175,9 @@ def create_starting_aeff(opts, **kwargs): for k in "psi_bins", "cos_theta": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) return effective_areas.create_starting_aeff( energy_resolution=effective_areas.get_energy_resolution( @@ -236,9 +236,9 @@ def _create(self, opts, **kwargs): psi_bins = kwargs.pop("psi_bins") for k in "cos_theta", "neutrino_energy": if k in kwargs: - kwargs[k] = numpy.asarray(kwargs[k]) + kwargs[k] = np.asarray(kwargs[k]) elif hasattr(opts, k): - kwargs[k] = numpy.asarray(getattr(opts, k)) + kwargs[k] = np.asarray(getattr(opts, k)) kwargs["psi_bins"] = psi_bins["radio"] if hasattr(opts, "config_file"): from . 
import radio_aeff_generation @@ -374,18 +374,18 @@ def b(x): return -0.82 + 14.54 / x def med(emu, b): - return 0.11 + b / numpy.sqrt(emu) + return 0.11 + b / np.sqrt(emu) scale = med(energy, b(scale)) / med(energy, b(1)) if ssmpe: # see: https://github.com/fbradascio/IceCube/blob/b8556b7b3d3c53a1cfab4bf53737bebff1264707/SensitivityStudy_SSMPE_vs_MPE.ipynb # https://doi.org/10.1051/epjconf/201920705002 # NB: this improvement was evaluated at 2x PDOM sensitivty - scale *= numpy.minimum( + scale *= np.minimum( 1, - numpy.polyval( + np.polyval( [0.01266943, -0.1901559, 0.80568256, 0.04948373], - numpy.log10(energy / 2), + np.log10(energy / 2), ), ) if mdom: @@ -413,7 +413,7 @@ def make_options(**kwargs): # icecube has no veto...yet if kwargs.get("geometry") == "IceCube": defaults["veto_area"] = 0.0 - defaults["veto_threshold"] = numpy.inf + defaults["veto_threshold"] = np.inf defaults.update(kwargs) return argparse.Namespace(**defaults) @@ -556,12 +556,12 @@ def scale_gen2_sensors( default_psi_bins = { - "tracks": numpy.linspace(0, numpy.radians(1.5) ** 2, 150) ** (1.0 / 2), - "cascades": numpy.linspace(0, numpy.radians(60) ** 2, 50) ** (1.0 / 2), - "radio": numpy.linspace(0, numpy.radians(15) ** 2, 50) ** (1.0 / 2), + "tracks": np.linspace(0, np.radians(1.5) ** 2, 150) ** (1.0 / 2), + "cascades": np.linspace(0, np.radians(60) ** 2, 50) ** (1.0 / 2), + "radio": np.linspace(0, np.radians(15) ** 2, 50) ** (1.0 / 2), } -# default_cos_theta_bins = numpy.linspace(-1, 1, 21) +# default_cos_theta_bins = np.linspace(-1, 1, 21) default_cos_theta_bins = [ -1.0, -0.95, diff --git a/toise/fictive.py b/toise/fictive.py index 60889a5..9979317 100644 --- a/toise/fictive.py +++ b/toise/fictive.py @@ -2,6 +2,7 @@ Idealized detectors used to estimate sensitivities in the WIS whitepaper """ +import numpy as np from numpy import vectorize from toolz import memoize @@ -9,8 +10,8 @@ def make_cylinder(volume=1.0, aspect=1.0): - r = numpy.cbrt(2 * volume * aspect / numpy.pi**2) - h = numpy.pi * r / 2 / aspect + r = np.cbrt(2 * volume * aspect / np.pi**2) + h = np.pi * r / 2 / aspect return surfaces.Cylinder(h * 1e3, r * 1e3) @@ -23,12 +24,12 @@ def sigma(self, loge): class GaussianPointSpreadFunction(object): - def __init__(self, median_opening_angle=numpy.radians(0.5)): - self._sigma = median_opening_angle / numpy.sqrt(2 * numpy.log(2)) + def __init__(self, median_opening_angle=np.radians(0.5)): + self._sigma = median_opening_angle / np.sqrt(2 * np.log(2)) def __call__(self, psi, energy, cos_theta): - psi, loge, ct = numpy.broadcast_arrays(psi / self._sigma, energy, cos_theta) - return 1.0 - numpy.exp(-(psi**2) / 2.0) + psi, loge, ct = np.broadcast_arrays(psi / self._sigma, energy, cos_theta) + return 1.0 - np.exp(-(psi**2) / 2.0) @memoize @@ -36,7 +37,7 @@ def base_aeff(): """ Create a horizontal neutrino effective area for a 1 km^2 detector """ - cos_theta = linspace(-1, 1, 40)[19:21] + cos_theta = np.linspace(-1, 1, 40)[19:21] ( e_nu, cos_theta, @@ -45,9 +46,9 @@ def base_aeff(): # Step 5: apply smearing for energy resolution response = FictiveEnergyResolution().get_response_matrix(e_mu, e_mu) - efficiency = numpy.apply_along_axis(numpy.inner, 3, efficiency, response) + efficiency = np.apply_along_axis(np.inner, 3, efficiency, response) - total_aeff = numpy.zeros((6,) + efficiency.shape[1:]) + total_aeff = np.zeros((6,) + efficiency.shape[1:]) total_aeff[2:4, ...] 
= efficiency * 1e6 # side-on geometry area: 1 km2 edges = (e_nu, cos_theta, e_mu) @@ -70,17 +71,17 @@ def get_aeff(angular_resolution=0.5, energy_threshold=1e3): idx = e_mu.searchsorted(energy_threshold) - psf = GaussianPointSpreadFunction(numpy.radians(angular_resolution)) - psi_bins = numpy.concatenate( + psf = GaussianPointSpreadFunction(np.radians(angular_resolution)) + psi_bins = np.concatenate( ( - sqrt(linspace(0, numpy.radians(angular_resolution * 4) ** 2, 101)), - [numpy.inf], + np.sqrt(np.linspace(0, np.radians(angular_resolution * 4) ** 2, 101)), + [np.inf], ) ) # Step 4: apply smearing for angular resolution cdf = psf(psi_bins[:-1], 0, 0) - smear = numpy.concatenate((diff(cdf), [1.0 - cdf[-1]])) + smear = np.concatenate((np.diff(cdf), [1.0 - cdf[-1]])) expand = [None] * 4 + [slice(None)] diff --git a/toise/figures/diffuse/dnnc.py b/toise/figures/diffuse/dnnc.py index ae2e82b..b4c3111 100644 --- a/toise/figures/diffuse/dnnc.py +++ b/toise/figures/diffuse/dnnc.py @@ -3,7 +3,6 @@ from pathlib import Path import healpy -import numpy import numpy as np import pandas as pd from scipy import interpolate, optimize @@ -39,7 +38,7 @@ def create_dnn_aeff(nside: int = 16, scale=1): values = np.array(list(values.values())).T[:, ::-1] # snap zenith bands to rings of a healpix map - pixel_centers = -healpy.ringinfo(nside, numpy.arange(1, 4 * nside))[2] + pixel_centers = -healpy.ringinfo(nside, np.arange(1, 4 * nside))[2] # broadcast effective area into each ring values = values[:, np.digitize(pixel_centers, ct_edges) - 1] # reset zenith bands to the bounds of the healpix rings diff --git a/toise/figures/diffuse/spectrum.py b/toise/figures/diffuse/spectrum.py index bc77b34..5ccb970 100644 --- a/toise/figures/diffuse/spectrum.py +++ b/toise/figures/diffuse/spectrum.py @@ -111,12 +111,14 @@ def ts_diff(value): energy_range = list(llh.components[key]._components.values())[0][0].energy_range if plotit and energy_range[0] > 1e6: - x = linspace(0, g0 * 2, 101) + import matplotlib.pyplot as plt + + x = np.linspace(0, g0 * 2, 101) energy_range = list(llh.components[key]._components.values())[0][0].energy_range - line = plot( + line = plt.plot( x, [ts_diff(x_) for x_ in x], label="%.1g-%.1g" % tuple(energy_range) )[0] - axvline(nom[key], color=line.get_color()) + plt.axvline(nom[key], color=line.get_color()) return lo, hi diff --git a/toise/multillh.py b/toise/multillh.py index 1940c54..54c72bb 100644 --- a/toise/multillh.py +++ b/toise/multillh.py @@ -2,7 +2,7 @@ import logging -import numpy +import numpy as np import scipy.optimize @@ -99,10 +99,10 @@ def differential_chunks(self, *args, **kwargs): "label": label, "livetime": livetime, "emin": max( - component.energy_range[0], kwargs.get("emin", -numpy.inf) + component.energy_range[0], kwargs.get("emin", -np.inf) ), "emax": min( - component.energy_range[1], kwargs.get("emax", numpy.inf) + component.energy_range[1], kwargs.get("emax", np.inf) ), "chunks": component.differential_chunks( *args, exclusive=True, **kwargs @@ -143,7 +143,7 @@ def differential_chunks(self, *args, **kwargs): all_done = True else: combo = Combination(components) - combo.energy_range = eranges[numpy.argmax(ecenters)] + combo.energy_range = eranges[np.argmax(ecenters)] combo.energy_center = max(ecenters) yield max(ecenters), combo @@ -222,18 +222,18 @@ def llh(self, **kwargs): llh += self.components[param].prior(kwargs[param], **kwargs) for prop in lamb: - with numpy.errstate(divide="ignore"): - log_lambda = numpy.log(lamb[prop]) - log_data = numpy.log(self.data[prop]) - 
log_lambda[numpy.isinf(log_lambda)] = 0 - log_data[numpy.isinf(log_data)] = 0 + with np.errstate(divide="ignore"): + log_lambda = np.log(lamb[prop]) + log_data = np.log(self.data[prop]) + log_lambda[np.isinf(log_lambda)] = 0 + log_data[np.isinf(log_data)] = 0 if self.unbinned: - norm = numpy.sum(lamb[prop]) + norm = np.sum(lamb[prop]) for event in self.data[prop]: - llh += numpy.sum(event * lamb[prop]) / norm + llh += np.sum(event * lamb[prop]) / norm llh -= norm else: - llh += numpy.sum(self.data[prop] * (log_lambda - log_data)) - numpy.sum( + llh += np.sum(self.data[prop] * (log_lambda - log_data)) - np.sum( lamb[prop] - self.data[prop] ) @@ -247,8 +247,8 @@ def llh_contributions(self, **kwargs): lamb = self.expectations(**kwargs) llh = dict() for prop in lamb: - log_lambda = numpy.log(lamb[prop]) - log_lambda[numpy.isinf(log_lambda)] = 0 + log_lambda = np.log(lamb[prop]) + log_lambda[np.isinf(log_lambda)] = 0 llh[prop] = self.data[prop] * log_lambda - lamb[prop] return llh @@ -264,10 +264,10 @@ def sat_llh(self, **kwargs): llh = 0 dof = 0 for prop in self.data: - log_lambda = numpy.log(self.data[prop]) - log_lambda[numpy.isinf(log_lambda)] = 0 - llh += numpy.sum(self.data[prop] * log_lambda - self.data[prop]) - dof += numpy.sum(self.data[prop] != 0) + log_lambda = np.log(self.data[prop]) + log_lambda[np.isinf(log_lambda)] = 0 + llh += np.sum(self.data[prop] * log_lambda - self.data[prop]) + dof += np.sum(self.data[prop] != 0) return (llh, dof + 1) def fit(self, minimizer_params=dict(), **fixedparams): @@ -323,7 +323,7 @@ def minllh(x): fixedparams.update(dict(zip(freeparams, bestfit["x"]))) return fixedparams else: - bestllh = numpy.inf + bestllh = np.inf bestparams = dict(fixedparams) for points in itertools.product( *tuple(fixedparams[k] for k in discrete_params) @@ -369,7 +369,7 @@ def profile1d(self, param, values, minimizer_params=dict(), **fixedparams): for i in range(len(dtypes)): if isinstance(llhpoints[-1][i], str): dtypes[i] = "|S32" - return numpy.array(llhpoints, dtype=list(zip(dkeys, dtypes))) + return np.array(llhpoints, dtype=list(zip(dkeys, dtypes))) def profile2d(self, param1, values1, param2, values2, **fixedparams): """ @@ -387,7 +387,7 @@ def profile2d(self, param1, values1, param2, values2, **fixedparams): fit = self.fit(**params) mlh = self.llh(**fit) llhpoints.append(list(fit.values()) + [mlh]) - return numpy.asarray(llhpoints).view( + return np.asarray(llhpoints).view( dtype=list( zip(list(fit.keys()) + ["LLH"], [float] * (len(list(fit.keys())) + 1)) ) @@ -407,7 +407,7 @@ def _pseudo_llh(components, poisson, **nominal): if poisson: pseudodata = dict() for tag in expectations: - pseudodata[tag] = numpy.random.poisson(expectations[tag]) + pseudodata[tag] = np.random.poisson(expectations[tag]) else: pseudodata = expectations allh.data = pseudodata diff --git a/toise/plotting.py b/toise/plotting.py index c3d4851..6a560c1 100644 --- a/toise/plotting.py +++ b/toise/plotting.py @@ -1,4 +1,4 @@ -import numpy +import numpy as np def stepped_path(edges, bins, cumulative=False): @@ -11,8 +11,8 @@ def stepped_path(edges, bins, cumulative=False): if len(edges) != len(bins) + 1: raise ValueError("edges must be 1 element longer than bins") - x = numpy.zeros((2 * len(edges))) - y = numpy.zeros((2 * len(edges))) + x = np.zeros((2 * len(edges))) + y = np.zeros((2 * len(edges))) if cumulative is not False: if cumulative == "<": @@ -27,7 +27,7 @@ def stepped_path(edges, bins, cumulative=False): def format_energy(fmt, energy): - places = int(numpy.log10(energy) / 3) * 3 + places = 
int(np.log10(energy) / 3) * 3 if places == 0: unit = "GeV" elif places == 3: @@ -47,11 +47,11 @@ def plot_profile2d(profile, x, y, levels=[68, 90, 99], colors="k", **kwargs): import matplotlib.pyplot as plt from scipy.stats import chi2 - xv = numpy.unique(profile[x]) - yv = numpy.unique(profile[y]) + xv = np.unique(profile[x]) + yv = np.unique(profile[y]) shape = (xv.size, yv.size) - ts = 2 * (numpy.nanmax(profile["LLH"]) - profile["LLH"]).reshape(shape) + ts = 2 * (np.nanmax(profile["LLH"]) - profile["LLH"]).reshape(shape) pvalue = chi2.cdf(ts.T, 2) * 100 ax = plt.gca() @@ -128,21 +128,21 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw xd = line.get_xdata() yd = line.get_ydata() # sort if necessary - if (numpy.diff(xd) < 0).any(): + if (np.diff(xd) < 0).any(): order = xd.argsort() xd = xd[order] yd = yd[order] # de-step if necessary - ux = numpy.unique(xd) + ux = np.unique(xd) if 2 * ux.size == xd.size and (ux == xd[::2]).all(): xd = (xd[::2] + xd[1::2]) / 2 yd = yd[1::2] # interpolate for x if y is supplied if x is None: - x = numpy.interp([y], yd, xd)[0] + x = np.interp([y], yd, xd)[0] # get points on either side of the anchor point - i = numpy.searchsorted(xd, x) + i = np.searchsorted(xd, x) if i > xd.size - 2: i = xd.size - 2 xb, yb = xd[i : i + 2], yd[i : i + 2] @@ -160,7 +160,7 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw p1 = ax.transData.transform_point([xb[0], yb[0]]) p2 = ax.transData.transform_point([xb[1], yb[1]]) if orientation == "parallel": - kw["rotation"] = numpy.degrees(numpy.arctan2(p2[1] - p1[1], p2[0] - p1[0])) + kw["rotation"] = np.degrees(np.arctan2(p2[1] - p1[1], p2[0] - p1[0])) kw.update(**kwargs) @@ -168,8 +168,8 @@ def label_curve(ax, line, x=None, y=None, orientation="parallel", offset=0, **kw # calculate normal in *screen coordinates* if offset != 0: xy = ax.transData.transform_point(text.get_position()) - norm = numpy.array([p2[1] - p1[1], p2[0] - p1[0]]) - norm = norm / (numpy.hypot(norm[0], norm[1]) / offset) + norm = np.array([p2[1] - p1[1], p2[0] - p1[0]]) + norm = norm / (np.hypot(norm[0], norm[1]) / offset) xy = ax.transData.inverted().transform_point((xy[0] - norm[0], xy[1] + norm[1])) text.set_position(xy) return text diff --git a/toise/surface_veto.py b/toise/surface_veto.py index 3a70aa2..837e937 100644 --- a/toise/surface_veto.py +++ b/toise/surface_veto.py @@ -3,7 +3,7 @@ from copy import copy import healpy -import numpy +import numpy as np from scipy import interpolate from scipy.optimize import fsolve from scipy.special import erf, erfc @@ -23,8 +23,8 @@ def surface_area(theta_max, volume): """ d = 1950 - volume._z_range[0] # depth of the bottom of the detector return ( - numpy.pi - * (d * numpy.tan(theta_max) + numpy.sqrt(volume.get_cap_area() / numpy.pi)) ** 2 + np.pi + * (d * np.tan(theta_max) + np.sqrt(volume.get_cap_area() / np.pi)) ** 2 ) @@ -54,7 +54,7 @@ def veto_cost_for_angle(theta_max, emu_min, surface): above emu_min out to theta_max """ fill = fill_factor_for_threshold(emu_min) - area = surface_area(numpy.radians(theta_max), surface) + area = surface_area(np.radians(theta_max), surface) return array_cost(area / 1e6, fill) / 1e6 @@ -79,7 +79,7 @@ def area_diff(margin): def get_geometric_coverage_for_area( - geometry, spacing, area, ct_bins=numpy.linspace(0, 1, 11), nsamples=int(1e4) + geometry, spacing, area, ct_bins=np.linspace(0, 1, 11), nsamples=int(1e4) ): """ Calculate the geometric coverage of a surface veto by Monte Carlo @@ -95,7 +95,7 @@ def 
get_geometric_coverage_for_area(
     margin = margin_for_area(ref_surface, area)
     veto_surface = ref_surface.expand(margin)
 
-    coverage = numpy.zeros(ct_bins.size - 1)
+    coverage = np.zeros(ct_bins.size - 1)
     if not area > 0:
         return coverage
 
@@ -128,7 +128,7 @@ def __init__(self, geometry="Sunflower", spacing=240, area=10.0):
         else:
             self.cache = dict()
 
-    def __call__(self, ct_bins=numpy.linspace(-1, 1, 11)):
+    def __call__(self, ct_bins=np.linspace(-1, 1, 11)):
         key = (
             self.geometry,
             self.spacing,
@@ -162,17 +162,17 @@ def __init__(self, fname=os.path.join(data_dir, "veto", "vetoeffs.pickle")):
         z = 1 - (1 - v_mu) * (1 - v_e)
         self.spline = interpolate.RectBivariateSpline(x, y, z)
         # don't trust table beyond its statistical limits
-        self.pmax = numpy.nanmax(v_mu[v_mu < 1])
+        self.pmax = np.nanmax(v_mu[v_mu < 1])
         self.log_emin = x[0]
         self.log_emax = x[-1]
         self.ct_min = y[0]
         self.ct_max = y[-2]
 
     def __call__(self, energy, cos_theta):
-        logE, cos_theta = numpy.broadcast_arrays(
-            numpy.log10(energy / 1e3), numpy.clip(cos_theta, -1, self.ct_max)
+        logE, cos_theta = np.broadcast_arrays(
+            np.log10(energy / 1e3), np.clip(cos_theta, -1, self.ct_max)
         )
-        p = numpy.clip(self.spline(logE, cos_theta, grid=False), 0, self.pmax)
+        p = np.clip(self.spline(logE, cos_theta, grid=False), 0, self.pmax)
         p[logE > self.log_emax] = self.pmax
         p[logE < self.log_emin] = 0
         p[cos_theta < self.ct_min] = 0
@@ -232,7 +232,7 @@ def overburden(cos_theta, depth=1950, elevation=2400):
     r = 6371315 + elevation
     # this is secretly a translation in polar coordinates
     return (
-        numpy.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2)
+        np.sqrt(2 * r * depth + (cos_theta * (r - depth)) ** 2 - depth**2)
         - (r - depth) * cos_theta
     )
 
@@ -249,7 +249,7 @@ def polynomial(x, coefficients):
         return sum(c * x**i for i, c in enumerate(coefficients))
 
     coeffs = [[2.793, -0.476, 0.187], [2.069, -0.201, 0.023], [-2.689, 3.882]]
-    a, b, c = (polynomial(numpy.log10(emin), c) for c in coeffs)
+    a, b, c = (polynomial(np.log10(emin), c) for c in coeffs)
     return 10 ** polynomial(distance, (a, b / 1e4, c / 1e10))
 
 
@@ -294,7 +294,7 @@ def gaisser_flux(energy, ptype):
         rigidity = [4e6, 30e6, 2e9]
 
     return sum(
-        n[idx] * energy ** (-g[idx]) * numpy.exp(-energy / (r * z))
+        n[idx] * energy ** (-g[idx]) * np.exp(-energy / (r * z))
         for n, g, r in zip(norm, gamma, rigidity)
     )
 
@@ -326,7 +326,7 @@ def bundle_energy_at_depth(eprim, a=1, cos_theta=1.0, depth=1950.0):
         et * a / cos_theta * alpha / (alpha - 1.0) * (1 * emin / eprim) ** (-alpha + 1)
     )
     # assume that constant energy loss term is negligible
-    return surface_energy, numpy.exp(-bmu * X)
+    return surface_energy, np.exp(-bmu * X)
 
 
 def bundle_energy_distribution(emu_edges, eprim, a=1, cos_theta=1.0, depth=1950.0):
@@ -339,20 +339,20 @@ def bundle_energy_distribution(emu_edges, eprim, a=1, cos_theta=1.0, depth=1950.
 
:param depth: vertical depth of detector """ surface_energy, mean_loss = bundle_energy_at_depth(eprim, a, cos_theta, depth) - mu = numpy.log10(mean_loss * surface_energy) + mu = np.log10(mean_loss * surface_energy) # don't allow bundles to have more energy at the surface than the parent shower - hi = numpy.log10(numpy.minimum(emu_edges[1:], surface_energy)) - lo = numpy.log10(emu_edges[:-1]) + hi = np.log10(np.minimum(emu_edges[1:], surface_energy)) + lo = np.log10(emu_edges[:-1]) # width of log-normal distribution fit to (surface bundle energy / primary # energy) See: # https://wiki.icecube.wisc.edu/index.php/The_optimization_of_the_empirical_model_(IC22) - sigma = numpy.minimum( - 1.25 - 0.16 * numpy.log10(eprim) + 0.00563 * numpy.log10(eprim) ** 2, 1.0 + sigma = np.minimum( + 1.25 - 0.16 * np.log10(eprim) + 0.00563 * np.log10(eprim) ** 2, 1.0 ) - dist = numpy.maximum((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0, 0.0) + dist = np.maximum((erf((hi - mu) / sigma) - erf((lo - mu) / sigma)) / 2.0, 0.0) # compensate for truncating the gaussian at the surface energy - upper_tail = erfc((numpy.log10(surface_energy) - mu) / sigma) / 2.0 + upper_tail = erfc((np.log10(surface_energy) - mu) / sigma) / 2.0 return dist / (1.0 - upper_tail) @@ -367,19 +367,19 @@ def bundle_flux_at_depth(emu, cos_theta): H/He/CNO/MgAlSi/Fe """ # make everything an array - emu, cos_theta = list(map(numpy.asarray, (emu, cos_theta))) - emu_center = 10 ** (center(numpy.log10(emu))) - shape = numpy.broadcast(emu_center, cos_theta).shape + emu, cos_theta = list(map(np.asarray, (emu, cos_theta))) + emu_center = 10 ** (center(np.log10(emu))) + shape = np.broadcast(emu_center, cos_theta).shape # primary spectrum for each element - contrib = numpy.zeros(shape + (5, 1000)) + contrib = np.zeros(shape + (5, 1000)) - penergy = numpy.logspace(2, 12, 1000) + penergy = np.logspace(2, 12, 1000) if cos_theta <= 0: return contrib, penergy - logstep = numpy.unique(numpy.diff(numpy.log10(penergy)))[0] - de = 10 ** (numpy.log10(penergy) + logstep / 2.0) - 10 ** ( - numpy.log10(penergy) - logstep / 2.0 + logstep = np.unique(np.diff(np.log10(penergy)))[0] + de = 10 ** (np.log10(penergy) + logstep / 2.0) - 10 ** ( + np.log10(penergy) - logstep / 2.0 ) ptypes = [ @@ -396,7 +396,7 @@ def bundle_flux_at_depth(emu, cos_theta): ) contrib[..., i, :] = weights * c # convert to differential flux - contrib /= numpy.diff(emu)[:, None, None] + contrib /= np.diff(emu)[:, None, None] return contrib, penergy @@ -411,7 +411,7 @@ def trigger_efficiency(eprim, threshold=10**5.5, sharpness=7): a step function """ return 1 / ( - 1 + numpy.exp(-(numpy.log10(eprim) - numpy.log10(threshold)) * sharpness) + 1 + np.exp(-(np.log10(eprim) - np.log10(threshold)) * sharpness) ) @@ -427,9 +427,9 @@ def __init__(self, effective_area, livetime=1.0): emu, cos_theta = effective_area.bin_edges[:2] # FIXME: account for healpix binning - self._solid_angle = 2 * numpy.pi * numpy.diff(self._aeff.bin_edges[1]) + self._solid_angle = 2 * np.pi * np.diff(self._aeff.bin_edges[1]) - flux = numpy.stack( + flux = np.stack( [ bundle_flux_at_depth(emu, ct)[0][:, :4, :].sum(axis=(1, 2)) for ct in center(cos_theta) @@ -438,11 +438,11 @@ def __init__(self, effective_area, livetime=1.0): # from icecube import MuonGun # model = MuonGun.load_model('GaisserH4a_atmod12_SIBYLL') - # flux, edist = numpy.vectorize(model.flux), numpy.vectorize(model.energy) - # emuc, ct = numpy.meshgrid(center(cos_theta), center(emu)) + # flux, edist = np.vectorize(model.flux), np.vectorize(model.energy) + # 
emuc, ct = np.meshgrid(center(cos_theta), center(emu)) # flux = flux(MuonGun.depth(0), ct, 1)*edist(MuonGun.depth(0), ct, 1, 0, emuc) - flux *= numpy.diff(emu)[:, None] + flux *= np.diff(emu)[:, None] if effective_area.is_healpix: flux *= healpy.nside2pixarea(effective_area.nside) else: @@ -485,7 +485,7 @@ def point_source_background( ), "Don't know how to make PS backgrounds from HEALpix maps yet" background = copy(self) - bin_areas = (numpy.pi * numpy.diff(psi_bins**2))[None, ...] + bin_areas = (np.pi * np.diff(psi_bins**2))[None, ...] # observation time shorter for triggered transient searches if livetime is not None: bin_areas *= livetime / self._livetime / constants.annum @@ -505,7 +505,7 @@ def point_source_background( # dimensions of the keys in expectations are now energy, radial bin if is_zenith_weight(zenith_index, self._aeff): background.expectations = ( - numpy.nansum( + np.nansum( (self.expectations * zenith_index[:, None]) / omega, axis=0 )[..., None] * bin_areas diff --git a/toise/surfaces.py b/toise/surfaces.py index 536a5f6..34c46cc 100644 --- a/toise/surfaces.py +++ b/toise/surfaces.py @@ -1,6 +1,6 @@ import os -import numpy +import numpy as np from .util import data_dir @@ -149,18 +149,18 @@ def cross(o, a, b): hull = lower[:-1] + upper[:-1] # convert into numpy array - return numpy.array(hull) + return np.array(hull) def hull_to_normals(points): # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) vecs = points[1:] - points[:-1] - magn = numpy.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) + magn = np.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) - normals = numpy.array( - [vecs[:, 1] / magn, -vecs[:, 0] / magn, numpy.zeros(magn.shape)] + normals = np.array( + [vecs[:, 1] / magn, -vecs[:, 0] / magn, np.zeros(magn.shape)] ).T return normals @@ -168,11 +168,11 @@ def hull_to_normals(points): def hull_to_lengths(points): # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) vecs = points[1:] - points[:-1] - return numpy.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) + return np.sqrt(vecs[:, 0] ** 2 + vecs[:, 1] ** 2) def signed_area(points): @@ -181,10 +181,10 @@ def signed_area(points): """ # append first point at the end to close the hull - points = numpy.append(points, [points[0]], axis=0) + points = np.append(points, [points[0]], axis=0) return ( - numpy.sum( + np.sum( points[:, 0][:-1] * points[:, 1][1:] - points[:, 0][1:] * points[:, 1][:-1] ) / 2.0 @@ -215,26 +215,26 @@ def azimuth_averaged_area(self, cos_theta): :param cos_theta: cosine of the zenith angle """ - return self.get_cap_area() * abs(cos_theta) + self.get_side_area() * numpy.sqrt( + return self.get_cap_area() * abs(cos_theta) + self.get_side_area() * np.sqrt( 1 - cos_theta**2 ) def get_maximum_area(self): - ct_max = numpy.cos(numpy.arctan(self.get_side_area() / self.get_cap_area())) + ct_max = np.cos(np.arctan(self.get_side_area() / self.get_cap_area())) return self.azimuth_averaged_area(ct_max) def sample_direction(self, cos_min=-1, cos_max=1, size=1): max_area = self.get_maximum_area() blocksize = min((size * 4, 65535)) accepted = 0 - directions = numpy.empty((size, 3)) + directions = np.empty((size, 3)) while accepted < size: - ct = numpy.random.uniform(cos_min, cos_max, blocksize) - st = numpy.sqrt((1 - ct) * (1 + ct)) - azi = numpy.random.uniform(0, 2 * numpy.pi, blocksize) - candidates = numpy.vstack((numpy.cos(azi) * st, 
numpy.sin(azi) * st, ct)).T + ct = np.random.uniform(cos_min, cos_max, blocksize) + st = np.sqrt((1 - ct) * (1 + ct)) + azi = np.random.uniform(0, 2 * np.pi, blocksize) + candidates = np.vstack((np.cos(azi) * st, np.sin(azi) * st, ct)).T candidates = candidates[ - numpy.random.uniform(0, max_area, blocksize) + np.random.uniform(0, max_area, blocksize) <= self.projected_area(candidates), :, ] @@ -251,10 +251,10 @@ def _integrate_area(a, b, cap, sides): cap * (b**2 - a**2) + sides * ( - numpy.arccos(a) - - numpy.arccos(b) - - numpy.sqrt(1 - a**2) * a - + numpy.sqrt(1 - b**2) * b + np.arccos(a) + - np.arccos(b) + - np.sqrt(1 - a**2) * a + + np.sqrt(1 - b**2) * b ) ) / 2.0 @@ -281,11 +281,11 @@ def etendue(self, cosMin=-1.0, cosMax=1.0): 0, cosMax, cap, sides ) else: - area = numpy.nan + area = np.nan raise ValueError( "Can't deal with zenith range [%.1e, %.1e]" % (cosMin, cosMax) ) - return 2 * numpy.pi * area + return 2 * np.pi * area def average_area(self, cosMin=-1, cosMax=1): """ @@ -296,7 +296,7 @@ def average_area(self, cosMin=-1, cosMax=1): :param cosMax: cosine of the minimum zenith angle :returns: the average projected area in the zenith angle range """ - return self.etendue(cosMin, cosMax) / (2 * numpy.pi * (cosMax - cosMin)) + return self.etendue(cosMin, cosMax) / (2 * np.pi * (cosMax - cosMin)) def volume(self): return self.get_cap_area() * self.length @@ -313,7 +313,7 @@ def __init__(self, xy_points, z_range): # hull points, in counterclockwise order self._x = hull # next neighbor in the hull - self._nx = numpy.roll(hull, -1, axis=0) + self._nx = np.roll(hull, -1, axis=0) # vector connecting each pair of points in the hull self._dx = self._nx - self._x @@ -324,10 +324,10 @@ def __init__(self, xy_points, z_range): self._side_lengths = hull_to_lengths(hull) side_areas = self._side_lengths * self.length cap_area = [signed_area(hull)] * 2 - cap_normals = numpy.array([[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]) + cap_normals = np.array([[0.0, 0.0, 1.0], [0.0, 0.0, -1.0]]) - self._areas = numpy.concatenate((side_areas, cap_area)) - self._normals = numpy.concatenate((side_normals, cap_normals)) + self._areas = np.concatenate((side_areas, cap_area)) + self._normals = np.concatenate((side_normals, cap_normals)) assert self._areas.size == self._normals.shape[0] def get_z_range(self): @@ -340,11 +340,11 @@ def get_side_area(self): # the projected area of a plane, averaged over a 2\pi rotation that # passes through the normal, is # A*\int_0^\pi \Theta(\sin\alpha)\sin\alpha d\alpha / 2\pi = A/\pi - return self._side_lengths.sum() * self.length / numpy.pi + return self._side_lengths.sum() * self.length / np.pi def projected_area(self, direction): - inner = numpy.dot(direction, self._normals.T) - areas = numpy.where(inner < 0, -inner * self._areas, 0) + inner = np.dot(direction, self._normals.T) + areas = np.where(inner < 0, -inner * self._areas, 0) return areas.sum(axis=areas.ndim - 1) def _sample_on_caps(self, directions, bottom, offset, scale): @@ -355,10 +355,10 @@ def _sample_on_caps(self, directions, bottom, offset, scale): size = len(directions) accepted = 0 blocksize = min((size * 4, 65535)) - positions = numpy.empty((len(directions), 3)) + positions = np.empty((len(directions), 3)) while accepted < size: - cpos = numpy.random.uniform(size=(blocksize, 2)) * scale + offset - mask = numpy.array(list(map(self._point_in_hull, cpos))) + cpos = np.random.uniform(size=(blocksize, 2)) * scale + offset + mask = np.array(list(map(self._point_in_hull, cpos))) cpos = cpos[mask] if len(cpos) + accepted > 
size: cpos = cpos[: size - accepted] @@ -371,8 +371,8 @@ def _sample_on_caps(self, directions, bottom, offset, scale): return positions def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): - directions = numpy.empty((size, 3)) - positions = numpy.empty((size, 3)) + directions = np.empty((size, 3)) + positions = np.empty((size, 3)) accepted = 0 blocksize = min((size * 4, 65535)) @@ -383,12 +383,12 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): block = min((blocksize, size - accepted)) cdir = self.sample_direction(cos_min, cos_max, block) directions[accepted : accepted + block] = cdir - inner = numpy.dot(cdir, self._normals.T) - areas = numpy.where(inner < 0, -inner * self._areas, 0) + inner = np.dot(cdir, self._normals.T) + areas = np.where(inner < 0, -inner * self._areas, 0) prob = areas.cumsum(axis=1) prob /= prob[:, -1:] - p = numpy.random.uniform(size=block) - target = numpy.array([prob[i, :].searchsorted(p[i]) for i in range(block)]) + p = np.random.uniform(size=block) + target = np.array([prob[i, :].searchsorted(p[i]) for i in range(block)]) # first, handle sides sides = target < len(self._areas) - 2 @@ -396,14 +396,14 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): nsides = sides.sum() xy = ( self._x[side_target] - + numpy.random.uniform(size=nsides)[:, None] * self._dx[side_target] + + np.random.uniform(size=nsides)[:, None] * self._dx[side_target] ) - xyz = numpy.concatenate( + xyz = np.concatenate( ( xy, ( self._z_range[0] - + numpy.random.uniform(size=nsides) * self.length + + np.random.uniform(size=nsides) * self.length )[:, None], ), axis=1, @@ -434,7 +434,7 @@ def expand(self, padding): # normalized vector connecting each vertex to the next one d = self._dx / self._side_lengths[:, None] # and the one connecting the previous vertex - prev_d = numpy.roll(d, 1, axis=0) + prev_d = np.roll(d, 1, axis=0) # sine of the inner angle of each vertex det = prev_d[:, 0] * d[:, 1] - prev_d[:, 1] * d[:, 0] assert (det != 0.0).all(), "Edges can't be [anti]parallel" @@ -459,7 +459,7 @@ def from_I3Geometry(cls, i3geo, padding=0): if omgeo.omtype != omgeo.IceTop: strings[omkey.string].append(list(omgeo.position)) mean_xy = [ - numpy.mean(positions, axis=0)[0:2] for positions in list(strings.values()) + np.mean(positions, axis=0)[0:2] for positions in list(strings.values()) ] zmax = max(max(p[2] for p in positions) for positions in list(strings.values())) zmin = min(min(p[2] for p in positions) for positions in list(strings.values())) @@ -472,11 +472,11 @@ def from_I3Geometry(cls, i3geo, padding=0): @classmethod def from_file(cls, fname, padding=0): - dats = numpy.loadtxt(fname) + dats = np.loadtxt(fname) zmax = dats[:, -1].max() zmin = dats[:, -1].min() mean_xy = [ - dats[dats[:, 0] == i].mean(axis=0)[2:4] for i in numpy.unique(dats[:, 0]) + dats[dats[:, 0] == i].mean(axis=0)[2:4] for i in np.unique(dats[:, 0]) ] self = cls(mean_xy, [zmin, zmax]) @@ -501,11 +501,11 @@ def from_i3file(cls, fname, padding=0): return cls.from_I3Geometry(fr["I3Geometry"], padding) def _direction_to_vec(self, cos_zenith, azimuth): - ct, azi = numpy.broadcast_arrays(cos_zenith, azimuth) - st = numpy.sqrt(1.0 - ct**2) - cp = numpy.cos(azi) - sp = numpy.sin(azi) - return -numpy.array([st * cp, st * sp, ct]) + ct, azi = np.broadcast_arrays(cos_zenith, azimuth) + st = np.sqrt(1.0 - ct**2) + cp = np.cos(azi) + sp = np.sin(azi) + return -np.array([st * cp, st * sp, ct]) def area(self, cos_zenith, azimuth): """ @@ -513,7 +513,7 @@ def area(self, cos_zenith, azimuth): """ vec = 
self._direction_to_vec(cos_zenith, azimuth) # inner product with component normals - inner = numpy.dot(self._normals, vec) + inner = np.dot(self._normals, vec) # only surfaces that face the requested direction count towards the area mask = inner < 0 return -(inner * self._areas[:, None] * mask).sum(axis=0) @@ -549,8 +549,8 @@ def _distance_to_hull(self, point, vec): # proportional distance along edge to intersection point # NB: if diry/dirx == dy/dx, the ray is parallel to the line segment nonparallel = diry * dx != dirx * dy - alpha = numpy.where( - nonparallel, (dirx * y - diry * x) / (diry * dx - dirx * dy), numpy.nan + alpha = np.where( + nonparallel, (dirx * y - diry * x) / (diry * dx - dirx * dy), np.nan ) # check whether the intersection is actually in the segment mask = (alpha >= 0) & (alpha < 1) @@ -562,16 +562,16 @@ def _distance_to_hull(self, point, vec): beta = ((y + alpha * dy) / diry)[mask] if beta.size == 0: - return (numpy.nan,) * 2 + return (np.nan,) * 2 else: - return (numpy.nanmin(beta), numpy.nanmax(beta)) + return (np.nanmin(beta), np.nanmax(beta)) def _distance_to_cap(self, point, dir, cap_z): d = (point[2] - cap_z) / dir[2] if self._point_in_hull(point + d * dir): return d else: - return numpy.nan + return np.nan def _distance_to_caps(self, point, dir): return sorted( @@ -582,7 +582,7 @@ def point_in_footprint(self, point): return self._point_in_hull(point) def intersections(self, x, y, z, cos_zenith, azimuth): - point = numpy.array((x, y, z)) + point = np.array((x, y, z)) vec = self._direction_to_vec(cos_zenith, azimuth) # perfectly vertical track: only check intersections with caps @@ -593,11 +593,11 @@ def intersections(self, x, y, z, cos_zenith, azimuth): return self._distance_to_hull(point, vec) # general case: both rho and z components nonzero else: - sin_zenith = numpy.sqrt(1.0 - cos_zenith**2) - sides = numpy.array(self._distance_to_hull(point, vec)) / sin_zenith + sin_zenith = np.sqrt(1.0 - cos_zenith**2) + sides = np.array(self._distance_to_hull(point, vec)) / sin_zenith caps = self._distance_to_caps(point, vec) - return numpy.nanmax((sides[0], caps[0])), numpy.nanmin((sides[1], caps[1])) + return np.nanmax((sides[0], caps[0])), np.nanmin((sides[1], caps[1])) class Cylinder(UprightSurface): @@ -609,28 +609,28 @@ def expand(self, margin): return Cylinder(self.length + 2 * margin, self.radius + margin) def point_in_footprint(self, point): - return numpy.hypot(point[0], point[1]) < self.radius + return np.hypot(point[0], point[1]) < self.radius def get_z_range(self): return (-self.length / 2.0, self.length / 2) def get_cap_area(self): - return numpy.pi * self.radius**2 + return np.pi * self.radius**2 def get_side_area(self): return 2 * self.radius * self.length - def area(self, cos_zenith, azimuth=numpy.nan): + def area(self, cos_zenith, azimuth=np.nan): return self.azimuth_averaged_area(cos_zenith) def projected_area(self, direction): ct = direction[..., -1] - st = numpy.sqrt((1 + ct) * (1 - ct)) + st = np.sqrt((1 + ct) * (1 - ct)) return self.get_cap_area() * abs(ct) + self.get_side_area() * st def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): - directions = numpy.empty((size, 3)) - positions = numpy.empty((size, 3)) + directions = np.empty((size, 3)) + positions = np.empty((size, 3)) accepted = 0 blocksize = min((size * 4, 65535)) @@ -639,32 +639,32 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): cdir = self.sample_direction(cos_min, cos_max, block) directions[accepted : accepted + block] = cdir ct = -cdir[:, -1] - st = 
numpy.sqrt((1 + ct) * (1 - ct)) + st = np.sqrt((1 + ct) * (1 - ct)) - areas = numpy.array( + areas = np.array( [ self.get_side_area() * st, - self.get_cap_area() * numpy.where(ct > 0, ct, 0), - self.get_cap_area() * numpy.where(ct < 0, abs(ct), 0), + self.get_cap_area() * np.where(ct > 0, ct, 0), + self.get_cap_area() * np.where(ct < 0, abs(ct), 0), ] ).T prob = areas.cumsum(axis=1) prob /= prob[:, -1:] - p = numpy.random.uniform(size=block) - target = numpy.array([prob[i, :].searchsorted(p[i]) for i in range(block)]) + p = np.random.uniform(size=block) + target = np.array([prob[i, :].searchsorted(p[i]) for i in range(block)]) # first, handle sides sides = target == 0 nsides = sides.sum() - beta = numpy.arcsin( - numpy.random.uniform(-1, 1, size=nsides) - ) + numpy.arctan2(-cdir[sides, 1], -cdir[sides, 0]) - positions[accepted : accepted + block][sides] = numpy.stack( + beta = np.arcsin( + np.random.uniform(-1, 1, size=nsides) + ) + np.arctan2(-cdir[sides, 1], -cdir[sides, 0]) + positions[accepted : accepted + block][sides] = np.stack( ( - self.radius * numpy.cos(beta) * st[sides], - self.radius * numpy.sin(beta) * st[sides], - numpy.random.uniform( + self.radius * np.cos(beta) * st[sides], + self.radius * np.sin(beta) * st[sides], + np.random.uniform( -self.length / 2, self.length / 2, size=nsides ), ) @@ -674,13 +674,13 @@ def sample_impact_ray(self, cos_min=-1, cos_max=1, size=1): caps = ~sides ncaps = caps.sum() cap_target = target[caps] - beta = numpy.random.uniform(0, 2 * numpy.pi, size=ncaps) - r = numpy.sqrt(numpy.random.uniform(size=ncaps)) * self.radius - positions[accepted : accepted + block][caps] = numpy.stack( + beta = np.random.uniform(0, 2 * np.pi, size=ncaps) + r = np.sqrt(np.random.uniform(size=ncaps)) * self.radius + positions[accepted : accepted + block][caps] = np.stack( ( - r * numpy.cos(beta), - r * numpy.sin(beta), - numpy.where(cap_target == 1, self.length / 2, -self.length / 2), + r * np.cos(beta), + r * np.sin(beta), + np.where(cap_target == 1, self.length / 2, -self.length / 2), ) ).T accepted += block
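---

A quick way to check that the `numpy` -> `np` rename above is exhaustive is to scan the package for any remaining `numpy.` attribute access, since that is exactly the pattern this patch rewrites. The stand-alone sketch below is not part of the patch; it assumes it is run from the repository root with the package directory named `toise`, and it uses only the standard library:

    import re
    import sys
    from pathlib import Path

    # "numpy." with a leading word boundary matches numpy.sqrt etc. but not
    # np.sqrt; bare imports such as "from numpy import vectorize", which this
    # patch deliberately keeps, are not flagged.
    LEFTOVER = re.compile(r"\bnumpy\.")

    def leftover_references(package="toise"):
        """Yield (path, line number, text) for each remaining numpy.* usage."""
        for path in sorted(Path(package).rglob("*.py")):
            source = path.read_text(encoding="utf-8")
            for lineno, line in enumerate(source.splitlines(), 1):
                if LEFTOVER.search(line):
                    yield path, lineno, line.strip()

    if __name__ == "__main__":
        hits = list(leftover_references())
        for path, lineno, text in hits:
            print(f"{path}:{lineno}: {text}")
        sys.exit(1 if hits else 0)

After the patch is applied the script should print nothing and exit 0; before it, essentially every hunk above shows up as a hit.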