Init¶
from pymule import *
from pymule.plot import twopanel, threepanel
from pymule.errortools import combineNplots
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import arbuzov
from scipy import optimize
from scipy.interpolate import InterpolatedUnivariateSpline as spline
import scipy.integrate as integrate
import scipy.stats
from pymule.maths import zeta2, zeta3
zeta4 = pi**4/90.
We define $F$ and $G$ through \begin{align} \frac1{\Gamma_0} \frac{d^2\Gamma}{dx dc_\theta} = F(x,z) + P c_\theta G(x,z) \end{align} where $\Gamma_0 = G_F^2M^5/(192\pi^3)$
gamma0 = Mmu**5/192/pi**3
and $c_\theta=\cos\theta$ is the cosine of the scattering angle of the electron with respect to the beam. The dependence of these functions on the positron energy $E$ is usually expressed through the variable $x\equiv 2E/M$. For a precise prediction near the endpoints of the range $2z \le x \le 1+z^2$, with $z\equiv m/M$, it is important to keep the electron-mass effects.
z = Mel / Mmu
Lz = log(z)
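Numerically, these boundaries evaluate to $2z\approx 0.0097$ and $1+z^2\approx 1.000023$, i.e. $m \le E \lesssim 52.83\,{\rm MeV}$ (all energies in MeV)
print(2*z, 1 + z**2)                # x ranges from ~0.0097 to ~1.000023
print(Mel, Mmu/2 + Mel**2/(2*Mmu))  # i.e. E from ~0.511 MeV to ~52.83 MeV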
The conventions for the polarisation of the muon, $P$, are chosen such that for perfectly polarised surface $\mu^+$ entering the target along the $z$-axis, we have $P^{\rm th} = -1$. More generally, the polarisation vector for $\mu^+$ polarised along the $z$-axis is $\vec n = (0, 0, P)$.
If we are completely inclusive with respect to emitted photons, the functions $F$ and $G$ contain all the required information. We obtain them by calculating, with $P=0.85$, \begin{align} \frac{d\Gamma_-}{d x} =\int_{-1}^0dc_\theta\ \frac{d^2\Gamma} {d x\,dc_\theta} \qquad\text{and}\qquad \frac{d\Gamma_+}{d x} =\int_{ 0}^1dc_\theta\ \frac{d^2\Gamma} {d x\,dc_\theta} \,. \end{align} Often our statements or equations are equally valid for $F$ and $G$. In this case we use the generic notation $H$ as a placeholder for either $F$ or $G$. We write the perturbative expansion of $H$ as \begin{align} H = \sum_{n=0} a^n h_n(x,z) \end{align} with $a=\alpha/\pi$
a = alpha/pi
At LO we have with $z=0$ \begin{align} f_0 &= \tilde f_0 + \mathcal{O}(z^2) = x^2(3-2x) + \mathcal{O}(z^2)\\ g_0 &= \tilde g_0 + \mathcal{O}(z^2) = x^2(1-2x) + \mathcal{O}(z^2) \end{align} The numerical integration is carried out in $E$. To get a good enough sampling, we split it into four integrals \begin{align} \int_m^{E_\text{max}} dE = \int_{ 0}^{26}dE +\int_{26}^{42}dE +\int_{42}^{50}dE +\int_{50}^{54}dE \end{align}
Efull = np.concatenate((
np.linspace( 0., 26., 1000, False) + 0.026/2,
np.linspace(26., 42., 1000, False) + 0.016/2,
np.linspace(42., 50., 1000, False) + 0.008/2,
np.linspace(50., 54., 1000, False) + 0.004/2
))
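As a quick check (illustration only), these points are indeed bin centres with the widths $0.026$, $0.016$, $0.008$ and $0.004\,{\rm MeV}$ that reappear in dointegral below
print(np.unique(np.round(np.diff(Efull[:1000]), 6)))   # [0.026]
print(Efull[0], Efull[999], Efull[1000])                # 0.013 25.987 26.008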
This is implemented in user.f95 as follows
! 1st digit is cos sign (1 = -, 2 = +)
! 2nd digit is the energy range
whatplot = mod(p_encrypted,10)
if(p_encrypted/10 == 1) then
whatplot = - whatplot
endif
...
if(abs(whatplot) == 1) then
if(q2(4) > 26.) pass_cut = .false.
elseif(abs(whatplot) == 2) then
if(q2(4) < 26. .or. q2(4) > 42.) pass_cut = .false.
elseif(abs(whatplot) == 3) then
if(q2(4) < 42. .or. q2(4) > 50.) pass_cut = .false.
elseif(abs(whatplot) == 4) then
if(q2(4) < 50.) pass_cut = .false.
endif
if (whatplot > 0) then
! We are in the cos > 0 region
if (cos_th(q2,ez) < 0) pass_cut = .false.
else
! We are in the cos < 0 region
if (cos_th(q2,ez) > 0) pass_cut = .false.
endif
Of course this only makes sense if $m<E<\tfrac M2 + \tfrac{m^2}{2M}$
mask = (Mel < Efull) & (Efull < Mmu/2 + Mel**2/Mmu/2)
E = Efull[mask]
x = 2*E/Mmu
We can smooth our expressions by fitting a spline to the data and integrating it over each bin
def spline_smooth(dat):
return dointegral(spline(dat[:,0],dat[:,1]))
Analytic LO and NLO¶
The LO and NLO McMule results have an unacceptably large Monte Carlo error. It is easier and cheaper to just use the analytic results and integrate them over the bin.
def dointegral(integrand):
diff = np.concatenate((
0.026*np.ones(981),
0.016*np.ones(1000),
0.008*np.ones(1000),
0.004*np.ones(707)
))
diff = 2*diff / Mmu
xs = np.column_stack((x-diff/2, x+diff/2))
xs[-1,-1] = 1 + z**2
ans = np.array([
np.insert(
np.array(integrate.quad(
integrand, i, j, epsrel=1e-5
)) / (j-i),
0,
(i + j)/2.
)
for i, j in xs
])
ans[-1,0] = x[-1]
return ans
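For illustration (not part of the analysis), applying dointegral to the massless LO spectrum shows the [x centre, bin-averaged value, integration error] layout used for all distributions in this notebook
demo = dointegral(lambda xx: xx**2*(3 - 2*xx))
print(demo.shape)    # (number of bins, 3)
print(demo[0])       # first bin: centre, average value, quad error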
$h_0$ is usually written in terms of the electron velocity $\beta=\sqrt{1-m^2/E^2} = \sqrt{1-4z^2/x^2}$
def inth0(x, o):
beta = sqrt(1-4*z**2/x**2)
f0 = x**2*beta * (3-2*x+x/4*(3*x-4)*(1-beta**2))
g0 = x**2*beta * ((1-2*x)*beta + 3*x**2/4 * beta * (1-beta**2))
return [f0, g0][o]
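As a quick sanity check (not part of the analysis), integrating $f_0$ over $x$ and $c_\theta$ reproduces the LO width $\Gamma_0$, i.e. $2\int f_0\,dx = 1$ up to $\mathcal{O}(z^2)$ mass corrections
print(2*integrate.quad(lambda xx: inth0(xx, 0), 2*z, 1 + z**2)[0])   # ~0.9998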
Taking the results from hep-ph/0110047 we can write $h_1$
def inth1(x, o):
beta = sqrt(1-4*z**2/x**2)
if o == 0:
return 0.5 * x**2*beta * arbuzov.fnlo(x, z)
elif o == 1:
return 0.5 * x**2*beta * arbuzov.gnlo(x, z)
f0 = dointegral(lambda x: inth0(x, 0))
g0 = dointegral(lambda x: inth0(x, 1))
f1 = dointegral(lambda x: inth1(x, 0))
g1 = dointegral(lambda x: inth1(x, 1))
Load McMule data¶
setup(folder='meg-2d/out.tar.bz2', cachefolder='/tmp/mcmule/')
Helper functions¶
These functions load the full dataset for a given sign
def lfunc0(s):
return [
scaleset(mergefks(
sigma('m2enn0', obs=s+obs)
), 1/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc1(s):
return [
scaleset(mergefks(
sigma('m2ennR', obs=s+obs),
sigma('m2ennF', obs=s+obs)
), pi**1/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc2g(s):
return [
scaleset(mergefks(
sigma(
'm2ennRF', obs=s+obs,
sanitycheck=lambda v:abs(v['value'])[0] < 1e10
),
sigma('m2ennRR', obs=s+obs),
sigma('m2ennFF', obs=s+obs, folder='meg-2d-nfpart/out.tar.bz2')
), pi**2/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc2z(s):
return [
scaleset(mergefks(
sigma(
'm2ennRF', obs=s+obs,
sanitycheck=lambda v:abs(v['value'])[0] < 1e10
),
sigma('m2ennRR', obs=s+obs),
sigma('m2ennFFz', obs=s+obs, folder='meg-2d-nfpart/out.tar.bz2')
), pi**2/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc2had(s):
return [
scaleset(mergefks(
sigma('m2ennNF', obs=s+obs, folder='meg-2d-had/out.tar.bz2')
), pi**2/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc2lep(s):
return [
scaleset(mergefks(
sigma('m2ennNF', obs=s+obs, folder='meg-2d-nfpart/out.tar.bz2')
), pi**2/gamma0)
for obs in ['1', '2', '3', '4']
]
def lfunc2vp2(s):
return [
scaleset(mergefks(
sigma('m2ennNF', obs=s+obs, folder='meg-2d-nferr/out.tar.bz2')
), pi**3/gamma0)
for obs in ['1', '2', '3', '4']
]
This function merges the different runs for one sign into one large run that covers the full spectrum
def mergesign(lfunc, s):
sets = lfunc(s)
dic = {
'value': plusnumbers([i['value'] for i in sets]),
'time': __builtin__.sum(i['time'] for i in sets)
}
for i in range(4):
dic['Ee%d' % (i+1)] = sets[i]['Ee%d' % (i+1)][1:-1]
return dic
Assemble $f$ and $g$¶
We can now build $f$ and $g$ as \begin{align} f &= \frac{\Gamma_- + \Gamma_+}{2}\,,\\ g &= \frac{-\Gamma_- + \Gamma_+}{0.85} \end{align}
def getfandg(lfunc):
r1 = mergesign(lfunc, '1')
r2 = mergesign(lfunc, '2')
espec1 = np.concatenate((r1['Ee1'], r1['Ee2'], r1['Ee3'], r1['Ee4']))[mask]
espec2 = np.concatenate((r2['Ee1'], r2['Ee2'], r2['Ee3'], r2['Ee4']))[mask]
jac = Mmu / 2
return (
[1/jac,jac,jac] * addplots(espec1, espec2, sa=0.5, sb=0.5),
[1/jac,jac,jac] * addplots(espec1, espec2, sa=-1/0.85, sb=1/0.85),
r1['time'] + r2['time']
)
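As a toy cross-check of these combinations (made-up numbers, independent of any data), we can build $\Gamma_\pm$ from known $F$ and $G$ and recover them
Ptoy = 0.85
Ftoy, Gtoy = 0.7, -0.3                          # arbitrary test values
GamM = Ftoy - Ptoy*Gtoy/2                       # integral over -1 < cos(theta) < 0
GamP = Ftoy + Ptoy*Gtoy/2                       # integral over  0 < cos(theta) < 1
print((GamM + GamP)/2, (-GamM + GamP)/Ptoy)     # recovers Ftoy and Gtoy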
f0MM, g0MM, t0 = getfandg(lfunc0)
f1MM, g1MM, t1 = getfandg(lfunc1)
At NNLO we split our expression into the photonic contributions $h_n^\gamma$ and the fermionic contributions $h_n^{\rm vp}$ (h2vp). For the former, we have performed the calculation once with full $z$ dependence (h2g) and once using massification (h2z).
f2g, g2g, t2g = getfandg(lfunc2g)
f2z, g2z, t2z = getfandg(lfunc2z)
The latter are calculated by inserting a VP function $\Pi_i(q^2)$ into the loop. We have used the following $\Pi$
- $\Pi_{\rm had}$: hadronic
- $\Pi^{(1)}_\ell$: one-loop leptonic for $\ell=e,\mu,\tau$
- $\Pi^{(2)}_\ell$: two-loop leptonic for $\ell=e,\mu,\tau$
This split is necessary to estimate the errors
f2lep, g2lep, tlep = getfandg(lfunc2lep)
f2had, g2had, thad = getfandg(lfunc2had)
f2vp2, g2vp2, tvp2 = getfandg(lfunc2vp2)
f2vp2 = spline_smooth(mergebins(f2vp2, 20))
g2vp2 = spline_smooth(mergebins(g2vp2, 20))
$h_0$¶
twopanel(
'$x$',
labupleft='$f_0$', upleft=[f0, f0MM],
labdownleft=r'$1-\textsc{McMule}/{\rm analytic}$',
downleft=[divideplots(f0MM,f0,-1)[:-1]]
)
twopanel(
'$x$',
labupleft='$g_0$', upleft=[g0, g0MM],
labdownleft=r'$1-\textsc{McMule}/{\rm analytic}$',
downleft=[divideplots(g0MM,g0,-1)[:-1]]
)
ylim(-0.02,0.02)
$h_1$¶
twopanel(
'$x$',
labupleft='$f_1$', upleft=[f1, f1MM],
labdownleft=r'$1-\textsc{McMule}/{\rm analytic}$',
downleft=[divideplots(f1MM,f1,-1)[:-1]]
)
ylim(-0.002,0.002)
twopanel(
'$x$',
labupleft='$g_1$', upleft=[g1, g1MM],
labdownleft=r'$1-\textsc{McMule}/{\rm analytic}$',
downleft=[divideplots(g1MM,g1,-1)[:-1]]
)
ylim(-0.007,0.007)
$h_2$¶
We can take the logarithms from Arbuzov, sampled at the bin centres
f2logs = (0.5*(-2*Lz)**2*arbuzov.f2LL(x) + (-2*Lz)*arbuzov.f2NLL(x))/4
figure()
errorband(f2g)
plot(x, f2logs)
xlabel('$x$')
ylabel('$f_2$')
legend([r'$\textsc{McMule}$', 'cLL+cNLL'])
ylim(-70,230)
g2logs = (0.5*(-2*Lz)**2*arbuzov.g2LL(x) + (-2*Lz)*arbuzov.g2NLL(x))/4
figure()
errorband(g2g)
plot(x, g2logs)
xlabel('$x$')
ylabel('$g_2$')
legend([r'$\textsc{McMule}$', 'cLL+cNLL'])
ylim(-330,70)
Open Lepton contributions¶
So far we have considered only $\mu\to e\nu\bar\nu + n\gamma$. However, it might not always be possible to cleanly separate the open-lepton contributions $\mu\to e\nu\bar\nu ee$.
setup(folder='meg-2d-ol/out-LO.tar.bz2')
def funcEE(s):
return [
scaleset(mergefks(
sigma('m2ennee0', obs=s+obs)
), pi**2/gamma0)
for obs in ['1', '2', '3', '4']
]
f2EE, g2EE, t2EE = getfandg(funcEE)
It would also be interesting to see the NLO corrections to $\mu\to e\nu\bar\nu ee$. However, because these runs are much slower and, presumably, hardly matter, we use a coarse binning.
setup(folder='meg-2d-ol/out.tar.bz2', obs='')
EE2 = scaleset(mergefks(sigma('m2ennee0')), pi**2/gamma0)
EE3 = scaleset(mergefks(
sigma('m2enneeC'), sigma('m2enneeR'),
anyxiV = sigma('m2enneeV'), anyxiA = sigma('m2enneeA')
), pi**3/gamma0)
f2EEc = [2/Mmu, Mmu/2, Mmu/2] * addplots(EE2['Ep2p'], EE2['Ep2m'], sa=0.5, sb=0.5)
g2EEc = [2/Mmu, Mmu/2, Mmu/2] * addplots(EE2['Ep2p'], EE2['Ep2m'], sa=+1/1.00, sb=-1/1.00)
f3EEc = [2/Mmu, Mmu/2, Mmu/2] * addplots(EE3['Ep2p'], EE3['Ep2m'], sa=0.5, sb=0.5)
g3EEc = [2/Mmu, Mmu/2, Mmu/2] * addplots(EE3['Ep2p'], EE3['Ep2m'], sa=+1/1.00, sb=-1/1.00)
To combine everything we need to interpolate the coarse binning
f3EE = np.column_stack((
x,
spline(f3EEc[1:-1,0], f3EEc[1:-1,1])(x),
np.zeros(x.shape)
))
g3EE = np.column_stack((
x,
spline(g3EEc[1:-1,0], g3EEc[1:-1,1])(x),
np.zeros(x.shape)
))
Make plot¶
_, (ax1, ax2) = twopanel(
r'$x$',
upleft=[f2EE, f2EEc],
downleft=[g2EE, g2EEc],
labupleft="$f_2^{ee}$",
labdownleft="$g_2^{ee}$",
)
ax1.legend(['interpolated', 'exact'])
ax1.set_title("Open lepton contribution @ LO")
nmerge = 40
Kmass = divideplots(
mergebins(f2z, nmerge),
mergebins(f2g, nmerge),
offset=-1
)
(fig, (ax1, ax2)) = twopanel(
r'$x$',
upleft=[
mergebins(f2g, nmerge),
mergebins(f2z, nmerge),
np.column_stack((x, f2logs, np.zeros_like(x)))
],
labupleft=r"$f_2(x)$",
downleft=[Kmass[:11], Kmass[11:62], Kmass[62:]],
labdownleft=r"$\rm 1 - massified / full$",
coldownleft=['C0','C0','C0']
)
sca(ax1) ; ylim(-70,140)
axhline(0, color='black', linewidth=0.4, zorder=1)
sca(ax2) ; ylim(-.05, 0.07)
legend(ax1.lines, [
r'$\rm full$',
r'$\rm massified$',
r'$\rm logs$'
], loc='upper left')
axhline(0, color='black', linewidth=0.4, zorder=1)
mulify(fig)
V2, full NNLO¶
nmerge = 40
FfoZ = addplots(addplots(f0, f1, sb=a), f2z, sb=a**2)
FfoG = addplots(addplots(f0, f1, sb=a), f2g, sb=a**2)
Kmass = [1,1e5,1e5]*divideplots(
mergebins(FfoZ, nmerge),
mergebins(FfoG, nmerge),
offset=-1
)
(fig, (ax1, ax2)) = twopanel(
r'$x$',
upleft=[
mergebins(f2g, nmerge),
mergebins(f2z, nmerge),
np.column_stack((x, f2logs, np.zeros_like(x)))
],
labupleft=r"$f_2(x)$",
downleft=[Kmass],
labdownleft=r"$10^5\times\Big(1 - {\rm massified} / {\rm full}\Big)$",
coldownleft=['C0']
)
sca(ax1) ; ylim(-70,140)
axhline(0, color='black', linewidth=0.4, zorder=1)
sca(ax2) ; ylim(-3.5, 0.5)
legend(ax1.lines, [
r'$\rm full$',
r'$\rm massified$',
r'$\rm logs$'
], loc='upper left')
axhline(0, color='black', linewidth=0.4, zorder=1)
mulify(fig)
Soft logarithms¶
Ls = log(1+z**2-x)
The soft logarithms $L_s = \log(1+z^2-x)$ follow YFS exponentiation, allowing us to resum the leading $(aL_s)^n$ terms by writing \begin{align} h_{[0^+]}^{\gamma,s} = h_0 \exp(a c_s L_s) = h_0 (1+z^2-x)^{a c_s} \end{align}
def YFS(pref, o, exponent=a):
def intYFS(x):
beta = sqrt(1-4*z**2/x**2)
f0 = x**2*beta * (3-2*x+x/4*(3*x-4)*(1-beta**2))
g0 = x**2*beta * ((1-2*x)*beta + 3*x**2/4 * beta * (1-beta**2))
return [f0, g0][o] * pref * (1+z**2-x)** (exponent * cs)
return dointegral(intYFS)
We generally use the subscript $n^+$ to denote a contribution that contains terms of order $n$ or higher. Since these contributions are not proportional to a fixed power of $a$ we include the coupling directly. After exponentiation, this result is point-wise finite which is indicated by the square brackets.
The coefficient $c_s$ can be obtained from the endpoint limit of the NLO correction \begin{align} \lim_{x\to1+z^2} a\,h_1 \sim h_0(1+z^2)\times ac_sL_s = \begin{cases}+1&f\\-1&g\end{cases} \times ac_sL_s \end{align}
cs = -2. + (1.+z**2)/(-1.+z**2) * log(z**2)
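As a numerical illustration (arbitrary sample point near the endpoint), expanding the resummation factor in $a$ reproduces the fixed-order soft logarithms
xtest = 1 + z**2 - 1e-3
Lstest = log(1 + z**2 - xtest)
print((1 + z**2 - xtest)**(a*cs))              # resummed factor
print(1 + a*cs*Lstest + (a*cs*Lstest)**2/2)    # agrees up to O((a*cs*Ls)^3)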
Beyond NLO there are also further suppressed soft logarithms $a^nh_n \supset a^nL_s^j$ with $j < n$. To obtain them, we use \begin{align} h_{[n^+]}^{\gamma,s} = h_0 \ a^n k_n^\gamma(1+z^2-x)^{a c_s} \end{align} We can find the values of the $k_n^\gamma$ as we did for $c_s$. The $L_z^n$ and $L_z^{n-1}$ terms can be found analytically by using the collinear approximation (see below), writing \begin{align} k_n^\gamma = L_z^n k_{n,n}^\gamma + L_z^{n-1} k_{n,n-1}^\gamma + \cdots + k_{n,0}^\gamma \end{align}
klcoeff = np.array([
[1, 0],
[-3/2, -2],
[9./8. - 2*zeta2, 45./16. - 5*zeta2/2. - 3*zeta3],
[-9./16. + 3*zeta2 - 8*zeta3/3, -63./32. + 31*zeta2/4 - 7*zeta3/2],
[
27./128. - 9*zeta2/4. + 4*zeta3 + zeta4,
117./128. - 135*zeta2/16 + 335*zeta3/24 + 6*zeta2*zeta3 - 7*zeta4/2
]
])
klogs = (
klcoeff[:,0] * Lz**arange(0,len(klcoeff))
+ klcoeff[:,1] * Lz**arange(-1,len(klcoeff)-1)
)
With this, we can define \begin{align} h_{[n]}^\gamma = h_n^\gamma - \Bigg( \sum_{j\le n} h_{[j^+]}^{\gamma,s} \Bigg)_{a^n\text{ coeff}} = h_n^\gamma - h_0\sum_{i=0}^n k_{n-i}^\gamma \frac{c_s^iL_s^i}{i!} \end{align} This quantity goes to zero as $x\to(1+z^2)$.
The following function calculates $h_0 c_s^iL_s^i / i!$ if only i is given and the whole sum if n and k are given
from scipy.special import factorial as fac
def YFSsub(o, i=None, n=None, k=klogs):
if isinstance(i, int):
def intYFSsub(x):
beta = sqrt(1-4*z**2/x**2)
f0 = x**2*beta * (3-2*x+x/4*(3*x-4)*(1-beta**2))
g0 = x**2*beta * ((1-2*x)*beta + 3*x**2/4 * beta * (1-beta**2))
Ls = log(1+z**2-x)
return [f0, g0][o] * Ls**i * cs**i / fac(i)
return dointegral(intYFSsub)
elif isinstance(n, int) and isinstance(k, np.ndarray):
return combineNplots(addplots, [
[1,k[n-i],k[n-i]] * YFSsub(o, i=i)
for i in range(n+1)
])
elif isinstance(n, list) and isinstance(k, np.ndarray):
return combineNplots(addplots, [
k[n-i] * YFSsub(o, i=i)
for i in n
])
else:
raise ValueError
Finding $k_{2,0}^\gamma$¶
So far, we have only calculated the logarithms in $k_2^\gamma$. However, since we know the exact result for $h_2$, we can find $k_{2,0}^\gamma$. To do this, we calculate $h_{[2]}$ (h2f).
f2fpre = addplots(f2g, YFSsub(0, n=2), sb=-1)[:-1]
g2fpre = addplots(g2g, YFSsub(1, n=2), sb=-1)[:-1]
We now perform a fit up to $x^3$ to average as many bins as possible
def polyfit(data, col='red', n=3, nep=-1):
cf = 0.95
xdata = data[:,0]
ydata = data[:,1]
edata = data[:,2]
def fitfunc(p, x):
return __builtin__.sum(p[i] * x**i for i in range(len(p)))
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0] * (n+1)
coeff, covar, _, _, _ = optimize.leastsq(
errfunc, pinit,
args=(xdata, ydata, edata), full_output=1
)
tval = scipy.stats.t.ppf((cf+1)/2, len(data) - len(coeff))
xvec = xdata[nep]**np.arange(0,n+1)
delta = tval*np.sqrt(np.matmul(np.matmul(xvec, covar), xvec))
centre = __builtin__.sum(xvec * coeff)
plot(xdata, fitfunc(coeff, xdata), col)
return np.array([centre, delta])
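For illustration only, polyfit applied to synthetic data (with a known value at the last point) returns the fitted value and its confidence interval there; the toy names below are placeholders
rng = np.random.default_rng(seed=1)
xs = np.linspace(0.8, 1.0, 200)
ys = 2 - xs + 0.5*xs**3 + rng.normal(0, 0.01, len(xs))   # true value at x=1 is 1.5
toy = np.column_stack((xs, ys, np.full(len(xs), 0.01)))
figure()
print(polyfit(toy, 'blue'))                              # roughly [1.5, small interval]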
figure()
errorband(f2fpre)
errorband(g2fpre)
kestF = polyfit(f2fpre[2500:], 'red')
kestG = polyfit(g2fpre[3000:], 'green') * [-1,1]
xlabel('$x$')
ylabel(r'prel. $\pm h_{[2]}^{\gamma}$')
legend([
'$f_{[2]}^{\gamma}$',
'$-g_{[2]}^{\gamma}$',
rf'$k_{{2,0}}^\gamma = {printnumber(kestF)}$',
rf'$k_{{2,0}}^\gamma = {printnumber(kestG)}$'
])
We settle on $k_{2,0}^\gamma = -5(1)$
kest = [-5, 1]
kcoeff = klogs.copy()  # copy so that klogs itself is not modified below
kcoeff[2] += kest[0]
Calculating $h_{[1]}^\gamma$ and $h_{[2]}^\gamma$¶
We can now calculate $h_{[1]}^\gamma$ and $h_{[2]}^\gamma$
f1fg = addplots(f1 , YFSsub(0, n=1, k=kcoeff), sb=-1) ; f1fg[-1,1:]=0
g1fg = addplots(g1 , YFSsub(1, n=1, k=kcoeff), sb=-1) ; g1fg[-1,1:]=0
f2fg = addplots(f2g, YFSsub(0, n=2, k=kcoeff), sb=-1) ; f2fg[-1,1:]=0
g2fg = addplots(g2g, YFSsub(1, n=2, k=kcoeff), sb=-1) ; g2fg[-1,1:]=0
Since $k_{2,0}^\gamma$ has a large error, we need to propagate its contribution as well. We place it into the error of $h_{[2^+]}$ (see below).
Calculating $h_{[n^+]}^{\gamma,s}$¶
Since we have all the $k$ to a good enough approximation, we can now calculate $h_{[n^+]}^{\gamma,s}$ for $n=0,...,4$.
We need to account for the error due to $k_{2,0}^\gamma$. To do this, we calculate \begin{align} \delta h_{[2^+]}^{\gamma,s} = h_0 a^2 \delta k_2^\gamma (1+z^2-x)^{a c_s} \end{align}
df2pgs = YFS(a**2*kest[1], 0) ; dg2pgs = YFS(a**2*kest[1], 1)
f0pgs = YFS(a**0*kcoeff[0], 0) ; g0pgs = YFS(a**0*kcoeff[0], 1)
f1pgs = YFS(a**1*kcoeff[1], 0) ; g1pgs = YFS(a**1*kcoeff[1], 1)
f2pgs = YFS(a**2*kcoeff[2], 0) ; g2pgs = YFS(a**2*kcoeff[2], 1)
f3pgs = YFS(a**3*kcoeff[3], 0) ; g3pgs = YFS(a**3*kcoeff[3], 1)
f4pgs = YFS(a**4*kcoeff[4], 0) ; g4pgs = YFS(a**4*kcoeff[4], 1)
f2pgs[:,2] = sqrt(f2pgs[:,2]**2 + df2pgs[:,1]**2)
g2pgs[:,2] = sqrt(g2pgs[:,2]**2 + dg2pgs[:,1]**2)
Collinear logarithms¶
The leading (cLL) and next-to-leading (cNLL) collinear logarithms were sampled externally in Mathematica; we load the resulting grids here.
cll = FileRecord.read('cLL.vegas')
cnll = FileRecord.read('cNLL.vegas')
Note that the Mathematica sampling used a different definition of the scaled energy \begin{align} x' = \frac{M^2}{m^2+M^2}\ x \end{align} We will fix this later in the interpolation
xp = Mmu**2/(Mel**2 + Mmu**2) * x
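For orientation, the rescaling factor $M^2/(m^2+M^2) = 1/(1+z^2)$ deviates from one only at the $2\times 10^{-5}$ level
print(Mmu**2/(Mel**2 + Mmu**2), 1/(1 + z**2))   # both ~0.9999766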
def load_clogs(what, pref=1, slog=np.zeros(len(x))):
if what[1] == what[2]:
dic = cll
else:
dic = cnll
ans = np.concatenate([
dic["%s%d" % (what, i)][1:-1] for i in [1,2,3,4]
])[mask]
ans[:,0 ] *= 2/Mmu * Mmu**2/(Mel**2 + Mmu**2)
ans[:,1:] *= pref
ans[:,1 ] += slog
return ans
Soft subtraction¶
Since we use centre-bin sampling and the function is sharply peaked, interpolation is insufficient to remove the sampling bias. However, once we have $h_{[n]}$, this problem goes away.
Unfortunately, we cannot use the function defined above since it is meant for the total expression and does not expand in $L_z$ properly. We have \begin{align} \frac{h_n}{h_0} \sim (L_z/2)^n \sum_{i=0}^n k_{n-i,n-i}^\gamma \frac{(-2L_s)^i}{i!} -(L_z/2)^{n-1}\Bigg( \sum_{i=0}^n k_{n-i,n-i-1}^\gamma \frac{(-2L_s)^i}{i!} +(-2L_s) \sum_{i=0}^{n-1} k_{n-i-1,n-i-1}^\gamma \frac{(-2L_s)^i}{i!} \Bigg) \end{align}
def YFSCTcLL(x, n):
return -__builtin__.sum(
klcoeff[n-i, 0] * (-2*log(1-x))**i/fac(i)
for i in range(0, n+1)
) / (-2)**n
def YFSCTcNLL(x, n):
ans = __builtin__.sum(
klcoeff[n-i, 1] * (-2*log(1-x))**i/fac(i)
for i in range(0, n+1)
) + (-2*log(1-x)) * __builtin__.sum(
klcoeff[n-i-1, 0] * (-2*log(1-x))**i/fac(i)
for i in range(0, n)
)
return -ans / (-2)**(n-1)
def get_hf(what, oa, ol, dosmooth=True):
if ol == oa:
s = YFSCTcLL(xp, oa)
elif ol == oa - 1:
s = YFSCTcNLL(xp, oa)
if what == 'F':
s *= xp**2*(3-2*xp)
elif what == 'G':
s *= xp**2*(1-2*xp)
ans = load_clogs(f"{what}{oa}{ol}", pi**oa, s)
if dosmooth:
ans = spline_smooth(ans)
ans[:,2] = sqrt((ans[:,1] * get_smooth())**2 + ans[:,2]**2)
return ans
Smoothing and sub-bin sampling¶
The centre-bin sampling causes a systematic error that can be reduced using interpolation. However, it is not perfect and we need to be able to estimate the error.
To get a handle on this, we consider the first contribution where this is truly necessary, namely \begin{align} f_{[3]}^{\gamma,{\rm cLL}} = f_3^{\gamma,{\rm cLL}}- f_{[3^+]}^{\gamma,s,{\rm cLL}}\Big|_{a^3} = f_3^{\gamma,{\rm cLL}} + f_0\frac18\Big( k_{3,3}^\gamma - 2k_{2,2}^\gamma L_s + 2k_{1,1}^\gamma L_s^2 - \frac43 k_{0,0}^\gamma L_s^3 \Big) \end{align} and integrate it properly
def inth3(x, o):
s = YFSCTcLL(x,3)
if o == 0:
s *= x**2*(3-2*x)
cll = arbuzov.f3LL(x)
elif o == 1:
s *= x**2*(1-2*x)
cll = arbuzov.g3LL(x)
return cll / 8 / 6 + s
f3fcLL = dointegral(lambda e: inth3(e, 0))
g3fcLL = dointegral(lambda e: inth3(e, 1))
We still have a residual error (see below) that we model as \begin{align} \log_{10}\mathcal{E} = \begin{cases} -7 & E<10\,{\rm MeV}\\ -13 & 10\,{\rm MeV} < E < E_b\\ -10^b (E_p-E)^a & E_b < E \end{cases} \end{align} with the parameters \begin{align} a &= 1/7\,,\\ b &= 1.07\,,\\ E_b &= 48\,{\rm MeV}\,,\\ E_p &= \big(52.83 + 10^{-b/a}\, 2^{1/a}\big)\,{\rm MeV} \end{align}
def get_smooth(a=1./7., b=1.07, ep=52.83, d0=2):
ep += 10**(-b/a) * d0**(1/a)
eb = 48
smooth = np.zeros(len(E))
smooth[E<10] = 1e-7
smooth[(10 < E) & (E < eb)] = 1e-13
smooth[eb < E] = 10**(-10**(a*log10(ep-E[eb<E])+b))
return smooth
Verify¶
diff = np.array([
get_hf('F', 3,3, False)[:,1],
get_hf('F', 3,3, True )[:,1]
])
diff = abs(1 - diff / f3fcLL[:,1])
figure()
plot(x, diff[0])
plot(x, diff[1])
plot(x, get_smooth())
yscale('log')
xlabel("$x$")
ylabel(r"relative error in $f_{[3]}^{\gamma,{\rm cLL}}$")
legend(['no interpolation', 'interpolation', 'model'], loc=2)
xl = np.array(xlim())
twiny()
xlim(Mmu*xl/2)
xlabel(r"$E\,/\,{\rm MeV}$")
Load all¶
f22fg = get_hf('F', 2, 2) ; g22fg = get_hf('G', 2, 2)
f21fg = get_hf('F', 2, 1) ; g21fg = get_hf('G', 2, 1)
f33fg = get_hf('F', 3, 3) ; g33fg = get_hf('G', 3, 3)
f32fg = get_hf('F', 3, 2) ; g32fg = get_hf('G', 3, 2)
f44fg = get_hf('F', 4, 4) ; g44fg = get_hf('G', 4, 4)
f43fg = get_hf('F', 4, 3) ; g43fg = get_hf('G', 4, 3)
f2fgLogs = addplots(f22fg, f21fg, sa=(-2*Lz)**2, sb=(-2*Lz)**1)
f3fg = addplots(f33fg, f32fg, sa=(-2*Lz)**3, sb=(-2*Lz)**2)
f4fg = addplots(f44fg, f43fg, sa=(-2*Lz)**4, sb=(-2*Lz)**3)
g2fgLogs = addplots(g22fg, g21fg, sa=(-2*Lz)**2, sb=(-2*Lz)**1)
g3fg = addplots(g33fg, g32fg, sa=(-2*Lz)**3, sb=(-2*Lz)**2)
g4fg = addplots(g44fg, g43fg, sa=(-2*Lz)**4, sb=(-2*Lz)**3)
For the VP contribution, the corresponding coefficient $k_2^{\rm vp}$ reads
k2vp = (
12991/1296 - 53/9 * zeta2 - 1/3 * zeta3
) + (
535/108 + 11/18 * zeta2 + 1/3 * zeta3
+ (397/108+2/3*zeta2) * Lz + 25/18*Lz**2 + 2/9 * Lz**3
)
As before, we define \begin{align} h_{[2^+]}^{{\rm vp},s} = h_0\ a^2 k_2^{\rm vp}(1+z^2-x)^{a c_s} \end{align}
f2pvps = YFS(a**2*k2vp, 0) ; g2pvps = YFS(a**2*k2vp, 1)
and \begin{align} h_{[2]}^{\rm vp} = h_2^{\rm vp} - h_{[2^+]}^{{\rm vp},s}\Big|_{a^2} = h_2^{\rm vp} - h_0 k_2^{\rm vp} \end{align}
f2fvp = addplots(addplots(f2lep, f2had), f0, sb=-k2vp)
g2fvp = addplots(addplots(g2lep, g2had), g0, sb=-k2vp)
Error estimates¶
We consider three sources of error in the VP
- parametric error in $\Pi_{\rm had}$: we account for this by taking 5% of $|h_2^{\rm had}|$
df2had = abs(0.05*a**2 * f2had[:,1])
dg2had = abs(0.05*a**2 * g2had[:,1])
- multiple VP insertions: we model this by taking $\delta h_{[2^+]}^{\rm vp} = \tfrac{2}{3}\,aL_z\, |h_{[2^+]}^{{\rm vp},s}|$
dfvpmul = abs(2./3. * a * Lz * f2pvps[:,1])
dgvpmul = abs(2./3. * a * Lz * g2pvps[:,1])
- higher order VP insertions: we approximate this using the $\Pi_2$ insertion
dfvph = a**3*abs(f2vp2)[:,1]
dgvph = a**3*abs(g2vp2)[:,1]
EW effects¶
We just include the $\Delta r$ EW effects
Mw = 80.379e3
dr = 3 * Mmu**2 / (5 * Mw**2)
fEW = f0[:,1] * dr ; gEW = g0[:,1] * dr
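For orientation, $\Delta r$ is a very small correction
print(dr)   # ~1.0e-6, i.e. the EW effect shifts the LO spectrum at the 1e-6 level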
Total¶
We define our best prediction for the positron energy spectrum as \begin{align} h = h_{[0^+]}^s + \Big(h_{[1^+]}^{\gamma ,s} + a h_{[1]}^\gamma \Big) + \Big(h_{[2^+]}^{\gamma ,s} + a^2h_{[2]}^\gamma \Big) + \Big(h_{[2^+]}^{{\rm vp},s} + a^2h_{[2]}^{\rm vp} \Big) + a^3\Big(h_{[3 ]}^{\rm cLL } + h_{[3]}^{\rm cNLL}\Big) + a^2 h_2^{ee} \end{align} To remain as flexible as possible we treat $h_2^{ee}$ separately.
For the error we take \begin{align} \delta h = \delta h^{\rm MC} \oplus\Big( \delta h_2^{\rm had} + \delta h^s + \delta h^c + \delta h^{\rm vp} + \delta h^{\rm EW} \Big) \oplus \delta h^{ee} \end{align} where
- $\delta h_2^{\rm had}$: the parametric error discussed above
- $\delta h^s = |h_{[3^+]}^{\gamma,s}| \oplus \delta h_{[2^+]}^{\gamma,s}$
dfs = abs(f3pgs[:,1]) + abs(f2pgs[:,2])
dgs = abs(g3pgs[:,1]) + abs(g2pgs[:,2])
- $\delta h^c$
dfc = a**3*abs(f3fg[:,1])
dgc = a**3*abs(g3fg[:,1])
- $\delta h^{\rm vp}$
dfvp = abs(dfvpmul) + abs(dfvph)
dgvp = abs(dgvpmul) + abs(dgvph)
- $\delta h^{\rm EW} = \Delta r h_0$
dfEW = abs(f0[:,1] * dr)
dgEW = abs(g0[:,1] * dr)
# combine the error components linearly
def comb(*a):
return np.sum(a,0)
# alternative: take the envelope of the components (this definition overrides the previous one)
def comb(*a):
return np.max(a,0)
Fbest = np.column_stack((
x,
(
f0pgs[:,1] +
f1pgs[:,1] + a * f1fg[:,1] +
f2pgs[:,1] + a**2 * f2fg[:,1] +
f2pvps[:,1] + a**2 * f2fvp[:,1] +
a**3 * f3fg[:,1]
),
a**2 * f2EE[:,1],
sqrt(
f0pgs[:,2]**2 +
f1pgs[:,2]**2 + (a * f1fg[:,2])**2 +
f2pgs[:,2]**2 + (a**2 * f2fg[:,2])**2 +
f2pvps[:,2]**2 + (a**2 * f2fvp[:,2])**2 +
(a**3 * f3fg[:,2])**2
),
comb(df2had, dfs, dfc, dfvp, dfEW),
a**3 * f3EE[:,1]
))
Gbest = np.column_stack((
x,
(
g0pgs[:,1] +
g1pgs[:,1] + a * g1fg[:,1] +
g2pgs[:,1] + a**2 * g2fg[:,1] +
g2pvps[:,1] + a**2 * g2fvp[:,1] +
a**3 * g3fg[:,1]
),
a**2 * g2EE[:,1],
sqrt(
g0pgs[:,2]**2 +
g1pgs[:,2]**2 + (a * g1fg[:,2])**2 +
g2pgs[:,2]**2 + (a**2 * g2fg[:,2])**2 +
g2pvps[:,2]**2 + (a**2 * g2fvp[:,2])**2 +
(a**3 * g3fg[:,2])**2
),
comb(dg2had, dgs, dgc, dgvp, dgEW),
a**3 * g3EE[:,1]
))
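A usage sketch for these arrays (the combination below assumes that the $\oplus$ in the error formula above is quadratic and that the $a^3 h_3^{ee}$ column serves as $\delta h^{ee}$): column 0 is $x$, column 1 the prediction without the $ee$ channel, column 2 the $a^2 h_2^{ee}$ contribution, column 3 the MC error, column 4 the combined theory error and column 5 the $a^3 h_3^{ee}$ term
Ftot  = Fbest[:,1] + Fbest[:,2]
dFtot = sqrt(Fbest[:,3]**2 + Fbest[:,4]**2 + Fbest[:,5]**2)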
from pymule import mpl_axes_aligner
def mklegend(cols, style, labs, **kwargs):
legend(
[matplotlib.lines.Line2D([0], [0], color=c, linestyle=s) for c,s in zip(cols, style)],
labs,
**kwargs
)
def mkplot(y, *args):
plot(
np.append(x[:len(y)], 1.0000910954763456),
np.append(y, 0),
*args
)
def blackout(x0=20, x1=30):
y0,y1=ylim()
for k in ['top', 'left', 'bottom', 'right']:
gca().spines[k].zorder=20
gca().add_patch(matplotlib.patches.Rectangle((x0,y0), x1-x0, y1-y0, color='lightgrey',zorder=10, alpha=0.6))
$f$¶
fig, axs = plt.subplots(
5, sharex=True, gridspec_kw={'hspace': 0, 'height_ratios':[1,1,0.7,1,1]},
figsize=(5.9, 7.84)
)
axs = list(axs)
sca(axs[0])
mkplot(f0[:,1], 'C2')
mkplot(Fbest[:,1], 'C1')
mklegend(['C2', 'C1'], ['-','-'], ['LO', '{\sc McMule}'], loc=(0.31,0.05), ncol=2)
ylabel(r"$F$")
sca(axs[1])
mkplot(1e2*a*f1fg [:,1], 'C0')
mkplot(1e2* f1pgs[:,1], 'C0--')
gca().tick_params(labelcolor='C0', axis='y')
ylabel(r'$10^2\times f(n=1)$', color='C0')
mklegend(
['black', 'black'], ['--', '-'],
[r'$f_{[n^+]}^{\gamma,{\rm s}}$', r'$f_{[n]}^{\gamma}$'],
loc=2
)
axs.append(gca().twinx())
mkplot(-1e4*a**2*f2fg [:-1,1], 'C3')
mkplot(-1e4* f2pgs[: ,1], 'C3--')
gca().tick_params(labelcolor='C3', axis='y')
ylabel(r'$-10^4\times f(n=2)$', color='C3')
ylim(-.5,2)
mpl_axes_aligner.yaxes(axs[1], gca(), 0,0)
sca(axs[2])
mkplot(-1e4*a**2*f2fvp[:-1,1], "C3")
mkplot(-1e4*f2pvps[:,1], "C3--")
mkplot( 1e4*a**2*f2EE[:,1], "C0")
ylabel(r'$10^4\times f$')
mklegend(
['C3', 'C3', 'C0'], ['--', '-', '-'],
[
r'$-f_{[2^+]}^{{\rm vp},{\rm s}}$',
r'$-f_{[2]}^{\rm vp}$',
r'$f_2^{ee}$'
],
loc=(0.14,0.64), ncol=3
)
sca(axs[3])
mkplot(1e6* a**3*(-2*Lz)**3*f33fg[:,1], 'C0--')
mkplot(1e6* a**3*(-2*Lz)**2*f32fg[:,1], 'C0')
ylabel(r'$10^6\times f(n=3)$', color='C0')
axs.append(gca().twinx())
mkplot(1e8* a**4*(-2*Lz)**4*f44fg[:,1], 'C3--')
mkplot(1e8* a**4*(-2*Lz)**3*f43fg[:,1], 'C3')
ylabel(r'$10^8\times f(n=4)$', color='C3')
mpl_axes_aligner.yaxes(axs[3], gca(), 0,0)
draw()
ylim(axs[3].get_ylim())
mklegend(
['black']*2, ['--', '-'],
[r'$f_{[n]}^{\rm cLL}$', r'$f_{[n]}^{\rm cNLL}$'], ncol=2,loc=4
)
sca(axs[4])
mkplot(-1e6* a**3*f3fg [:-1,1], 'C3')
mkplot(-5e6* f3pgs[:,1], 'C3--')
ylim(-1.1, 0.8)
ylabel(r'$10^6\times f$')
mklegend(
['C3', 'C3'], ['--', '-'],
[r'$5\times f_{[3^+]}^{\gamma,{\rm s}}$', r'$f_{[3]}^{\gamma}$'],
loc=8, ncol=2
)
xlabel(r'$x = 2E/M$')
axs.append(axs[0].twiny())
xlabel(r'$E\,/\,{\rm MeV}$')
xlim([Mmu*i/2 for i in axs[0].get_xlim()])
fig.align_labels()
draw()
fig.tight_layout()
mulify(fig, delx=-.15, dely=-.05)
fig.savefig("plots/F.pdf")
$g$¶
fig, axs = plt.subplots(
5, sharex=True, gridspec_kw={'hspace': 0, 'height_ratios':[1,1,0.7,1,1]},
figsize=(5.9, 7.84)
)
axs = list(axs)
sca(axs[0])
mkplot(g0[:,1], 'C2')
mkplot(Gbest[:,1], 'C1')
mklegend(['C2', 'C1'], ['-','-'], ['LO', '{\sc McMule}'], loc=8, ncol=2)
ylabel(r"$G$")
sca(axs[1])
mkplot(1e2*a*g1fg [:,1], 'C0')
mkplot(1e2* g1pgs[:,1], 'C0--')
gca().tick_params(labelcolor='C0', axis='y')
ylabel(r'$10^2\times g(n=1)$', color='C0')
mklegend(
['black', 'black'], ['--', '-'],
[r'$g_{[n^+]}^{\gamma,{\rm s}}$', r'$g_{[n]}^{\gamma}$'],
loc=3, ncol=2
)
axs.append(gca().twinx())
mkplot(-1e4*a**2*g2fg [:-1,1], 'C3')
mkplot(-1e4* g2pgs[: ,1], 'C3--')
gca().tick_params(labelcolor='C3', axis='y')
ylabel(r'$-10^4\times g(n=2)$', color='C3')
mpl_axes_aligner.yaxes(axs[1], gca(), 0,0)
sca(axs[2])
mkplot(-1e4*a**2*g2fvp[:-1,1], "C3")
mkplot(-1e4*g2pvps[:,1], "C3--")
mkplot( 1e4*a**2*g2EE[:,1], "C0")
ylabel(r'$10^4\times g$')
mklegend(
['C3', 'C3', 'C0'], ['--', '-', '-'],
[
r'$-g_{[2^+]}^{{\rm vp},{\rm s}}$',
r'$-g_{[2]}^{\rm vp}$',
r'$g_2^{ee}$'
],
loc=3, ncol=3
)
sca(axs[3])
mkplot(1e6* a**3*(-2*Lz)**3*g33fg[:,1], 'C0--')
mkplot(1e6* a**3*(-2*Lz)**2*g32fg[:,1], 'C0')
ylabel(r'$10^6\times g(n=3)$', color='C0')
mklegend(
['black']*2, ['--', '-'],
[r'$g_{[n]}^{\rm cLL}$', r'$g_{[n]}^{\rm cNLL}$'], ncol=2,loc=2
)
axs.append(gca().twinx())
mkplot(1e8* a**4*(-2*Lz)**4*g44fg[:,1], 'C3--')
mkplot(1e8* a**4*(-2*Lz)**3*g43fg[:,1], 'C3')
ylabel(r'$10^8\times g(n=4)$', color='C3')
mpl_axes_aligner.yaxes(axs[3], gca(), 0,0)
draw()
sca(axs[4])
mkplot(-1e6* a**3*g3fg [:-1,1], 'C3')
mkplot(-5e6* g3pgs[:,1], 'C3--')
ylim(-0.8, 6.0)
ylabel(r'$10^6\times g$')
mklegend(
['C3', 'C3'], ['--', '-'],
[r'$5\times g_{[3^+]}^{\gamma,{\rm s}}$', r'$g_{[3]}^{\gamma}$'],
loc=2
)
xlabel(r'$x = 2E/M$')
axs.append(axs[0].twiny())
xlabel(r'$E\,/\,{\rm MeV}$')
xlim([Mmu*i/2 for i in axs[0].get_xlim()])
fig.align_labels()
draw()
fig.tight_layout()
mulify(fig, delx=-.075, dely=.04)
fig.savefig("plots/G.pdf")
Error budget¶
fig, axs = plt.subplots(
2, sharex=True, gridspec_kw={'hspace': 0},
figsize=(5.9, 7.84)
)
axs = list(axs)
sca(axs[0])
plot(x, dfs / abs(Fbest[:,1]), 'C0')
plot(x, dfc / abs(Fbest[:,1]), 'C1')
plot(x, dfvp / abs(Fbest[:,1]), 'C2')
plot(x, df2had/ abs(Fbest[:,1]), 'C2--')
plot(x, dfEW / abs(Fbest[:,1]), 'C3')
ylim(2e-10,4e-5)
yscale('log')
ylabel('$\delta F/F$')
sca(axs[1])
plot(x, dgs / abs(Gbest[:,1]), 'C0')
plot(x, dgc / abs(Gbest[:,1]), 'C1')
plot(x, dgvp / abs(Gbest[:,1]), 'C2')
plot(x, dg2had/ abs(Gbest[:,1]), 'C2--')
plot(x, dgEW / abs(Gbest[:,1]), 'C3')
yscale('log')
ylim(2e-10,4e-5)
ylabel('$\delta G/G$')
blackout(2*20/Mmu, 2*30/Mmu)
legend([
r"$\delta h^s$",
r"$\delta h^c$",
r"$\delta h^{\rm vp}$",
r"$\delta h^{\rm had}$",
r"$\delta h^{\rm EW}$",
])
xlim(Mel/Mmu, 1.01)
xlabel(r'$x = 2E/M$')
axs.append(axs[0].twiny())
xlabel(r'$E\,/\,{\rm MeV}$')
xlim([Mmu*i/2 for i in axs[0].get_xlim()])
draw()
fig.tight_layout()
mulify(fig, delx=3.9, dely=2.2)
fig.savefig("plots/error.pdf")
Export¶
np.savetxt("Fbest.csv", Fbest)
np.savetxt("Gbest.csv", Gbest)
Previous results¶
To better compare with old results, let us define the previous best result as \begin{align} h &= h_0 + ah_1 + a^2 h_2 + a^2 h_2^{ee}\\ \delta h &= \delta h^{\rm MC} \oplus (a^2 h_2) \oplus \delta h^{ee} \end{align} To remain as flexible as possible we treat $h_2^{ee}$ separately.
np.savetxt("Fnnlo.csv", np.column_stack((
x,
(
f0[:,1] + a * f1[:,1] + a**2 * f2g[:,1]
),
a**2 * f2EE[:,1],
sqrt(
f0[:,2]**2 + (a * f1[:,2])**2 + (a**2 * f2g[:,2])**2
),
a**2 * f2g[:,1],
a**3 * f3EE[:,1]
)))
np.savetxt("Gnnlo.csv", np.column_stack((
x,
(
g0[:,1] + a * g1[:,1] + a**2 * g2g[:,1]
),
a**2 * g2EE[:,1],
sqrt(
g0[:,2]**2 + (a * g1[:,2])**2 + (a**2 * g2g[:,2])**2
),
a**2 * g2g[:,1],
a**3 * g3EE[:,1]
)))
np.savetxt("Fnlo.csv", np.column_stack((
x,
(
f0[:,1] + a * f1[:,1]
),
np.zeros(f2EE[:,1].shape),
sqrt(
f0[:,2]**2 + (a * f1[:,2])**2
),
a**2 * f2g[:,1],
np.zeros(f2EE[:,1].shape),
)))
np.savetxt("Gnlo.csv", np.column_stack((
x,
(
g0[:,1] + a * g1[:,1]
),
np.zeros(f2EE[:,1].shape),
sqrt(
g0[:,2]**2 + (a * g1[:,2])**2
),
a**2 * g2g[:,1],
np.zeros(f2EE[:,1].shape),
)))