From 217676c8a8c8b550f4925fb9b5744823c6613c56 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 7 Aug 2024 08:41:53 +0000 Subject: [PATCH] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- docs/lecture02.ipynb | 7 +- docs/lecture06-gammap.ipynb | 12 +- docs/lecture09.ipynb | 133 ++--- docs/lecture12a-gammap-solution.ipynb | 12 +- docs/lecture12b-kinematics.ipynb | 22 +- docs/lecture12c-angles.ipynb | 11 +- docs/lecture12d-phasespace.ipynb | 3 +- docs/lecture12e-longitudinal.ipynb | 7 +- docs/lecture17.ipynb | 8 +- docs/lecture24.ipynb | 702 ++++---------------------- 10 files changed, 162 insertions(+), 755 deletions(-) diff --git a/docs/lecture02.ipynb b/docs/lecture02.ipynb index 1c62bf8..54518c1 100644 --- a/docs/lecture02.ipynb +++ b/docs/lecture02.ipynb @@ -65,9 +65,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "filename = gdown.cached_download(\n", @@ -99,8 +97,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ diff --git a/docs/lecture06-gammap.ipynb b/docs/lecture06-gammap.ipynb index 5fd7dc8..f2d6636 100644 --- a/docs/lecture06-gammap.ipynb +++ b/docs/lecture06-gammap.ipynb @@ -98,18 +98,14 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "## Invariant mass distributions" ] }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "### Measured data" ] @@ -324,9 +320,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "### Monte Carlo data" ] diff --git a/docs/lecture09.ipynb b/docs/lecture09.ipynb index be9806e..f5df3ab 100644 --- a/docs/lecture09.ipynb +++ b/docs/lecture09.ipynb @@ -51,8 +51,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Estimator Definition" @@ -290,9 +289,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# Selected properties of the population M1-> Mean S1**2 is the variance\n", @@ -399,8 +396,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -424,8 +420,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -464,8 +459,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Method of Moments (MoM)" @@ -637,8 +631,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Maximum Likelihood" @@ -678,8 +671,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -699,8 +691,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -1109,8 +1100,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Properties of the ML estimator" @@ -1339,9 +1329,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "## Fitting data – Method of least 
squares" ] @@ -1349,8 +1337,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Introduction" @@ -1406,8 +1393,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### χ² PDF" @@ -1501,8 +1487,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Linear least squares" @@ -1587,8 +1572,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -1666,9 +1650,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "#### Uncertainties and covariances for straight-line fit" ] @@ -1796,8 +1778,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Linear models" @@ -1930,8 +1911,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Non-linear model" @@ -1954,9 +1934,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def func(x, a):\n", @@ -2117,8 +2095,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Residuals" @@ -2245,8 +2222,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Fitting Binned data: why a LSE might be biased here..." 
@@ -2262,9 +2238,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "np.random.seed(32)\n", @@ -2278,9 +2252,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "data_x = x[y > 0]\n", @@ -2291,9 +2263,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# We generate noisy data on integer values of x to mimick the idea of bins and low-statistics...\n", @@ -2314,9 +2284,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def func(x, a, b):\n", @@ -2326,9 +2294,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "gmodel = Model(func)\n", @@ -2368,9 +2334,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def chi2(a, b):\n", @@ -2383,9 +2347,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "grid_size = 500\n", @@ -2402,8 +2364,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -2448,9 +2409,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def like(param):\n", @@ -2464,9 +2423,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "grid_size = 500\n", @@ -2505,9 +2462,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "p0 = [10, -0.3]\n", @@ -2553,8 +2508,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Main exercise" @@ -2714,9 +2668,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "output_path = gdown.cached_download(\n", @@ -2886,8 +2838,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Extended Max Likelihood" @@ -3000,9 +2951,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "output_path = gdown.cached_download(\n", @@ -3053,9 +3002,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def llog(params):\n", @@ -3066,9 +3013,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "starting_values = [2000, 1.12, 0.01]\n", @@ -3086,9 +3031,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# We can plot the Gaussian function on top of our histogram. 
We need to scale the Gaussian function\n", diff --git a/docs/lecture12a-gammap-solution.ipynb b/docs/lecture12a-gammap-solution.ipynb index e0f70ed..0db677b 100755 --- a/docs/lecture12a-gammap-solution.ipynb +++ b/docs/lecture12a-gammap-solution.ipynb @@ -98,18 +98,14 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "## Invariant mass distributions" ] }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "### Measured data" ] @@ -388,9 +384,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "### Monte Carlo data" ] diff --git a/docs/lecture12b-kinematics.ipynb b/docs/lecture12b-kinematics.ipynb index 8d03eb2..f77b1f4 100644 --- a/docs/lecture12b-kinematics.ipynb +++ b/docs/lecture12b-kinematics.ipynb @@ -97,9 +97,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# final state\n", @@ -118,9 +116,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "E_nbar = np.sqrt(m_neutron**2 + data.pnbar**2)\n", @@ -131,9 +127,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "system12 = pi1 + pi2\n", @@ -158,8 +152,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -249,9 +242,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "s_lab = cm.m2\n", @@ -504,8 +495,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ diff --git a/docs/lecture12c-angles.ipynb b/docs/lecture12c-angles.ipynb index f718734..9dd483f 100644 --- a/docs/lecture12c-angles.ipynb +++ b/docs/lecture12c-angles.ipynb @@ -31,8 +31,7 @@ }, "mystnb": { "code_prompt_show": "Import Python libraries" - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -81,9 +80,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# final state\n", @@ -103,9 +100,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# 4-momenta in the scenter of mass system\n", diff --git a/docs/lecture12d-phasespace.ipynb b/docs/lecture12d-phasespace.ipynb index 47e3ccd..aaaee3b 100644 --- a/docs/lecture12d-phasespace.ipynb +++ b/docs/lecture12d-phasespace.ipynb @@ -441,8 +441,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ diff --git a/docs/lecture12e-longitudinal.ipynb b/docs/lecture12e-longitudinal.ipynb index 99a2d36..798d39d 100644 --- a/docs/lecture12e-longitudinal.ipynb +++ b/docs/lecture12e-longitudinal.ipynb @@ -56,8 +56,7 @@ "metadata": { "jupyter": { "source_hidden": true - }, - "tags": [] + } }, "outputs": [], "source": [ @@ -208,9 +207,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "Try with another input file (see [here](https://drive.google.com/drive/folders/1-Knh70_vLuctCkcg9oIIIIBaX2EPYkea) on Google Drive, for instance the generated MonteCarlo events at 5 or 10 GeV incident momentum) and check the differences. 
The whole folder can be downloaded as follows:\n", "\n", diff --git a/docs/lecture17.ipynb b/docs/lecture17.ipynb index 8e14605..c2c02a6 100755 --- a/docs/lecture17.ipynb +++ b/docs/lecture17.ipynb @@ -62,9 +62,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "## Riemann sheets" ] @@ -281,9 +279,7 @@ }, { "cell_type": "markdown", - "metadata": { - "tags": [] - }, + "metadata": {}, "source": [ "## Definition of the G(s) functions" ] diff --git a/docs/lecture24.ipynb b/docs/lecture24.ipynb index ce67cb4..6140868 100644 --- a/docs/lecture24.ipynb +++ b/docs/lecture24.ipynb @@ -2,11 +2,7 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "# Lecture 24 – Maximum Likelihood" ] @@ -15,9 +11,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "slideshow": { - "slide_type": "skip" - }, "tags": [ "remove-cell" ] @@ -30,11 +23,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "::::{dropdown} Legend\n", "\n", @@ -51,11 +40,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "First we need to mount your drive directory and load some dependencies. Make sure your google drive system is configured\n", "\n", @@ -82,9 +67,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "remove-cell" ] @@ -103,11 +85,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "\n", "\n", @@ -121,9 +99,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-cell" ] @@ -149,11 +124,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Dealing with backgrounds" @@ -169,9 +140,6 @@ "mystnb": { "code_prompt_show": "Import Python libraries" }, - "slideshow": { - "slide_type": "skip" - }, "tags": [ "hide-cell" ] @@ -185,11 +153,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Toy Dataset" @@ -198,11 +162,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "First we will import a simple toy data set for testing techniques on. This consists of 3 variables : mass, phi, and state. These will be loaded into 3 numpy arrays.\n", @@ -221,9 +181,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -260,11 +217,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now we can plot each variable to see what we have. 
We will draw all, signal and background in different colours.\n", "\n", @@ -280,9 +233,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -303,11 +253,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now phi" ] @@ -319,9 +265,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -340,11 +283,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "The phi distributions were flat as we summed over polarisation state h. Lets select only +ve h, i.e. +ve state" ] @@ -356,9 +295,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -375,11 +311,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "Now we see we have cos2ϕ distributions with signal having a larger asymmetry and background having a different signed asymmetry." ] @@ -387,11 +319,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Extended Maximum Likelihood Fit of Signal ϕ distribution" @@ -399,11 +327,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "Lets try some likelihood fits to see if we can estimate $\\Sigma$ for the signal. For simplicity we will assume only a single +ve polarisation state with degree of linear polarisation = 1.\n", "\n", @@ -427,11 +351,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "def PhotonAsymmetryPDF(xphi, Sigma):\n", @@ -451,11 +371,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "And lets plot this on the signal data" ] @@ -463,12 +379,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - }, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "# Filter +ve polarisation signal events\n", @@ -483,11 +394,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Now to estimate the height of the function for the histogram, I know I have approximaely 15,000 events and 100 bins. So average height = 150. So I might guess I can just multiply my function by 150. But if I am using the PDF I must also scale by the $2\\pi$ factor.\n", "\n", @@ -501,9 +408,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -524,11 +428,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "So the guess was reasonable. 
But lets find the best values using Maxmimum Likelihood.\n", "\n", @@ -538,12 +438,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - }, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "from iminuit import Minuit\n", @@ -552,11 +447,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now I need to give Minuit a function to minimise. This function is the negative log likelihood for my PDF summing over the data\n", "\n", @@ -568,12 +459,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - }, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "Ndata = sig_pos_phi.size\n", @@ -588,11 +474,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "And then I can perform the migrad algorithm. Here I will also use the hesse method for better uncertainty estimation.\n", "\n", @@ -602,12 +484,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "mi.migrad()\n", @@ -625,11 +502,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "I should have got a value close to (within 1 or 2 standard deviations) of $\\Sigma = -0.8$.\n", "\n", @@ -643,9 +516,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -665,11 +535,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "This looks good.\n", "\n", @@ -679,11 +545,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Signal and Background fit to Mass distribution" @@ -691,11 +553,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "In this example my discriminatory variable is going to be the mass distribution. This has distinct signal (Gaussian) and background (Unknown) components." ] @@ -703,11 +561,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "skip" - } - }, + "metadata": {}, "outputs": [], "source": [ "from scipy.stats import norm" @@ -716,11 +570,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Signal Distribution" @@ -728,11 +578,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "To define my signal PDF I can start from scipy norm distribution. However remember the PDF normalisation integral must be calculated within the fit ranges. 
Therefore I only need the integral of the distribution within these bounds rather than -∞ to ∞\n", "\n", @@ -746,9 +592,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -770,11 +613,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Here we are going to use Extended Maximum Likelihood for our fits as it is required for the sPlot covariance matrix.\n", "\n", @@ -788,9 +627,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -817,11 +653,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "And do the fit with `iminuit`:" ] @@ -830,9 +662,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -867,11 +696,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "I should get a mean around 1.1, width around 0.02 and a signal yield of around my number of data events ~15,000.\n", "\n", @@ -887,9 +712,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -912,11 +734,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Background Distribution" @@ -924,11 +742,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "Often background distributions will not have a well defined PDF shape. As a proxy polynomial distributions can be used instead. This is likely to induce some systematic uncertainty in the final results due to mismodelling of the discriminatory variable fit, which should be investigated/estimated.\n", "\n", @@ -944,9 +758,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "scroll-input", "hide-input" @@ -993,22 +804,14 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Watch out for -ve PDF values, these are not allowed and will screw up the minimisation. In practise we should edit our function to protect against this. Here it turn out not to be an issue, (we fix c0=1) but in general IT WILL BE!" 
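The warning above about negative PDF values is worth making concrete. One simple protection — sketched here with an assumed fit range and function names that only loosely mirror the notebook's polynomial background, not its actual implementation — is to floor the polynomial at a tiny positive value before normalising it and taking its log, so the minimiser is steered away from unphysical coefficient combinations instead of crashing on the log of a negative number:

```python
# Sketch of a guarded polynomial background PDF (fit range and names are assumptions).
import numpy as np

FIT_MIN, FIT_MAX = 0.95, 1.30                # assumed mass fit range
_grid = np.linspace(FIT_MIN, FIT_MAX, 2001)  # grid for the numerical normalisation

def poly_shape(x, c0, c1, c2, c3, c4):
    # c0 + c1 x + c2 x^2 + c3 x^3 + c4 x^4 in Horner form
    return c0 + x * (c1 + x * (c2 + x * (c3 + x * c4)))

def safe_bg_pdf(x, c0, c1, c2, c3, c4):
    vals = np.clip(poly_shape(np.asarray(x, float), c0, c1, c2, c3, c4), 1e-10, None)
    # uniform-grid average is a good-enough normalisation integral for a sketch
    norm = np.clip(poly_shape(_grid, c0, c1, c2, c3, c4), 0.0, None).mean() * (FIT_MAX - FIT_MIN)
    return vals / max(norm, 1e-10)

# log(safe_bg_pdf(...)) stays finite, so a migrad step into a region where the
# polynomial dips below zero is penalised rather than producing NaNs.
```

An alternative is to have the NLL return a large constant penalty whenever the polynomial goes negative anywhere on the fit range.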
] }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now define my function for `ExtendedUnbinnedNLL`" ] @@ -1016,11 +819,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "def BgMassNExt(xmass, c0, c1, c2, c3, c4, Nb):\n", @@ -1035,9 +834,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -1085,11 +881,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "And draw the fit result with the data" ] @@ -1101,9 +893,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -1124,22 +913,14 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "#### Simulated Signal and Background PDFs" ] }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Aside : It is not always the case that a signal peak will be well fitted by a Gaussian distribution, or that a background can be easily constrained to a polynomial. In general simulations may give the best approximate PDF shapes for your event types.\n", "\n", @@ -1149,11 +930,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Joint fit to 1D Signal and Background" @@ -1161,11 +938,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "We now have our signal and background PDF functions ($p_s$ and $p_b$) which we can use in Extended Maximim Likelihood fits. 
To proceed we must first combine this into a single distribution for fitting i.e.\n", "\n", @@ -1180,11 +953,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "def CombinedMassNExt(xmass, smean, swidth, bc0, bc1, bc2, bc3, bc4, Ys, Yb):\n", @@ -1197,11 +966,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "First make our combined data sets" ] @@ -1209,11 +974,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "# Filter all +ve polarisation events\n", @@ -1225,11 +986,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Now run `iminuit` with `ExtendedUnbinnedNLL` on our combined PDF and dataset" ] @@ -1238,9 +995,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -1297,11 +1051,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "And plot the fit result" ] @@ -1310,9 +1060,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -1344,11 +1091,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "The fit should be pertty good although the polynomial background does not get such a good job close to threshold, if that is included in the range (Should be at 0.8). If the fit was succesfull then the results can now be used to generate weights for background subtraction (e.g. Sidebands or sPlots)." ] @@ -1356,11 +1099,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## sPlots" @@ -1368,11 +1107,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "We are going to use the sweights library provided by https://sweights.readthedocs.io/en/latest/about.html\n", "This is related to the referenced paper on Custom Orthogonal Weight functions, of which sPlots is a specific case of. 
https://www.sciencedirect.com/science/article/pii/S0168900222006076?via%3Dihub\n", @@ -1385,11 +1120,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "from sweights import SWeight # for classic sweights" @@ -1397,11 +1128,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Remember the sWeights formula,\n", "\n", @@ -1424,11 +1151,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# create best value PDFs for signal and background\n", @@ -1451,11 +1174,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Plot the weights" ] @@ -1467,9 +1186,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -1499,11 +1215,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "> _Discuss if these weights look reasonable. Why are Signal Weights <0 in some regions?_\n", "\n", @@ -1517,9 +1229,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -1545,11 +1254,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "> _Investigate or discuss if these distributions and error bars look OK. Which are the correct error bars for the weighted data, blue or orange? Should weighted data have same, smaller or larger error bars than true signal events?_\n", "\n", @@ -1562,11 +1267,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "> _Lets consider the full mass range in the data file (draw with the next cell). We have restricted it in our test data and fits so far. Why might we a) restrict the range? b) use as large a range as we can?._" ] @@ -1574,11 +1275,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "fullrange = np.linspace(0.8, 1.4, 100)\n", @@ -1588,11 +1285,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Inclusion of Weights in Maximum Likelihood" @@ -1600,22 +1293,14 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "We will again fit the Photon Asymmetry to extract $\\Sigma$, but now we will disentangle the signal response using the sWeights in the fit." 
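Before handing the bookkeeping to the `sweights` package in the next cells, it can be instructive to write the classic sPlot weight formula quoted above out by hand. The following standalone sketch uses a toy Gaussian signal, a flat background, and assumed yields (none of these numbers come from the notebook fit), and checks that the signal weights sum back to the signal yield:

```python
# Sketch: classic sPlot weights computed "by hand" for a toy sample.
#   w_s(m) = (V_ss p_s(m) + V_sb p_b(m)) / (N_s p_s(m) + N_b p_b(m)),   V = W^{-1},
#   W_ij   = sum over events of p_i(m_e) p_j(m_e) / (N_s p_s(m_e) + N_b p_b(m_e))^2
import numpy as np
from scipy.stats import norm, uniform

rng = np.random.default_rng(1)
LO, HI = 0.95, 1.30                      # assumed discriminating-variable range
Ns, Nb = 15000, 30000                    # assumed fitted yields
mass = np.concatenate([
    rng.normal(1.1, 0.02, Ns),           # toy signal component
    rng.uniform(LO, HI, Nb),             # toy background component
])

def p_s(m):                              # signal PDF, normalised on [LO, HI]
    g = norm(1.1, 0.02)
    return g.pdf(m) / (g.cdf(HI) - g.cdf(LO))

def p_b(m):                              # flat background PDF on [LO, HI]
    return uniform(LO, HI - LO).pdf(m)

ps, pb = p_s(mass), p_b(mass)
denom = Ns * ps + Nb * pb
W = np.array([[np.sum(ps * ps / denom**2), np.sum(ps * pb / denom**2)],
              [np.sum(pb * ps / denom**2), np.sum(pb * pb / denom**2)]])
V = np.linalg.inv(W)
w_signal = (V[0, 0] * ps + V[0, 1] * pb) / denom

# Sanity check: the signal weights should sum to (approximately) the signal yield.
print(w_signal.sum())
```

This is essentially the computation that the `SWeight` class used below automates, with additional checks and support for more than two components.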
] }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "Here we need to define our own loss function where the log likelihood values are weighted with our background subtraction weights.\n", "\n", @@ -1629,11 +1314,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# NLL for maximum likelihood with weights\n", @@ -1649,11 +1330,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "And perform the fit with `iminuit`:" ] @@ -1662,9 +1339,6 @@ "cell_type": "code", "execution_count": null, "metadata": { - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -1698,11 +1372,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "> _How do the results and uncertainties compare to the signal only fitting? Does this seem reasonable?_\n", "\n" @@ -1711,11 +1381,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "outputs": [], "source": [ "print(\"best value for signal only data Sigma = \", bestSigma, \"+-\", bestSigmaErr)" @@ -1723,11 +1389,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Now we can plot the fit result with the background subtracted data." ] @@ -1739,9 +1401,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "fragment" - }, "tags": [ "hide-input" ] @@ -1766,11 +1425,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Uncertainties in Weighted Maximum Likelihood fits" @@ -1778,11 +1433,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "In general correctly accounting for the effect of the weights on the uncertainties is a non-trivial task. For in-depth discussion of the asymptotically correct method see \"Parameter uncertainties in weighted unbinned maximum likelihood fits\", Langenbruch, https://epjc.epj.org/articles/epjc/abs/2022/05/10052_2022_Article_10254/10052_2022_Article_10254.html\n", "\n", @@ -1798,11 +1449,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# NLL for maximum likelihood with weights\n", @@ -1821,11 +1468,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "nllw2w = lambda Sigma: PhotonAsymWeightedW2WNLL(Sigma, pos_phi, sig_weights)\n", @@ -1847,11 +1490,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "As mentioned this may be a reasonable approximation (or not) but it does not fully propogate the uncertainty. For example the uncertainty on our signal PDF parameters (mean and width) are not incorporated. Next we consider bootstrapping, which is a more time-consuming, but more robust method, for determining uncertainties as sample distributions. 
Note that incoporporating the sigal PDF parameter uncertainties will be left as an exercise (2)." ] @@ -1859,11 +1498,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## More Uncertainties : Bootstrapping" @@ -1871,11 +1506,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "As outlined in the Langenbruch paper, Bootstrapping provides another independent method for estimating the uncertainties.\n", "\n", @@ -1894,11 +1525,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Algorithm :\n", "\n", @@ -1914,11 +1541,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Bootstrap Signal Only Data\n", @@ -1929,11 +1552,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# data_phi_sig_para = phi[state==1]\n", @@ -1966,11 +1585,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "plt.hist(bt_sigmas, bins=100)\n", @@ -1984,11 +1599,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "> _Why multiply the standard deviation by `sqrt(frac_to_sample)`?_\n", "\n", @@ -2020,11 +1631,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Bootstrap Background Subtracted Data\n", @@ -2033,11 +1640,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "Now we want to try bootstrapping on the background subtracted fits to test our uncertaintiy estimates.\n", "\n", @@ -2052,11 +1655,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# remember to get signal weights we use sig_weights = sweighter.get_weight(0, para_mass)\n", @@ -2091,11 +1690,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "plt.hist(bt_wgted_sigmas, bins=100)\n", @@ -2109,11 +1704,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ ":::{exercise}\n", "\n", @@ -2143,23 +1734,14 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - }, - "tags": [] - }, + "metadata": {}, "source": [ "## Correcting for Acceptance" ] }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "fragment" - } - }, + "metadata": {}, "source": [ "Acceptance refers to the probabilty of detecting and reconstructing your reaction at a given point in variable space $x_{i,k}={x_{0,k},x_{1,k},x_{2,k}...}$. The $x_{i,k}$ may be any measured (e.g momentum) or calculated variable (e.g. invariant mass). 
It can be given by the ratio of all events to those detected and reconstructed (aka accepted).\n", "\n", @@ -2173,11 +1755,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Maximum Likelihood to the rescue!" @@ -2185,11 +1763,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "In turns out, somewhat magically, the maximum likelihood method removes the need to determine $η(x_{i,k})$ for each event. It essentially just requires a single integration over full $x_{i,k}$ space. Now, in general, the integration needs to be done for every value of model parameters (used in the fitting) instead. This however can be done more accurately, than individual acceptances, for a given number of MC events.\n", "\n", @@ -2220,11 +1794,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "### Toy Acceptance Model" @@ -2233,11 +1803,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "The data we have used so far has been \"perfectly\" detected. to investigate acceptance effects we need to add in these to the data. To do this we will just define some simple ϕ dependent functions, with some degraded acceptance or holes." @@ -2246,12 +1812,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - }, - "tags": [] - }, + "metadata": {}, "outputs": [], "source": [ "def AcceptanceFilter(darr, condarr, acc_func):\n", @@ -2283,9 +1844,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -2302,11 +1860,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now lets try fitting the filtered data ignoring the acceptance." 
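The "Maximum Likelihood to the rescue!" discussion above makes the key point that the acceptance only enters the likelihood through a single normalisation integral, which accepted Monte Carlo events can estimate. Here is a standalone sketch of that idea for the 1 − Σ cos 2φ intensity; the toy acceptance shape, event counts, and function names are assumptions for illustration, not the notebook's implementation:

```python
# Sketch: Monte Carlo estimate of the acceptance-weighted normalisation integral,
#   I(Sigma) = integral over phi of (1 - Sigma cos 2phi) * eta(phi) dphi,
# so eta(phi) never has to be evaluated event by event for the data.
import numpy as np

rng = np.random.default_rng(7)

def intensity(phi, Sigma):
    return 1.0 - Sigma * np.cos(2 * phi)

def toy_acceptance(phi):
    return 0.5 * (1.0 + np.sin(phi) ** 2)        # purely illustrative acceptance shape

phi_gen = rng.uniform(-np.pi, np.pi, 200_000)    # flat "generated" phase-space MC
keep = rng.uniform(0.0, 1.0, phi_gen.size) < toy_acceptance(phi_gen)
phi_acc = phi_gen[keep]                          # "accepted" MC events

def norm_integral(Sigma):
    # sum of the intensity over accepted MC, scaled by (phi range / N generated)
    return (2.0 * np.pi / phi_gen.size) * np.sum(intensity(phi_acc, Sigma))

def acceptance_nll(Sigma, data_phi):
    # log eta(phi) is parameter independent, so it drops out of the likelihood
    return (-np.sum(np.log(intensity(data_phi, Sigma)))
            + data_phi.size * np.log(norm_integral(Sigma)))
```

Minimising `acceptance_nll` over Σ (for instance with `iminuit`) is essentially the strategy applied further below, where the acceptance is included in the normalisation integral of the fitted PDF.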
] @@ -2318,9 +1872,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -2341,11 +1892,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "This should probably be significantly removed from the true value of -0.8.\n", "Also the plots do not match at all (allthough we should not take them too seriuosly)\n", @@ -2366,9 +1913,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -2394,11 +1938,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "This should give a value much closer to -0.8.\n", "\n", @@ -2411,11 +1951,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ ":::{exercise}\n", "\n", @@ -2427,11 +1963,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "# include acceptance in normalisation integral\n", @@ -2452,11 +1984,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Now use this PDF with acceptance in normalisation integral in the likelihood fit." ] @@ -2468,9 +1996,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -2491,11 +2016,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ "Hopefully the value has turned out to be close to -0.8 again! But the uncertainty is different from the previous example where we divided out the acceptance.\n", "\n", @@ -2507,11 +2028,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "outputs": [], "source": [ "def bootstrapper(all_data, Nboot, frac, nll):\n", @@ -2545,9 +2062,6 @@ "jupyter": { "source_hidden": true }, - "slideshow": { - "slide_type": "subslide" - }, "tags": [ "hide-input" ] @@ -2563,11 +2077,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "subslide" - } - }, + "metadata": {}, "source": [ "How does the bootstrap uncertainty compare to the `iminuit` one?" ] @@ -2575,11 +2085,7 @@ { "cell_type": "markdown", "metadata": { - "jp-MarkdownHeadingCollapsed": true, - "slideshow": { - "slide_type": "slide" - }, - "tags": [] + "jp-MarkdownHeadingCollapsed": true }, "source": [ "## Further Exercises" @@ -2587,11 +2093,7 @@ }, { "cell_type": "markdown", - "metadata": { - "slideshow": { - "slide_type": "slide" - } - }, + "metadata": {}, "source": [ ":::{exercise}\n", "\n",