diff --git a/slides/information-theory/chapter-order.tex b/slides/information-theory/chapter-order.tex
index b925394f..447c499f 100644
--- a/slides/information-theory/chapter-order.tex
+++ b/slides/information-theory/chapter-order.tex
@@ -40,6 +40,3 @@ \subsection{Entropy and Optimal Code Length II}
 
 \subsection{Mutual Information under Reparametrization (Deep-Dive)}
 \includepdf[pages=-]{../slides-pdf/slides-info-mi-deepdive.pdf}
-
-
-
diff --git a/slides/information-theory/slides-info-kl-ml.tex b/slides/information-theory/slides-info-kl-ml.tex
index 5585e87b..e9c994ce 100644
--- a/slides/information-theory/slides-info-kl-ml.tex
+++ b/slides/information-theory/slides-info-kl-ml.tex
@@ -33,7 +33,7 @@
 \framebreak
 \begin{itemize}
 \item \textbf{Probabilistic model fitting}\\
-Assume our learner is probabilistic, i.e., we model $p(y| \mathbf{x})$ for example (for example, ridge regression, logistic regression, ...).
+Assume our learner is probabilistic, i.e., we model $p(y| \mathbf{x})$ (for example, logistic regression, Gaussian process, ...).
 
 \begin{center}
 \includegraphics[width=0.5\linewidth]{figure/ftrue.pdf}
diff --git a/slides/information-theory/slides-info-sourcecoding2.tex b/slides/information-theory/slides-info-sourcecoding2.tex
index 50f1cf69..a63110a7 100644
--- a/slides/information-theory/slides-info-sourcecoding2.tex
+++ b/slides/information-theory/slides-info-sourcecoding2.tex
@@ -3,7 +3,7 @@
 \input{../../latex-math/basic-math}
 \input{../../latex-math/basic-ml}
 
-\newcommand{\titlefigure}{figure_man/equal_decode.png}
+\newcommand{\titlefigure}{figure_man/xent_pq.png}
 \newcommand{\learninggoals}{
 \item Know connection between source coding and (cross-)entropy
 \item Know that the entropy of the source distribution is the lower bound for the average code length
@@ -14,13 +14,13 @@
 
 \begin{document}
 
-\lecturechapter{Entropy and Optimal Code Length II}
+\lecturechapter{Source Coding and Cross-Entropy}
 \lecture{Introduction to Machine Learning}
 
 %%%%%%% CUT HERE SECOND SOURCE CODING CHUNK
 
-\begin{vbframe} {Source coding and (cross-)entropy}
+\begin{vbframe} {Source coding and cross-entropy}
 \begin{itemize}
 \item For a random source / distribution $p$, the minimal number of bits to optimally encode messages from is the entropy $H(p)$.