Skip to content

Commit

Permalink
updates on titles, cites and l2 plots in regu
Browse files Browse the repository at this point in the history
Signed-off-by: Ziyu-Mu <mu.ziyu.ovo@gmail.com>
  • Loading branch information
Ziyu-Mu committed May 27, 2024
1 parent 4b1f6d6 commit 57c3980
Show file tree
Hide file tree
Showing 77 changed files with 955 additions and 872 deletions.
29 changes: 29 additions & 0 deletions slides/advriskmin/references.bib
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
@inproceedings{SOLLICH1999NINTH,
  author    = {Sollich, Peter},
  title     = {Probabilistic Interpretations and {Bayesian} Methods for Support Vector Machines},
  booktitle = {1999 Ninth International Conference on Artificial Neural Networks {ICANN} 99. (Conf. Publ. No. 470)},
  year      = {1999},
  volume    = {1},
  pages     = {91--96},
  doi       = {10.1049/cp:19991090},
  url       = {https://ieeexplore.ieee.org/abstract/document/819547},
}

@inproceedings{MEYER2021ALTERNATIVE,
  author    = {Meyer, Gregory P.},
  title     = {An Alternative Probabilistic Interpretation of the {Huber} Loss},
  booktitle = {Proceedings of the {IEEE/CVF} Conference on Computer Vision and Pattern Recognition ({CVPR})},
  pages     = {5261--5269},
  year      = {2021},
  url       = {https://openaccess.thecvf.com/content/CVPR2021/papers/Meyer_An_Alternative_Probabilistic_Interpretation_of_the_Huber_Loss_CVPR_2021_paper.pdf},
}

@article{SALEH2022STATISTICAL,
  author        = {Saleh, Resve A. and Saleh, A. K.},
  title         = {Statistical Properties of the Log-Cosh Loss Function Used in Machine Learning},
  journal       = {arXiv preprint arXiv:2208.04564},
  year          = {2022},
  eprint        = {2208.04564},
  archiveprefix = {arXiv},
  primaryclass  = {stat.ML},
  url           = {https://arxiv.org/pdf/2208.04564},
}
26 changes: 12 additions & 14 deletions slides/advriskmin/slides-advriskmin-bias-variance-decomposition-deepdive.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -4,26 +4,24 @@
\input{../../latex-math/basic-ml}
\input{../../latex-math/ml-eval}

\newcommand{\titlefigure}{figure/bias_variance_decomposition-linear_model_bias.png}
\newcommand{\learninggoals}{
\title{Introduction to Machine Learning}

\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Bias-Variance Decomposition (Deep-Dive)
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/bias_variance_decomposition-linear_model_bias.png
}{
\item Understand how to decompose the generalization error of a learner into
\begin{itemize}
\item \footnotesize Bias of the learner
\item \footnotesize Variance of the learner
\item \footnotesize Inherent noise in the data
\end{itemize}
}

\title{Introduction to Machine Learning}
\date{}

\begin{document}

\lecturechapter{Bias-Variance Decomposition (Deep-Dive)}
\lecture{Introduction to Machine Learning}



}

\begin{vbframe} {Bias-Variance decomposition}

Expand Down
28 changes: 13 additions & 15 deletions slides/advriskmin/slides-advriskmin-bias-variance-decomposition.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -4,27 +4,25 @@
\input{../../latex-math/basic-ml}
\input{../../latex-math/ml-eval}

\newcommand{\titlefigure}{figure/bias_variance_decomposition-linear_model_bias.png}
\newcommand{\learninggoals}{
\title{Introduction to Machine Learning}

\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Advanced Risk Minimization:\\
Bias-Variance Decomposition
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/bias_variance_decomposition-linear_model_bias.png
}{
\item Understand how to decompose the generalization error of a learner into
\begin{itemize}
\item \footnotesize bias of the learner
\item \footnotesize variance of the learner
\item \footnotesize inherent noise in the data
\end{itemize}
}

\title{Introduction to Machine Learning}
\date{}

\begin{document}

\lecturechapter{Advanced Risk Minimization:\\
Bias-Variance Decomposition}
\lecture{Introduction to Machine Learning}



}

\begin{vbframe} {Bias-Variance decomposition}

Expand Down
25 changes: 11 additions & 14 deletions slides/advriskmin/slides-advriskmin-classification-01.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -11,23 +11,20 @@
\input{../../latex-math/basic-math}
\input{../../latex-math/basic-ml}

\newcommand{\titlefigure}{figure/plot_loss_01.png}
\newcommand{\learninggoals}{
\item Derive the risk minimizer of the 0-1-loss
\item Derive the optimal constant model for the 0-1-loss
}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}



\begin{document}

\lecturechapter{0-1-Loss}
\lecture{Introduction to Machine Learning}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
0-1-Loss
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_loss_01.png
}{
\item Derive the risk minimizer of the 0-1-loss
\item Derive the optimal constant model for the 0-1-loss
}

\begin{vbframe}{0-1-Loss}

Expand Down
27 changes: 11 additions & 16 deletions slides/advriskmin/slides-advriskmin-classification-bernoulli.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -14,28 +14,23 @@
\input{../../latex-math/ml-eval}
\input{../../latex-math/ml-trees} % For the comparison of Brier and Gini index

\newcommand{\titlefigure}{figure/plot_bernoulli_prob}
\newcommand{\learninggoals}{
\title{Introduction to Machine Learning}

\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Bernoulli Loss
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_bernoulli_prob.png
}{
\item Know the Bernoulli loss and related losses (log-loss, logistic loss, Binomial loss)
\item Derive the risk minimizer
\item Derive the optimal constant model
\item Understand the connection between log-loss and entropy splitting
}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}



\begin{document}

\lecturechapter{Bernoulli Loss}
\lecture{Introduction to Machine Learning}



\begin{vbframe}{Bernoulli Loss}

\vspace*{-0.5cm}
Expand Down
24 changes: 10 additions & 14 deletions slides/advriskmin/slides-advriskmin-classification-brier.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -13,27 +13,23 @@
\input{../../latex-math/basic-ml}
\input{../../latex-math/ml-trees} % For the comparison of Brier and Gini index

\title{Introduction to Machine Learning}

\newcommand{\titlefigure}{figure/plot_brier.png}
\newcommand{\learninggoals}{
\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Brier Score
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_brier.png
}{
\item Know the Brier score
\item Derive the risk minimizer
\item Derive the optimal constant model
\item Understand the connection between Brier score and Gini splitting
}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}


\begin{document}

\lecturechapter{Brier Score}
\lecture{Introduction to Machine Learning}


% \begin{vbframe}{Classification Losses: (Naive) L2-Loss}


Expand Down
27 changes: 12 additions & 15 deletions slides/advriskmin/slides-advriskmin-classification-deepdive.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -11,25 +11,22 @@
\input{../../latex-math/basic-math}
\input{../../latex-math/basic-ml}

\newcommand{\titlefigure}
{figure/plot_bernoulli_prob}
\newcommand{\learninggoals}{
\item Derive the optimal constant model for the binary empirical log loss risk
\item Derive the optimal constant model for the empirical multiclass log loss risk
}
\newcommand{\argminl}{\mathop{\operatorname{arg\,min}}\limits}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}

\newcommand{\argminl}{\mathop{\operatorname{arg\,min}}\limits}

\begin{document}

\lecturechapter{Optimal constant model for the empirical log loss risk (Deep-Dive)}
\lecture{Introduction to Machine Learning}


\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Optimal constant model for the empirical log loss risk (Deep-Dive)
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_bernoulli_prob.png
}{
\item Derive the optimal constant model for the binary empirical log loss risk
\item Derive the optimal constant model for the empirical multiclass log loss risk
}

\begin{vbframe}{Binary log loss: Emp. Risk Minimizer}

Expand Down
23 changes: 10 additions & 13 deletions slides/advriskmin/slides-advriskmin-classification-furtherlosses.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -14,26 +14,23 @@
\input{../../latex-math/ml-eval}
\input{../../latex-math/ml-trees} % For the comparison of Brier and Gini index

\title{Introduction to Machine Learning}

\newcommand{\titlefigure}{figure/plot_loss_overview_classif.png}
\newcommand{\learninggoals}{
\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Advanced Classification Losses
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_loss_overview_classif.png
}{
\item Know the (squared) hinge loss
\item Know the $L2$ loss defined on scores
\item Know the exponential loss
\item Know the AUC loss
}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}


\begin{document}

\lecturechapter{Advanced Classification Losses}
\lecture{Introduction to Machine Learning}

\begin{vbframe}{Hinge Loss}

\begin{itemize}
Expand Down
29 changes: 13 additions & 16 deletions slides/advriskmin/slides-advriskmin-logreg-deepdive.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -11,26 +11,23 @@
\input{../../latex-math/basic-math}
\input{../../latex-math/basic-ml}

\newcommand{\titlefigure}
{figure/plot_bernoulli_prob}
\newcommand{\learninggoals}{
\item Derive the gradient of the logistic regression
\item Derive the Hessian of the logistic regression
\item Show that the logistic regression is a convex problem
}
\newcommand{\argminl}{\mathop{\operatorname{arg\,min}}\limits}

\title{Introduction to Machine Learning}
% \author{Bernd Bischl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}

\newcommand{\argminl}{\mathop{\operatorname{arg\,min}}\limits}

\begin{document}

\lecturechapter{Logistic regression (Deep-Dive)}
\lecture{Introduction to Machine Learning}


\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Logistic regression (Deep-Dive)
}{% Relative path to title page image: Can be empty but must not start with slides/
figure/plot_bernoulli_prob.png
}{
\item Derive the gradient of the logistic regression
\item Derive the Hessian of the logistic regression
\item Show that the logistic regression is a convex problem
}

\begin{vbframe}{Logistic regression: Risk Problem}

Expand Down
27 changes: 11 additions & 16 deletions slides/advriskmin/slides-advriskmin-losses-properties.tex
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -15,29 +15,24 @@

\usepackage{booktabs}

\newcommand{\titlefigure}{figure_man/vgg_example.png}
\newcommand{\learninggoals}{
\title{Introduction to Machine Learning}

\begin{document}

\titlemeta{% Chunk title (example: CART, Forests, Boosting, ...), can be empty
Advanced Risk Minimization
}{% Lecture title
Properties of Loss Functions
}{% Relative path to title page image: Can be empty but must not start with slides/
figure_man/vgg_example.png
}{
% \item Understand why you should care about properties of loss functions
\item Statistical properties
\item Robustness
\item Numerical properties
\item Some fundamental terminology

}

\title{Introduction to Machine Learning}
% \author{Bernd Bi{}schl, Christoph Molnar, Daniel Schalk, Fabian Scheipl}
\institute{\href{https://compstat-lmu.github.io/lecture_i2ml/}{compstat-lmu.github.io/lecture\_i2ml}}
\date{}


\begin{document}

% ------------------------------------------------------------------------------

\lecturechapter{Properties of Loss Functions}
\lecture{Introduction to Machine Learning}

\begin{vbframe}{The role of Loss Functions}

Why should we care about the choice of the loss function $\Lxy$?
Expand Down
Loading

0 comments on commit 57c3980

Please sign in to comment.