Commit 51979ea6 authored by Azat Garifullin

unsure slides

%% $Id$
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Headers: cover-page header commands, without (\lutkansi) and with (\lutlogokansi) the LUT logo
\newcommand{\lutkansi}[1][\ ]{
\thispagestyle{empty}
\setlength{\unitlength}{1mm}
\begin{picture}(170,20)(0,-10)
\parbox[b][20mm][t]{170mm}{%
\parbox[b][15mm][t]{100mm}{%
\usefont{OT1}{phv}{m}{n} \large LAPPEENRANTA \\
\usefont{OT1}{phv}{m}{n} \large UNIVERSITY OF TECHNOLOGY
}
\parbox[b][15mm][t]{70mm}{%
\usefont{OT1}{phv}{m}{n} #1 \hfill \arabic{page}
} \\
\parbox[b][5mm][b]{100mm}{%
\usefont{OT1}{phv}{m}{n} \normalsize LUT Machine Vision and Pattern
Recognition
}
\parbox[b][5mm][b]{70mm}{%
\usefont{OT1}{phv}{m}{n}
\number\year-\nplpadding{2}\numprint{\number\month}-\nplpadding{2}\numprint{\number\day}
}
}
\end{picture} \\
\begin{picture}(170,15)(0,210)
\parbox[t][15mm][b]{170mm}{%
\usefont{OT1}{phv}{m}{n} \footnotesize
PO BOX 20, FI-53851 LAPPEENRANTA, FINLAND \\
%Telephone: 040 759 1720 \\
%Telefax: 042 759 1720 \\
E-mail: lasse.lensu@lut.fi
}
\end{picture}
\vspace*{-7em}
}
\newcommand{\lutlogokansi}[1][\ ]{
\thispagestyle{empty}
\setlength{\unitlength}{1mm}
\begin{picture}(170,30)(0,-10)
\parbox[b][32mm][t]{170mm}{%
\parbox[b][25mm][t]{100mm}{%
\resizebox{!}{25mm}{\includegraphics{resources/LUT_logo_new_cmyk.eps}}
}
\parbox[b][25mm][t]{70mm}{%
\usefont{OT1}{phv}{m}{n} #1 \hfill \arabic{page}
} \\
\parbox[b][5mm][b]{100mm}{%
\usefont{OT1}{phv}{m}{n} LUT Machine Vision and Pattern Recognition
}
\parbox[b][5mm][b]{70mm}{%
\usefont{OT1}{phv}{m}{n} \number\year-\nplpadding{2}\numprint{\number\month}-\nplpadding{2}\numprint{\number\day}
}
}
\end{picture} \\
\begin{picture}(170,15)(0,200)
\parbox[t][15mm][b]{170mm}{%
\usefont{OT1}{phv}{m}{n} \footnotesize
PO BOX 20, FI-53851 LAPPEENRANTA, FINLAND \\
%Telephone: 040 759 1720 \\
%Telefax: 042 759 1720 \\
E-mail: lasse.lensu@lut.fi
}
\end{picture}
\vspace*{-7em}
}
%% Title redefinition: \lutotsikko typesets the centred title and author (uses \@title and \@author)
\newcommand{\lutotsikko}{%
\begin{center}%
{\large \@title \par}%
{\begin{tabular}[t]{c}%
\@author%\ \number\day.\number\month.\number\year
\end{tabular}\par}%
\end{center}%
\par}
%% Signature block: \lutallekir prints place and date, a signature line, the author, and an optional extra line
\newcommand{\lutallekir}[1][\ ]{
\parbox{170mm}{%
\bigskip
Lappeenranta \number\year-\nplpadding{2}\numprint{\number\month}-\nplpadding{2}\numprint{\number\day} \\
\medskip \\
\rule{50mm}{0.25mm} \\
\@author \\
#1
}
}
%% Page headers and footers: myheadings page style with the title on the left and the page number on the right
\def\ps@myheadings{
\let\@oddfoot\@empty
\let\@evenfoot\@empty
\def\@evenhead{\@title \hfill \thepage}
\def\@oddhead{\@title \hfill \thepage}
\let\@mkboth\@gobbletwo
\let\sectionmark\@gobble
\let\subsectionmark\@gobble
}
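%% Usage note: the commands above rely on \@title and \@author, so they must be
%% defined between \makeatletter and \makeatother (or live in a .sty file), and
%% they assume the graphicx and numprint packages plus the Helvetica (phv) fonts
%% are available. A minimal, hypothetical usage sketch follows; the class, title,
%% author and bracketed option text are illustrative assumptions, not part of
%% this repository:
%%
%%   \documentclass[a4paper,12pt]{article}
%%   \usepackage{graphicx}
%%   \usepackage{numprint}
%%   \title{Example title}
%%   \author{Example Author}
%%   \begin{document}
%%   \lutlogokansi[Example note]  % cover header with the LUT logo, note text and page number
%%   \lutotsikko                  % centred title and author
%%   \pagestyle{myheadings}       % running header: title left, page number right
%%   Body text ...
%%   \lutallekir[Example role]    % place, date, signature line, author and extra line
%%   \end{document}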
@inproceedings{cheng2019bayesian,
title = {A Bayesian Perspective on the Deep Image Prior},
author = {Cheng, Zezhou and Gadelha, Matheus and Maji, Subhransu and Sheldon, Daniel},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages = {5443--5451},
year = {2019}
}
@inproceedings{ulyanov2018deep,
title = {Deep Image Prior},
author = {Ulyanov, Dmitry and Vedaldi, Andrea and Lempitsky, Victor},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages = {9446--9454},
year = {2018}
}
@article{badrinarayanan2017segnet,
title = {SegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation},
author = {Badrinarayanan, Vijay and Kendall, Alex and Cipolla, Roberto},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence},
volume = {39},
number = {12},
pages = {2481--2495},
year = {2017},
publisher = {IEEE}
}
@article{zhang2016understanding,
title = {Understanding deep learning requires rethinking generalization},
author = {Zhang, Chiyuan and Bengio, Samy and Hardt, Moritz and Recht, Benjamin and Vinyals, Oriol},
journal = {arXiv preprint arXiv:1611.03530},
year = {2016}
}
@article{cybenko1989approximation,
title = {Approximation by superpositions of a sigmoidal function},
author = {Cybenko, George},
journal = {Mathematics of Control, Signals and Systems},
volume = {2},
number = {4},
pages = {303--314},
year = {1989},
publisher = {Springer}
}
@article{wann,
title = {Weight Agnostic Neural Networks},
author = {Gaier, Adam and Ha, David},
journal = {Advances in Neural Information Processing Systems},
volume = {32},
year = {2019}
}
@phdthesis{neal1995bayesian,
title = {Bayesian Learning for Neural Networks},
author = {Neal, Radford M},
year = {1995},
school = {University of Toronto}
}
@inproceedings{gal2015dropout,
title = {Dropout as a Bayesian approximation: Insights and applications},
author = {Gal, Yarin and Ghahramani, Zoubin},
year = {2015},
booktitle = {Deep Learning Workshop, ICML}
}
@inproceedings{johnson2016perceptual,
title = {Perceptual losses for real-time style transfer and super-resolution},
author = {Johnson, Justin and Alahi, Alexandre and Fei-Fei, Li},
booktitle = {European Conference on Computer Vision},
pages = {694--711},
year = {2016},
organization = {Springer}
}
@inproceedings{welling2011bayesian,
title = {Bayesian learning via stochastic gradient Langevin dynamics},
author = {Welling, Max and Teh, Yee W},
booktitle = {Proceedings of the 28th International Conference on Machine Learning (ICML-11)},
pages = {681--688},
year = {2011}
}
@inproceedings{ma2015complete,
title = {A complete recipe for stochastic gradient MCMC},
author = {Ma, Yi-An and Chen, Tianqi and Fox, Emily},
booktitle = {Advances in Neural Information Processing Systems},
pages = {2917--2925},
year = {2015}
}
@inproceedings{unsure_dip,
author = {Laves, Max-Heinrich
and T{\"o}lle, Malte
and Ortmaier, Tobias},
title = {Uncertainty Estimation in Medical Image Denoising with Bayesian Deep Image Prior},
booktitle = {Uncertainty for Safe Utilization of Machine Learning in Medical Imaging, and Graphs in Biomedical Image Analysis},
year = {2020},
publisher = {Springer International Publishing},
address = {Cham},
pages = {81--96},
abstract = {Uncertainty quantification in inverse medical imaging tasks with deep learning has received little attention. However, deep models trained on large data sets tend to hallucinate and create artifacts in the reconstructed output that are not anatomically present. We use a randomly initialized convolutional network as parameterization of the reconstructed image and perform gradient descent to match the observation, which is known as deep image prior. In this case, the reconstruction does not suffer from hallucinations as no prior training is performed. We extend this to a Bayesian approach with Monte Carlo dropout to quantify both aleatoric and epistemic uncertainty. The presented method is evaluated on the task of denoising different medical imaging modalities. The experimental results show that our approach yields well-calibrated uncertainty. That is, the predictive uncertainty correlates with the predictive error. This allows for reliable uncertainty estimates and can tackle the problem of hallucinations and artifacts in inverse medical imaging tasks.},
isbn = {978-3-030-60365-6}
}
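Note (an aside to the entry above, not part of the .bib data): the Bayesian deep image prior of Laves et al. parameterises the restored image as the output of an untrained network, x = f_theta(z), fitted to the noisy observation by gradient descent; with Monte Carlo dropout, T stochastic forward passes \hat{x}_t (and heteroscedastic variance outputs \hat{\sigma}_t^2) are typically combined as below. This is the standard aleatoric/epistemic split, written here from memory rather than copied from the paper:
\[
\bar{x} = \frac{1}{T}\sum_{t=1}^{T}\hat{x}_t,
\qquad
\widehat{\operatorname{Var}}[x] \approx
\underbrace{\frac{1}{T}\sum_{t=1}^{T}\hat{\sigma}_t^{2}}_{\text{aleatoric}}
+ \underbrace{\frac{1}{T}\sum_{t=1}^{T}\hat{x}_t^{2} - \bar{x}^{2}}_{\text{epistemic}}.
\]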
@article{srivastava14a,
author = {Nitish Srivastava and Geoffrey Hinton and Alex Krizhevsky and Ilya Sutskever and Ruslan Salakhutdinov},
title = {Dropout: A Simple Way to Prevent Neural Networks from Overfitting},
journal = {Journal of Machine Learning Research},
year = {2014},
volume = {15},
number = {56},
pages = {1929--1958},
url = {http://jmlr.org/papers/v15/srivastava14a.html}
}
@article{farquhar2020liberty,
title = {Liberty or depth: Deep Bayesian neural nets do not need complex weight posterior approximations},
author = {Farquhar, Sebastian and Smith, Lewis and Gal, Yarin},
journal = {Advances in Neural Information Processing Systems},
volume = {33},
year = {2020}
}
@inproceedings{guo2017calibration,
title = {On Calibration of Modern Neural Networks},
author = {Guo, Chuan and Pleiss, Geoff and Sun, Yu and Weinberger, Kilian Q},
booktitle = {International Conference on Machine Learning},
pages = {1321--1330},
year = {2017}
}
@article{thulasidasan2019mixup,
title = {On mixup training: Improved calibration and predictive uncertainty for deep neural networks},
author = {Thulasidasan, Sunil and Chennupati, Gopinath and Bilmes, Jeff A and Bhattacharya, Tanmoy and Michalak, Sarah},
journal = {Advances in Neural Information Processing Systems},
volume = {32},
pages = {13888--13899},
year = {2019}
}
@inproceedings{seo2019learning,
title = {Learning for single-shot confidence calibration in deep neural networks through stochastic inferences},
author = {Seo, Seonguk and Seo, Paul Hongsuck and Han, Bohyung},
booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
pages = {9030--9038},
year = {2019}
}
@inproceedings{unsure_calib,
author = {Thiagarajan, Jayaraman J.
and Venkatesh, Bindya
and Rajan, Deepta
and Sattigeri, Prasanna},
title = {Improving Reliability of Clinical Models Using Prediction Calibration},
booktitle = {Uncertainty for Safe Utilization of Machine Learning in Medical Imaging, and Graphs in Biomedical Image Analysis},
year = {2020},
publisher = {Springer International Publishing},
address = {Cham},
pages = {71--80},
abstract = {The wide-spread adoption of representation learning technologies in clinical decision making strongly emphasizes the need for characterizing model reliability and enabling rigorous introspection of model behavior. In supervised and semi-supervised learning, prediction calibration has emerged as a key technique to achieve improved generalization and to promote trust in learned models. In this paper, we investigate the effectiveness of different prediction calibration techniques in improving the reliability of clinical models. First, we introduce reliability plots, which measures the trade-off between model autonomy and generalization, to quantify model reliability. Second, we propose to utilize an interval calibration objective in lieu of the standard cross entropy loss to build classification models. Finally, using a lesion classification problem with dermoscopy images, we evaluate the proposed prediction calibration approach against both uncalibrated models as well as existing prediction calibration techniques such as mixup and single-shot calibration.},
isbn = {978-3-030-60365-6}
}
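Note (an aside to the entry above, not part of the .bib data): the calibration gap that reliability plots visualise is commonly summarised by the expected calibration error of Guo et al. (guo2017calibration, cited above), computed by binning predictions into B_1, ..., B_M by confidence; the interval calibration objective used by Thiagarajan et al. may differ in detail:
\[
\mathrm{ECE} = \sum_{m=1}^{M} \frac{\lvert B_m\rvert}{n}\,
\bigl\lvert \operatorname{acc}(B_m) - \operatorname{conf}(B_m) \bigr\rvert .
\]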
@inproceedings{unsure_measure,
author = {Camarasa, Robin
and Bos, Daniel
and Hendrikse, Jeroen
and Nederkoorn, Paul
and Kooi, Eline
and van der Lugt, Aad
and de Bruijne, Marleen},
title = {Quantitative Comparison of Monte-Carlo Dropout Uncertainty Measures for Multi-class Segmentation},
booktitle = {Uncertainty for Safe Utilization of Machine Learning in Medical Imaging, and Graphs in Biomedical Image Analysis},
year = {2020},
publisher = {Springer International Publishing},
address = {Cham},
pages = {32--41},
abstract = {Over the past decade, deep learning has become the gold standard for automatic medical image segmentation. Every segmentation task has an underlying uncertainty due to image resolution, annotation protocol, etc. Therefore, a number of methods and metrics have been proposed to quantify the uncertainty of neural networks mostly based on Bayesian deep learning, ensemble learning methods or output probability calibration. The aim of our research is to assess how reliable the different uncertainty metrics found in the literature are. We propose a quantitative and statistical comparison of uncertainty measures based on the relevance of the uncertainty map to predict misclassification. Four uncertainty metrics were compared over a set of 144 models. The application studied is the segmentation of the lumen and vessel wall of carotid arteries based on multiple sequences of magnetic resonance (MR) images in multi-center data.},
isbn = {978-3-030-60365-6}
}
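Note (an aside to the entry above, not part of the .bib data): with T Monte Carlo dropout samples of the per-voxel class probabilities p_t(c), the uncertainty measures compared in this line of work are usually variants of the predictive entropy and of the mutual information between prediction and model weights; the formulas below are the standard definitions and may differ in detail from those in the paper:
\[
\bar{p}(c) = \frac{1}{T}\sum_{t=1}^{T} p_t(c),
\qquad
\mathcal{H}[\bar{p}] = -\sum_{c}\bar{p}(c)\log\bar{p}(c),
\qquad
\mathcal{I} = \mathcal{H}[\bar{p}] - \frac{1}{T}\sum_{t=1}^{T}\mathcal{H}[p_t].
\]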