@inproceedings{957f9b38b2ce47979d763327ba721795,
  title     = {How Few Annotations are Needed for Segmentation Using a Multi-planar {U-Net}?},
  abstract  = {U-Net architectures are an extremely powerful tool for segmenting 3D volumes, and the recently proposed multi-planar U-Net has reduced the computational requirement for using the U-Net architecture on three-dimensional isotropic data to a subset of two-dimensional planes. While multi-planar sampling considerably reduces the amount of training data needed, providing the required manually annotated data can still be a daunting task. In this article, we investigate the multi-planar U-Net{\textquoteright}s ability to learn three-dimensional structures in isotropic sampled images from sparsely annotated training samples. We extend the multi-planar U-Net with random annotations, and we present our empirical findings on two public domains, fully annotated by an expert. Surprisingly we find that the multi-planar U-Net on average outperforms the 3D U-Net in most cases in terms of dice, sensitivity, and specificity and that similar performance from the multi-planar unit can be obtained from half the number of annotations by doubling the number of automatically generated training planes. Thus, sometimes less is more!},
  keywords  = {3D imaging, Deep learning, Segmentation, Sparse annotations, U-Net},
  author    = {Laprade, William Michael and Perslev, Mathias and Sporring, Jon},
  year      = {2021},
  doi       = {10.1007/978-3-030-88210-5_20},
  language  = {English},
  isbn      = {978-3-030-88209-9},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer},
  pages     = {209--216},
  booktitle = {MICCAI Workshop on Deep Generative Models},
}