@inproceedings{fa935ae1e9634a728561ed46cfea750c,
title = "ARHNet: Adaptive Region Harmonization for Lesion-Aware Augmentation to Improve Segmentation Performance",
abstract = "Accurately segmenting brain lesions in MRI scans is critical for providing patients with prognoses and neurological monitoring. However, the performance of CNN-based segmentation methods is constrained by the limited training set size. Advanced data augmentation is an effective strategy to improve the model{\textquoteright}s robustness. However, such strategies often introduce intensity disparities between foreground and background areas, as well as boundary artifacts, which weaken their effectiveness. In this paper, we propose a foreground harmonization framework (ARHNet) to tackle intensity disparities and make synthetic images look more realistic. In particular, we propose an Adaptive Region Harmonization (ARH) module to dynamically align foreground feature maps to the background with an attention mechanism. We demonstrate the efficacy of our method in improving segmentation performance using real and synthetic images. Experimental results on the ATLAS 2.0 dataset show that ARHNet outperforms other methods for image harmonization tasks and boosts downstream segmentation performance. Our code is publicly available at https://github.com/King-HAW/ARHNet.",
keywords = "Adaptive image harmonization, Lesion-aware augmentation, Stroke segmentation",
author = "Jiayu Huo and Yang Liu and Xi Ouyang and Alejandro Granados and S{\'e}bastien Ourselin and Rachel Sparks",
note = "Funding Information: This work was supported by Centre for Doctoral Training in Surgical and Interventional Engineering at King{\textquoteright}s College London; King{\textquoteright}s-China Scholarship Council PhD Scholarship programme (K-CSC); and the Engineering and Physical Sciences Research Council Doctoral Training Partnership (EPSRC DTP) grant EP/T517963/1. This publication represents, in part, independent research commissioned by the Wellcome Innovator Award [218380/Z/19/Z]. The views expressed in this publication are those of the authors and not necessarily those of the Wellcome Trust. Publisher Copyright: {\textcopyright} 2024, The Author(s), under exclusive license to Springer Nature Switzerland AG.; 14th International Workshop on Machine Learning in Medical Imaging, MLMI 2023 ; Conference date: 08-10-2023 Through 08-10-2023",
year = "2024",
doi = "10.1007/978-3-031-45676-3_38",
language = "English",
isbn = "9783031456756",
series = "Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)",
publisher = "Springer Science and Business Media Deutschland GmbH",
pages = "377--386",
editor = "Xiaohuan Cao and Xi Ouyang and Xuanang Xu and Islem Rekik and Zhiming Cui",
booktitle = "Machine Learning in Medical Imaging - 14th International Workshop, MLMI 2023, Held in Conjunction with MICCAI 2023, Proceedings",
address = "Germany",
}