@inproceedings{raidou_2019_preha, title = "preha: Establishing Precision Rehabilitation with Visual Analytics", author = "Georg Bernold and Kresimir Matkovic and Eduard Gr\"{o}ller and Renata Raidou", year = "2019", abstract = "This design study paper describes preha, a novel visual analytics application in the field of in-patient rehabilitation. We conducted extensive interviews with the intended users, i.e., engineers and clinical rehabilitation experts, to determine specific requirements of their analytical process.We identified nine tasks, for which suitable solutions have been designed and developed in the flexible environment of kibana. Our application is used to analyze existing rehabilitation data from a large cohort of 46,000 patients, and it is the first integrated solution of its kind. It incorporates functionalities for data preprocessing (profiling, wrangling and cleansing), storage, visualization, and predictive analysis on the basis of retrospective outcomes. A positive feedback from the first evaluation with domain experts indicates the usefulness of the newly proposed approach and represents a solid foundation for the introduction of visual analytics to the rehabilitation domain.", month = sep, event = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", doi = "10.2312/vcbm.20191234", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", pages = "79--89", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_preha/", } @article{raidou_star2019, title = "State-of-the-Art Report: Visual Computing in Radiation Therapy Planning", author = "Matthias Schlachter and Renata Raidou and Ludvig Paul Muren and Bernhard Preim and Katja B\"{u}hler", year = "2019", abstract = "Radiation therapy (RT) is one of the major curative approaches for cancer. It is a complex and risky treatment approach, which requires precise planning, prior to the administration of the treatment. 
Visual Computing (VC) is a fundamental component of RT planning, providing solutions in all parts of the process—from imaging to delivery. Despite the significant technological advancements of RT over the last decades, there are still many challenges to address. This survey provides an overview of the compound planning process of RT, and of the ways that VC has supported RT in all its facets. The RT planning process is described to enable a basic understanding in the involved data, users and workflow steps. A systematic categorization and an extensive analysis of existing literature in the joint VC/RT research is presented, covering the entire planning process. The survey concludes with a discussion on lessons learnt, current status, open challenges, and future directions in VC/RT research.", month = jun, journal = "Computer Graphics Forum", volume = "38", number = "3", pages = "753--779", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_star2019/", } @inproceedings{raidou_2019_pelvisrunner, title = "Pelvis Runner: Visualizing Pelvic Organ Variability in a Cohort of Radiotherapy Patients", author = "Nicolas Grossmann and Oscar Casares-Magaz and Ludvig Paul Muren and Vitali Moiseenko and John P. Einck and Eduard Gr\"{o}ller and Renata Raidou", year = "2019", abstract = "In radiation therapy, anatomical changes in the patient might lead to deviations between the planned and delivered dose--including inadequate tumor coverage, and overradiation of healthy tissues. Exploring and analyzing anatomical changes throughout the entire treatment period can help clinical researchers to design appropriate treatment strategies, while identifying patients that are more prone to radiation-induced toxicity. We present the Pelvis Runner, a novel application for exploring the variability of segmented pelvic organs in multiple patients, across the entire radiation therapy treatment process. 
Our application addresses (i) the global exploration and analysis of pelvic organ shape variability in an abstracted tabular view and (ii) the local exploration and analysis thereof in anatomical 2D/3D views, where comparative and ensemble visualizations are integrated. The workflow is based on available retrospective cohort data, which incorporate segmentations of the bladder, the prostate, and the rectum through the entire radiation therapy process. The Pelvis Runner is applied to four usage scenarios, which were conducted with two clinical researchers, i.e., medical physicists. Our application provides clinical researchers with promising support in demonstrating the significance of treatment plan adaptation to anatomical changes.", month = sep, event = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", doi = "10.2312/vcbm.20191233", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", pages = "69--78", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_pelvisrunner/", } @incollection{raidou_2019_springer, title = "Visual Analytics for the Representation, Exploration and Analysis of High-Dimensional, Multi-Faceted Medical Data", author = "Renata Raidou", year = "2019", abstract = "Medicine is among research fields with a significant impact on humans and their health. Already for decades, medicine has established a tight coupling with the visualization domain, proving the importance of developing visualization techniques, designed exclusively for this research discipline. However, medical data is steadily increasing in complexity with the appearance of heterogeneous, multi-modal, multiparametric, cohort or population, as well as uncertain data. To deal with this kind of complex data, the field of Visual Analytics has emerged. In this chapter, we discuss the many dimensions and facets of medical data. 
Based on this classification, we provide a general overview of state-of-the-art visualization systems and solutions dealing with high-dimensional, multi-faceted data. Our particular focus will be on multimodal, multi-parametric data, on data from cohort or population studies and on uncertain data, especially with respect to Visual Analytics applications for the representation, exploration, and analysis of high-dimensional, multi-faceted medical data.", month = jul, booktitle = "Biomedical Visualisation", chapter = "10", doi = "10.1007/978-3-030-14227-8_10", editor = "Springer", note = "https://www.springer.com/gp/book/9783030142261", publisher = "Springer", volume = "2", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_springer/", } @inproceedings{raidou_RO2019, title = "PO-0962 Bladder changes during first week of RT for prostate cancer determine the risk of urinary toxicity", author = "Oscar Casares-Magaz and Renata Raidou and NJ Pettersson and Vitali Moiseenko and John P. Einck and A Hopper and R Knopp and Ludvig Paul Muren", year = "2019", month = apr, event = "ESTRO 38", doi = "10.1016/S0167-8140(19)31382-9", booktitle = "Radiotherapy and Oncology", pages = "S522--S523", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_RO2019/", } @article{vitruvian_2019, title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position", author = "Eric M\"{o}rth and Renata Raidou and Ivan Viola and Noeska Natasja Smit", year = "2019", abstract = "Three-dimensional (3D) ultrasound imaging and visualization is often used in medical diagnostics, especially in prenatal screening. Screening the development of the fetus is important to assess possible complications early on. State of the art approaches involve taking standardized measurements to compare them with standardized tables. 
The measurements are taken in a 2D slice view, where precise measurements can be difficult to acquire due to the fetal pose. Performing the analysis in a 3D view would enable the viewer to better discriminate between artefacts and representative information. Additionally making data comparable between different investigations and patients is a goal in medical imaging techniques and is often achieved by standardization. With this paper, we introduce a novel approach to provide a standardization method for 3D ultrasound fetus screenings. Our approach is called “The Vitruvian Baby” and incorporates a complete pipeline for standardized measuring in fetal 3D ultrasound. The input of the method is a 3D ultrasound screening of a fetus and the output is the fetus in a standardized T-pose. In this pose, taking measurements is easier and comparison of different fetuses is possible. In addition to the transformation of the 3D ultrasound data, we create an abstract representation of the fetus based on accurate measurements. We demonstrate the accuracy of our approach on simulated data where the ground truth is known. ", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", volume = "9", doi = "10.2312/vcbm.20191245", pages = "201--205", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/vitruvian_2019/", } @inproceedings{amirkhanov-2018-withteeth, title = "WithTeeth: Denture Preview in Augmented Reality", author = "Aleksandr Amirkhanov and Artem Amirkhanov and Matthias Bernhard and Zsolt Toth and Sabine Stiller and Andreas Geier and Eduard Gr\"{o}ller", year = "2018", abstract = "Dentures are prosthetic devices replacing missing or damaged teeth, often used for dental reconstruction. Dental reconstruction improves the functional state and aesthetic appearance of teeth. 
State-of-the-art methods used by dental technicians typically do not include the aesthetic analysis, which often leads to unsatisfactory results for patients. In this paper, we present a virtual mirror approach for a dental treatment preview in augmented reality. Different denture presets are visually evaluated and compared by switching them on the fly. Our main goals are to provide a virtual dental treatment preview to facilitate early feedback, and hence to build the confidence and trust of patients in the outcome. The workflow of our algorithm is as follows. First, the face is detected and 2D facial landmarks are extracted. Then, 3D pose estimation of upper and lower jaws is performed and high-quality 3D models of the upper and lower dentures are fitted. The fitting uses the occlusal plane angle as determined manually by dental technicians. To provide a realistic impression of the virtual teeth, the dentures are rendered with motion blur. We demonstrate the robustness and visual quality of our approach by comparing the results of a webcam to a DSLR camera under natural, as well as controlled lighting conditions.", month = oct, isbn = "978-3-03868-072-7", address = "https://diglib.eg.org/handle/10.2312/vmv20181250", event = "VMV18", editor = "Beck, Fabian and Dachsbacher, Carsten and Sadlo, Filip", booktitle = "Vision, Modeling and Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/amirkhanov-2018-withteeth/", } @article{raidou2018visualflatter, title = "VisualFlatter - Visual Analysis of Distortions in the Projection of Biomedical Structures", author = "Nicolas Grossmann and Thomas K\"{o}ppel and Eduard Gr\"{o}ller and Renata Raidou", year = "2018", abstract = "Projections of complex anatomical or biological structures from 3D to 2D are often used by visualization and domain experts to facilitate inspection and understanding. 
Representing complex structures, such as organs or molecules, in a simpler 2D way often requires less interaction, while enabling comparability. However, the most commonly employed projection methods introduce size or shape distortions, in the resulting 2D representations. While simple projections display known distortion patterns, more complex projection algorithms are not easily predictable. We propose the VisualFlatter, a visual analysis tool that enables visualization and domain experts to explore and analyze projection-induced distortions, in a structured way. Our tool provides a way to identify projected regions with semantically relevant distortions and allows users to comparatively analyze distortion outcomes, either from alternative projection methods or due to different setups through the projection pipeline. The user is given the ability to improve the initial projection configuration, after comparing different setups. We demonstrate the functionality of our tool using four scenarios of 3D to 2D projections, conducted with the help of domain or visualization experts working on different application fields. We also performed a wider evaluation with 13 participants, familiar with projections, to assess the usability and functionality of the Visual Flatter.", month = sep, journal = "Eurographics Proceedings", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/raidou2018visualflatter/", } @misc{Karall2018_2, title = "Comparative Visual Analytics in a Cohort of Breast Cancer Patients", author = "Nikolaus Karall", year = "2018", abstract = "The most common cancer among the female population in the economically developed world is breast cancer. To significantly reduce the mortality among affected women an early diagnosis is essential, and also treatment strategies need to be selected carefully. 
Clinical researchers working on the selection of chemotherapy treatment need to analyze the progress of the disease during and after treatment and to understand how different groups of patients respond to selected treatments. Currently this is a difficult task because of the multitude of involved (imaging and non-imaging) data, for which adequate visualizations are required. The aim of this work is to help clinical researchers, who are working on the analysis of the progress of chemotherapy, to understand and explore the multitude of data they have. To this end, the following three tasks were realized in a web-based framework: 1. Functionality for single patient follow-up studies (intra-patient study) 2. Functionality to compare two different patients (pairwise inter-patient study) 3. Functionality to compare groups of patients (groupwise inter-patient study) In the examples below, we demonstrate only the latter, as it can be considered an overset of the other two tasks.", month = may, event = "EPILOG", note = "Poster presented at EPILOG (2018-06-18)", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Karall2018_2/", } @article{rraidou_clinical, title = "Uncertainty evaluation of image-based tumour control probability models in radiotherapy of prostate cancer using a visual analytic tool", author = "Oscar Casares-Magaz and Renata Raidou and Jarle Roervik and Anna Vilanova i Bartroli and Ludvig Paul Muren", year = "2018", abstract = "Functional imaging techniques provide radiobiological information that can be included into tumour control probability (TCP) models to enable individualized outcome predictions in radiotherapy. However, functional imaging and the derived radiobiological information are influenced by uncertainties, translating into variations in individual TCP predictions. 
In this study we applied a previously developed analytical tool to quantify dose and TCP uncertainty bands when initial cell density is estimated from MRI-based apparent diffusion coefficient maps of eleven patients. TCP uncertainty bands of 16% were observed at patient level, while dose variations bands up to 8 Gy were found at voxel level for an iso-TCP approach.", month = jan, journal = "Physics and Imaging in Radiation Oncology", number = "5", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/rraidou_clinical/", } @article{EuroVis2018-ShortPapers-Karall, title = "ChemoExplorer: A Dashboard for the Visual Analysis of ChemotherapyResponse in Breast Cancer Patients", author = "Nikolaus Karall and Eduard Gr\"{o}ller and Renata Raidou", year = "2018", abstract = "In breast cancer chemotherapy treatment, different alternative strategies can be employed. Clinical researchers working on the optimization of chemotherapy strategies need to analyze the progress of the treatment and to understand how different groups of patients respond to selected therapies. This is a challenging task, because of the multitude of imaging and non-imaging health record data involved. We, hereby, introduce a web-based dashboard that facilitates the comparison and analysis of publicly available breast cancer chemotherapy response data, consisting of a follow-up study of 63 patients. Each patient received one of two available therapeutic strategies and their treatment response was documented. Our dashboard provides an initial basis for clinical researchers working on chemotherapy optimization, to analyze the progress of treatment and to compare the response of (groups of) patients with distinct treatment characteristics. Our approach consists of multiple linked representations that provide interactive views on different aspects of the available imaging and non-imaging data. 
To illustrate the functionality of the ChemoExplorer, we conducted a usage scenario that shows the initial results of our work.", journal = "Computer Graphics Forum", doi = "10.2312/eurovisshort.20181077", pages = "049-053", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/EuroVis2018-ShortPapers-Karall/", } @article{raidou_2018_bladderrunner, title = "Bladder Runner: Visual Analytics for the Exploration of RT-Induced Bladder Toxicity in a Cohort Study", author = "Renata Raidou and Oscar Casares-Magaz and Aleksandr Amirkhanov and Vitali Moiseenko and Ludvig Paul Muren and John P. Einck and Anna Vilanova i Bartroli and Eduard Gr\"{o}ller", year = "2018", abstract = "We present the Bladder Runner, a novel tool to enable detailed visual exploration and analysis of the impact of bladder shape variation on the accuracy of dose delivery, during the course of prostate cancer radiotherapy (RT). Our tool enables the investigation of individual patients and cohorts through the entire treatment process, and it can give indications of RT-induced complications for the patient. In prostate cancer RT treatment, despite the design of an initial plan prior to dose administration, bladder toxicity remains very common. The main reason is that the dose is delivered in multiple fractions over a period of weeks, during which, the anatomical variation of the bladder - due to differences in urinary filling - causes deviations between planned and delivered doses. Clinical researchers want to correlate bladder shape variations to dose deviations and toxicity risk through cohort studies, to understand which specific bladder shape characteristics are more prone to side effects. This is currently done with Dose-Volume Histograms (DVHs), which provide limited, qualitative insight. The effect of bladder variation on dose delivery and the resulting toxicity cannot be currently examined with the DVHs. 
To address this need, we designed and implemented the Bladder Runner, which incorporates visualization strategies in a highly interactive environment with multiple linked views. Individual patients can be explored and analyzed through the entire treatment period, while inter-patient and temporal exploration, analysis and comparison are also supported. We demonstrate the applicability of our presented tool with a usage scenario, employing a dataset of 29 patients followed through the course of the treatment, across 13 time points. We conducted an evaluation with three clinical researchers working on the investigation of RT-induced bladder toxicity. All participants agreed that Bladder Runner provides better understanding and new opportunities for the exploration and analysis of the involved cohort data.", journal = "Computer Graphics Forum", volume = "37", number = "3", issn = "1467-8659", doi = "10.1111/cgf.13413", pages = "205--216", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/raidou_2018_bladderrunner/", } @article{mindek-2017-dsn, title = "Data-Sensitive Visual Navigation", author = "Peter Mindek and Gabriel Mistelbauer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2017", abstract = "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes. 
The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.", month = oct, journal = "Computers \& Graphics", volume = "67", number = "C", pages = "77--85", keywords = "navigation, exploration, medical visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/", } @article{rraidou_EG17, title = "Visual Analytics for Digital Radiotherapy: Towards a Comprehensible Pipeline", author = "Renata Raidou and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2017", abstract = "Prostate cancer is one of the most frequently occurring types of cancer in males. It is often treated with radiation therapy, which aims at irradiating tumors with a high dose, while sparing the surrounding healthy tissues. In the course of the years, radiotherapy technology has undergone great advancements. However, tumors are not only different from each other, they are also highly heterogeneous within, consisting of regions with distinct tissue characteristics, which should be treated with different radiation doses. Tailoring radiotherapy planning to the specific needs and intra-tumor tissue characteristics of each patient is expected to lead to more effective treatment strategies. Currently, clinical research is moving towards this direction, but an understanding of the specific tumor characteristics of each patient, and the integration of all available knowledge into a personalizable radiotherapy planning pipeline are still required. The present work describes solutions from the field of Visual Analytics, which aim at incorporating the information from the distinct steps of the personalizable radiotherapy planning pipeline, along with eventual sources of uncertainty, into comprehensible visualizations. All proposed solutions are meant to increase the – up to now, limited – understanding and exploratory capabilities of clinical researchers. 
These approachescontribute towards the interactive exploration, visual analysis and understanding of the involved data and processes at differentsteps of the radiotherapy planning pipeline, creating a fertile ground for future research in radiotherapy planning.", month = apr, journal = "Computer Graphics Forum (Proceedings of Eurographics)", volume = "36", booktitle = "Computer Graphics Forum (Proceedings of Eurographics)", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/rraidou_EG17/", } @article{miao_tvcg_2017, title = "Placenta Maps: In Utero Placental Health Assessment of the Human Fetus", author = "Haichao Miao and Gabriel Mistelbauer and Alexey Karimov and Amir Alansary and Alice Davidson and David F.A. Lloyd and Mellisa Damodaram and Lisa Story and Jana Hutter and Joseph V. Hajnal and Mary Rutherford and Bernhard Preim and Bernhard Kainz and Eduard Gr\"{o}ller", year = "2017", abstract = "null", journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "23", number = "6", pages = "1612--1623", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/miao_tvcg_2017/", } @article{Groeller_2016_P4, title = "Visual Analytics for the Exploration and Assessment of Segmentation Errors", author = "Renata Raidou and Freek Marcelis and Marcel Breeuwer and Eduard Gr\"{o}ller and Anna Vilanova i Bartroli and Huub van de Wetering", year = "2016", abstract = "Several diagnostic and treatment procedures require the segmentation of anatomical structures from medical images. However, the automatic model-based methods that are often employed, may produce inaccurate segmentations. These, if used as input for diagnosis or treatment, can have detrimental effects for the patients. Currently, an analysis to predict which anatomic regions are more prone to inaccuracies, and to determine how to improve segmentation algorithms, cannot be performed. 
We propose a visual tool to enable experts, working on model-based segmentation algorithms, to explore and analyze the outcomes and errors of their methods. Our approach supports the exploration of errors in a cohort of pelvic organ segmentations, where the performance of an algorithm can be assessed. Also, it enables the detailed exploration and assessment of segmentation errors, in individual subjects. To the best of our knowledge, there is no other tool with comparable functionality. A usage scenario is employed to explore and illustrate the capabilities of our visual tool. To further assess the value of the proposed tool, we performed an evaluation with five segmentation experts. The evaluation participants confirmed the potential of the tool in providing new insight into their data and employed algorithms. They also gave feedback for future improvements.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine", pages = "193--202", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P4/", } @article{Mistelbauer_Gabriel_2016, title = "Aortic Dissection Maps: Comprehensive Visualization of Aortic Dissections for Risk Assessment", author = "Gabriel Mistelbauer and Johanna Schmidt and A.M. Sailer and Kathrin B\"{a}umler and Shannon Walters and Dominik Fleischmann", year = "2016", abstract = "Aortic dissection is a life threatening condition of the aorta, characterized by separation of its wall layers into a true and false lumen. A subset of patients require immediate surgical or endovascular repair. All survivors of the acute phase need long-term surveillance with imaging to monitor chronic degeneration and dilatation of the false lumen and prevent late adverse events such as rupture, or malperfusion. 
We introduce four novel plots displaying features of aortic dissections known or presumed to be associated with risk of future adverse events: Aortic diameter, the blood supply (outflow) to the aortic branches from the true and false lumen, the previous treatment, and an estimate of adverse event-free probabilities in one, two and 5 years. Aortic dissection maps, the composite visualization of these plots, provide a baseline for visual comparison of the complex features and associated risk of aortic dissection. These maps may lead to more individualized monitoring and improved, patient-centric treatment planning in the future.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine (2016)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Mistelbauer_Gabriel_2016/", } @phdthesis{karimov-2016-GIVE, title = "Guided Interactive Volume Editing in Medicine", author = "Alexey Karimov", year = "2016", abstract = "Various medical imaging techniques, such as Computed Tomography, Magnetic Resonance Imaging, Ultrasonic Imaging, are now gold standards in the diagnosis of different diseases. The diagnostic process can be greatly improved with the aid of automatic and interactive analysis tools, which, however, require certain prerequisites in order to operate. Such analysis tools can, for example, be used for pathology assessment, various standardized measurements, treatment and operation planning. One of the major requirements of such tools is the segmentation mask of an object-of-interest. However, the segmentation of medical data remains subject to errors and mistakes. Often, physicians have to manually inspect and correct the segmentation results, as (semi-)automatic techniques do not immediately satisfy the required quality. To this end, interactive segmentation editing is an integral part of medical image processing and visualization. In this thesis, we present three advanced segmentation-editing techniques. 
They are focused on simple interaction operations that allow the user to edit segmentation masks quickly and effectively. These operations are based on a topology-aware representation that captures structural features of the segmentation mask of the object-of-interest. Firstly, in order to streamline the correction process, we classify segmentation defects according to underlying structural features and propose a correction procedure for each type of defect. This alleviates users from manually applying the proper editing operations, but the segmentation defects still have to be located by users. Secondly, we extend the basic editing process by detecting regions that potentially contain defects. With subsequently suggested correction scenarios, users are hereby immediately able to correct a specific defect, instead of manually searching for defects beforehand. For each suggested correction scenario, we automatically determine the corresponding region of the respective defect in the segmentation mask and propose a suitable correction operation. In order to create the correction scenarios, we detect dissimilarities within the data values of the mask and then classify them according to the characteristics of a certain type of defect. Potential findings are presented with a glyph-based visualization that facilitates users to interactively explore the suggested correction scenarios on different levels-of-detail. As a consequence, our approach even offers users the possibility to fine-tune the chosen correction scenario instead of directly manipulating the segmentation mask, which is a time-consuming and cumbersome task. Third and finally, we guide users through the multitude of suggested correction scenarios of the entire correction process. After statistically evaluating all suggested correction scenarios, we rank them according to their significance of dissimilarities, offering fine-grained editing capabilities at a user-specified level-of-detail. 
As we visually convey this ranking in a radial layout, users can easily spot and select the most (or the least) dissimilar correction scenario, which improves the segmentation mask mostly towards the desired result. All techniques proposed within this thesis have been evaluated by collaborating radiologists. We assessed the usability, interaction aspects, the accuracy of the results and the expenditure of time of the entire correction process. The outcome of the assessment showed that our guided volume editing not only leads to acceptable segmentation results with only a few interaction steps, but also is applicable to various application scenarios.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-GIVE/", } @techreport{karimov-2016-SD, title = "Statistics-Driven Localization of Dissimilarities in Data", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Eduard Gr\"{o}ller", year = "2016", abstract = "The identification of dissimilar regions in spatial and temporal data is a fundamental part of data exploration. This process takes place in applications, such as biomedical image processing as well as climatic data analysis. We propose a general solution for this task by employing well-founded statistical tools. From a large set of candidate regions, we derive an empirical distribution of the data and perform statistical hypothesis testing to obtain p-values as measures of dissimilarity. Having p-values, we quantify differences and rank regions on a global scale according to their dissimilarity to user-specified exemplar regions. We demonstrate our approach and its generality with two application scenarios, namely interactive exploration of climatic data and segmentation editing in the medical domain. 
In both cases our data exploration protocol unifies the interactive data analysis, guiding the user towards regions with the most relevant dissimilarity characteristics. The dissimilarity analysis results are conveyed with a radial tree, which prevents the user from searching exhaustively through all the data.", month = apr, number = "TR-186-2-16-1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-SD/", } @article{raidou_miccai16, title = "Employing Visual Analytics to Aid the Design of White Matter Hyperintensity Classifiers", author = "Renata Raidou and Hugo J. Kuijf and Neda Sepasian and Nicola Pezzotti and Willem H. Bouvy and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "Accurate segmentation of brain white matter hyperintensities (WMHs) is important for prognosis and disease monitoring. To this end, classifiers are often trained -- usually, using T1 and FLAIR weighted MR images. Incorporating additional features, derived from diffusion weighted MRI, could improve classification. However, the multitude of diffusion-derived features requires selecting the most adequate. For this, automated feature selection is commonly employed, which can often be sub-optimal. In this work, we propose a different approach, introducing a semi-automated pipeline to select interactively features for WMH classification. The advantage of this solution is the integration of the knowledge and skills of experts in the process. In our pipeline, a Visual Analytics (VA) system is employed, to enable user-driven feature selection. 
The resulting features are T1, FLAIR, Mean Diffusivity (MD), and Radial Diffusivity (RD) -- and secondarily, CS and Fractional Anisotropy (FA). The next step in the pipeline is to train a classifier with these features, and compare its results to a similar classifier, used in previous work with automated feature selection. Finally, VA is employed again, to analyze and understand the classifier performance and results.", journal = "Proceedings of International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_miccai16/", } @article{malan_fluoro, title = "A fluoroscopy-based planning and guidance software tool for minimally invasive hip refixation by cement injection", author = "DF Malan and SJ van der Walt and Renata Raidou and B van den Berg and BC Stoel and CP Botha and RG Nelissen and ER Valstar", year = "2016", abstract = "PURPOSE: In orthopaedics, minimally invasive injection of bone cement is an established technique. We present HipRFX, a software tool for planning and guiding a cement injection procedure for stabilizing a loosening hip prosthesis. HipRFX works by analysing a pre-operative CT and intraoperative C-arm fluoroscopic images. METHODS: HipRFX simulates the intraoperative fluoroscopic views that a surgeon would see on a display panel. Structures are rendered by modelling their X-ray attenuation. These are then compared to actual fluoroscopic images which allow cement volumes to be estimated. Five human cadaver legs were used to validate the software in conjunction with real percutaneous cement injection into artificially created periprothetic lesions. RESULTS: Based on intraoperatively obtained fluoroscopic images, our software was able to estimate the cement volume that reached the pre-operatively planned targets. The actual median target lesion volume was 3.58 ml (range 3.17-4.64 ml). 
The median error in computed cement filling, as a percentage of target volume, was 5.3% (range 2.2-14.8%). Cement filling was between 17.6 and 55.4% (median 51.8%). CONCLUSIONS: As a proof of concept, HipRFX was capable of simulating intraoperative fluoroscopic C-arm images. Furthermore, it provided estimates of the fraction of injected cement deposited at its intended target location, as opposed to cement that leaked away. This level of knowledge is usually unavailable to the surgeon viewing a fluoroscopic image and may aid in evaluating the success of a percutaneous cement injection intervention.", journal = "International Journal of Computer Assisted Radiology and Surgery", number = "2", volume = "11", pages = "281--296", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/malan_fluoro/", } @article{miao_2016_cgf, title = "Visual Quantification of the Circle of Willis: An Automated Identification and Standardized Representation", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2016", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). It is an arterial structure with the responsibility of supplying the brain with blood, however, dysfunctions can lead to strokes. The diagnosis of such a time-critical/urgent event depends on the expertise of radiologists and the applied software tools. They use basic display methods of the volumetric data without any support of advanced image processing and visualization techniques. The goal of this paper is to present an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW's configuration. This novel representation provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. 
Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets together with an enumeration technique for labelling the arterial segments by detecting the main supplying arteries of the CoW. We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries and visually capturing the overall configuration of the CoW.", issn = "1467-8659", journal = "Computer Graphics Forum", keywords = "Circle of Willis, medical visualization, information visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/miao_2016_cgf/", } @article{raidou_eurovis16, title = "Visual Analysis of Tumor Control Models for Prediction of Radiotherapy Response", author = "Renata Raidou and Oscar Casares-Magaz and Ludvig Paul Muren and Uulke A van der Heide and Jarle Roervik and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "In radiotherapy, tumors are irradiated with a high dose, while surrounding healthy tissues are spared. To quantify the probability that a tumor is effectively treated with a given dose, statistical models were built and employed in clinical research. These are called tumor control probability (TCP) models. Recently, TCP models started incorporating additional information from imaging modalities. In this way, patient-specific properties of tumor tissues are included, improving the radiobiological accuracy of models. Yet, the employed imaging modalities are subject to uncertainties with significant impact on the modeling outcome, while the models are sensitive to a number of parameter assumptions. Currently, uncertainty and parameter sensitivity are not incorporated in the analysis, due to time and resource constraints. 
To this end, we propose a visual tool that enables clinical researchers working on TCP modeling, to explore the information provided by their models, to discover new knowledge and to confirm or generate hypotheses within their data. Our approach incorporates the following four main components: (1) It supports the exploration of uncertainty and its effect on TCP models; (2) It facilitates parameter sensitivity analysis to common assumptions; (3) It enables the identification of inter-patient response variability; (4) It allows starting the analysis from the desired treatment outcome, to identify treatment strategies that achieve it. We conducted an evaluation with nine clinical researchers. All participants agreed that the proposed visual tool provides better understanding and new opportunities for the exploration and analysis of TCP modeling.", journal = "EuroVis - Eurographics/IEEE-VGTC Symposium on Visualization 2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_eurovis16/", } @article{Solteszova2016, title = "Output-Sensitive Filtering of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner", year = "2016", abstract = "Real-time volume data acquisition poses substantial challenges for the traditional visualization pipeline where data enhancement is typically seen as a pre-processing step. In the case of 4D ultrasound data, for instance, costly processing operations to reduce noise and to remove artefacts need to be executed for every frame. To enable the use of high-quality filtering operations in such scenarios, we propose an output-sensitive approach to the visualization of streaming volume data. Our method evaluates the potential contribution of all voxels to the final image, allowing us to skip expensive processing operations that have little or no effect on the visualization. 
As filtering operations modify the data values which may affect the visibility, our main contribution is a fast scheme to predict their maximum effect on the final image. Our approach prioritizes filtering of voxels with high contribution to the final visualization based on a maximal permissible error per pixel. With zero permissible error, the optimized filtering will yield a result that is identical to filtering of the entire volume. We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios that require on-the-fly processing.", journal = "Computer Graphics Forum", volume = "35", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Solteszova2016/", } @inproceedings{Miao_2015_VCBM, title = "CoWRadar: Visual Quantification of the Circle of Willis in Stroke Patients", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2015", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). The CoW is an arterial structure that is responsible for the brain’s blood supply. Dysfunctions of this arterial circle can lead to strokes. The diagnosis relies on the radiologist’s expertise and the software tools used. These tools consist of very basic display methods of the volumetric data without support of advanced technologies in medical image processing and visualization. The goal of this paper is to create an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW’s configuration. This novel display provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets. An enumeration technique for the labeling of the arterial segments is therefore suggested. 
We also propose a method for detecting the CoW’s main supplying arteries by analyzing the coronal, sagittal and transverse image planes of the data sets. We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries of the CoW.", month = sep, isbn = "978-3-905674-82-8", publisher = "The Eurographics Association", organization = "EG Digital Library", location = "Chester, United Kingdom", issn = "2070-5786", editor = "Katja B\"{u}hler and Lars Linsen and Nigel W. John", booktitle = "EG Workshop on Visual Computing for Biology and Medicine", pages = "1--10", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Miao_2015_VCBM/", } @article{karimov-2015-HD, title = "Guided Volume Editing based on Histogram Dissimilarity", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner", year = "2015", abstract = "Segmentation of volumetric data is an important part of many analysis pipelines, but frequently requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. We propose an approach to improve volume editing by detecting potential segmentation defects while considering the underlying structure of the object of interest. Our method is based on a novel histogram dissimilarity measure between individual regions, derived from structural information extracted from the initial segmentation. Based on this information, our interactive system guides the user towards potential defects, provides integrated tools for their inspection, and automatically generates suggestions for their resolution. 
We demonstrate that our approach can reduce interaction effort and supports the user in a comprehensive investigation for high-quality segmentations.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", pages = "91--100", keywords = "Edge and feature detection, Image Processing and Computer Vision, Computer Graphics, Display algorithms, Picture/Image Generation, Segmentation, Methodology and Techniques, Interaction techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/", } @article{raidou_EuroVis15, title = "Visual analytics for the exploration of tumor tissue characterization", author = "Renata Raidou and Uulke A van der Heide and Cuong V Dinh and Ghazaleh Ghobadi and Jesper Follsted Kallehauge and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2015", abstract = "Tumors are heterogeneous tissues consisting of multiple regions with distinct characteristics. Characterization of these intra-tumor regions can improve patient diagnosis and enable a better targeted treatment. Ideally, tissue characterization could be performed non-invasively, using medical imaging data, to derive per voxel a number of features, indicative of tissue properties. However, the high dimensionality and complexity of this imaging-derived feature space is prohibiting for easy exploration and analysis -- especially when clinical researchers require to associate observations from the feature space to other reference data, e.g., features derived from histopathological data. Currently, the exploratory approach used in clinical research consists of juxtaposing these data, visually comparing them and mentally reconstructing their relationships. This is a time consuming and tedious process, from which it is difficult to obtain the required insight. 
We propose a visual tool for: (1) easy exploration and visual analysis of the feature space of imaging-derived tissue characteristics and (2) knowledge discovery and hypothesis generation and confirmation, with respect to reference data used in clinical research. We employ, as central view, a 2D embedding of the imaging-derived features. Multiple linked interactive views provide functionality for the exploration and analysis of the local structure of the feature space, enabling linking to patient anatomy and clinical reference data. We performed an initial evaluation with ten clinical researchers. All participants agreed that, unlike current practice, the proposed visual tool enables them to identify, explore and analyze heterogeneous intra-tumor regions and particularly, to generate and confirm hypotheses, with respect to clinical reference data.", journal = "Computer Graphics Forum", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/raidou_EuroVis15/", } @inproceedings{Viola_Ivan_DAC, title = "Deriving Anatomical Context from 4D Ultrasound", author = "Markus M\"{u}ller and Linn E. S. Helljesen and Raphael Prevost and Ivan Viola and Kim Nylund and Odd Helge Gilja and Nassir Navab and Wolfgang Wein", year = "2014", abstract = "Real-time three-dimensional (also known as 4D) ultrasound imaging using matrix array probes has the potential to create large-volume information of entire organs such as the liver without external tracking hardware. This information can in turn be placed into the context of a CT or MRI scan of the same patient. However for such an approach many image processing challenges need to be overcome and sources of error addressed, including reconstruction drift, anatomical deformations, varying appearance of anatomy, and imaging artifacts. 
In this work,we present a fully automatic system including robust image-based ultrasound tracking, a novel learning-based global initialization of the anatomical context, and joint mono- and multi-modal registration. In an evaluation on 4D US sequences and MRI scans of eight volunteers we achieve automatic reconstruction and registration without any user interaction, assess the registration errors based on physician-defined landmarks, and demonstrate realtime tracking of free-breathing sequences.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", note = "The electronic version of the proceedings is available from the Eurographics Digital Library at http://diglib.eg.org", location = "Vienna, Austria", issn = "2070-5778", event = "4th Eurographics Workshop on Visual Computing for Biology and Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM14", pages = "173--180", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_DAC/", } @inproceedings{Viola_Ivan_VDP, title = "Visibility-Driven Processing of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner", year = "2014", abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw data is challenging to visualize directly without additional processing. Noise removal and feature detection are common operations, but many methods are too costly to compute over the whole volume when dealing with live streamed data. In this paper, we propose a visibility-driven processing scheme for handling costly on-the-fly processing of volumetric data in real-time. In contrast to the traditional visualization pipeline, our scheme utilizes a fast computation of the potentially visible subset of voxels which significantly reduces the amount of data required to process. 
As filtering operations modify the data values which may affect their visibility, our method for visibility-mask generation ensures that the set of elements deemed visible does not change after processing. Our approach also exploits the visibility information for the storage of intermediate values when multiple operations are performed in sequence, and can therefore significantly reduce the memory overhead of longer filter pipelines. We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios where on-the-fly processing is required.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", location = "Vienna, Austria", issn = "2070-5778", event = "4th EG Workshop on Visual Computing for Biology and Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM 2014", pages = "127--136", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_VDP/", } @misc{fmistelbauer-2014-adict, title = "ActiveDICOM -- Enhancing Static Medical Images with Interaction", author = "Florian Mistelbauer and Gabriel Mistelbauer and Eduard Gr\"{o}ller", year = "2014", abstract = "Digital Imaging and Communications in Medicine (DICOM) is a well-established standard in medical imaging, consisting not only of image data, but sensitive data such as patient and examination information. Nowadays, although having a large variety of advanced rendering techniques available, DICOM images are still generated and sent to the Picture Archiving and Communication System (PACS). These images are then fetched by the medical doctor from a workstation and used for medical reporting. The user has no other possibilities than being able to change the windowing function for displaying the DICOM images. If a certain region is of special interest, either images of the whole data set are generated or have to be specifically requested. Both approaches consume a considerable amount of time. 
Secondly, the image generation on demand remains pending until done by the responsible assistant. Despite supporting a broad range of features and being widely applied, DICOM images remain static. We propose a visualization mapping language, Active DICOM Script (ADICT), which enhances conventional DICOM with interactive elements by combining heterogeneous data, interaction and visualization. Such DICOM images are then called Active Digital Imaging and Communications in Medicine (ActiveDICOM).", month = sep, series = "EG VCBM 2014", location = "Vienna, Austria", event = "Eurographics Workshop on Visual Computing for Biology and Medicine", booktitle = "Posters at Eurographics Workshop on Visual Computing for Biology and Medicine", note = "Poster presented at Eurographics Workshop on Visual Computing for Biology and Medicine (2014-09-03--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fmistelbauer-2014-adict/", } @incollection{Viola_Ivan_UVP, title = "The Ultrasound Visualization Pipeline", author = "{\AA}smund Birkeland and Veronika Solteszova and Dieter H\"{o}nigmann and Odd Helge Gilja and Svein Brekke and Timo Ropinski and Ivan Viola", year = "2014", abstract = "Radiology is one of the main tools in modern medicine. A numerous set of diseases, ailments and treatments utilize accurate images of the patient. Ultrasound is one of the most frequently used imaging modalities in medicine. The high spatial resolution, its interactive nature and non-invasiveness make it the first choice in many examinations. Image interpretation is one of ultrasound’s main challenges. Much training is required to obtain a confident skill level in ultrasound-based diagnostics. State-of-the-art graphics techniques are needed to provide meaningful visualizations of ultrasound in real-time. In this paper we present the process-pipeline for ultrasound visualization, including an overview of the tasks performed in the specific steps. 
To provide an insight into the trends of ultrasound visualization research, we have selected a set of significant publications and divided them into a technique-based taxonomy covering the topics pre-processing, segmentation, registration, rendering and augmented reality. For the different technique types we discuss the difference between ultrasound-based techniques and techniques for other modalities.", month = sep, note = "Available at http://link.springer.com/chapter/10.1007%2F978-1-4471-6497-5_24", booktitle = "Scientific Visualization", chapter = "Uncertainty, Multifield, Biomedical, and Scalable Visualization", editor = "Hansen, Charles D. and Chen, Min and Johnson, Christopher R. and Kaufman, Arie E. and Hagen, Hans", isbn = "978-1-4471-6496-8", publisher = "Springer London", series = "Mathematics and Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_UVP/", } @article{raidou_vis14, title = "Visual analytics for the exploration of multiparametric cancer imaging", author = "Renata Raidou and Marta Paes Moreira and Wouter van Elmpt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Tumor tissue characterization can play an important role in the diagnosis and design of effective treatment strategies. In order to gather and combine the necessary tissue information, multi-modal imaging is used to derive a number of parameters indicative of tissue properties. The exploration and analysis of relationships between parameters and, especially, of differences among distinct intra-tumor regions is particularly interesting for clinical researchers to individualize tumor treatment. However, due to high data dimensionality and complexity, the current clinical workflow is time demanding and does not provide the necessary intra-tumor insight. We implemented a new application for the exploration of the relationships between parameters and heterogeneity within tumors. 
In our approach, we employ a well-known dimensionality reduction technique [5] to map the high-dimensional space of tissue properties into a 2D information space that can be interactively explored with integrated information visualization techniques. We conducted several usage scenarios with real-patient data, of which we present a case of advanced cervical cancer. First indications show that our application introduces new features and functionalities that are not available within the current clinical approach.", journal = "Visual Analytics Science and Technology (VAST), 2014 IEEE Conference on Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vis14/", } @article{raidou_vcbm14, title = "The iCoCooN: Integration of Cobweb Charts with Parallel Coordinates for Visual Analysis of DCE-MRI Modeling Variations", author = "Renata Raidou and Uulke A van der Heide and PJ van Houdt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Efficacy of radiotherapy treatment depends on the specific characteristics of tumorous tissues. For the determination of these characteristics, clinical practice uses Dynamic Contrast Enhanced (DCE) Magnetic Resonance Imaging (MRI). DCE-MRI data is acquired and modeled using pharmacokinetic modeling, to derive per voxel a set of parameters, indicative of tissue properties. Different pharmacokinetic modeling approaches make different assumptions, resulting in parameters with different distributions. A priori, it is not known whether there are significant differences between modeling assumptions and which assumption is best to apply. Therefore, clinical researchers need to know at least how different choices in modeling affect the resulting pharmacokinetic parameters and also where parameter variations appear. In this paper, we introduce iCoCooN: a visualization application for the exploration and analysis of model-induced variations in pharmacokinetic parameters. 
We designed a visual representation, the Cocoon, by integrating perpendicularly Parallel Coordinate Plots (PCPs) with Cobweb Charts (CCs). PCPs display the variations in each parameter between modeling choices, while CCs present the relations in a whole parameter set for each modeling choice. The Cocoon is equipped with interactive features to support the exploration of all data aspects in a single combined view. Additionally, interactive brushing allows to link the observations from the Cocoon to the anatomy. We conducted evaluations with experts and also general users. The clinical experts judged that the Cocoon in combination with its features facilitates the exploration of all significant information and, especially, enables them to find anatomical correspondences. The results of the evaluation with general users indicate that the Cocoon produces more accurate results compared to independent multiples.", journal = "Eurographics Workshop on Visual Computing for Biology and Medicine", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vcbm14/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. 
Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @article{karimov-2013-vivisection, title = "ViviSection: Skeleton-based Volume Editing", author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Volume segmentation is important in many applications, particularly in the medical domain. Most segmentation techniques, however, work fully automatically only in very restricted scenarios and cumbersome manual editing of the results is a common task. In this paper, we introduce a novel approach for the editing of segmentation results. Our method exploits structural features of the segmented object to enable intuitive and robust correction and verification. 
We demonstrate that our new approach can significantly increase the segmentation quality even in difficult cases such as in the presence of severe pathologies.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/", } @article{mistelbauer-2013-cfa, title = "Vessel Visualization using Curvicircular Feature Aggregation", author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Armin Kanitsar and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Radiological investigations are common medical practice for the diagnosis of peripheral vascular diseases. Existing visualization methods such as Curved Planar Reformation (CPR) depict calcifications on vessel walls to determine if blood is still able to flow. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, we propose Curvicircular Feature Aggregation (CFA), which aggregates these rotated images into a single view. By eliminating the need for rotation, vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. We present applications of this technique in the medical domain and give feedback from radiologists.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "231--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/", } @inproceedings{Viola_Ivan_2013_D3D, title = "Doppler-based 3D Blood Flow Imaging and Visualization", author = "{\AA}smund Birkeland and Dag Magne Ulvang and Kim Nylund and Trygve Hausken and Odd Helge Gilja and Ivan Viola", year = "2013", abstract = "Blood flow is a very important part of human physiology. 
In this paper, we present a new method for estimating and visualizing 3D blood flow on-the-fly based on Doppler ultrasound. We add semantic information about the geometry of the blood vessels in order to recreate the actual velocities of the blood. Assuming a laminar flow, the flow direction is related to the general direction of the vessel. Based on the center line of the vessel, we create a vector field representing the direction of the vessel at any given point. The actual flow velocity is then estimated from the Doppler ultrasound signal by back-projecting the velocity in the measured direction, onto the vessel direction. Additionally, we estimate the flux at user-selected cross-sections of the vessel by integrating the velocities over the area of the cross-section. In order to visualize the flow and the flux, we propose a visualization design based on traced particles colored by the flux. The velocities are visualized by animating particles in the flow field. Further, we propose a novel particle velocity legend as a means for the user to estimate the numerical value of the current velocity. 
Finally, we perform an evaluation of the technique where the accuracy of the velocity estimation is measured using a 4D MRI dataset as a basis for the ground truth.", month = may, isbn = "978-80-223-3377-1", publisher = "ACM Publishing House", location = "Smolenice, Slovak Republic", booktitle = "SCCG 2013 - Proceedings of the 29th Spring Conference on Computer Graphics", pages = "128--135", keywords = "Medical Visualization, Biomedical", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_D3D/", } @inproceedings{Viola_Ivan_2013_HQ3, title = "High-Quality 3D Visualization of In-Situ Ultrasonography", author = "Ivan Viola and {\AA}smund Birkeland and Veronika Solteszova and Linn Helljesen and Helwig Hauser and Spiros Kotopoulis and Kim Nylund and Dag Magne Ulvang and Ola Kristoffer {\O}ye and Trygve Hausken and Odd Helge Gilja", year = "2013", abstract = "In recent years medical ultrasound has experienced a rapid development in the quality of real-time 3D ultrasound (US) imaging. The image quality of the 3D volume that was previously possible to achieve within the range of a few seconds, is now possible in a fraction of a second. This technological advance offers entirely new opportunities for the use of US in the clinic. 
In our project, we investigate how real-time 3D US can be combined with high-performance processing of today’s graphics hardware to allow for high-quality 3D visualization and precise navigation during the examination.", month = may, publisher = "Eurographics", note = "1st Prize - Medical Prize Short Paper", location = "Girona, Spain", booktitle = "EG 2013 - Dirk Bartz Prize", pages = "1--4", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_HQ3/", } @inproceedings{mistelbauer-2012-ssv, title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization", author = "Gabriel Mistelbauer and Hamed Bouzari and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Stefan Bruckner and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller", year = "2012", abstract = "Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Of these, not each might be applicable to the data at hand or might reveal the desired result. Owing to this, innovations may be used inappropriately and users may become skeptical. In this paper we propose a knowledge-assisted interface for medical visualization, which reduces the necessary effort to use new visualization methods, by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without the users to worry about when, where, and especially how they may or should use them. 
We present an application of our system in the medical domain and give qualitative feedback from domain experts.", month = oct, publisher = "IEEE Computer Society", location = "Seattle, WA, USA", booktitle = "IEEE Conference on Visual Analytics Science and Technology (IEEE VAST) 2012", pages = "163--172", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. 
We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @inproceedings{Ford-2012-HRV, title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound", author = "Steven Ford and Gabriel Kiss and Ivan Viola and Stefan Bruckner and Hans Torp", year = "2012", abstract = "Medical ultrasound is a challenging modality when it comes to image interpretation. The goal we address in this work is to assist the ultrasound examiner and partially alleviate the burden of interpretation. We propose to address this goal with visualization that provides clear cues on the orientation and the correspondence between anatomy and the data being imaged. Our system analyzes the stream of 3D ultrasound data and in real-time identifies distinct features that are basis for a dynamically deformed mesh model of the heart. The heart mesh is composited with the original ultrasound data to create the data-to-anatomy correspondence. The visualization is broadcasted over the internet allowing, among other opportunities, a direct visualization on the patient on a tablet computer. The examiner interacts with the transducer and with the visualization parameters on the tablet. 
Our system has been characterized by domain specialist as useful in medical training and for navigating occasional ultrasound users.", booktitle = "Proceedings of the Workshop at SIGGRAPH Asia 2012", keywords = "medical visualization, ultrasound", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/", } @inproceedings{mistelbauer-2012-cr, title = "Centerline Reformations of Complex Vascular Structures", author = "Gabriel Mistelbauer and Andrej Varchola and Hamed Bouzari and Juraj Starinsky and Arnold K\"{o}chl and R\"{u}diger Schernthaner and Dominik Fleischmann and Eduard Gr\"{o}ller and Milo\v{s} \v{S}r\'{a}mek", year = "2012", abstract = "Visualization of vascular structures is a common and frequently performed task in the field of medical imaging. There exist well established and applicable methods such as Maximum Intensity Projection (MIP) and Curved Planar Reformation (CPR). However, when calcified vessel walls are investigated, occlusion hinders exploration of the vessel interior with MIP. In contrast, CPR offers the possibility to visualize the vessel lumen by cutting a single vessel along its centerline. Extending the idea of CPR, we propose a novel technique, called Centerline Reformation (CR), which is capable of visualizing the lumen of spatially arbitrarily oriented vessels not necessarily connected in a tree structure. In order to visually emphasize depth, overlap and occlusion, halos can optionally envelope the vessel lumen. The required vessel centerlines are obtained from volumetric data by performing a scale-space based feature extraction. We present the application of the proposed technique in a focus and context setup. Further, we demonstrate how it facilitates the investigation of dense vascular structures, particularly cervical vessels or vessel data featuring peripheral arterial occlusive diseases or pulmonary embolisms. 
Finally, feedback from domain experts is given.", isbn = "978-1-4673-0863-2", location = "Songdo, Korea (South) ", booktitle = "Pacific Visualization Symposium (PacificVis), 2012 IEEE", pages = "233--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-cr/", } @inproceedings{Balabanian-2010-IIV, title = "Interactive Illustrative Visualization of Hierarchical Volume Data", author = "Jean-Paul Balabanian and Ivan Viola and Eduard Gr\"{o}ller", year = "2010", abstract = "In scientific visualization the underlying data often has an inherent abstract and hierarchical structure. Therefore, the same dataset can simultaneously be studied with respect to its characteristics in the three-dimensional space and in the hierarchy space. Often both characteristics are equally important to convey. For such scenarios we explore the combination of hierarchy visualization and scientific visualization, where both data spaces are effectively integrated. We have been inspired by illustrations of species evolutions where hierarchical information is often present. Motivated by these traditional illustrations, we introduce integrated visualizations for hierarchically organized volumetric datasets. The hierarchy data is displayed as a graph, whose nodes are visually augmented to depict the corresponding 3D information. These augmentations include images due to volume raycasting, slicing of 3D structures, and indicators of structure visibility from occlusion testing. New interaction metaphors are presented that extend visualizations and interactions, typical for one visualization space, to control visualization parameters of the other space. Interaction on a node in the hierarchy influences visual representations of 3D structures and vice versa. We integrate both the abstract and the scientific visualizations into one view which avoids frequent refocusing typical for interaction with linked-view layouts. 
We demonstrate our approach on different volumetric datasets enhanced with hierarchical information.", month = jun, location = "Ottawa, Ontario, Canada", booktitle = "Proceedings of Graphics Interface 2010", pages = "137--144", keywords = "visualization, volume data, hierarchical", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Balabanian-2010-IIV/", } @article{solteszova-2010-MOS, title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering", author = "Veronika Solteszova and Daniel Patel and Stefan Bruckner and Ivan Viola", year = "2010", abstract = "In this paper, we present a novel technique which simulates directional light scattering for more realistic interactive visualization of volume data. Our method extends the recent directional occlusion shading model by enabling light source positioning with practically no performance penalty. Light transport is approximated using a tilted cone-shaped function which leaves elliptic footprints in the opacity buffer during slice-based volume rendering. We perform an incremental blurring operation on the opacity buffer for each slice in front-to-back order. This buffer is then used to define the degree of occlusion for the subsequent slice. 
Our method is capable of generating high-quality soft shadowing effects, allows interactive modification of all illumination and rendering parameters, and requires no pre-computation.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", pages = "883--891", keywords = "global illumination, volume rendering, shadows, optical model", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, pointbased representations, or others. 
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers \& Graphics", volume = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @inproceedings{balabanian-2008-hvv, title = "Hierarchical Volume Visualization of Brain Anatomy", author = "Jean-Paul Balabanian and Martin Ystad and Ivan Viola and Arvid Lundervold and Helwig Hauser and Eduard Gr\"{o}ller", year = "2008", month = oct, isbn = "978-3-89838-609-8", location = "Konstanz, Germany", editor = "Oliver Deussen and Daniel Keim and Dietmar Saupe", booktitle = "VMV 2008, Vision, Modeling and Visualization", pages = "313--322", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/balabanian-2008-hvv/", } @inproceedings{ruiz-2008-OVR, title = "Obscurance-based Volume Rendering Framework", author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Obscurances, from which ambient occlusion is a particular case, is a technology that produces natural-looking lighting effects in a faster way than global illumination. Its application in volume visualization is of special interest since it permits us to generate a high quality rendering at a low cost. In this paper, we propose an obscurance-based framework that allows us to obtain realistic and illustrative volume visualizations in an interactive manner. Obscurances can include color bleeding effects without additional cost. 
Moreover, we obtain a saliency map from the gradient of obscurances and we show its application to enhance volume visualization and to select the most salient views.", booktitle = "Proceedings of Volume Graphics 2008", keywords = "volume rendering, illustrative visualization, ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/", } @inproceedings{ruiz-2008-SEV, title = "Similarity-based Exploded Views", author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Exploded views are often used in illustration to overcome the problem of occlusion when depicting complex structures. In this paper, we propose a volume visualization technique inspired by exploded views that partitions the volume into a number of parallel slabs and shows them apart from each other. The thickness of slabs is driven by the similarity between partitions. We use an information-theoretic technique for the generation of exploded views. First, the algorithm identifies the viewpoint from which the structure is the highest. Then, the partition of the volume into the most informative slabs for exploding is obtained using two complementary similarity-based strategies. The number of slabs and the similarity parameter are freely adjustable by the user.", booktitle = "Proceedings of Smart Graphics 2008", pages = "154--165", keywords = "volume visualization, illustrative visualization, exploded views", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/", } @inproceedings{burns-2007-fea, title = "Feature Emphasis and Contextual Cutaways for Multimodal Medical Visualization", author = "Michael Burns and Martin Haidacher and Wolfgang Wein and Ivan Viola and Eduard Gr\"{o}ller", year = "2007", abstract = "Dense clinical data like 3D Computed Tomography (CT) scans can be visualized together with real-time imaging for a number of medical intervention applications. 
However, it is difficult to provide a fused visualization that allows sufficient spatial perception of the anatomy of interest, as derived from the rich pre-operative scan, while not occluding the real-time image displayed embedded within the volume. We propose an importance-driven approach that presents the embedded data such that it is clearly visible along with its spatial relation to the surrounding volumetric material. To support this, we present and integrate novel techniques for importance specification, feature emphasis, and contextual cutaway generation. We show results in a clinical context where a pre-operative CT scan is visualized alongside a tracked ultrasound image, such that the important vasculature is depicted between the viewpoint and the ultrasound image, while a more opaque representation of the anatomy is exposed in the surrounding area.", month = may, isbn = "9783905673456", publisher = "IEEE", location = "Norrk\"{o}ping, Sweden", editor = "K. Museth and T. M\"{o}ller and A. Ynnerman", booktitle = "Proceedings of Eurographics / IEEE VGTC Symposium on Visualization (EuroVis 2007)", pages = "275--282", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/burns-2007-fea/", }