@talk{Groeller_V42020, title = "Medicinae Notitia Visibilis Fac – Quo Vadis?", author = "Eduard Gr\"{o}ller", year = "2020", abstract = "Medical Visualization is a scientific field that takes advantage of human vision and perception to amplify cognition and gain insight in (complex) medical data. The interdisciplinarity and the diversity of stakeholders and their greatly varying expertises and expectations, make it a demanding area with many overlapping, but distinct domains. Collaboration and communication is challenged by: “Die Grenzen meiner Sprache bedeuten die Grenzen meiner Welt“ (Ludwig Wittgenstein). This talk reflects on the feedback from an ad hoc and random sampling of my professional network with comments, e.g., from basic and applied visual and medical computing experts, commercial developers of medical software, clinical researchers and practitioners.", month = oct, event = "IEEE Vis 2020 Application Spotlight (virtual): Recent Challenges in Medical Visualization", location = "Salt Lake City, USA", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Groeller_V42020/", } @article{Mazza_2020, title = "Homomorphic-Encrypted Volume Rendering", author = "Sebastian Mazza and Daniel Patel and Ivan Viola", year = "2020", abstract = "Computationally demanding tasks are typically calculated in dedicated data centers, and real-time visualizations also follow this trend. Some rendering tasks, however, require the highest level of confidentiality so that no other party, besides the owner, can read or see the sensitive data. Here we present a direct volume rendering approach that performs volume rendering directly on encrypted volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data and rendered image are uninterpretable to the rendering server. 
Our volume rendering pipeline introduces novel approaches for encrypted-data compositing, interpolation, and opacity modulation, as well as simple transfer function design, where each of these routines maintains the highest level of privacy. We present performance and memory overhead analysis that is associated with our privacy-preserving scheme. Our approach is open and secure by design, as opposed to secure through obscurity. Owners of the data only have to keep their secure key confidential to guarantee the privacy of their volume data and the rendered images. Our work is, to our knowledge, the first privacy-preserving remote volume-rendering approach that does not require that any server involved be trustworthy; even in cases when the server is compromised, no sensitive data will be leaked to a foreign party.", month = oct, journal = "IEEE Transactions on Visualization andComputer Graphics", volume = "27", doi = "10.1109/TVCG.2020.3030436", pages = "1--10", keywords = "Volume Rendering, Transfer Function, Homomorphic-Encryption, Paillier", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Mazza_2020/", } @article{furmanova_2020, title = "VAPOR: Visual Analytics for the Exploration of Pelvic Organ Variability in Radiotherapy", author = "Katar\'{i}na Furmanov\'{a} and Nicolas Grossmann and Ludvig Paul Muren and Oscar Casares-Magaz and Vitali Moiseenko and John P. Einck and Eduard Gr\"{o}ller and Renata Raidou", year = "2020", abstract = "In radiation therapy (RT) for prostate cancer, changes in patient anatomy during treatment might lead to inadequate tumor coverage and higher irradiation of healthy tissues in the nearby pelvic organs. Exploring and analyzing anatomical variability throughout the course of RT can support the design of more robust treatment strategies, while identifying patients that are prone to radiation-induced toxicity. 
We present VAPOR, a novel application for the exploration of pelvic organ variability in a cohort of patients, across the entire treatment process. Our application addresses: (i) the global exploration and analysis of anatomical variability in an abstracted tabular view, (ii) the local exploration and analysis thereof in anatomical 2D/3D views, where comparative and ensemble visualizations are integrated, and (iii) the correlation of anatomical variability with radiation doses and potential toxicity. The workflow is based on available retrospective cohort data, which include segmentations of the bladder, the prostate, and the rectum through the entire treatment period. VAPOR is applied to four usage scenarios, which were conducted with two medical physicists. Our application provides clinical researchers with promising support in demonstrating the significance of treatment adaptation to anatomical changes.", month = oct, doi = "https://doi.org/10.1016/j.cag.2020.07.001", journal = "Computer & Graphics", note = "Special Section on VCBM 2019", volume = "91", pages = "25--38", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/furmanova_2020/", } @article{raidou_slicedice, title = "Slice and Dice: A PhysicalizationWorkflow for Anatomical Edutainment", author = "Renata Raidou and Eduard Gr\"{o}ller and Hsiang-Yun Wu", year = "2020", abstract = "During the last decades, anatomy has become an interesting topic in education—even for laymen or schoolchildren. As medical imaging techniques become increasingly sophisticated, virtual anatomical education applications have emerged. Still, anatomical models are often preferred, as they facilitate 3D localization of anatomical structures. Recently, data physicalizations (i.e., physical visualizations) have proven to be effective and engaging—sometimes, even more than their virtual counterparts. So far, medical data physicalizations involve mainly 3D printing, which is still expensive and cumbersome. 
We investigate alternative forms of physicalizations, which use readily available technologies (home printers) and inexpensive materials (paper or semi-transparent films) to generate crafts for anatomical edutainment. To the best of our knowledge, this is the first computer-generated crafting approach within an anatomical edutainment context. Our approach follows a cost-effective, simple, and easy-to-employ workflow, resulting in assemblable data sculptures (i.e., semi-transparent sliceforms). It primarily supports volumetric data (such as CT or MRI), but mesh data can also be imported. An octree slices the imported volume and an optimization step simplifies the slice configuration, proposing the optimal order for easy assembly. A packing algorithm places the resulting slices with their labels, annotations, and assembly instructions on a paper or transparent film of user-selected size, to be printed, assembled into a sliceform, and explored. We conducted two user studies to assess our approach, demonstrating that it is an initial positive step towards the successful creation of interactive and engaging anatomical physicalizations.", month = oct, journal = "Computer Graphics Forum (CGF)", volume = "x", pages = "1--12", keywords = "Data Physicalization, Life and Medical Sciences, Anatomical Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/raidou_slicedice/", } @inproceedings{schindler_2020vis, title = "The Anatomical Edutainer", author = "Marwin Schindler and Hsiang-Yun Wu and Renata Raidou", year = "2020", abstract = "Physical visualizations (i.e., data representations by means of physical objects) have been used for many centuries in medical and anatomical education. Recently, 3D printing techniques started also to emerge. 
Still, other medical physicalizations that rely on affordable and easy-to-find materials are limited, while smart strategies that take advantage of the optical properties of our physical world have not been thoroughly investigated. We propose the Anatomical Edutainer, a workflow to guide the easy, accessible, and affordable generation of physicalizations for tangible, interactive anatomical edutainment. The Anatomical Edutainer supports 2D printable and 3D foldable physicalizations that change their visual properties (i.e., hues of the visible spectrum) under colored lenses or colored lights, to reveal distinct anatomical structures through user interaction.", month = oct, event = "IEEE Vis 2020", booktitle = "IEEE Vis Short Papers 2020", pages = "1--5", keywords = "Data Physicalization, Medical Visualization, Anatomical Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schindler_2020vis/", } @inproceedings{raidou_visgap2020, title = "Lessons Learnt from Developing Visual Analytics Applications for Adaptive Prostate Cancer Radiotherapy", author = "Renata Raidou and Katar\'{i}na Furmanov\'{a} and Nicolas Grossmann and Oscar Casares-Magaz and Vitali Moiseenko and John P. Einck and Eduard Gr\"{o}ller and Ludvig Paul Muren", year = "2020", abstract = "In radiotherapy (RT), changes in patient anatomy throughout the treatment period might lead to deviations between planned and delivered dose, resulting in inadequate tumor coverage and/or overradiation of healthy tissues. Adapting the treatment to account for anatomical changes is anticipated to enable higher precision and less toxicity to healthy tissues. Corresponding tools for the in-depth exploration and analysis of available clinical cohort data were not available before our work. In this paper, we discuss our on-going process of introducing visual analytics to the domain of adaptive RT for prostate cancer. 
This has been done through the design of three visual analytics applications, built for clinical researchers working on the deployment of robust RT treatment strategies. We focus on describing our iterative design process, and we discuss the lessons learnt from our fruitful collaboration with clinical domain experts and industry, interested in integrating our prototypes into their workflow.", month = may, event = "EGEV2020 - VisGap Workshop", booktitle = "The Gap between Visualization Research and Visualization Software (VisGap) (2020)", pages = "1--8", keywords = "Visual Analytics, Life and Medical Sciences", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/raidou_visgap2020/", } @bachelorsthesis{schindler2020, title = "Anatomical Entertainer: Physical Visualization in a Medical Context", author = "Marwin Schindler", year = "2020", abstract = "Visualizations are essential for anatomical education of the general public. Traditional visualization methods focus on 2D and 3D information representations, either digital or printed, but visualizations also have a physical form. Physical visualization is a subdomain of the traditional visualization domain, where the data is represented by means of a physical object. Physical visualizations have been reported to lead to greater information insights for the interacting user, but a lot of the fabrication methods to create physical visualizations of the anatomy are not accessible for the general public. In this thesis, we present a workflow to ease the process of creating physical visualizations, made out of paper. The proposed workflow can be used to create two different types of anatomical visualizations. First, we generate 2D visualizations, examinable with color filters that enhance the interactivity of the visualization. 
To encode multiple channels of information from the anatomical structures, a specific method of color blending is used, which enables the users to access the different anatomical structures selectively, without occlusion. That way the users explore the single layers of the printed visualizations using color filters. Second, 3D papercrafts are generated, which are also examinable with color filters. The anatomical model is unfolded on the paper sheet, can be printed and the user can assemble it and examine it under the color lenses, similarly to the 2D case. The papercrafts may be used as an educational toy in school teaching or for entertainment, since they are very easy to produce and to distribute. We present several 2D and 3D examples of the workflow of the Anatomical Entertainer on models for anatomical education.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schindler2020/", } @mastersthesis{Spelitz2020, title = "BrainGait: Gait Event Detection and Visualization for Robotic Rehabilitation", author = "Stefan Spelitz", year = "2020", abstract = "Mobility impairment in adults is one of most prevalent types of disabilities in developed countries. Gait rehabilitation can be used to regain some or all motor functions, especially after a stroke. In recent years, robot-assisted gait training attracted increasing interest in rehabilitation facilities and scientific research. With this advent of robotic recovery comes the need to objectively measure the patient’s performance. Physiotherapists need essential information about the current status during training and how to improve the patient’s gait, presented in an easy to grasp and compact form. 
On the other hand, physicians rely on statistical measures in order to evaluate the patient’s progress throughout the therapy. This thesis discusses commonly used visualizations and statistics while proposing improvements and adaptations in the context of PerPedes, a novel robotic gait rehabilitation device. In order to measure the patient’s performance, a new algorithm for gait event detection was developed, based on force data from pressure plates. The following work demonstrates that standard algorithms fail with PerPedes, while the proposed solution can robustly handle highly distorted gait patterns, such as hemiplegic gait, foot drop, or walking backwards. The software application developed during this thesis provides feedback to the therapist and generates suggestions for gait improvement. Furthermore, gait statistics are inferred from each therapy session and collected in order to be used for future analysis and inter-patient comparison.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Mobility rehabilitation, Electroencephalography", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Spelitz2020/", } @misc{raidou_shonan167, title = "NII Shonan Meeting Report No. 167: Formalizing Biological and Medical Visualization", author = "Renata Raidou and Barbora Kozlikova and Johanna Beyer and Timo Ropinski and Issei Fujishiro", year = "2020", abstract = "Medicine and biology are among the most important research fields, having a significant impact on humans and their health. For decades, these fields have been highly dependent on visualization—establishing a tight coupling which is crucial for the development of visualization techniques, designed exclusively for the disciplines of medicine and biology. 
These visualization techniques can be generalized by the term Biological and Medical Visualization—for short,BioMedical Visualization. BioMedical Visualization is not only an enabler for medical diagnosis and treatment, but also an influential component of today’s life science research. Many BioMedical domains can now be studied at various scales and dimensions, with different imaging modalities and simulations, and for a variety of purposes. Accordingly, BioMedical Visualization has also innumerable contributions in industrial applications. However, despite its proven scientific maturity and societal value, BioMedical Visualization is often treated within Computer Science as a mere application subdomain of the broader field of Visualization.To enable BioMedical Visualization to further thrive, it is important to formalize its characteristics independently from the general field of Visualization.Also, several lessons learnt within the context of BioMedical Visualization may be applicable and extensible to other application domains or to the parent field of Visualization. Formalization has become particularly urgent, with the latest advances of BioMedical Visualization—in particular, with respect to dealing with Big Data Visualization, e.g., for the visualization of multi-scale, multi-modal,cohort, or computational biology data. Rapid changes and new opportunities in the field, also regarding the incorporation of Artificial Intelligence with“human-in-the-loop” concepts within the field of Visual Analytics, compel further this formalization. By enabling the BioMedical Visualization community to have intensive discussions on the systematization of current knowledge, we can adequately prepare ourselves for future prospects and challenges, while also contributing to the broader Visualization community. 
During this 4-day seminar, which was the 150th NII Shonan meeting to be organized, we brought together 25 visualization experts from diverse institutions,backgrounds and expertise to discuss, identify, formalize, and document the specifics of our field. This has been a great opportunity to cover a range of relevant and contemporary topics, and as a systematic effort towards establishing better fundaments for the field and towards determining novel future challenges.In the upcoming sections of this report, we summarize the content of invited talks and of the eight main topics that were discussed within the working groups during the seminar.", month = feb, note = "ISSN 2186-7437", number = "TR-193-02-2020-1", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/raidou_shonan167/", } @phdthesis{Weissenboeck_2019_PhD, title = "Visual Analysis of Methods for Processing 3D X-ray Computed Tomography Data of Advanced Composites", author = "Johannes Weissenb\"{o}ck", year = "2019", abstract = "Advanced composites have excellent mechanical properties at low weight and can be realized as complex components that can be manufactured quickly and cost-effectively. Due to these outstanding characteristics, these materials are used in many di˙erent areas of industry, such as aviation and automotive. Industrial 3D X-ray computed tomography (XCT) is used as a non-destructive testing (NDT) method to inspect the quality of components and to develop new advanced composite materials. XCT has the ability to determine the inner and outer geometries of a specimen non-destructively. For example, interesting features in fiber-reinforced polymers (FRPs) such as fibers, pores, and higher-density inclusions can be detected. The high resolutions of modern XCT devices generate large volume datasets, which reveal very fine structures. However, this high information content makes the exploration and analysis of the datasets with conventional methods very diÿcult and time-consuming. 
In this doctoral thesis, typical NDT application scenarios of advanced composites using XCT are addressed and visual analysis methods and visualization techniques are designed to provide material experts with tools to improve their workflow and to eÿciently analyze the XCT data, so that domain-specific questions can be answered easily and quickly. This work describes a novel visualization system for the interactive exploration and detailed analysis of FRPs, a tool for the visual analysis and evaluation of segmentation filters to accurately determine porosity in FRPs, and a more general system for the visual comparison of interesting features in an ensemble of XCT datasets are presented. The results of the individual visualization systems are presented using real-world and simulated XCT data. The proposed visual analysis methods support the experts in their workflows by enabling improved data analysis processes that are simple, fast, and well-founded, and provide new insights into material characterization with XCT.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Weissenboeck_2019_PhD/", } @misc{grossmann_2019_pelvisrunner_poster, title = "Pelvis Runner: A Visual Analytics Tool for Pelvic Organ Variability Exploration in Prostate Cancer Cohorts", author = "Nicolas Grossmann and Oscar Casares-Magaz and Ludvig Paul Muren and Vitali Moiseenko and John P. Einck and Eduard Gr\"{o}ller and Renata Raidou", year = "2019", abstract = "Pelvis Runner is a visual analysis tool for the exploration of the variability of segmented pelvic organs in multiple patients, across the course of radiation therapy treatment. Radiation treatment is performed through the course of weeks, during which the anatomy of the patient changes. 
This variability may be responsible for side effects, due to the potential over-irradiation of healthy tissues. Exploring and analyzing organ variability in patient cohorts can help clinical researchers to design more robust treatment strategies. Our work addresses, first, the global exploration and analysis of pelvic organ shape variability in an abstracted tabular view for the entire cohort. Second, local exploration and analysis of the variability are provided on-demand in anatomical 2D/3D views for cohort partitions. The Pelvis Runner has been evaluated by two clinical researchers and is a promising basis for the exploration of pelvic organ variability.", month = oct, event = "IEEE VIS VAST", Conference date = "Poster presented at IEEE VIS VAST (2019-10-20--2019-10-25)", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/grossmann_2019_pelvisrunner_poster/", } @inproceedings{raidou_2019_pelvisrunner, title = "Pelvis Runner: Visualizing Pelvic Organ Variability in a Cohort of Radiotherapy Patients", author = "Nicolas Grossmann and Oscar Casares-Magaz and Ludvig Paul Muren and Vitali Moiseenko and John P. Einck and Eduard Gr\"{o}ller and Renata Raidou", year = "2019", abstract = "In radiation therapy, anatomical changes in the patient might lead to deviations between the planned and delivered dose--including inadequate tumor coverage, and overradiation of healthy tissues. Exploring and analyzing anatomical changes throughout the entire treatment period can help clinical researchers to design appropriate treatment strategies, while identifying patients that are more prone to radiation-induced toxicity. We present the Pelvis Runner, a novel application for exploring the variability of segmented pelvic organs in multiple patients, across the entire radiation therapy treatment process. 
Our application addresses (i) the global exploration and analysis of pelvic organ shape variability in an abstracted tabular view and (ii) the local exploration and analysis thereof in anatomical 2D/3D views, where comparative and ensemble visualizations are integrated. The workflow is based on available retrospective cohort data, which incorporate segmentations of the bladder, the prostate, and the rectum through the entire radiation therapy process. The Pelvis Runner is applied to four usage scenarios, which were conducted with two clinical researchers, i.e., medical physicists. Our application provides clinical researchers with promising support in demonstrating the significance of treatment plan adaptation to anatomical changes.", month = sep, event = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", doi = "10.2312/vcbm.20191233", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", pages = "69--78", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_pelvisrunner/", } @inproceedings{raidou_2019_preha, title = "preha: Establishing Precision Rehabilitation with Visual Analytics", author = "Georg Bernold and Kresimir Matkovic and Eduard Gr\"{o}ller and Renata Raidou", year = "2019", abstract = "This design study paper describes preha, a novel visual analytics application in the field of in-patient rehabilitation. We conducted extensive interviews with the intended users, i.e., engineers and clinical rehabilitation experts, to determine specific requirements of their analytical process.We identified nine tasks, for which suitable solutions have been designed and developed in the flexible environment of kibana. Our application is used to analyze existing rehabilitation data from a large cohort of 46,000 patients, and it is the first integrated solution of its kind. 
It incorporates functionalities for data preprocessing (profiling, wrangling and cleansing), storage, visualization, and predictive analysis on the basis of retrospective outcomes. A positive feedback from the first evaluation with domain experts indicates the usefulness of the newly proposed approach and represents a solid foundation for the introduction of visual analytics to the rehabilitation domain.", month = sep, event = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", doi = "10.2312/vcbm.20191234", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", pages = "79--89", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_preha/", } @article{vitruvian_2019, title = "The Vitruvian Baby: Interactive Reformation of Fetal Ultrasound Data to a T-Position", author = "Eric M\"{o}rth and Renata Raidou and Ivan Viola and Noeska Natasja Smit", year = "2019", abstract = "Three-dimensional (3D) ultrasound imaging and visualization is often used in medical diagnostics, especially in prenatal screening. Screening the development of the fetus is important to assess possible complications early on. State of the art approaches involve taking standardized measurements to compare them with standardized tables. The measurements are taken in a 2D slice view, where precise measurements can be difficult to acquire due to the fetal pose. Performing the analysis in a 3D view would enable the viewer to better discriminate between artefacts and representative information. Additionally making data comparable between different investigations and patients is a goal in medical imaging techniques and is often achieved by standardization. With this paper, we introduce a novel approach to provide a standardization method for 3D ultrasound fetus screenings. Our approach is called “The Vitruvian Baby” and incorporates a complete pipeline for standardized measuring in fetal 3D ultrasound. 
The input of the method is a 3D ultrasound screening of a fetus and the output is the fetus in a standardized T-pose. In this pose, taking measurements is easier and comparison of different fetuses is possible. In addition to the transformation of the 3D ultrasound data, we create an abstract representation of the fetus based on accurate measurements. We demonstrate the accuracy of our approach on simulated data where the ground truth is known. ", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine (2019)", volume = "9", doi = "10.2312/vcbm.20191245", pages = "201--205", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/vitruvian_2019/", } @phdthesis{miao_thesis_2019, title = "Geometric Abstraction for Effective Visualization and Modeling", author = "Haichao Miao", year = "2019", abstract = "In this cumulative thesis, I describe geometric abstraction as a strategy to create an integrated visualization system for spatial scientific data. The proposed approach creates a multitude of representations of spatial data in two dominant ways. Along the spatiality axis, it gradually removes spatial details and along the visual detail axis, the features are increasingly aggregated and represented by different visual objects. These representations are then integrated into a conceptual abstraction space that enables users to efficiently change the representation to adjust the abstraction level to a task in mind. To enable the expert to perceive correspondence between these representations, controllable animated transitions are provided. Finally, the abstraction space can record user interactions and provides visual indications to guide the expert towards interesting representations for a particular task and data set. Mental models of the experts play a crucial role in the understanding of the abstract representations and are considered in the design of the visualization system to keep the cognitive load low on the user’s side. 
This approach is demonstrated in two distinct fields of placenta research and in silico design of DNA nanostructures. For both fields geometric abstraction facilitates effective visual inspection and modeling. The Adenita toolkit, a software for the design of novel DNA nanostructures, implements the proposed visualization concepts. This toolkit, together with the proposed visualization concepts, is currently deployed to several research groups to help them in nanotechnology research.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/miao_thesis_2019/", } @bachelorsthesis{Hainoun2019, title = "Visualization of Data Flags in Table Lens Views to Improve the Readability of Metadata and the Tracking of Data Cleaning", author = "Muhammad Mujahed Hainoun", year = "2019", abstract = "Recent evaluation indicates that wrong decisions resulting from systems operating based on bad data costed worldwide about $30 billion in the year 2006. This work addresses the importance of Data Quality (DQ) as a critical requirement in any information system. In this regard, DQ criteria and problems such as missing entries, duplicates, and faulty values are identified. Different approaches and techniques used for data cleaning to fix DQ issues are reviewed. In this work a new technique is integrated into VISPLORE, a framework for data analysis and visualization, that allows the framework to visualize multiple types of per-value meta-information. We will show how our work enhances the readability of the table lens view, one of the many viewing modes provided in VISPLORE, and helps the user understand the status of data entries to decide on what entries need to be cleaned and how. 
This work also expands on the interactive data cleaning tools provided by VISPLORE, by allowing the user to manually delete implausible values or replace them with more plausible ones, while keeping track of this cleaning process. With the integrated new features to the table lens view, VISPLORE is now able to present more detailed data with enhanced visualization features and interactive data cleaning.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Hainoun2019/", } @mastersthesis{Grossmann_MA, title = "Pelvis Runner - Comparative Visualization of Anatomical Changes", author = "Nicolas Grossmann", year = "2019", abstract = "Pelvic organs such as the bladder, rectum or prostate have highly variable shapes that change over time, due to their soft and flexible tissue and varying filling. Recent clinical work suggests that these variations might affect the effectiveness of radiation therapy treatment in patients with prostate cancer. Although in clinical practice small correction steps are performed to re-align the treated region if the organs are shifted, a more in-depth understanding and modeling might prove beneficial for the adaptation of the employed treatment planning strategy. To evaluate the viability and to account for the variability in the population of certain treatment strategies, cohort studies are performed analyzing the shape and position variability of pelvic organs. In this thesis, we propose a web-based tool that is able to analyze a cohort of pelvic organs from 24 patients across 13 treatment instances. Hereby we have two goals: On the one hand, we want to support medical researchers analyzing large groups of patients for their shape variability and the possible correlations to side effects. 
On the other hand, we want to provide support for medical experts performing individual patient treatment planning. Our tool offers both the option to analyze a large cohort of different organ shapes, by first modeling them in a shape space and then analyzing the shape variations on a per-patient basis. While this first part aims at providing users with an overview of the data, we also give them the option to perform a detailed shape analysis, where we highlight the statistically aggregated shape of a patient or a specified group using a contour variability plot. Finally, we demonstrate several possible usage scenarios for our tool and perform an informal evaluation with two medical experts. Our tool is the first significant step in supporting medical experts in demonstrating the need for adaptation in radiation therapy treatments to account for shape variability.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Grossmann_MA/", } @incollection{raidou_2019_springer, title = "Visual Analytics for the Representation, Exploration and Analysis of High-Dimensional, Multi-Faceted Medical Data", author = "Renata Raidou", year = "2019", abstract = "Medicine is among research fields with a significant impact on humans and their health. Already for decades, medicine has established a tight coupling with the visualization domain, proving the importance of developing visualization techniques, designed exclusively for this research discipline. However, medical data is steadily increasing in complexity with the appearance of heterogeneous, multi-modal, multiparametric, cohort or population, as well as uncertain data. To deal with this kind of complex data, the field of Visual Analytics has emerged. 
In this chapter, we discuss the many dimensions and facets of medical data. Based on this classification, we provide a general overview of state-of-the-art visualization systems and solutions dealing with highdimensional, multi-faceted data. Our particular focus will be on multimodal, multi-parametric data, on data from cohort or population studies and on uncertain data, especially with respect to Visual Analytics applications for the representation, exploration, and analysis of highdimensional, multi-faceted medical data.", month = jul, booktitle = "Biomedical Visualisation", chapter = "10", doi = "https://doi.org/10.1007/978-3-030-14227-8_10", editor = "Springer", note = "https://www.springer.com/gp/book/9783030142261", publisher = "Springer", volume = "2", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_2019_springer/", } @phdthesis{ganglberger2019, title = "From Neurons to Behavior: Visual Analytics Methods for Heterogeneous Spatial Big Brain Data ", author = "Florian Ganglberger", year = "2019", abstract = "Advances in neuro-imaging have allowed big brain initiatives and consortia to create vast resources of brain data that can be mined for insights into mental processes and biological principles. Research in this area does not only relate to mind and consciousness, but also to the understanding of many neurological disorders, such as Alzheimer’s disease, autism, and anxiety. Exploring the relationships between genes, brain circuitry, and behavior is therefore a key element in research that requires the joint analysis of a heterogeneous set of spatial brain data, including 3D imaging data, anatomical data, and brain networks at varying scales, resolutions, and modalities. Due to high-throughput imaging platforms, this data’s size and complexity goes beyond the state-of-the-art by several orders of magnitude. 
Current analytical workflows involve time-consuming manual data aggregation and extensive computational analysis in script-based toolboxes. Visual analytics methods for exploring big brain data can support neuroscientists in this process, so they can focus on understanding the data rather than handling it. In this thesis, several contributions that target this problem are presented. The first contribution is a computational method that fuses genetic information with spatial gene expression data and connectivity data to predict functional neuroanatomical maps. These maps indicate, which brain areas might be related to a specific function or behavior. The approach has been applied to predict yet unknown functional neuroanatomy underlying multigeneic behavioral traits identified in genetic association studies and has demonstrated that rather than being randomly distributed throughout the brain, functionally-related gene sets accumulate in specific networks. The second contribution is the creation of a data structure that enables the interactive exploration of big brain network data with billions of edges. By utilizing the resulting hierarchical and spatial organization of the data, this approach allows neuroscientists on-demand queries of incoming/outgoing connections of arbitrary regions of interest on different anatomical scales. These queries would otherwise exceed the limits of current consumer level PCs. The data structure is used in the third contribution, a novel web-based framework to explore neurobiological imaging and connectivity data of different types, modalities, and scale. It employs a query-based interaction scheme to retrieve 3D spatial gene expressions and various types of connectivity to enable an interactive dissection of networks in real-time with respect to their genetic composition. 
The data is related to a hierarchical organization of common anatomical atlases that enables neuroscientists to compare multimodal networks on different scales in their anatomical context. Furthermore, the framework is designed to facilitate collaborative work with shareable comprehensive workflows on the web. As a result, the approaches presented in this thesis may assist neuroscientists to refine their understanding of the functional organization of the brain beyond simple anatomical domains and expand their knowledge about how our genes affect our mind. ", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ganglberger2019/", } @article{raidou_star2019, title = "State-of-the-Art Report: Visual Computing in Radiation Therapy Planning", author = "Matthias Schlachter and Renata Raidou and Ludvig Paul Muren and Bernhard Preim and Katja B\"{u}hler", year = "2019", abstract = "Radiation therapy (RT) is one of the major curative approaches for cancer. It is a complex and risky treatment approach, which requires precise planning, prior to the administration of the treatment. Visual Computing (VC) is a fundamental component of RT planning, providing solutions in all parts of the process—from imaging to delivery. Despite the significant technological advancements of RT over the last decades, there are still many challenges to address. This survey provides an overview of the compound planning process of RT, and of the ways that VC has supported RT in all its facets. The RT planning process is described to enable a basic understanding in the involved data, users and workflow steps. A systematic categorization and an extensive analysis of existing literature in the joint VC/RT research is presented, covering the entire planning process. 
The survey concludes with a discussion on lessons learnt, current status, open challenges, and future directions in VC/RT research.", month = jun, journal = "Computer Graphics Forum", volume = "3", number = "38", pages = "753--779", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_star2019/", } @inproceedings{raidou_RO2019, title = "PO-0962 Bladder changes during first week of RT for prostate cancer determine the risk of urinary toxicity", author = "Oscar Casares-Magaz and Renata Raidou and NJ Pettersson and Vitali Moiseenko and John P. Einck and A Hopper and R Knopp and Ludvig Paul Muren", year = "2019", month = apr, event = "ESTRO 38", doi = "https://doi.org/10.1016/S0167-8140(19)31382-9", booktitle = "Radiotherapy and Oncology", pages = "S522--S523", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/raidou_RO2019/", } @mastersthesis{moerth-2018-tpose, title = "Interactive Reformation of Fetal Ultrasound Data to a T-Position", author = "Eric M\"{o}rth", year = "2019", abstract = "Three dimensional ultrasound images are commonly used in prenatal screening. The acquisition delivers detailed information about the skin as well as the inner organs of the fetus. Prenatal screenings in terms of growth analysis are very important to support a healthy development of the fetus. The analysis of this data involves viewing of two dimensional (2D) slices in order to take measurements or calculate the volume and weight of the fetus. These steps involve manual investigation and are dependent on the skills of the person who performs them. These measurements and calculations are very important to analyze the development of the fetus and for the birth preparation. Ultrasound imaging is a˙ected by artifacts like speckles, noise and also of structures obstructing the regions of interest. These artifacts occur because the imaging technique is using sound waves and their echo to create images. 
2D slices used as the basis for the measurement of the fetus therefore might not be the best solution. Analyzing the data in a three dimensional (3D) way would enable the viewer to have a better overview and to better distinguish between artifacts and the real data of the fetus. The growth of a fetus can be analysed by comparing standardized measurements like the crown foot length, the femur length or the derived head circumference as well as the abdominal circumference. Standardization is well known in many fields of medicine and is used to enable comparability between investigations of the same patient or between patients. Therefore we introduce a standardized way of analyzing 3D ultrasound images of fetuses. Bringing the fetus into a standardized position would enable automatized measurements by the machine and there could also be new measurements applied like the volume of specific body parts. A standardized pose would also provide possibilities to compare the results of different measurements of one fetus as well as the measurements of different fetuses. The novel method consists of six steps, namely the loading of the data, the preprocessing, the rigging of the model, the weighting of the data, the actual transformation called the ``Vitruvian Baby'' and at the end the analysis of the result. We tried to automatize the workflow as far as possible resulting in some manual tasks and some automatic ones. The loading of the data works with standard medical image formats and the preprocessing involves some interaction in order to get rid of the ultrasound induced artifacts. Transforming data into a specific position is a complex task which might involve manual processing steps. In the method presented in this work one step of the transformation namely the rigging of the model, where a skeleton is placed in the data, is performed manually.
The weighting as well as the transformation although are performed completely automatically resulting in a T-pose representation of the data. We analysed the performance of our novel approach in several ways. We first use a phantom model which has been used as a reference already presented in a T-pose. After using seven di˙erent fetus poses of the model as input the result was an average of 79,02%voxel overlapping between the output of the method and the goal T-pose. When having a look at the similarity of the finger to finger span and the head to toe measurement we considered a value of 91,08% and 94,05% in average. The time needed for the most complex manual task was in average seven minutes. After using a phantom model of a man, we also assessed the performance of the method using a computer model of a fetus and a phantom model of a 3D ultrasound investigation. The results also look very promising.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/moerth-2018-tpose/", } @mastersthesis{gutekunst_2019, title = "Guided Data Cleansing of Large Connectivity Matrices", author = "Florence Gutekunst", year = "2019", abstract = "Understanding the organization principle of the brain and its function is a continuing quest in neuroscience and psychiatry. Thus, understanding how the brain works, how it is functionally, structurally correlated as well as how the genes are expressed within the brain is one of the most important aims in neuroscience. The Biomedical Image Analysis Group at VRVis developed with the Wulf Haubensak Group at the Institute of Molecular Medicine an interactive framework that allows the real time exploration of large brain connectivity networks on multiple scales. 
The networks, represented as connectivity matrices, can be up to hundreds of gigabytes, and are too large to hold in current machines’ memory. Moreover, these connectivity matrices are redundant and noisy. A cleansing step to threshold noisy connections and group together similar rows and columns can decrease the required size and thus ease the computations in order to mine the matrices. However, the choice of a good threshold and similarity value is not a trivial task. This document presents a visual guided cleansing tool. The sampling is based on random sampling within the anatomical brain hierarchies on a user-defined global hierarchical level and sampling size ratio. This tool will be a step in the connectivity matrices preprocessing pipeline. ", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/gutekunst_2019/", } @inproceedings{amirkhanov-2018-withteeth, title = "WithTeeth: Denture Preview in Augmented Reality", author = "Aleksandr Amirkhanov and Artem Amirkhanov and Matthias Bernhard and Zsolt Toth and Sabine Stiller and Andreas Geier and Eduard Gr\"{o}ller", year = "2018", abstract = "Dentures are prosthetic devices replacing missing or damaged teeth, often used for dental reconstruction. Dental reconstruction improves the functional state and aesthetic appearance of teeth. State-of-the-art methods used by dental technicians typically do not include the aesthetic analysis, which often leads to unsatisfactory results for patients. In this paper, we present a virtual mirror approach for a dental treatment preview in augmented reality. Different denture presets are visually evaluated and compared by switching them on the fly. 
Our main goals are to provide a virtual dental treatment preview to facilitate early feedback, and hence to build the confidence and trust of patients in the outcome. The workflow of our algorithm is as follows. First, the face is detected and 2D facial landmarks are extracted. Then, 3D pose estimation of upper and lower jaws is performed and high-quality 3D models of the upper and lower dentures are fitted. The fitting uses the occlusal plane angle as determined manually by dental technicians. To provide a realistic impression of the virtual teeth, the dentures are rendered with motion blur. We demonstrate the robustness and visual quality of our approach by comparing the results of a webcam to a DSLR camera under natural, as well as controlled lighting conditions.", month = oct, isbn = "978-3-03868-072-7", address = "https://diglib.eg.org/handle/10.2312/vmv20181250", event = "VMV18", editor = "Beck, Fabian and Dachsbacher, Carsten and Sadlo, Filip", booktitle = "Vision, Modeling and Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/amirkhanov-2018-withteeth/", } @article{raidou2018visualflatter, title = "VisualFlatter - Visual Analysis of Distortions in the Projection of Biomedical Structures", author = "Nicolas Grossmann and Thomas K\"{o}ppel and Eduard Gr\"{o}ller and Renata Raidou", year = "2018", abstract = "Projections of complex anatomical or biological structures from 3D to 2D are often used by visualization and domain experts to facilitate inspection and understanding. Representing complex structures, such as organs or molecules, in a simpler 2D way often requires less interaction, while enabling comparability. However, the most commonly employed projection methods introduce size or shape distortions, in the resulting 2D representations. 
While simple projections display known distortion patterns, more complex projection algorithms are not easily predictable. We propose the VisualFlatter, a visual analysis tool that enables visualization and domain experts to explore and analyze projection-induced distortions, in a structured way.
Results of the measurements are processed and presented to the user as an augmented picture. This is achieved by employing different layers of graphical overlays on top of the original image. All measurements are based on parameters of the Kellgren and Lawrence System (KLS) for classification of Osteoarthritis (OA). The proposed method enables its users to asses the stage and tendency of OA in the knee at first glance as compared to conventional methods, which can be tedious and time-consuming. Calculated focus points in the mask layers can also be adjusted in real time to accommodate for statistical outliers. The system was incorporated into an existing web-based framework which already demonstrates its potential in a clinical environment.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Bauer_David_2018/", } @bachelorsthesis{Masopust_2018, title = "Web-Based Osteoarthritis-Analysis Generating Data from Native Libraries and Machine-Learning Models", author = "Lukas Masopust", year = "2018", abstract = "As artificial intelligence (AI) progresses with seemingly unstoppable speed, its wide field of applications broadens by the day. One area where AI advancements appear to be especially promising is their employment in the medical sector. Nowadays, due to the wider availability of processing power, algorithms based on neuronal networks can be used to generate far more data in areas where it previously seemed unthinkable. Traditional image-processing-algorithms often utilize computer vision (CV)-algorithms such as edge-detection to generate data from pixel input. While this method of gaining data worked well in the past, AI can help to improve the precision of such an analysis. The area I focussed on in this thesis is the generation of data from x-ray images of the knee joint. 
ImageBiopsy Lab (IB Lab)’s algorithms relied heavily on CV-based analysis for the diagnosis of osteoarthritis (OA) in the knee. While this yielded good results in the past, this work will show that the use of deep neuronal networks improves accuracy in a significant way. Further, neuronal networks can provide additional information that was a lot harder to be gained before, such as the laterality of a given image. The aim of this project was to diagnose OA faster and more precisely than in the past and to embed it into a web-based solution for broader accessibility. To showcase the benefits of the described method, at the time of writing, our software is in the stage of being rolled out in a hospital in Lower Austria. Because of the advancements mentioned above, this work will focus on the description and comparison of gaining information from x-ray images for a meaningful and efficient diagnosis of OA in the knee. ", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Masopust_2018/", } @mastersthesis{Oancea_2018_1, title = "Four Texture Algorithms for Recognizing Early Signs of Osteoarthritis. Data from the Multicenter Osteoarthritis Study.", author = "Stefan Ovidiu Oancea", year = "2018", abstract = "This master thesis aims to provide an in-depth comparison of four texture algorithms in their capacity of discriminating patients with osteoarthritis (OA) from the ones without, recognizing early signs of Osteoarthritis and tracking disease progression from 2D radiographs of the knee trabecular bone (TB). Given the fractal properties of the trabecular bone (TB), two fractal-based algorithms (Bone Variance Value (BVV) and Bone Score Value (BSV)) that try to characterize the complexity of the underlying 3D structure of the bone are presented. 
The third algorithm (Bone Entropy Value (BEV), based on Shannon’s Entropy) stems from the information theory and aims to describe the bone structure in terms of information complexity. The last algorithm (Bone Coocurrence Value (BCV)) is based on the co-occurrence matrix of an image and describes the image texture in terms of certain Haralick features. If successful, such algorithms posses a great potential to lower the costs (financial, time) associated with the diagnosis of osteoarthritis (OA) through automation of the procedure, and with the treatment. The earlier treatments and risk reduction measures are less costly than the procedures involved due to a more advanced stage of the disease (surgery, implants, etc.). First, a motivation for the detection of early osteoarthritis (OA) is given. Second, a detailed description and mathematical background of the algorithms are presented and validated on sample, artificial data. Third, the employed data sets used for classification tests are introduced. Fourth, the statistical methods and neural network models employed are presented and discussed. Fifth, the features produced by each algorithm are discussed and their independent and combined capacity of discriminating between bones with early signs of OA and healthy bones. Also the capacity of tracking OA progression through the years is quantified by statistical tests. Also in this part we present the best classification scores obtained from the most optimal neural networks for each use case. Finally, thoughts on future improvements and the generalization of the algorithms in other anatomical contexts, for other diseases or in other fields, like histology and mammography, are made. In this work we show that the state-of-the-art in OA prediction can be surpassed by utilizing only models based on texture features alone. 
Our gender-stratified analysis produces a prediction score of 83% for males and 81% for females in terms of Area Under the Receiver Operating Characteristic Curve (ROC-AUC).", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Oancea_2018_1/", } @misc{Karall2018_2, title = "Comparative Visual Analytics in a Cohort of Breast Cancer Patients", author = "Nikolaus Karall", year = "2018", abstract = "The most common cancer among the female population in the economically developed world is breast cancer. To signifcantly reduce the mortality among affected women an early diagnosis is essential, and also treatment strategies need to be selected carefully. Clinical researchers working on the selection of chemotherapy treatment need to analyze the progress of the disease during and after treatment and to understand how diffent groups of patients respond to selected treatments. Currently this is a diffcult task because of the multitude of involved imaging and non-imaging) data, for which adequate visualizations are required. The aim of this work is to help clinical researchers, who are working on the analysis of the progress of chemotherapy, to understand and explore the multitude of data they have. To this end, the following three tasks were realized in a web-based framework: 1. Functionality for single patient follow-up studies (intra-patient study) 2. Functionality to compare two different patients (pairwise inter-patient study) 3. 
Functionality to compare groups of patients (groupwise inter-patient study) In the examples below, we demonstrate only the latter, as it can be considered an overset of the other two tasks.", month = may, event = "EPILOG ", Conference date = "Poster presented at EPILOG (2018-06-18)", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Karall2018_2/", } @mastersthesis{Karall2017CVAB, title = "Comparative Visual Analytics in a Cohort of Breast Cancer Patients", author = "Nikolaus Karall", year = "2018", abstract = "The most common cancer among the female population in the economically developed world is breast cancer. To significantly reduce the mortality among affected women, an early diagnosis is essential, and also treatment strategies need to be selected carefully. Clinical researchers working on the selection of chemotherapy treatment need to analyze the progress of the disease during and after treatment and to understand how different groups of patients respond to selected treatments. Currently this is a difficult task because of the multitude of involved (imaging and non-imaging) data, for which adequate visualizations are required. The aim of this work is to help clinical researchers working on the analysis of the progress of chemotherapy to understand and explore the multitude of data they have. This thesis introduces a web-based framework realizing three tasks of exploring and analyzing imaging and non-imaging data of breast cancer patients in a cohort. A functionality for single patient follow-up studies (intra-patient study), a functionality to compare two different patients (pairwise inter-patient study) and a functionality to compare groups of patients (groupwise inter-patient study) are provided to enable an easier exploration and analysis of the available multivariate cohort data. 
To begin with, the imaging and non-imaging data underwent some preprocessing steps, such as registration, segmentation and calculation of tumor probability maps, to make them comparable. Afterwards, we carefully designed and implemented several multiple linked views, where interactive representations show distinct aspects of the data from which the clinical researcher can understand and analyze the available cohort data. A number of use cases to demonstrate the results that can be achieved with the provided framework are performed and they illustrate the functionality and also the importance of the designed and implemented visual analytics framework. Using this framework, clinical researchers are able to visually explore and analyze the multitude of both imaging and non-imaging data of a patient and compare patients within a cohort, which was not possible before with any available exploratory tools.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "comparative visual analytics, breast cancer", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Karall2017CVAB/", } @bachelorsthesis{Tramberger_2018, title = "Automatic Breast Lesion Evaluation for Comparative Studies", author = "Thomas Tramberger", year = "2018", abstract = "Breast cancer is the most common cancer with a high mortality rate. Neoadjuvant chemotherapie is conducted before surgery to reduce the breast tumor mass. Currently, a lot of trials are taking place, with the purpose of understanding the effects of different chemotherapy strategies. In this work a software is developed to analyse and compare the influence of these treatments. The study data is available as 4D Dynamic Contrast-Enhanced Magnetic Resonance Imaging data. To reduce the time of manual segmentation and the connection of segmented lesions over time a automatic procedure was implemented. 
This process uses the time-signal intensity curve and a support vector machine to classify lesions with calculated morphological features. To analyse the data, two views are available. The Intra-patient view visualizes the tumor behaviour of an individual patient over time. With the Multi-patient view the user is able to compare multiple patients’ lesions and additional added patient data. Both views are implemented with JavaScript and can be expanded easily. Because of missing ground truth an evaluation of the automatic segmentation method was not possible.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Tramberger_2018/", } @mastersthesis{Reiter2017CVAP, title = "Comparative Visualization of Pelvic Organ Segmentations", author = "Oliver Reiter", year = "2018", abstract = "Automatic segmentation of pelvic organs plays a major role in prostate cancer treatment and has high accuracy requirements. Segmentation experts are continuously working on improving their algorithms. However, natural anatomical variability of organs and structures is a common reason for which segmentation algorithms fail. Understanding why an algorithm fails in a specific case is of major importance. Segmentation experts expect that the shape and size of the organs can play an important role in the performance of their algorithms, but current means of exploration and analysis are limited and do not provide the necessary insight. This thesis discusses the design and implementation of a web-based application allowing for easy exploration and analysis of shape variability in order to generate hypotheses about the relation between algorithm performance and shape of organs. A new way of comparatively visualizing multiple organs of multiple patients is introduced for a detailed shape comparison. 
The application was tested with segmentation meshes of a cohort of 17 patients, each consisting of four pelvic organs and two organ-interfaces, which are labeled and have per-triangle correspondence. The proposed tools already allow users to quickly identify mis-segmented organs and hypothesize about the relation of variability to anatomical features as well as segmentation quality. The approach was applied on pelvic organ segmentations, but it can be extended to other applications like comparison of segmentation algorithms or analysis of anatomical variability in general.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "comparative visualization, statistical shape analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Reiter2017CVAP/", } @article{rraidou_clinical, title = "Uncertainty evaluation of image-based tumour control probability models in radiotherapy of prostate cancer using a visual analytic tool", author = "Oscar Casares-Magaz and Renata Raidou and Jarle Roervik and Anna Vilanova i Bartroli and Ludvig Paul Muren", year = "2018", abstract = "Functional imaging techniques provide radiobiological information that can be included into tumour control probability (TCP) models to enable individualized outcome predictions in radiotherapy. However, functional imaging and the derived radiobiological information are influenced by uncertainties, translating into variations in individual TCP predictions. In this study we applied a previously developed analytical tool to quantify dose and TCP uncertainty bands when initial cell density is estimated from MRI-based apparent diffusion coefficient maps of eleven patients. 
TCP uncertainty bands of 16% were observed at patient level, while dose variations bands up to 8 Gy were found at voxel level for an iso-TCP approach.", month = jan, journal = "Physics and Imaging in Radiation Oncology", number = "5", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/rraidou_clinical/", } @article{raidou_2018_bladderrunner, title = "Bladder Runner: Visual Analytics for the Exploration of RT-Induced Bladder Toxicity in a Cohort Study", author = "Renata Raidou and Oscar Casares-Magaz and Aleksandr Amirkhanov and Vitali Moiseenko and Ludvig Paul Muren and John P. Einck and Anna Vilanova i Bartroli and Eduard Gr\"{o}ller", year = "2018", abstract = "We present the Bladder Runner, a novel tool to enable detailed visual exploration and analysis of the impact of bladder shape variation on the accuracy of dose delivery, during the course of prostate cancer radiotherapy (RT). Our tool enables the investigation of individual patients and cohorts through the entire treatment process, and it can give indications of RT-induced complications for the patient. In prostate cancer RT treatment, despite the design of an initial plan prior to dose administration, bladder toxicity remains very common. The main reason is that the dose is delivered in multiple fractions over a period of weeks, during which, the anatomical variation of the bladder - due to differences in urinary filling - causes deviations between planned and delivered doses. Clinical researchers want to correlate bladder shape variations to dose deviations and toxicity risk through cohort studies, to understand which specific bladder shape characteristics are more prone to side effects. This is currently done with Dose-Volume Histograms (DVHs), which provide limited, qualitative insight. The effect of bladder variation on dose delivery and the resulting toxicity cannot be currently examined with the DVHs. 
To address this need, we designed and implemented the Bladder Runner, which incorporates visualization strategies in a highly interactive environment with multiple linked views. Individual patients can be explored and analyzed through the entire treatment period, while inter-patient and temporal exploration, analysis and comparison are also supported. We demonstrate the applicability of our presented tool with a usage scenario, employing a dataset of 29 patients followed through the course of the treatment, across 13 time points. We conducted an evaluation with three clinical researchers working on the investigation of RT-induced bladder toxicity. All participants agreed that Bladder Runner provides better understanding and new opportunities for the exploration and analysis of the involved cohort data.", journal = "Computer Graphics Forum", volume = "37", number = "3", issn = "1467-8659", doi = "10.1111/cgf.13413", pages = "205--216", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/raidou_2018_bladderrunner/", } @misc{raidou_bestphd, title = "EuroVis Best PhD Award 2018—Visual Analytics for Digital Radiotherapy: Towards a Comprehensible Pipeline", author = "Renata Raidou", year = "2018", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/raidou_bestphd/", } @article{EuroVis2018-ShortPapers-Karall, title = "ChemoExplorer: A Dashboard for the Visual Analysis of Chemotherapy Response in Breast Cancer Patients", author = "Nikolaus Karall and Eduard Gr\"{o}ller and Renata Raidou", year = "2018", abstract = "In breast cancer chemotherapy treatment, different alternative strategies can be employed. Clinical researchers working on the optimization of chemotherapy strategies need to analyze the progress of the treatment and to understand how different groups of patients respond to selected therapies. This is a challenging task, because of the multitude of imaging and non-imaging health record data involved. 
We, hereby, introduce a web-based dashboard that facilitates the comparison and analysis of publicly available breast cancer chemotherapy response data, consisting of a follow-up study of 63 patients. Each patient received one of two available therapeutic strategies and their treatment response was documented. Our dashboard provides an initial basis for clinical researchers working on chemotherapy optimization, to analyze the progress of treatment and to compare the response of (groups of) patients with distinct treatment characteristics. Our approach consists of multiple linked representations that provide interactive views on different aspects of the available imaging and non-imaging data. To illustrate the functionality of the ChemoExplorer, we conducted a usage scenario that shows the initial results of our work.", journal = "Computer Graphics Forum", doi = "10.2312/eurovisshort.20181077", pages = "049--053", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/EuroVis2018-ShortPapers-Karall/", } @mastersthesis{Pezenka-2016-MT, title = "BrainXPlore - Decision finding in Brain Biopsy Planning", author = "Lukas Pezenka", year = "2017", abstract = "Neurosurgeons make decisions based on expert knowledge that takes factors such as safety margins, the avoidance of risk structures, trajectory length and trajectory angle into consideration. While some of those factors are mandatory, others can be optimized in order to obtain the best possible trajectory under the given circumstances. Through comparison with the actually chosen trajectories from real biopsies and qualitative interviews with domain experts, we identified important rules for trajectory planning. In this thesis, we present BrainXplore, an interactive visual analysis tool for aiding neurosurgeons in planning brain biopsies. 
BrainXplore is an extendable Biopsy Planning framework that incorporates those rules while at the same time leaving full flexibility for their customization and adding of new structures at risk. Automatically computed candidate trajectories can be incrementally refined in an interactive manner until an optimal trajectory is found. We employ a spatial index server as part of our system that allows us to access distance information on an unlimited number of risk structures at arbitrary resolution. Furthermore, we implemented InfoVis techniques such as Parallel Coordinates and risk signature charts to drive the decision process. As a case study, BrainXPlore offers a variety of information visualization modalities to present multivariate data in different ways. We evaluated BrainXPlore on a real dataset and accomplished acceptable results. The participating neurosurgeon gave us the feedback that BrainXPlore can decrease the time needed for biopsy planning and aid novice users in their decision making process.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Pezenka-2016-MT/", } @bachelorsthesis{unger_2017, title = "Evaluation of Machine Learning Frameworks on Tuberculosis Classification of Chest Radiographs", author = "Katharina Unger", year = "2017", abstract = "In this thesis different state-of-the-art machine learning frameworks were implemented and evaluated on chest radiographs to classify them into tuberculotic or healthy radiographs. Traditional explicit feature engineering was performed, as well as different deep learning approaches were applied. For the deep learning experiments different publicly available architectures were compared in two different tasks. 
The first task with deep learning was to use a Convolutional Neural Network, already trained on a different task, to extract features of the chest radiographs. These features were then classified separately. The second experiment was to use a Convolutional Neural Network, again pretrained on a different task, and train this network carefully again on the chest radiographs. The results of the different frameworks were summarized, evaluated and presented in tables.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/unger_2017/", } @article{mindek-2017-dsn, title = "Data-Sensitive Visual Navigation", author = "Peter Mindek and Gabriel Mistelbauer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2017", abstract = "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes. 
The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.", month = oct, journal = "Computers & Graphics", volume = "67", number = "C", pages = "77--85", keywords = "navigation, exploration, medical visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/", } @article{rraidou_EG17, title = "Visual Analytics for Digital Radiotherapy: Towards a Comprehensible Pipeline", author = "Renata Raidou and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2017", abstract = "Prostate cancer is one of the most frequently occurring types of cancer in males. It is often treated with radiation therapy,which aims at irradiating tumors with a high dose, while sparing the surrounding healthy tissues. In the course of the years,radiotherapy technology has undergone great advancements. However, tumors are not only different from each other, theyare also highly heterogeneous within, consisting of regions with distinct tissue characteristics, which should be treated withdifferent radiation doses. Tailoring radiotherapy planning to the specific needs and intra-tumor tissue characteristics of eachpatient is expected to lead to more effective treatment strategies. Currently, clinical research is moving towards this direction,but an understanding of the specific tumor characteristics of each patient, and the integration of all available knowledge into apersonalizable radiotherapy planning pipeline are still required. The present work describes solutions from the field of VisualAnalytics, which aim at incorporating the information from the distinct steps of the personalizable radiotherapy planningpipeline, along with eventual sources of uncertainty, into comprehensible visualizations. All proposed solutions are meantto increase the – up to now, limited – understanding and exploratory capabilities of clinical researchers. 
These approaches contribute towards the interactive exploration, visual analysis and understanding of the involved data and processes at different steps of the radiotherapy planning pipeline, creating a fertile ground for future research in radiotherapy planning.", month = apr, journal = "Computer Graphics Forum (Proceedings of Eurographics)", volume = "36", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/rraidou_EG17/", } @book{rraidou_phdbook, title = "Visual Analytics for Digital Radiotherapy: Towards a Comprehensible Pipeline", author = "Renata Raidou", year = "2017", month = mar, isbn = "978-90-386-4230-7", pages = "1--240", publisher = "TU Eindhoven", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/rraidou_phdbook/", } @bachelorsthesis{Gall_2017, title = "Comparison of Vessel Segmentation Techniques", author = "Alexander Gall", year = "2017", abstract = "Image segmentation is an important processing step in various applications and crucial in the medical field. When a new segmentation technique is introduced, validation and evaluation are essential for medical image analysis. But the automation of these processes is still not sufficient. Many algorithms have been published but there is still no satisfying way to assess whether an algorithm produces more accurate segmentations than another. More effort is spent on the development of algorithms than on their evaluation and therefore many researchers use the less complex subjective methods. For these techniques multiple experts are needed to visually compare several segmentation results, which is a very time-consuming process. Another way of comparing different results is the supervised evaluation method. Here we need experts, who manually segment reference images, which are used for comparison. 
As seen in recent researches there is a need for unsupervised methods due to many applications, in which user assistance is infeasible. The aim of this thesis is to provide an environment to visually and objectively evaluate segmentation results in the field of vessel segmentations. Our framework enables the comparison at voxel-level with various visualization techniques and objective measurements. These methods are meant to make the comparison more understandable for users. A subjective evaluation is realized through a comparative visualization by using a two- and three-dimensional comparison of voxels. Another general overview is provided by a maximum-intensity projection, which highlights the vessel structure. As purely objective evaluation technique, various metrics are used, to assure independence from experts or a ground truth. By using these techniques this paper presents an approach for evaluating differences in medical images, which does not rely on a permanent presence of an expert.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Gall_2017/", } @bachelorsthesis{Heim-2017, title = "Semiautomated Editing of Vessel Segmentation Masks", author = "Anja Heim", year = "2017", abstract = "This thesis describes a technique for editing segmentation results of vessels, which should enhance usage and reduce work duration for physicians by using a simple and fast way of interaction. Moreover also a quick calculation of an accurate result was of primary interest. Since vascular structures are vulnerable to diseases, vessels are the main focus of this thesis. Nowadays, Image Analysis is able to facilitate the medical diagnosis procedure. 
Since stroke treatment is time-crucial, appropriate algorithms should be fast and enable an accurate depiction of the arteries to simplify the diagnostic process. However, because automatic segmentation is often quite inaccurate and manual segmentation is tedious, neither of these two methods alone is often adequate for usage. Because of this we suggest to combine the fast automatic segmentation and the exact manual editing done by clinical experts. To reduce effort and working time of the medical staff, this thesis describes different techniques, which were developed to modify and, more importantly, to improve the segmentation results. The segmentation mask can be altered as its components can be separately removed and independent elements can be connected. A framework was implemented, with which a user is able to perform these tasks interactively. The deletion process is supported by various metrics, which enable the search and removal of similar structures. Also this framework assists the reconnection of vessels by finding the most likely connection by the means of image intensities and their gradients. The main goal of this thesis was to facilitate and accelerate the editing process by implementing fast semi-automatic algorithms. 
Intuitive interaction methods also had a major impact on the design.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Heim-2017/", } @bachelorsthesis{Eckelt_2017, title = "Vascular Printing - 3D Printing of Aortic Dissections", author = "Klaus Eckelt", year = "2017", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Eckelt_2017/", } @talk{miao_stateKeylabTalk_2017, title = "Visualization of Brain Arteries, the Placenta and DNA Nanostructures in the Context of Abstraction", author = "Haichao Miao", year = "2017", event = "State Key Lab - Zhejiang University", location = "State Key Lab - Zhejiang University", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/miao_stateKeylabTalk_2017/", } @article{miao_tvcg_2017, title = "Placenta Maps: In Utero Placental Health Assessment of the Human Fetus", author = "Haichao Miao and Gabriel Mistelbauer and Alexey Karimov and Amir Alansary and Alice Davidson and David F.A. Lloyd and Mellisa Damodaram and Lisa Story and Jana Hutter and Joseph V. 
Hajnal and Mary Rutherford and Bernhard Preim and Bernhard Kainz and Eduard Gr\"{o}ller", year = "2017", abstract = "null", journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "23", number = "6", pages = "1612--1623", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/miao_tvcg_2017/", } @article{Groeller_2016_P4, title = "Visual Analytics for the Exploration and Assessment of Segmentation Errors", author = "Renata Raidou and Freek Marcelis and Marcel Breeuwer and Eduard Gr\"{o}ller and Anna Vilanova i Bartroli and Huub van de Wetering", year = "2016", abstract = "Several diagnostic and treatment procedures require the segmentation of anatomical structures from medical images. However, the automatic model-based methods that are often employed, may produce inaccurate segmentations. These, if used as input for diagnosis or treatment, can have detrimental effects for the patients. Currently, an analysis to predict which anatomic regions are more prone to inaccuracies, and to determine how to improve segmentation algorithms, cannot be performed. We propose a visual tool to enable experts, working on model-based segmentation algorithms, to explore and analyze the outcomes and errors of their methods. Our approach supports the exploration of errors in a cohort of pelvic organ segmentations, where the performance of an algorithm can be assessed. Also, it enables the detailed exploration and assessment of segmentation errors, in individual subjects. To the best of our knowledge, there is no other tool with comparable functionality. A usage scenario is employed to explore and illustrate the capabilities of our visual tool. To further assess the value of the proposed tool, we performed an evaluation with five segmentation experts. The evaluation participants confirmed the potential of the tool in providing new insight into their data and employed algorithms. 
They also gave feedback for future improvements.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine", pages = "193--202", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P4/", } @article{Mistelbauer_Gabriel_2016, title = "Aortic Dissection Maps: Comprehensive Visualization of Aortic Dissections for Risk Assessment", author = "Gabriel Mistelbauer and Johanna Schmidt and A.M. Sailer and Kathrin B\"{a}umler and Shannon Walters and Dominik Fleischmann", year = "2016", abstract = "Aortic dissection is a life threatening condition of the aorta, characterized by separation of its wall layers into a true and false lumen. A subset of patients require immediate surgical or endovascular repair. All survivors of the acute phase need long-term surveillance with imaging to monitor chronic degeneration and dilatation of the false lumen and prevent late adverse events such as rupture, or malperfusion. We introduce four novel plots displaying features of aortic dissections known or presumed to be associated with risk of future adverse events: Aortic diameter, the blood supply (outflow) to the aortic branches from the true and false lumen, the previous treatment, and an estimate of adverse event-free probabilities in one, two and 5 years. Aortic dissection maps, the composite visualization of these plots, provide a baseline for visual comparison of the complex features and associated risk of aortic dissection. 
These maps may lead to more individualized monitoring and improved, patient-centric treatment planning in the future.", month = sep, journal = "Eurographics Workshop on Visual Computing for Biology and Medicine (2016)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Mistelbauer_Gabriel_2016/", } @bachelorsthesis{Gadllah_Hani_2016, title = "Comparative Visualization of the Circle of Willis", author = "Hani Gadllah", year = "2016", abstract = "The human brain is supplied with blood by arteries that form a collateral circulation, the so-called Circle of Willis (CoW). The anatomy of the CoW varies considerably among the population. In fact, depending on the study, just 13% to 72% of the population does have the typical textbook illustration of the CoW. Although divergent configurations are usually not pathological, some incomplete configurations increase the risk of stroke. Furthermore, studies suggest an association between certain neurological diseases and abnormal configurations of the CoW. Thus, for the diagnosis and treatment of diverse neurological diseases the assessment of the patient’s CoW is an important issue. This thesis addresses the development of a software for a comparative visualization of the CoWs of a population with the CoWs of a second population. For this purpose, an average CoW is calculated for each of the populations. The two resulting CoWs are then visualized side-by-side, so that the viewer is able to distinguish differences between the CoWs of the two populations with relatively little effort. The aim of this visualization is the support of studies that consider the clinical significance of the different CoW configurations as well as the support of diagnosis and treatment of diseases that are caused by an abnormal configuration of the CoW. The latter can be achieved by comparing the patient’s CoW with datasets of risk groups or with a dataset of a healthy population. 
", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Gadllah_Hani_2016/", } @phdthesis{karimov-2016-GIVE, title = "Guided Interactive Volume Editing in Medicine", author = "Alexey Karimov", year = "2016", abstract = "Various medical imaging techniques, such as Computed Tomography, Magnetic Resonance Imaging, Ultrasonic Imaging, are now gold standards in the diagnosis of different diseases. The diagnostic process can be greatly improved with the aid of automatic and interactive analysis tools, which, however, require certain prerequisites in order to operate. Such analysis tools can, for example, be used for pathology assessment, various standardized measurements, treatment and operation planning. One of the major requirements of such tools is the segmentation mask of an object-of-interest. However, the segmentation of medical data remains subject to errors and mistakes. Often, physicians have to manually inspect and correct the segmentation results, as (semi-)automatic techniques do not immediately satisfy the required quality. To this end, interactive segmentation editing is an integral part of medical image processing and visualization. In this thesis, we present three advanced segmentation-editing techniques. They are focused on simple interaction operations that allow the user to edit segmentation masks quickly and effectively. These operations are based on a topology-aware representation that captures structural features of the segmentation mask of the object-of-interest. Firstly, in order to streamline the correction process, we classify segmentation defects according to underlying structural features and propose a correction procedure for each type of defect. 
This alleviates users from manually applying the proper editing operations, but the segmentation defects still have to be located by users. Secondly, we extend the basic editing process by detecting regions that potentially contain defects. With subsequently suggested correction scenarios, users are hereby immediately able to correct a specific defect, instead of manually searching for defects beforehand. For each suggested correction scenario, we automatically determine the corresponding region of the respective defect in the segmentation mask and propose a suitable correction operation. In order to create the correction scenarios, we detect dissimilarities within the data values of the mask and then classify them according to the characteristics of a certain type of defect. Potential findings are presented with a glyph-based visualization that facilitates users to interactively explore the suggested correction scenarios on different levels-of-detail. As a consequence, our approach even offers users the possibility to fine-tune the chosen correction scenario instead of directly manipulating the segmentation mask, which is a time-consuming and cumbersome task. Third and finally, we guide users through the multitude of suggested correction scenarios of the entire correction process. After statistically evaluating all suggested correction scenarios, we rank them according to their significance of dissimilarities, offering fine-grained editing capabilities at a user-specified level-of-detail. As we visually convey this ranking in a radial layout, users can easily spot and select the most (or the least) dissimilar correction scenario, which improves the segmentation mask mostly towards the desired result. All techniques proposed within this thesis have been evaluated by collaborating radiologists. We assessed the usability, interaction aspects, the accuracy of the results and the expenditure of time of the entire correction process. 
The outcome of the assessment showed that our guided volume editing not only leads to acceptable segmentation results with only a few interaction steps, but also is applicable to various application scenarios.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-GIVE/", } @techreport{karimov-2016-SD, title = "Statistics-Driven Localization of Dissimilarities in Data", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Eduard Gr\"{o}ller", year = "2016", abstract = "The identification of dissimilar regions in spatial and temporal data is a fundamental part of data exploration. This process takes place in applications, such as biomedical image processing as well as climatic data analysis. We propose a general solution for this task by employing well-founded statistical tools. From a large set of candidate regions, we derive an empirical distribution of the data and perform statistical hypothesis testing to obtain p-values as measures of dissimilarity. Having p-values, we quantify differences and rank regions on a global scale according to their dissimilarity to user-specified exemplar regions. We demonstrate our approach and its generality with two application scenarios, namely interactive exploration of climatic data and segmentation editing in the medical domain. In both cases our data exploration protocol unifies the interactive data analysis, guiding the user towards regions with the most relevant dissimilarity characteristics. 
The dissimilarity analysis results are conveyed with a radial tree, which prevents the user from searching exhaustively through all the data.", month = apr, number = "TR-186-2-16-1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/karimov-2016-SD/", } @bachelorsthesis{Oancea_Stefan_2016_VOT, title = "Variance Orientation Transform Detection of Early Osteoarthritis in Knee Trabecular Bone", author = "Stefan Ovidiu Oancea", year = "2016", abstract = "Since the fractal properties of the knee trabecular bone were discovered, fractal methods for analyzing bone surface radiographic projections have gained more attention. This is partly due to the fact that radiography is the cheapest imaging technique in routine clinical screening and partly due to the fact that it was shown that the trabecular bones of osteoarthritic patients indicate early deformations, even long before the characteristic join loss occurs. The ultimate goal of such an algorithm would be to differentiate healthy from unhealthy trabecular bone. This paper presents a report of our implementation of the Variance Orientation Transform (VOT) algorithm, a fractal method, which unlike other similar methods, is able to quantify bone texture in different directions and over different scales of measurement. It is based on the idea that a single fractal dimension value is not enough to describe such a complex structure as the trabecular bone and thus, VOT calculates more descriptive fractal dimensions called fractal signatures (FSs). In Chapters 1 and 2 we introduce the notion of fractals and the theoretical background behind them and the VOT algorithm. 
In Chapter 3 similar techniques for analyzing trabecular bone are presented and in Chapter 4 our particular attempt at implementing VOT is described in detail; moreover, in the same Chapter VOT is validated using some artificially generated fractal surfaces and the ability of differentiating healthy and affected bone is also investigated. The last Chapter, Chapter 5, covers further possible ideas of improving and testing of the algorithm.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Oancea_Stefan_2016_VOT/", } @article{raidou_miccai16, title = "Employing Visual Analytics to Aid the Design of White Matter Hyperintensity Classifiers.", author = "Renata Raidou and Hugo J. Kuijf and Neda Sepasian and Nicola Pezzotti and Willem H. Bouvy and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "Accurate segmentation of brain white matter hyperintensi-ties (WMHs) is important for prognosis and disease monitoring. To thisend, classi ers are often trained { usually, using T1 and FLAIR weightedMR images. Incorporating additional features, derived from di usionweighted MRI, could improve classi cation. However, the multitude ofdi usion-derived features requires selecting the most adequate. For this,automated feature selection is commonly employed, which can often besub-optimal. In this work, we propose a di erent approach, introducing asemi-automated pipeline to select interactively features for WMH classi -cation. The advantage of this solution is the integration of the knowledgeand skills of experts in the process. In our pipeline, a Visual Analytics(VA) system is employed, to enable user-driven feature selection. 
The resulting features are T1, FLAIR, Mean Diffusivity (MD), and Radial Diffusivity (RD) -- and secondarily, CS and Fractional Anisotropy (FA). The next step in the pipeline is to train a classifier with these features, and compare its results to a similar classifier, used in previous work with automated feature selection. Finally, VA is employed again, to analyze and understand the classifier performance and results.", journal = "Proceedings of International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_miccai16/", } @article{Solteszova2016, title = "Output-Sensitive Filtering of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner", year = "2016", abstract = "Real-time volume data acquisition poses substantial challenges for the traditional visualization pipeline where data enhancement is typically seen as a pre-processing step. In the case of 4D ultrasound data, for instance, costly processing operations to reduce noise and to remove artefacts need to be executed for every frame. To enable the use of high-quality filtering operations in such scenarios, we propose an output-sensitive approach to the visualization of streaming volume data. Our method evaluates the potential contribution of all voxels to the final image, allowing us to skip expensive processing operations that have little or no effect on the visualization. As filtering operations modify the data values which may affect the visibility, our main contribution is a fast scheme to predict their maximum effect on the final image. Our approach prioritizes filtering of voxels with high contribution to the final visualization based on a maximal permissible error per pixel. With zero permissible error, the optimized filtering will yield a result that is identical to filtering of the entire volume. 
We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios that require on-the-fly processing.", journal = "Computer Graphics Forum", volume = "35", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Solteszova2016/", } @article{raidou_eurovis16, title = "Visual Analysis of Tumor Control Models for Prediction of Radiotherapy Response.", author = "Renata Raidou and Oscar Casares-Magaz and Ludvig Paul Muren and Uulke A van der Heide and Jarle Roervik and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2016", abstract = "In radiotherapy, tumors are irradiated with a high dose, while surrounding healthy tissues are spared. To quantify the prob-ability that a tumor is effectively treated with a given dose, statistical models were built and employed in clinical research.These are called tumor control probability (TCP) models. Recently, TCP models started incorporating additional informationfrom imaging modalities. In this way, patient-specific properties of tumor tissues are included, improving the radiobiologicalaccuracy of models. Yet, the employed imaging modalities are subject to uncertainties with significant impact on the modelingoutcome, while the models are sensitive to a number of parameter assumptions. Currently, uncertainty and parameter sensitivityare not incorporated in the analysis, due to time and resource constraints. To this end, we propose a visual tool that enablesclinical researchers working on TCP modeling, to explore the information provided by their models, to discover new knowledgeand to confirm or generate hypotheses within their data. 
Our approach incorporates the following four main components: (1)It supports the exploration of uncertainty and its effect on TCP models; (2) It facilitates parameter sensitivity analysis to com-mon assumptions; (3) It enables the identification of inter-patient response variability; (4) It allows starting the analysis fromthe desired treatment outcome, to identify treatment strategies that achieve it. We conducted an evaluation with nine clinicalresearchers. All participants agreed that the proposed visual tool provides better understanding and new opportunities for theexploration and analysis of TCP modeling.", journal = "EuroVis - Eurographics/IEEE-VGTC Symposium on Visualization 2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/raidou_eurovis16/", } @book{Chen-Information-2016, title = "Information Theory Tools for Visualization", author = "Min Chen and Miquel Feixas and Ivan Viola and Anton Bardera and Mateu Sbert and Han Wei Shen", year = "2016", isbn = "9781498740937", pages = "194", publisher = "CRC Press", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Chen-Information-2016/", } @article{malan_fluoro, title = "A fluoroscopy-based planning and guidance software tool for minimally invasive hip refixation by cement injection.", author = "DF Malan and SJ van der Walt and Renata Raidou and B van den Berg and BC Stoel and CP Botha and RG Nelissen and ER Valstar", year = "2016", abstract = "PURPOSE: In orthopaedics, minimally invasive injection of bone cement is an established technique. We present HipRFX, a software tool for planning and guiding a cement injection procedure for stabilizing a loosening hip prosthesis. HipRFX works by analysing a pre-operative CT and intraoperative C-arm fluoroscopic images. METHODS: HipRFX simulates the intraoperative fluoroscopic views that a surgeon would see on a display panel. Structures are rendered by modelling their X-ray attenuation. 
These are then compared to actual fluoroscopic images which allow cement volumes to be estimated. Five human cadaver legs were used to validate the software in conjunction with real percutaneous cement injection into artificially created periprosthetic lesions. RESULTS: Based on intraoperatively obtained fluoroscopic images, our software was able to estimate the cement volume that reached the pre-operatively planned targets. The actual median target lesion volume was 3.58 ml (range 3.17-4.64 ml). The median error in computed cement filling, as a percentage of target volume, was 5.3% (range 2.2-14.8%). Cement filling was between 17.6 and 55.4% (median 51.8%). CONCLUSIONS: As a proof of concept, HipRFX was capable of simulating intraoperative fluoroscopic C-arm images. Furthermore, it provided estimates of the fraction of injected cement deposited at its intended target location, as opposed to cement that leaked away. This level of knowledge is usually unavailable to the surgeon viewing a fluoroscopic image and may aid in evaluating the success of a percutaneous cement injection intervention.", journal = "International Journal of Computer Assisted Radiology and Surgery", number = "2", volume = "11", pages = "281--296", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/malan_fluoro/", } @article{miao_2016_cgf, title = "Visual Quantification of the Circle of Willis: An Automated Identification and Standardized Representation", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2016", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). It is an arterial structure with the responsibility of supplying the brain with blood, however, dysfunctions can lead to strokes. The diagnosis of such a time-critical/urgent event depends on the expertise of radiologists and the applied software tools. 
They use basic display methods of the volumetric data without any support of advanced image processing and visualization techniques. The goal of this paper is to present an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW's configuration. This novel representation provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets together with an enumeration technique for labelling the arterial segments by detecting the main supplying arteries of the CoW. We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries and visually capturing the overall configuration of the CoW.", issn = "1467-8659", journal = "Computer Graphics Forum", keywords = "Circle of Willis, medical visualization, information visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/miao_2016_cgf/", } @bachelorsthesis{moerth_eric-2016-3DF, title = "3D-Printing of Fetal Ultrasound", author = "Eric M\"{o}rth", year = "2016", abstract = "The 3D ultrasound in prenatal diagnostics is nowadays a standard investigation in the field of medical informatics. The acquired data can be used in lots of different applications. One of them is to fabricate the fetus model using a 3D printer. The problem here is to convert the given volume data into a structure that can be printed. Current generation of 3D printers expect as an input objects defined by closed surfaces. This work handles the problem of how to calculate such surfaces. Our solution relies on the marching cubes algorithm that extracts the surface out of the volume data. 
The extracted surface is then refined. The last processing step is to save the data into an suitable data format. The results demonstrate that it is possible to print the fetus model from the 3D ultrasound data and that people are able to perceive the face of the fetus in the fabricated objects.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/moerth_eric-2016-3DF/", } @inproceedings{Miao_2015_VCBM, title = "CoWRadar: Visual Quantification of the Circle of Willis in Stroke Patients", author = "Haichao Miao and Gabriel Mistelbauer and Christian Nasel and Eduard Gr\"{o}ller", year = "2015", abstract = "This paper presents a method for the visual quantification of cerebral arteries, known as the Circle of Willis (CoW). The CoW is an arterial structure that is responsible for the brain’s blood supply. Dysfunctions of this arterial circle can lead to strokes. The diagnosis relies on the radiologist’s expertise and the software tools used. These tools consist of very basic display methods of the volumetric data without support of advanced technologies in medical image processing and visualization. The goal of this paper is to create an automated method for the standardized description of cerebral arteries in stroke patients in order to provide an overview of the CoW’s configuration. This novel display provides visual indications of problematic areas as well as straightforward comparisons between multiple patients. Additionally, we offer a pipeline for extracting the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) data sets. An enumeration technique for the labeling of the arterial segments is therefore suggested. We also propose a method for detecting the CoW’s main supplying arteries by analyzing the coronal, sagittal and transverse image planes of the data sets. 
We evaluated the feasibility of our visual quantification approach in a study of 63 TOF-MRA data sets and compared our findings to those of three radiologists. The obtained results demonstrate that our proposed techniques are effective in detecting the arteries of the CoW.", month = sep, isbn = "978-3-905674-82-8", publisher = "The Eurographics Association", organization = "EG Digital Library", location = "Chester, United Kingdom", issn = "2070-5786", editor = "Katja B\"{u}hler and Lars Linsen and Nigel W. John", booktitle = "EG Workshop on Visual Computing for Biology and Medicine", pages = "1--10", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Miao_2015_VCBM/", } @article{karimov-2015-HD, title = "Guided Volume Editing based on Histogram Dissimilarity", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner", year = "2015", abstract = "Segmentation of volumetric data is an important part of many analysis pipelines, but frequently requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. We propose an approach to improve volume editing by detecting potential segmentation defects while considering the underlying structure of the object of interest. Our method is based on a novel histogram dissimilarity measure between individual regions, derived from structural information extracted from the initial segmentation. Based on this information, our interactive system guides the user towards potential defects, provides integrated tools for their inspection, and automatically generates suggestions for their resolution. We demonstrate that our approach can reduce interaction effort and supports the user in a comprehensive investigation for high-quality segmentations. 
", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", pages = "91--100", keywords = "Edge and feature detection, Image Processing and Computer Vision, Computer Graphics, Display algorithms, Picture/Image Generation, Segmentation, Methodology and Techniques, Interaction techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/", } @mastersthesis{Miao_Haichao_2015_VQC, title = "Visual Quantification of the Circle of Willis in Stroke Patients", author = "Haichao Miao", year = "2015", abstract = "This thesis presents a novel method for the visual quantification of cerebral arteries. The Circle of Willis (CoW) is an arterial structure that is responsible for the brain’s blood supply. Dysfunctions of this arterial circle can lead to strokes. The diagnosis of stroke patients is complex and relies on the radiologist’s expertise and the software tools used. These tools consist of very basic display methods of the volumetric data without support of state-of-the-art technologies in medical image processing and visualization. The goal of this thesis is to create an automated method for the standardized visualization of cerebral arteries in stroke patients in order to allow visual indications of problematic areas as well as straightforward inter-patient comparisons. Prior to the visualization, this work offers a solution for the extraction of the CoW from Time-of-Flight Magnetic Resonance Angiography (TOF-MRA) images. An enumeration technique for the labeling of the segments is therefore suggested. Furthermore, it proposes a method for the detection of the CoW’s main supplying arteries by analyzing the coronal, sagittal and transverse image planes of the volume. This work gives a comprehensive account of the entire pipeline that is required to extract the arteries in the CoW and to build a model for the standardized visualization. 
The final goal of this thesis is to create an effective display of the arteries based on a radial tree layout. The feasibility of the visual quantification method is tested in a study of 63 TOF-MRAs. With the proposed methodology applied to the subjects, the results were compared to the findings from radiologists. The obtained results demonstrate that the proposed techniques are effective in detecting the arteries of the CoW. Finally, we focused our methods on the identification of the main arteries.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Miao_Haichao_2015_VQC/", } @article{raidou_EuroVis15, title = "Visual analytics for the exploration of tumor tissue characterization", author = "Renata Raidou and Uulke A van der Heide and Cuong V Dinh and Ghazaleh Ghobadi and Jesper Follsted Kallehauge and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2015", abstract = "Tumors are heterogeneous tissues consisting of multiple regions with distinct characteristics. Characterization ofthese intra-tumor regions can improve patient diagnosis and enable a better targeted treatment. Ideally, tissuecharacterization could be performed non-invasively, using medical imaging data, to derive per voxel a number offeatures, indicative of tissue properties. However, the high dimensionality and complexity of this imaging-derivedfeature space is prohibiting for easy exploration and analysis - especially when clinical researchers require toassociate observations from the feature space to other reference data, e.g., features derived from histopathologicaldata. Currently, the exploratory approach used in clinical research consists of juxtaposing these data, visuallycomparing them and mentally reconstructing their relationships. 
This is a time consuming and tedious process, from which it is difficult to obtain the required insight. We propose a visual tool for: (1) easy exploration and visual analysis of the feature space of imaging-derived tissue characteristics and (2) knowledge discovery and hypothesis generation and confirmation, with respect to reference data used in clinical research. We employ, as central view, a 2D embedding of the imaging-derived features. Multiple linked interactive views provide functionality for the exploration and analysis of the local structure of the feature space, enabling linking to patient anatomy and clinical reference data. We performed an initial evaluation with ten clinical researchers. All participants agreed that, unlike current practice, the proposed visual tool enables them to identify, explore and analyze heterogeneous intra-tumor regions and particularly, to generate and confirm hypotheses, with respect to clinical reference data.", journal = "Computer Graphics Forum", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/raidou_EuroVis15/", } @studentproject{PERNDORFER-2015-ECS, title = "Exploring Cells via Serious Gaming", author = "Rafael Perndorfer and Thomas Stipsits", year = "2015", abstract = "Es wurde ein Spiel entwickelt, dass z.B. Sch\"{u}lern grundlegende Mechanismen der Zellbiologie spielerisch n\"{a}herbringt.", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/PERNDORFER-2015-ECS/", } @inproceedings{Viola_Ivan_DAC, title = "Deriving Anatomical Context from 4D Ultrasound", author = "Markus M\"{u}ller and Linn E. S. Helljesen and Raphael Prevost and Ivan Viola and Kim Nylund and Odd Helge Gilja and Nassir Navab and Wolfgang Wein", year = "2014", abstract = "Real-time three-dimensional (also known as 4D) ultrasound imaging using matrix array probes has the potential to create large-volume information of entire organs such as the liver without external tracking hardware. 
This information can in turn be placed into the context of a CT or MRI scan of the same patient. However for such an approach many image processing challenges need to be overcome and sources of error addressed, including reconstruction drift, anatomical deformations, varying appearance of anatomy, and imaging artifacts. In this work,we present a fully automatic system including robust image-based ultrasound tracking, a novel learning-based global initialization of the anatomical context, and joint mono- and multi-modal registration. In an evaluation on 4D US sequences and MRI scans of eight volunteers we achieve automatic reconstruction and registration without any user interaction, assess the registration errors based on physician-defined landmarks, and demonstrate realtime tracking of free-breathing sequences.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", note = "The electronic version of the proceedings is available from the Eurographics Digital Library at http://diglib.eg.org", location = "Vienna, Austria", issn = "2070-5778", event = "4th Eurographics Workshop on Visual Computing for Biology and Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM14", pages = "173--180", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_DAC/", } @inproceedings{Viola_Ivan_VDP, title = "Visibility-Driven Processing of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner", year = "2014", abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw data is challenging to visualize directly without additional processing. Noise removal and feature detection are common operations, but many methods are too costly to compute over the whole volume when dealing with live streamed data. 
In this paper, we propose a visibility-driven processing scheme for handling costly on-the-fly processing of volumetric data in real-time. In contrast to the traditional visualization pipeline, our scheme utilizes a fast computation of the potentially visible subset of voxels which significantly reduces the amount of data required to process. As filtering operations modify the data values which may affect their visibility, our method for visibility-mask generation ensures that the set of elements deemed visible does not change after processing. Our approach also exploits the visibility information for the storage of intermediate values when multiple operations are performed in sequence, and can therefore significantly reduce the memory overhead of longer filter pipelines. We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios where on-the-fly processing is required.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", location = "Vienna, Austria", issn = "2070-5778", event = "4th EG Workshop on Visual Computing and Biology Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM 2014", pages = "127--136", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_VDP/", } @misc{fmistelbauer-2014-adict, title = "ActiveDICOM - Enhancing Static Medical Images with Interaction", author = "Florian Mistelbauer and Gabriel Mistelbauer and Eduard Gr\"{o}ller", year = "2014", abstract = "Digital Imaging and Communications in Medicine (DICOM) is a well-establish standard in medical imaging, consisting not only of image data, but sensitive data such as patient and examination information. Nowadays, although having a large variety of advanced rendering techniques available, DICOM images are still generated and sent to the Picture Archiving and Communication System (PACS). 
These images are then fetched by the medical doctor from a workstation and used for medical reporting. The user has no other possibilities than being able to change the windowing function for displaying the DICOM images. If a certain region is of special interest, either images of the whole data set are generated or have to be specifically requested. Both approaches consume a considerable amount of time. Secondly, the image generation on demand remains pending until done by the responsible assistant. Despite supporting a broad range of features and being widely applied, DICOM images remain static. We propose a visualization mapping language, Active DICOM Script (ADICT), which enhances conventional DICOM with interactive elements by combining heterogeneous data, interaction and visualization. Such DICOM images are then called Active Digital Imaging and Communications in Medicine (ActiveDICOM).", month = sep, series = "EG VCBM 2014", location = "Vienna, Austria", event = "Eurographics Workshop on Visual Computing for Biology and Medicine", booktitle = "Posters at Eurographics Workshop on Visual Computing for Biology and Medicine", conferencedate = "Poster presented at Eurographics Workshop on Visual Computing for Biology and Medicine (2014-09-03--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fmistelbauer-2014-adict/", } @incollection{Viola_Ivan_UVP, title = "The Ultrasound Visualization Pipeline", author = "{\AA}smund Birkeland and Veronika Solteszova and Dieter H\"{o}nigmann and Odd Helge Gilja and Svein Brekke and Timo Ropinski and Ivan Viola", year = "2014", abstract = "Radiology is one of the main tools in modern medicine. A numerous set of diseases, ailments and treatments utilize accurate images of the patient. Ultrasound is one of the most frequently used imaging modality in medicine. The high spatial resolution, its interactive nature and non-invasiveness makes it the first choice in many examinations. 
Image interpretation is one of ultrasound’s main challenges. Much training is required to obtain a confident skill level in ultrasound-based diagnostics. State-of-the-art graphics techniques is needed to provide meaningful visualizations of ultrasound in real-time. In this paper we present the process-pipeline for ultrasound visualization, including an overview of the tasks performed in the specific steps. To provide an insight into the trends of ultrasound visualization research, we have selected a set of significant publications and divided them into a technique-based taxonomy covering the topics pre-processing, segmentation, registration, rendering and augmented reality. For the different technique types we discuss the difference between ultrasound-based techniques and techniques for other modalities.", month = sep, address = "http://link.springer.com/chapter/10.1007%2F978-1-4471-6497-5_24", booktitle = "Scientific Visualization", chapter = "Uncertainty, Multifield, Biomedical, and Scalable Visualization", editor = "Charles D. Hansen, Min Chen, Christopher R. Johnson, Arie E. Kaufman, Hans Hagen", isbn = "978-1-4471-6496-8", publisher = "Springer London", series = "Mathematics and Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_UVP/", } @article{raidou_vis14, title = "Visual analytics for the exploration of multiparametric cancer imaging", author = "Renata Raidou and Marta Paes Moreira and Wouter van Elmpt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Tumor tissue characterization can play an important role in thediagnosis and design of effective treatment strategies. In orderto gather and combine the necessary tissue information, multi-modal imaging is used to derive a number of parameters indica-tive of tissue properties. 
The exploration and analysis of relationships between parameters and, especially, of differences among distinct intra-tumor regions is particularly interesting for clinical researchers to individualize tumor treatment. However, due to high data dimensionality and complexity, the current clinical workflow is time demanding and does not provide the necessary intra-tumor insight. We implemented a new application for the exploration of the relationships between parameters and heterogeneity within tumors. In our approach, we employ a well-known dimensionality reduction technique [5] to map the high-dimensional space of tissue properties into a 2D information space that can be interactively explored with integrated information visualization techniques. We conducted several usage scenarios with real-patient data, of which we present a case of advanced cervical cancer. First indications show that our application introduces new features and functionalities that are not available within the current clinical approach.", journal = "Visual Analytics Science and Technology (VAST), 2014 IEEE Conference on Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vis14/", } @article{raidou_vcbm14, title = "The iCoCooN: Integration of Cobweb Charts with Parallel Coordinates for Visual Analysis of DCE-MRI Modeling Variations", author = "Renata Raidou and Uulke A van der Heide and PJ van Houdt and Marcel Breeuwer and Anna Vilanova i Bartroli", year = "2014", abstract = "Efficacy of radiotherapy treatment depends on the specific characteristics of tumorous tissues. For the determination of these characteristics, clinical practice uses Dynamic Contrast Enhanced (DCE) Magnetic Resonance Imaging (MRI). DCE-MRI data is acquired and modeled using pharmacokinetic modeling, to derive per voxel a set of parameters, indicative of tissue properties. 
Different pharmacokinetic modeling approaches make differ-ent assumptions, resulting in parameters with different distributions. A priori, it is not known whether there aresignificant differences between modeling assumptions and which assumption is best to apply. Therefore, clinicalresearchers need to know at least how different choices in modeling affect the resulting pharmacokinetic parame-ters and also where parameter variations appear. In this paper, we introduce iCoCooN: a visualization applicationfor the exploration and analysis of model-induced variations in pharmacokinetic parameters. We designed a visualrepresentation, the Cocoon, by integrating perpendicularly Parallel Coordinate Plots (PCPs) with Cobweb Charts(CCs). PCPs display the variations in each parameter between modeling choices, while CCs present the relationsin a whole parameter set for each modeling choice. The Cocoon is equipped with interactive features to supportthe exploration of all data aspects in a single combined view. Additionally, interactive brushing allows to link theobservations from the Cocoon to the anatomy. We conducted evaluations with experts and also general users. Theclinical experts judged that the Cocoon in combination with its features facilitates the exploration of all significantinformation and, especially, enables them to find anatomical correspondences. 
The results of the evaluation withgeneral users indicate that the Cocoon produces more accurate results compared to independent multiples", journal = "Eurographics Workshop on Visual Computing for Biology and Medicine ", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/raidou_vcbm14/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. 
We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @article{mistelbauer-2013-cfa, title = "Vessel Visualization using Curvicircular Feature Aggregation", author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Armin Kanitsar and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Radiological investigations are common medical practice for the diagnosis of peripheral vascular diseases. Existing visualization methods such as Curved Planar Reformation (CPR) depict calcifications on vessel walls to determine if blood is still able to flow. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, we propose Curvicircular Feature Aggregation (CFA), which aggregates these rotated images into a single view. By eliminating the need for rotation, vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. 
We present applications of this technique in the medical domain and give feedback from radiologists.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "231--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/", } @article{karimov-2013-vivisection, title = "ViviSection: Skeleton-based Volume Editing", author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Volume segmentation is important in many applications, particularly in the medical domain. Most segmentation techniques, however, work fully automatically only in very restricted scenarios and cumbersome manual editing of the results is a common task. In this paper, we introduce a novel approach for the editing of segmentation results. Our method exploits structural features of the segmented object to enable intuitive and robust correction and verification. We demonstrate that our new approach can significantly increase the segmentation quality even in difficult cases such as in the presence of severe pathologies.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/", } @inproceedings{Viola_Ivan_2013_D3D, title = "Doppler-based 3D Blood Flow Imaging and Visualization", author = "{\AA}smund Birkeland and Dag Magne Ulvang and Kim Nylund and Trygve Hausken and Odd Helge Gilja and Ivan Viola", year = "2013", abstract = "Blood flow is a very important part of human physiology. In this paper, we present a new method for estimating and visualizing 3D blood flow on-the-fly based on Doppler ultrasound. We add semantic information about the geometry of the blood vessels in order to recreate the actual velocities of the blood. 
Assuming a laminar flow, the flow direction is related to the general direction of the vessel. Based on the center line of the vessel, we create a vector field representing the direction of the vessel at any given point. The actual flow velocity is then estimated from the Doppler ultrasound signal by back-projecting the velocity in the measured direction, onto the vessel direction. Additionally, we estimate the flux at user-selected cross-sections of the vessel by integrating the velocities over the area of the cross-section. In order to visualize the flow and the flux, we propose a visualization design based on traced particles colored by the flux. The velocities are visualized by animating particles in the flow field. Further, we propose a novel particle velocity legend as a means for the user to estimate the numerical value of the current velocity. Finally, we perform an evaluation of the technique where the accuracy of the velocity estimation is measured using a 4D MRI dataset as a basis for the ground truth.", month = may, isbn = "978-80-223-3377-1", publisher = "ACM Publishing House", location = "Smolenice, Slovak Republic", booktitle = "SCCG 2013 - 29th Proceedings Spring conference on Computer Graphics", pages = "128--135", keywords = "Medical Visualization, Biomedical", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_D3D/", } @inproceedings{Viola_Ivan_2013_HQ3, title = "High-Quality 3D Visualization of In-Situ Ultrasonography", author = "Ivan Viola and {\AA}smund Birkeland and Veronika Solteszova and Linn Helljesen and Helwig Hauser and Spiros Kotopoulis and Kim Nylund and Dag Magne Ulvang and Ola Kristoffer Øye and Trygve Hausken and Odd Helge Gilja", year = "2013", abstract = "In recent years medical ultrasound has experienced a rapid development in the quality of real-time 3D ultrasound (US) imaging. 
The image quality of the 3D volume that was previously possible to achieve within the range of a few seconds, is now possible in a fraction of a second. This technological advance offers entirely new opportunities for the use of US in the clinic. In our project, we investigate how real-time 3D US can be combined with high-performance processing of today’s graphics hardware to allow for high-quality 3D visualization and precise navigation during the examination.", month = may, publisher = "Eurographics", note = "1st Prize - Medical Prize Short Paper", location = "Girona, Spain", booktitle = "EG 2013 - Dirk Bartz Prize", pages = "1--4", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_HQ3/", } @inproceedings{mistelbauer-2012-ssv, title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization", author = "Gabriel Mistelbauer and Hamed Bouzari and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Stefan Bruckner and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller", year = "2012", abstract = "Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Of these, not each might be applicable to the data at hand or might reveal the desired result. Owing to this, innovations may be used inappropriately and users may become skeptical. In this paper we propose a knowledge-assisted interface for medical visualization, which reduces the necessary effort to use new visualization methods, by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without the users to worry about when, where, and especially how they may or should use them. 
We present an application of our system in the medical domain and give qualitative feedback from domain experts.", month = oct, publisher = "IEEE Computer Society", location = "Seattle, WA, USA", booktitle = "IEEE Conference on Visual Analytics Science and Technology (IEEE VAST) 2012", pages = "163--172", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. 
We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @inproceedings{mistelbauer-2012-cr, title = "Centerline Reformations of Complex Vascular Structures", author = "Gabriel Mistelbauer and Andrej Varchola and Hamed Bouzari and Juraj Starinsky and Arnold K\"{o}chl and R\"{u}diger Schernthaner and Dominik Fleischmann and Eduard Gr\"{o}ller and Milo\v{s} \v{S}r\'{a}mek", year = "2012", abstract = "Visualization of vascular structures is a common and frequently performed task in the field of medical imaging. There exist well established and applicable methods such as Maximum Intensity Projection (MIP) and Curved Planar Reformation (CPR). However, when calcified vessel walls are investigated, occlusion hinders exploration of the vessel interior with MIP. In contrast, CPR offers the possibility to visualize the vessel lumen by cutting a single vessel along its centerline. Extending the idea of CPR, we propose a novel technique, called Centerline Reformation (CR), which is capable of visualizing the lumen of spatially arbitrarily oriented vessels not necessarily connected in a tree structure. In order to visually emphasize depth, overlap and occlusion, halos can optionally envelope the vessel lumen. The required vessel centerlines are obtained from volumetric data by performing a scale-space based feature extraction. We present the application of the proposed technique in a focus and context setup. Further, we demonstrate how it facilitates the investigation of dense vascular structures, particularly cervical vessels or vessel data featuring peripheral arterial occlusive diseases or pulmonary embolisms. 
Finally, feedback from domain experts is given.", isbn = "978-1-4673-0863-2", location = "Songdo, Korea (South) ", booktitle = "Pacific Visualization Symposium (PacificVis), 2012 IEEE", pages = "233--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-cr/", } @inproceedings{Ford-2012-HRV, title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound", author = "Steven Ford and Gabriel Kiss and Ivan Viola and Stefan Bruckner and Hans Torp", year = "2012", abstract = "Medical ultrasound is a challenging modality when it comes to image interpretation. The goal we address in this work is to assist the ultrasound examiner and partially alleviate the burden of interpretation. We propose to address this goal with visualization that provides clear cues on the orientation and the correspondence between anatomy and the data being imaged. Our system analyzes the stream of 3D ultrasound data and in real-time identifies distinct features that are basis for a dynamically deformed mesh model of the heart. The heart mesh is composited with the original ultrasound data to create the data-to-anatomy correspondence. The visualization is broadcasted over the internet allowing, among other opportunities, a direct visualization on the patient on a tablet computer. The examiner interacts with the transducer and with the visualization parameters on the tablet. 
Our system has been characterized by domain specialist as useful in medical training and for navigating occasional ultrasound users.", booktitle = "Proceedings of the Workshop at SIGGRAPH Asia 2012", keywords = "medical visualization, ultrasound", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/", } @inproceedings{Balabanian-2010-IIV, title = "Interactive Illustrative Visualization of Hierarchical Volume Data", author = "Jean-Paul Balabanian and Ivan Viola and Eduard Gr\"{o}ller", year = "2010", abstract = "In scientific visualization the underlying data often has an inherent abstract and hierarchical structure. Therefore, the same dataset can simultaneously be studied with respect to its characteristics in the three-dimensional space and in the hierarchy space. Often both characteristics are equally important to convey. For such scenarios we explore the combination of hierarchy visualization and scientific visualization, where both data spaces are effectively integrated. We have been inspired by illustrations of species evolutions where hierarchical information is often present. Motivated by these traditional illustrations, we introduce integrated visualizations for hierarchically organized volumetric datasets. The hierarchy data is displayed as a graph, whose nodes are visually augmented to depict the corresponding 3D information. These augmentations include images due to volume raycasting, slicing of 3D structures, and indicators of structure visibility from occlusion testing. New interaction metaphors are presented that extend visualizations and interactions, typical for one visualization space, to control visualization parameters of the other space. Interaction on a node in the hierarchy influences visual representations of 3D structures and vice versa. We integrate both the abstract and the scientific visualizations into one view which avoids frequent refocusing typical for interaction with linked-view layouts. 
We demonstrate our approach on different volumetric datasets enhanced with hierarchical information.", month = jun, location = "Ottawa, Ontario, Canada", booktitle = "Proceedings of Graphics Interface 2010", pages = "137--144", keywords = "visualization, volume data, hierarchical", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Balabanian-2010-IIV/", } @article{solteszova-2010-MOS, title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering", author = "Veronika Solteszova and Daniel Patel and Stefan Bruckner and Ivan Viola", year = "2010", abstract = "In this paper, we present a novel technique which simulates directional light scattering for more realistic interactive visualization of volume data. Our method extends the recent directional occlusion shading model by enabling light source positioning with practically no performance penalty. Light transport is approximated using a tilted cone-shaped function which leaves elliptic footprints in the opacity buffer during slice-based volume rendering. We perform an incremental blurring operation on the opacity buffer for each slice in front-to-back order. This buffer is then used to define the degree of occlusion for the subsequent slice. 
Our method is capable of generating high-quality soft shadowing effects, allows interactive modification of all illumination and rendering parameters, and requires no pre-computation.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", pages = "883--891", keywords = "global illumination, volume rendering, shadows, optical model", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, pointbased representations, or others. 
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers \& Graphics", number = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @inproceedings{balabanian-2008-hvv, title = "Hierarchical Volume Visualization of Brain Anatomy", author = "Jean-Paul Balabanian and Martin Ystad and Ivan Viola and Arvid Lundervold and Helwig Hauser and Eduard Gr\"{o}ller", year = "2008", month = oct, isbn = "978-3-89838-609-8", location = "Konstanz, Deutschland", editor = "Oliver Deussen and Daniel Keim and Dietmar Saupe", booktitle = "VMV 2008, Vision, Modeling and Visualization", pages = "313--322", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/balabanian-2008-hvv/", } @inproceedings{ruiz-2008-SEV, title = "Similarity-based Exploded Views", author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Exploded views are often used in illustration to overcome the problem of occlusion when depicting complex structures. In this paper, we propose a volume visualization technique inspired by exploded views that partitions the volume into a number of parallel slabs and shows them apart from each other. The thickness of slabs is driven by the similarity between partitions. We use an information-theoretic technique for the generation of exploded views. First, the algorithm identifies the viewpoint from which the structure is the highest. Then, the partition of the volume into the most informative slabs for exploding is obtained using two complementary similarity-based strategies. 
The number of slabs and the similarity parameter are freely adjustable by the user.", booktitle = "Proceedings of Smart Graphics 2008", pages = "154--165", keywords = "volume visualization, illustrative visualization, exploded views", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/", } @inproceedings{ruiz-2008-OVR, title = "Obscurance-based Volume Rendering Framework", author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Obscurances, from which ambient occlusion is a particular case, is a technology that produces natural-looking lighting effects in a faster way than global illumination. Its application in volume visualization is of special interest since it permits us to generate a high quality rendering at a low cost. In this paper, we propose an obscurance-based framework that allows us to obtain realistic and illustrative volume visualizations in an interactive manner. Obscurances can include color bleeding effects without additional cost. Moreover, we obtain a saliency map from the gradient of obscurances and we show its application to enhance volume visualization and to select the most salient views.", booktitle = "Proceedings of Volume Graphics 2008", keywords = "volume rendering, illustrative visualization, ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/", } @inproceedings{burns-2007-fea, title = "Feature Emphasis and Contextual Cutaways for Multimodal Medical Visualization", author = "Michael Burns and Martin Haidacher and Wolfgang Wein and Ivan Viola and Eduard Gr\"{o}ller", year = "2007", abstract = "Dense clinical data like 3D Computed Tomography (CT) scans can be visualized together with real-time imaging for a number of medical intervention applications. 
However, it is difficult to provide a fused visualization that allows sufficient spatial perception of the anatomy of interest, as derived from the rich pre-operative scan, while not occluding the real-time image displayed embedded within the volume. We propose an importance-driven approach that presents the embedded data such that it is clearly visible along with its spatial relation to the surrounding volumetric material. To support this, we present and integrate novel techniques for importance specification, feature emphasis, and contextual cutaway generation. We show results in a clinical context where a pre-operative CT scan is visualized alongside a tracked ultrasound image, such that the important vasculature is depicted between the viewpoint and the ultrasound image, while a more opaque representation of the anatomy is exposed in the surrounding area.", month = may, isbn = "9783905673456", publisher = "IEEE", location = "Norrk\"{o}pping, Schweden", editor = "K. Museth and T. M\"{o}ller and A. Ynnerman", booktitle = "Proceedings of Eurographics / IEEE VGTC Symposium on Visualization (EuroVis 2007)", pages = "275--282", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/burns-2007-fea/", }