@article{Diehl-2017-Albero, title = "Albero: A Visual Analytics Approach for Probabilistic Weather Forecasting", author = "Alexandra Diehl and Leandro Pelorosso and Claudio Delrieux and Kresimir Matkovic and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2017", abstract = "Probabilistic weather forecasts are amongst the most popular ways to quantify numerical forecast uncertainties. The analog regression method can quantify uncertainties and express them as probabilities. The method comprises the analysis of errors from a large database of past forecasts generated with a specific numerical model and observational data. Current visualization tools based on this method are essentially automated and provide limited analysis capabilities. In this paper, we propose a novel approach that breaks down the automatic process using the experience and knowledge of the users and creates a new interactive visual workflow. Our approach allows forecasters to study probabilistic forecasts, their inner analogs and observations, their associated spatial errors, and additional statistical information by means of coordinated and linked views. We designed the presented solution following a participatory methodology together with domain experts. Several meteorologists with different backgrounds validated the approach. Two case studies illustrate the capabilities of our solution. 
It successfully facilitates the analysis of uncertainty and systematic model biases for improved decision-making and process-quality measurements.", month = oct, journal = "Computer Graphics Forum", volume = "36", number = "7", pages = "135--144", doi = "10.1111/cgf.13279", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Diehl-2017-Albero/", } @article{mindek-2017-dsn, title = "Data-Sensitive Visual Navigation", author = "Peter Mindek and Gabriel Mistelbauer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2017", abstract = "In visualization systems it is often the case that the changes of the input parameters are not proportional to the visual change of the generated output. In this paper, we propose a model for enabling data-sensitive navigation for user-interface elements. This model is applied to normalize the user input according to the visual change, and also to visually communicate this normalization. In this way, the exploration of heterogeneous data using common interaction elements can be performed in an efficient way. We apply our model to the field of medical visualization and present guided navigation tools for traversing vascular structures and for camera rotation around 3D volumes. 
The presented examples demonstrate that the model scales to user-interface elements where multiple parameters are set simultaneously.", month = oct, journal = "Computers \& Graphics", volume = "67", number = "C", pages = "77--85", keywords = "navigation, exploration, medical visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-dsn/", } @article{Kolesar-Fractional-2016, title = "A Fractional Cartesian Composition Model for Semi-Spatial Comparative Visualization Design", author = "Ivan Koles\'{a}r and Stefan Bruckner and Ivan Viola and Helwig Hauser", year = "2017", month = jan, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "23", number = "1", doi = "10.1109/TVCG.2016.2598870", pages = "851--860", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Kolesar-Fractional-2016/", } @techreport{TR1862162, title = "Visual Analysis of Volume Ensembles Based on Local Features", author = "Johanna Schmidt and Bernhard Fr\"{o}hler and Reinhold Preiner and Johannes Kehrer and Eduard Gr\"{o}ller and Stefan Bruckner and Christoph Heinzl", year = "2016", abstract = "Ensemble datasets describe a specific phenomenon (e.g., a simulation scenario or a measurements series) through a large set of individual ensemble members. These individual members typically do not differ too much from each other but rather feature slightly changing characteristics. In many cases, the ensemble members are defined in 3D space, which implies severe challenges when exploring the complete ensembles such as handling occlusions, focus and context or its sheer datasize. In this paper we address these challenges and put our focus on the exploration of local features in 3D volumetric ensemble datasets, not only by visualizing local characteristics, but also by identifying connections to other local features with similar characteristics in the data. 
We evaluate the variance in the dataset and use the spatial median (medoid) of the ensemble to visualize the differences in the dataset. This medoid is subsequently used as a representative of the ensemble in 3D. The variance information is used to guide users during the exploration, as regions of high variance also indicate larger changes within the ensemble members. The local characteristics of the regions can be explored by using our proposed 3D probing widgets. These widgets consist of a 3D sphere, which can be positioned at any point in 3D space. While moving a widget, the local data characteristics at the corresponding position are shown in a separate detail view, which depicts the local outliers and their surfaces in comparison to the medoid surface. The 3D probing widgets can also be fixed at a user-defined position of interest. The fixed probing widgets are arranged in a similarity graph to indicate similar local data characteristics. The similarity graph thus allows to explore whether high variances in a certain region are caused by the same dataset members or not. Finally, it is also possible to compare a single member against the rest of the ensemble. 
We evaluate our technique through two demonstration cases using volumetric multi-label segmentation mask datasets, two from the industrial domain and two from the medical domain.", month = may, number = "TR-186-2-16-2", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "ensemble visualization, guided local exploration, variance analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/TR1862162/", } @misc{klein-2016-WCL, title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language", author = "Tobias Klein and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "The use of GPUs and the massively parallel computing paradigm have become wide-spread. We describe a framework for the interactive visualization and visual analysis of the run-time behavior of massively parallel programs, especially OpenCL kernels. This facilitates understanding a program's function and structure, finding the causes of possible slowdowns, locating program bugs, and interactively exploring and visually comparing different code variants in order to improve performance and correctness. Our approach enables very specific, user-centered analysis, both in terms of the recording of the run-time behavior and the visualization itself. Instead of having to manually write instrumented code to record data, simple code annotations tell the source-to-source compiler which code instrumentation to generate automatically. 
The visualization part of our framework then enables the interactive analysis of kernel run-time behavior in a way that can be very specific to a particular problem or optimization goal, such as analyzing the causes of memory bank conflicts or understanding an entire parallel algorithm.", month = apr, publisher = "ACM", location = "Vienna, Austria", event = "4th International Workshop on OpenCL (IWOCL '16)", note = "Poster presented at the 4th International Workshop on OpenCL (IWOCL '16)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/klein-2016-WCL/", } @article{Labschuetz_Matthias_2016_JITT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. 
Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = jan, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Published in January 2016", number = "1", volume = "22", event = "IEEE SciVis 2015", location = "Chicago, IL, USA", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_JITT/", } @article{Solteszova2016, title = "Output-Sensitive Filtering of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Sergej Stoppel and Ivan Viola and Stefan Bruckner", year = "2016", abstract = "Real-time volume data acquisition poses substantial challenges for the traditional visualization pipeline where data enhancement is typically seen as a pre-processing step. In the case of 4D ultrasound data, for instance, costly processing operations to reduce noise and to remove artefacts need to be executed for every frame. To enable the use of high-quality filtering operations in such scenarios, we propose an output-sensitive approach to the visualization of streaming volume data. Our method evaluates the potential contribution of all voxels to the final image, allowing us to skip expensive processing operations that have little or no effect on the visualization. As filtering operations modify the data values which may affect the visibility, our main contribution is a fast scheme to predict their maximum effect on the final image. Our approach prioritizes filtering of voxels with high contribution to the final visualization based on a maximal permissible error per pixel. 
With zero permissible error, the optimized filtering will yield a result that is identical to filtering of the entire volume. We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios that require on-the-fly processing.", journal = "Computer Graphics Forum", volume = "35", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Solteszova2016/", } @misc{Diehl_2015, title = "Albero: A Visual Analytics Tool for Probabilistic Weather Forecasting", author = "Alexandra Diehl and Leandro Pelorosso and Kresimir Matkovic and Claudio Delrieux and Marc Ruiz and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", month = nov, location = "University of Buenos Aires", event = "Workshop Big Data \& Environment", note = "Poster presented at the Workshop Big Data \& Environment (2015-11)", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Diehl_2015/", } @article{Labschuetz_Matthias_2015_JIT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2015", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. 
This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "22", number = "1", note = "Published in January 2016", issn = "1077-2626", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Labschuetz_Matthias_2015_JIT/", } @article{karimov-2015-HD, title = "Guided Volume Editing based on Histogram Dissimilarity", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner", year = "2015", abstract = "Segmentation of volumetric data is an important part of many analysis pipelines, but frequently requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. We propose an approach to improve volume editing by detecting potential segmentation defects while considering the underlying structure of the object of interest. Our method is based on a novel histogram dissimilarity measure between individual regions, derived from structural information extracted from the initial segmentation. 
Based on this information, our interactive system guides the user towards potential defects, provides integrated tools for their inspection, and automatically generates suggestions for their resolution. We demonstrate that our approach can reduce interaction effort and supports the user in a comprehensive investigation for high-quality segmentations. ", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", pages = "91--100", keywords = "Edge and feature detection, Image Processing and Computer Vision, Computer Graphics, Display algorithms, Picture/Image Generation, Segmentation, Methodology and Techniques, Interaction techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/", } @inproceedings{Bruckner_Stefan_2015_VAS, title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting", author = "Alexandra Diehl and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "Weather conditions affect multiple aspects of human life such as economy, safety, security, and social activities. For this reason, weather forecast plays a major role in society. Currently weather forecasts are based on Numerical Weather Prediction (NWP) models that generate a representation of the atmospheric flow. Interactive visualization of geo-spatial data has been widely used in order to facilitate the analysis of NWP models. This paper presents a visualization system for the analysis of spatio-temporal patterns in short-term weather forecasts. For this purpose, we provide an interactive visualization interface that guides users from simple visual overviews to more advanced visualization techniques. Our solution presents multiple views that include a timeline with geo-referenced maps, an integrated webmap view, a forecast operation tool, a curve-pattern selector, spatial filters, and a linked meteogram. 
Two key contributions of this work are the timeline with geo-referenced maps and the curve-pattern selector. The latter provides novel functionality that allows users to specify and search for meaningful patterns in the data. The visual interface of our solution allows users to detect both possible weather trends and errors in the weather forecast model. We illustrate the usage of our solution with a series of case studies that were designed and validated in collaboration with domain experts.", month = may, location = "Cagliari, Sardinia, Italy", booktitle = "Computer Graphics Forum", pages = "381--390", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Bruckner_Stefan_2015_VAS/", } @inproceedings{mindek-2015-mc, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek and Ladislav \v{C}mol\'{i}k and Ivan Viola and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. 
The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. Our compositing approach supports insets of camera views to account for several important cameras simultaneously. Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", month = apr, isbn = "978-80-223-3844-8", publisher = "Comenius University, Bratislava", location = "Smolenice, Slovakia", editor = "Joaquim Jorge, Luis Paulo Santos, Roman Durikovic", booktitle = "Proceedings of Spring Conference on Computer Graphics 2015", pages = "93--100", keywords = "storytelling, game visualization, animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/", } @article{mindek-2014-mcs, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. 
Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "132--144", keywords = "annotations, spatial selections, visual analytics, interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-mcs/", } @inproceedings{ymca, title = "YMCA - Your Mesh Comparison Application", author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Polygonal meshes can be created in several different ways. In this paper we focus on the reconstruction of meshes from point clouds, which are sets of points in 3D. Several algorithms that tackle this task already exist, but they have different benefits and drawbacks, which leads to a large number of possible reconstruction results (i.e., meshes). The evaluation of those techniques requires extensive comparisons between different meshes which is up to now done by either placing images of rendered meshes side-by-side, or by encoding differences by heat maps. A major drawback of both approaches is that they do not scale well with the number of meshes. This paper introduces a new comparative visual analysis technique for 3D meshes which enables the simultaneous comparison of several meshes and allows for the interactive exploration of their differences. Our approach gives an overview of the differences of the input meshes in a 2D view. By selecting certain areas of interest, the user can switch to a 3D representation and explore the spatial differences in detail. To inspect local variations, we provide a magic lens tool in 3D. 
The location and size of the lens provide further information on the variations of the reconstructions in the selected area. With our comparative visualization approach, differences between several mesh reconstruction algorithms can be easily localized and inspected.", month = nov, series = "VAST", publisher = "IEEE Computer Society", doi = "10.1109/VAST.2014.7042491", location = "Paris, France", booktitle = "IEEE Visual Analytics Science and Technology", keywords = "mesh comparison, 3D data exploration, focus+context, comparative visualization, Visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ymca/", } @article{Viola_Ivan_IIP, title = "Interactively illustrating polymerization using three-level model fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Background: Research in cell biology is steadily contributing new knowledge about many aspects of physiological processes, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. Results: In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach best suits the different involved levels of detail, and we additionally enable a form of interactive steering, while the process is illustrated. We demonstrate the suitability of our approach in the context of several polymerization processes and report from a first evaluation with domain experts. 
Conclusion: We conclude that our approach provides a new, hybrid modeling approach for illustrating the process of emergence in physiology, embedded in a densely filled environment. Our approach of a complementary fusion of three systems combines the strong points from the different modeling approaches and is capable to bridge different spatial and temporal scales.", month = oct, issn = "1471-2105", journal = "BMC Bioinformatics", number = "345", volume = "15", pages = "1--16", keywords = "Multi-agent modeling, L-system modeling, Biochemical visualization, Visualization of physiology, Polymerization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_IIP/", } @inproceedings{waldner-2014-ghi, title = "Graphical Histories of Information Foraging", author = "Manuela Waldner and Stefan Bruckner and Ivan Viola", year = "2014", abstract = "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users' desktop work. This representation makes key elements of the users' tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. 
In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.", month = oct, isbn = "978-1-4503-2542-4", publisher = "ACM", organization = "NordiCHI’14 - Nordic Conference on Human-Computer Interaction", location = "Helsinki, Finland", booktitle = "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational ", pages = "295--304", keywords = "Graph visualization, Interaction history, Provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/", } @inproceedings{Viola_Ivan_VDP, title = "Visibility-Driven Processing of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner", year = "2014", abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw data is challenging to visualize directly without additional processing. Noise removal and feature detection are common operations, but many methods are too costly to compute over the whole volume when dealing with live streamed data. In this paper, we propose a visibility-driven processing scheme for handling costly on-the-fly processing of volumetric data in real-time. In contrast to the traditional visualization pipeline, our scheme utilizes a fast computation of the potentially visible subset of voxels which significantly reduces the amount of data required to process. As filtering operations modify the data values which may affect their visibility, our method for visibility-mask generation ensures that the set of elements deemed visible does not change after processing. Our approach also exploits the visibility information for the storage of intermediate values when multiple operations are performed in sequence, and can therefore significantly reduce the memory overhead of longer filter pipelines. 
We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios where on-the-fly processing is required.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", location = "Vienna, Austria", issn = "2070-5778", event = "4th EG Workshop on Visual Computing for Biology and Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM 2014", pages = "127--136", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_VDP/", } @inproceedings{kolesar-ivan-2014-polymers, title = "Illustrating Polymerization using Three-level Model Fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Research in cell biology is steadily contributing new knowledge about many different aspects of physiological processes like polymerization, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach suits the different involved levels of detail best, and we additionally enable a simple form of interactive steering while the process is illustrated. 
We demonstrate the suitability of our approach in the context of several polymerization processes and report from a first evaluation with domain experts.", month = jul, publisher = "IEEE Digital Library", organization = "4th Symposium on Biological Data Visualization (in Conjunction with the International Conference on Intelligent Systems for Molecular Biology (ISMB 2014)) ", location = "Boston, USA", booktitle = "Proceedings of IEEE BioVis 2014", pages = "1--22", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/kolesar-ivan-2014-polymers/", } @article{Groeller_2014_UPS, title = "Guest editorial---Uncertainty and parameter space analysis in visualization", author = "Christoph Heinzl and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2014", abstract = "Within the past decades visualization advanced to a powerful means of exploring and analyzing data. Recent developments in both hard- and software contributed to previously unthinkable evaluations and visualizations of data with strongly increasing sizes and levels of complexity. Providing just insight into available data of a problem seems not to be sufficient anymore: Uncertainty and parameter space analyses in visualization are becoming more prevalent and may be found in astronomic, (bio)-medical, industrial, and engineering applications. The major goal is to find out, at which stage of the pipeline - from data acquisition to the final rendering of the output image - how much uncertainty is introduced and consequently how the desired result (e.g., a dimensional measurement feature) is affected. Therefore effective methods and techniques are required by domain specialists, which help to understand how data is generated, how reliable is the generated data, and where and why data is uncertain. Furthermore, as the problems to investigate are becoming increasingly complex, also finding suitable algorithms providing the desired solution tends to be more difficult. 
Additional questions may arise, e.g., how does a slight parameter change modify the result, how stable is a parameter, in which range is a parameter stable or which parameter set is optimal for a specific problem. Metaphorically speaking, an algorithm for solving a problem may be seen as finding a path through some rugged terrain (the core problem) ranging from the high grounds of theory to the haunted swamps of heuristics. There are many different paths through this terrain with different levels of comfort, length, and stability. Finding all possible paths corresponds in our case to doing an analysis of all possible parameters of a problem solving algorithm, which yields a typically multi-dimensional parameter space. This parameter space allows for an analysis of the quality and stability of a specific parameter set. In many cases of conventional visualization approaches the issues of uncertainty and parameter space analyses are neglected. For a long time, uncertainty - if visualized at all - used to be depicted as blurred data. But in most cases the uncertainty in the base data is not considered at all and just the quantities of interest are calculated. And even to calculate these quantities of interest, too often an empirically found parameter set is used to parameterize the underlying algorithms without exploring its sensitivity to changes and without exploring the whole parameter space to find the global or a local optimum. This tutorial aims to open minds and to look at our data and the parameter sets of our algorithms with a healthy skepticism. In the tutorial we combine uncertainty visualization and parameter space analyses which we believe is essential for the acceptance and applicability of future algorithms and techniques. The tutorial provides six sessions starting with an overview of uncertainty visualization including a historical perspective, uncertainty modeling and statistical visualization. 
The second part of the tutorial will be dedicated to structural uncertainty, parameter space analysis, industrial applications of uncertainty visualization and an outlook in this domain. ", month = jun, journal = "Computers \& Graphics", volume = "41", pages = "A1--A2", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_UPS/", } @article{Viola_Ivan_CLD, title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization", author = "Julius Parulek and Daniel J\"{o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola", year = "2014", abstract = "Molecular visualization is often challenged with rendering of large molecular structures in real time. We introduce a novel approach that enables us to show even large protein complexes. Our method is based on the level-of-detail concept, where we exploit three different abstractions combined in one visualization. Firstly, molecular surface abstraction exploits three different surfaces, solvent-excluded surface (SES), Gaussian kernels and van der Waals spheres, combined as one surface by linear interpolation. Secondly, we introduce three shading abstraction levels and a method for creating seamless transitions between these representations. The SES representation with full shading and added contours stands in focus while on the other side a sphere representation of a cluster of atoms with constant shading and without contours provide the context. Thirdly, we propose a hierarchical abstraction based on a set of clusters formed on molecular atoms. All three abstraction models are driven by one importance function classifying the scene into the near-, mid- and far-field. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. 
The rendering performance is evaluated on series of molecules of varying atom counts.", month = may, issn = "0167-7055", journal = "Computer Graphics Forum", number = "6", volume = "33", pages = "276--287", keywords = "clustering, implicit surfaces, level of detail algorithms, scientific visualization, Computer Applications", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_CLD/", } @article{Rautek_Peter_2014_VSA, title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger", year = "2014", abstract = "Researchers from many domains use scientific visualization in their daily practice. Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as software library or source code (low-level interface). In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang. We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. 
We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "20", pages = "2388--2396", keywords = " Volume visualization framework , Volume visualization, Domain-specific languages", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_VSA/", } @incollection{Groeller_Eduard_2014_THS, title = "The Haunted Swamps of Heuristics: Uncertainty in Problem Solving", author = "Artem Amirkhanov and Stefan Bruckner and Christoph Heinzl and Eduard Gr\"{o}ller", year = "2014", abstract = "In scientific visualization the key task of research is the provision of insight into a problem. Finding the solution to a problem may be seen as finding a path through some rugged terrain which contains mountains, chasms, swamps, and few flatlands. This path—an algorithm discovered by the researcher—helps users to easily move around this unknown area. If this way is a wide road paved with stones it will be used for a long time by many travelers. However, a narrow footpath leading through deep forests and deadly swamps will attract only a few adventure seekers. There are many different paths with different levels of comfort, length, and stability, which are uncertain during the research process. Finding a systematic way to deal with this uncertainty can greatly assist the search for a safe path which is in our case the development of a suitable visualization algorithm for a specific problem. In this work we will analyze the sources of uncertainty in heuristically solving visualization problems and will propose directions to handle these uncertainties.", booktitle = "Scientific Visualization", chapter = "Uncertainty, Multifield, Biomedical, and Scalable Visualization", editor = "Charles D. Hansen, Min Chen, Christopher R. Johnson, Arie E. 
Kaufman, Hans Hagen", isbn = "978-1-4471-6496-8", note = "Chapter 5", publisher = "Springer London", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_Eduard_2014_THS/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. 
We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @article{vaico, title = "VAICo: Visual Analysis for Image Comparison", author = "Johanna Schmidt and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. 
We demonstrate the flexibility of our approach by applying it to multiple distinct domains.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", note = "Demo: http://www.cg.tuwien.ac.at/~jschmidt/vaico/", pages = "2090--2099", keywords = "focus+context, image-set comparison, Comparative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/vaico/", } @misc{diehl-2013-VTA, title = "Visual Trend Analysis in Weather Forecast", author = "Alexandra Diehl and Stefan Bruckner and Eduard Gr\"{o}ller and Claudio Delrieux and Celeste Saulo", year = "2013", abstract = "Weather conditions affect multiple aspects of human life such as economy, safety, security, and social activities. Weather forecast significantly influences decision and policy making, construction planning, productivity, and environmental risk management. Visualization of weather conditions and trends assists the anticipation of unexpected meteorological events and thus helps with appropriate actions and mitigation systems to minimize the impact of them on human life and activities. In this work, we propose an interactive approach for visual analysis of weather trends and forecast errors in short-term weather forecast simulations. Our solution consists of a multi-aspect system that provides different methods to visualize and analyze multiple runs, time-dependent data, and forecast errors. A key contribution of this work is the comparative visualization technique that allows users to analyze possible weather trends and patterns. 
We illustrate the usage of our approach with a case study designed and validated in conjunction with domain experts.", month = oct, location = "Atlanta, Georgia, USA", event = "IEEE VIS 2013 Conference", note = "Poster presented at IEEE VIS 2013 Conference (2013-10-13--2013-10-18)", keywords = "Interactive Visual Analysis, Comparative Visualization, Weather Forecast Research", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/diehl-2013-VTA/", } @article{karimov-2013-vivisection, title = "ViviSection: Skeleton-based Volume Editing", author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Volume segmentation is important in many applications, particularly in the medical domain. Most segmentation techniques, however, work fully automatically only in very restricted scenarios and cumbersome manual editing of the results is a common task. In this paper, we introduce a novel approach for the editing of segmentation results. Our method exploits structural features of the segmented object to enable intuitive and robust correction and verification. 
We demonstrate that our new approach can significantly increase the segmentation quality even in difficult cases such as in the presence of severe pathologies.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/", } @article{mistelbauer-2013-cfa, title = "Vessel Visualization using Curvicircular Feature Aggregation", author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Armin Kanitsar and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Radiological investigations are common medical practice for the diagnosis of peripheral vascular diseases. Existing visualization methods such as Curved Planar Reformation (CPR) depict calcifications on vessel walls to determine if blood is still able to flow. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, we propose Curvicircular Feature Aggregation (CFA), which aggregates these rotated images into a single view. By eliminating the need for rotation, vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. We present applications of this technique in the medical domain and give feedback from radiologists.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "231--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/", } @inproceedings{mindek-2013-csl, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Spatial selections are a ubiquitous concept in visualization. 
By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections are often dependent on other parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can be also used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data and the analysis of historical documents.", month = may, series = "SCCG ", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 29th Spring Conference on Computer Graphics", keywords = "spatial selections, annotations, linked views, provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/", } @article{mindek-2013-pel, title = "Visual Parameter Exploration in GPU Shader Space", author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Eduard Gr\"{o}ller", year = "2013", abstract = "The wide availability of high-performance GPUs has made the use of shader programs in visualization ubiquitous. Understanding shaders is a challenging task. Frequently it is difficult to mentally reconstruct the nature and types of transformations applied to the underlying data during the visualization process. We propose a method for the visual analysis of GPU shaders, which allows the flexible exploration and investigation of algorithms, parameters, and their effects. 
We introduce a method for extracting feature vectors composed of several attributes of the shader, as well as a direct manipulation interface for assigning semantics to them. The user interactively classifies pixels of images which are rendered with the investigated shader. The two resulting classes, a positive class and a negative one, are employed to steer the visualization. Based on this information, we can extract a wide variety of additional attributes and visualize their relation to this classification. Our system allows an interactive exploration of shader space and we demonstrate its utility for several different applications.", journal = "Journal of WSCG", volume = "21", number = "3", issn = "1213-6972", pages = "225--234", keywords = "shader augmentation, parameter space exploration", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/", } @article{Ropinski-2012-UBT, title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering", author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Eduard Gr\"{o}ller", year = "2012", abstract = "In this paper, we describe a novel approach for applying texture mapping to volumetric data sets. In contrast to previous approaches, the presented technique enables a unified integration of 2D and 3D textures and thus allows to emphasize material boundaries as well as volumetric regions within a volumetric data set at the same time. One key contribution of this paper is a parametrization technique for volumetric data sets, which takes into account material boundaries and volumetric regions. Using this technique, the resulting parametrizations of volumetric data sets enable texturing effects which create a higher degree of realism in volume rendered images. 
We evaluate the quality of the parametrization and demonstrate the usefulness of the proposed concepts by combining volumetric texturing with volumetric lighting models to generate photorealistic volume renderings. Furthermore, we show the applicability in the area of illustrative visualization.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "11", volume = "18", pages = "1942--1955", keywords = "interactive volume rendering, volumetric texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/", } @inproceedings{Csebfalvi-2012-IOM, title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering", author = "Bal\'{a}zs Cs\'{e}bfalvi and Bal\'{a}zs T\'{o}th and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Using classical volume visualization, typically a couple of isosurface layers are rendered semi-transparently to show the internal structures contained in the data. However, the opacity transfer function is often difficult to specify such that all the isosurfaces are of high contrast and sufficiently perceivable. In this paper, we propose a volume-rendering technique which ensures that the different layers contribute to fairly different regions of the image space. Since the overlapping between the effected regions is reduced, an outer translucent isosurface does not decrease significantly the contrast of a partially hidden inner isosurface. Therefore, the layers of the data become visually well separated. Traditional transfer functions assign color and opacity values to the voxels depending on the density and the gradient. In contrast, we assign also different illumination directions to different materials, and modulate the opacities view-dependently based on the surface normals and the directions of the light sources, which are fixed to the viewing angle. 
We will demonstrate that this model allows an expressive visualization of volumetric data.", month = nov, location = "Magdeburg, Germany", booktitle = "Proceedings of Vision, Modeling & Visualization 2012", pages = "103--109", keywords = "illustrative visualization, illumination, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/", } @WorkshopTalk{VisWeek-Tutorial-2012-Uncertainty, title = "IEEE VisWeek 2012 Tutorial on Uncertainty and Parameter Space Analysis in Visualization", author = "Christoph Heinzl and Stefan Bruckner and Eduard Gr\"{o}ller and Alex Pang and Hans-Christian Hege and Kristin Potter and R\"{u}diger Westermann and Tobias Pfaffelmoser and Torsten M\"{o}ller", year = "2012", abstract = "Within the past decades visualization advanced to a powerful means of exploring and analyzing data. Recent developments in both hard- and software contributed to previously unthinkable evaluations and visualizations of data with strongly increasing sizes and levels of complexity. Providing just insight into available data of a problem seems not to be sufficient anymore: Uncertainty and parameter space analyses in visualization are becoming more prevalent and may be found in astronomic, (bio)-medical, industrial, and engineering applications. The major goal is to find out, at which stage of the pipeline - from data acquisition to the final rendering of the output image - how much uncertainty is introduced and consequently how the desired result (e.g., a dimensional measurement feature) is affected. Therefore effective methods and techniques are required by domain specialists, which help to understand how data is generated, how reliable is the generated data, and where and why data is uncertain. Furthermore, as the problems to investigate are becoming increasingly complex, also finding suitable algorithms providing the desired solution tends to be more difficult. 
Additional questions may arise, e.g., how does a slight parameter change modify the result, how stable is a parameter, in which range is a parameter stable or which parameter set is optimal for a specific problem. Metaphorically speaking, an algorithm for solving a problem may be seen as finding a path through some rugged terrain (the core problem) ranging from the high grounds of theory to the haunted swamps of heuristics. There are many different paths through this terrain with different levels of comfort, length, and stability. Finding all possible paths corresponds in our case to doing an analysis of all possible parameters of a problem solving algorithm, which yields a typically multi-dimensional parameter space. This parameter space allows for an analysis of the quality and stability of a specific parameter set. In many cases of conventional visualization approaches the issues of uncertainty and parameter space analyses are neglected. For a long time, uncertainty - if visualized at all - used to be depicted as blurred data. But in most cases the uncertainty in the base data is not considered at all and just the quantities of interest are calculated. And even to calculate these quantities of interest, too often an empirically found parameter set is used to parameterize the underlying algorithms without exploring its sensitivity to changes and without exploring the whole parameter space to find the global or a local optimum. This tutorial aims to open minds and to look at our data and the parameter sets of our algorithms with a healthy skepticism. In the tutorial we combine uncertainty visualization and parameter space analyses which we believe is essential for the acceptance and applicability of future algorithms and techniques. The tutorial provides six sessions starting with an overview of uncertainty visualization including a historical perspective, uncertainty modeling and statistical visualization. 
The second part of the tutorial will be dedicated to structural uncertainty, parameter space analysis, industrial applications of uncertainty visualization and an outlook in this domain. ", month = oct, event = "IEEE VisWeek", location = "Seattle, WA, USA", keywords = "uncertainty visualization, parameter space analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/VisWeek-Tutorial-2012-Uncertainty/", } @inproceedings{mistelbauer-2012-ssv, title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization", author = "Gabriel Mistelbauer and Hamed Bouzari and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Stefan Bruckner and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller", year = "2012", abstract = "Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Of these, not each might be applicable to the data at hand or might reveal the desired result. Owing to this, innovations may be used inappropriately and users may become skeptical. In this paper we propose a knowledge-assisted interface for medical visualization, which reduces the necessary effort to use new visualization methods, by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without the users to worry about when, where, and especially how they may or should use them. 
We present an application of our system in the medical domain and give qualitative feedback from domain experts.", month = oct, publisher = "IEEE Computer Society", location = "Seattle, WA, USA", booktitle = "IEEE Conference on Visual Analytics Science and Technology (IEEE VAST) 2012", pages = "163--172", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/", } @article{Herghelegiu-2012-BPV, title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy", author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Biopsies involve taking samples from living tissue using a biopsy needle. In current clinical practice they are a first mandatory step before any further medical actions are planned. Performing a biopsy on a deep seated brain tumor requires considerable time for establishing and validating the desired biopsy needle pathway to avoid damage. In this paper, we present a system for the visualization, analysis, and validation of biopsy needle pathways. Our system uses a multi-level approach for identifying stable needle placements which minimize the risk of hitting blood vessels. This is one of the major dangers in this type of intervention. Our approach helps in identifying and visualizing the point on the pathway that is closest to a surrounding blood vessel, requiring a closer inspection by the neurosurgeon. 
An evaluation by medical experts is performed to demonstrate the utility of our system.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "1085--1094", keywords = "biopsy planning, medical visualization, visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. 
We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @habilthesis{Bruckner-2012-VEA, title = "Visual Exploration and Analysis of Volumetric Data", author = "Stefan Bruckner", year = "2012", abstract = "Information technology has led to a rapid increase in the amount of data that arise in areas such as biology, medicine, climate science, and engineering. In many cases, these data are volumetric in nature, i.e., they describe the distribution of one or several quantities over a region in space. Volume visualization is the field of research which investigates the transformation of such data sets into images for purposes such as understanding structure or identifying features. This thesis presents work to aid this process by improving the interactive depiction, analysis, and exploration of volumetric data.", month = apr, keywords = "visual analysis, visual exploration, volume data", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Bruckner-2012-VEA/", } @inproceedings{Ford-2012-HRV, title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound", author = "Steven Ford and Gabriel Kiss and Ivan Viola and Stefan Bruckner and Hans Torp", year = "2012", abstract = "Medical ultrasound is a challenging modality when it comes to image interpretation. The goal we address in this work is to assist the ultrasound examiner and partially alleviate the burden of interpretation. We propose to address this goal with visualization that provides clear cues on the orientation and the correspondence between anatomy and the data being imaged. 
Our system analyzes the stream of 3D ultrasound data and in real-time identifies distinct features that are basis for a dynamically deformed mesh model of the heart. The heart mesh is composited with the original ultrasound data to create the data-to-anatomy correspondence. The visualization is broadcasted over the internet allowing, among other opportunities, a direct visualization on the patient on a tablet computer. The examiner interacts with the transducer and with the visualization parameters on the tablet. Our system has been characterized by domain specialist as useful in medical training and for navigating occasional ultrasound users.", booktitle = "Proceedings of the Workshop at SIGGRAPH Asia 2012", keywords = "medical visualization, ultrasound", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/", } @article{haidacher-2011-VAM, title = "Volume Analysis Using Multimodal Surface Similarity", author = "Martin Haidacher and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2011", abstract = "The combination of volume data acquired by multiple modalities has been recognized as an important but challenging task. Modalities often differ in the structures they can delineate and their joint information can be used to extend the classification space. However, they frequently exhibit differing types of artifacts which makes the process of exploiting the additional information non-trivial. In this paper, we present a framework based on an information-theoretic measure of isosurface similarity between different modalities to overcome these problems. The resulting similarity space provides a concise overview of the differences between the two modalities, and also serves as the basis for an improved selection of features. Multimodal classification is expressed in terms of similarities and dissimilarities between the isosurfaces of individual modalities, instead of data value combinations. 
We demonstrate that our approach can be used to robustly extract features in applications such as dual energy computed tomography of parts in industrial manufacturing.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "17", number = "12", pages = "1969--1978", keywords = "surface similarity, volume visualization, multimodal data", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-VAM/", } @WorkshopTalk{sikachev_peter-2011-dfc, title = "Dynamic Focus + Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2011", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. 
Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = jun, event = "Austrian-Russian Joint Seminar", location = "VRVis, Vienna, Austria", keywords = "focus + context, visualization, volume rendering, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/sikachev_peter-2011-dfc/", } @inproceedings{patel-2011-PEA, title = "PhD Education Through Apprenticeship", author = "Daniel Patel and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2011", abstract = "We describe and analyze the PhD education in the visualization group at the Vienna University of Technology and set the education in a larger perspective. Four central mechanisms drive the PhD education in Vienna. They are: to require an article-based PhD; to give the student freedom to choose research direction; to let students work in shared offices towards joint deadlines; and to involve students in reviewing articles. This paper describes these mechanisms in detail and illustrates their effect.", month = apr, location = "Llandudno, United Kingdom", editor = "S. Maddock, J. Jorge", booktitle = "Proceedings of Eurographics 2011 - Education Papers", pages = "23--28", keywords = "meister, education, visualization, apprenticeship", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/patel-2011-PEA/", } @inproceedings{sikachev-2010-DFC, title = "Dynamic Focus+Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2010", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. 
We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = nov, location = "Siegen, Germany", address = "University of Siegen, Siegen, Germany", booktitle = "Proceedings of Vision, Modeling and Visualization 2010", pages = "331--338", keywords = "focus+context, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/", } @article{brucker-2010-RES, title = "Result-Driven Exploration of Simulation Parameter Spaces for Visual Effects Design", author = "Stefan Bruckner and Torsten M\"{o}ller", year = "2010", abstract = "Graphics artists commonly employ physically-based simulation for the generation of effects such as smoke, explosions, and similar phenomena. The task of finding the correct parameters for a desired result, however, is difficult and time-consuming as current tools provide little to no guidance. In this paper, we present a new approach for the visual exploration of such parameter spaces. Given a three-dimensional scene description, we utilize sampling and spatio-temporal clustering techniques to generate a concise overview of the achievable variations and their temporal evolution. Our visualization system then allows the user to explore the simulation space in a goal-oriented manner. Animation sequences with a set of desired characteristics can be composed using a novel search-by-example approach and interactive direct volume rendering is employed to provide instant visual feedback. 
A user study was performed to evaluate the applicability of our system in production use.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "16", number = "6", pages = "1467--1475", keywords = "visual exploration, visual effects, clustering, time-dependent volume data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/brucker-2010-RES/", } @article{solteszova-2010-MOS, title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering", author = "Veronika Solteszova and Daniel Patel and Stefan Bruckner and Ivan Viola", year = "2010", abstract = "In this paper, we present a novel technique which simulates directional light scattering for more realistic interactive visualization of volume data. Our method extends the recent directional occlusion shading model by enabling light source positioning with practically no performance penalty. Light transport is approximated using a tilted cone-shaped function which leaves elliptic footprints in the opacity buffer during slice-based volume rendering. We perform an incremental blurring operation on the opacity buffer for each slice in front-to-back order. This buffer is then used to define the degree of occlusion for the subsequent slice. Our method is capable of generating high-quality soft shadowing effects, allows interactive modification of all illumination and rendering parameters, and requires no pre-computation.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", pages = "883--891", keywords = "global illumination, volume rendering, shadows, optical model", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/", } @article{bruckner-2010-ISM, title = "Isosurface Similarity Maps", author = "Stefan Bruckner and Torsten M\"{o}ller", year = "2010", abstract = "In this paper, we introduce the concept of isosurface similarity maps for the visualization of volume data. 
Isosurface similarity maps present structural information of a volume data set by depicting similarities between individual isosurfaces quantified by a robust information-theoretic measure. Unlike conventional histograms, they are not based on the frequency of isovalues and/or derivatives and therefore provide complementary information. We demonstrate that this new representation can be used to guide transfer function design and visualization parameter specification. Furthermore, we use isosurface similarity to develop an automatic parameter-free method for identifying representative isovalues. Using real-world data sets, we show that isosurface similarity maps can be a useful addition to conventional classification techniques.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", note = "EuroVis 2010 Best Paper Award", pages = "773--782", keywords = "isosurfaces, volume visualization, mutual information, histograms", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-ISM/", } @WorkshopTalk{bruckner-2010-SES, title = "volumeshop 101", author = "Stefan Bruckner", year = "2010", abstract = "For the integrated examples (startable using the little VolumeShop icon on the respective slides) to run, you need to modify ``volumeshop.cmd'' and point it to a compiled VolumeShop-Version (64-Bit).", month = apr, event = "Software Engineering Seminar", location = "Vancouver, Vienna", keywords = "Tutorial, VolumeShop", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-SES/", } @inproceedings{patel-2010-SVV, title = "Seismic Volume Visualization for Horizon Extraction", author = "Daniel Patel and Stefan Bruckner and Ivan Viola and Eduard Gr\"{o}ller", year = "2010", abstract = "Seismic horizons indicate change in rock properties and are central in geoscience interpretation. Traditional interpretation systems involve time consuming and repetitive manual volumetric seeding for horizon growing. 
We present a novel system for rapidly interpreting and visualizing seismic volumetric data. First we extract horizon surface-parts by preprocessing the seismic data. Then during interaction the user can assemble in realtime the horizon parts into horizons. Traditional interpretation systems use gradient-based illumination models in the rendering of the seismic volume and polygon rendering of horizon surfaces. We employ realtime gradient-free forward-scattering in the rendering of seismic volumes yielding results similar to high-quality global illumination. We use an implicit surface representation of horizons allowing for a seamless integration of horizon rendering and volume rendering. We present a collection of novel techniques constituting an interpretation and visualization system highly tailored to seismic data interpretation.", month = mar, location = "Taipei, Taiwan", booktitle = "Proceedings of IEEE Pacific Visualization 2010", pages = "73--80", keywords = "volume visualization, horizon extraction, seismic data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/patel-2010-SVV/", } @inproceedings{haidacher_2010_statTF, title = "Volume Visualization based on Statistical Transfer-Function Spaces", author = "Martin Haidacher and Daniel Patel and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2010", abstract = "It is a difficult task to design transfer functions for noisy data. In traditional transfer-function spaces, data values of different materials overlap. In this paper we introduce a novel statistical transfer-function space which in the presence of noise, separates different materials in volume data sets. Our method adaptively estimates statistical properties, i.e. the mean value and the standard deviation, of the data values in the neighborhood of each sample point. These properties are used to define a transfer-function space which enables the distinction of different materials. 
Additionally, we present a novel approach for interacting with our new transfer-function space which enables the design of transfer functions based on statistical properties. Furthermore, we demonstrate that statistical information can be applied to enhance visual appearance in the rendering process. We compare the new method with 1D, 2D, and LH transfer functions to demonstrate its usefulness.", month = mar, booktitle = "Proceedings of the IEEE Pacific Visualization 2010", pages = "17--24", keywords = "transfer function, statistics, shading, noisy data, classification", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/haidacher_2010_statTF/", } @incollection{bruckner-2010-IFC, title = "Illustrative Focus+Context Approaches in Interactive Volume Visualization", author = "Stefan Bruckner and Eduard Gr\"{o}ller and Klaus Mueller and Bernhard Preim and Deborah Silver", year = "2010", abstract = "Illustrative techniques are a new and exciting direction in visualization research. Traditional techniques which have been used by scientific illustrators for centuries are re-examined under the light of modern computer technology. In this paper, we discuss the use of the focus+context concept for the illustrative visualization of volumetric data. 
We give an overview of the state-of-the-art and discuss recent approaches which employ this concept in novel ways.", booktitle = "Scientific Visualization: Advanced Concepts", chapter = "10", editor = "Hans Hagen", isbn = "978-3-939897-19-4", note = "The article was originally written in 2005 after the Dagstuhl Seminar on Scientific Visualization and reflects the state-of-the-art at that time.", series = "Dagstuhl Follow-Ups", keywords = "Illustrative Visualization, Volumetric Data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-IFC/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, pointbased representations, or others. 
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers & Graphics", number = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @article{bruckner-2009-BVQ, title = "BrainGazer - Visual Queries for Neurobiology Research", author = "Stefan Bruckner and Veronika Solteszova and Eduard Gr\"{o}ller and Ji\v{r}\'{i} Hlad\r{u}vka and Katja B\"{u}hler and Jai Yu and Barry Dickson", year = "2009", abstract = "Neurobiology investigates how anatomical and physiological relationships in the nervous system mediate behavior. Molecular genetic techniques, applied to species such as the common fruit fly Drosophila melanogaster, have proven to be an important tool in this research. Large databases of transgenic specimens are being built and need to be analyzed to establish models of neural information processing. In this paper we present an approach for the exploration and analysis of neural circuits based on such a database. We have designed and implemented BrainGazer, a system which integrates visualization techniques for volume data acquired through confocal microscopy as well as annotated anatomical structures with an intuitive approach for accessing the available information. We focus on the ability to visually query the data based on semantic as well as spatial relationships. Additionally, we present visualization techniques for the concurrent depiction of neurobiological volume data and geometric objects which aim to reduce visual clutter. 
The described system is the result of an ongoing interdisciplinary collaboration between neurobiologists and visualization researchers.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "15", number = "6", pages = "1497--1504", keywords = "biomedical visualization, neurobiology, visual queries, volume visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/", } @article{bruckner-2009-IVV, title = "Instant Volume Visualization using Maximum Intensity Difference Accumulation", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2009", abstract = "It has long been recognized that transfer function setup for Direct Volume Rendering (DVR) is crucial to its usability. However, the task of finding an appropriate transfer function is complex and time-consuming even for experts. Thus, in many practical applications simpler techniques which do not rely on complex transfer functions are employed. One common example is Maximum Intensity Projection (MIP) which depicts the maximum value along each viewing ray. In this paper, we introduce Maximum Intensity Difference Accumulation (MIDA), a new approach which combines the advantages of DVR and MIP. Like MIP, MIDA exploits common data characteristics and hence does not require complex transfer functions to generate good visualization results. It does, however, feature occlusion and shape cues similar to DVR. Furthermore, we show that MIDA – in addition to being a useful technique in its own right – can be used to smoothly transition between DVR and MIP in an intuitive manner. 
MIDA can be easily implemented using volume raycasting and achieves real-time performance on current graphics hardware.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "3", issn = "0167-7055", pages = "775--782", keywords = "illustrative visualization, maximum intensity projection, direct volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-IVV/", } @inproceedings{kohlmann-2009-cp, title = "Contextual Picking of Volumetric Structures", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2009", month = may, isbn = "978-1-4244-4404-5", location = "Peking, China", editor = "Peter Eades, Thomas Ertl, Han-Wei Shen", booktitle = "Proceedings of the IEEE Pacific Visualization Symposium 2009", pages = "185--192", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-cp/", } @techreport{TR-186-2-08-14, title = "Smart Linking of 2D and 3D Views in Medical Applications", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "This paper presents two techniques for the linking of 2D and 3D views in medical applications. Hereby, the goal is a better integration of 3D volume visualization into the diagnostic workflow. Until now, the main obstacle for a good integration is the time-consuming process to adjust various parameters. The LiveSync interaction metaphor is a new concept to synchronize 2D slice views and 3D volumetric views of medical data sets. A single intuitive picking interaction on anatomical structures which are detected in 2D slices results in an automatically generated 3D view. To further improve the integration contextual picking is presented as a method for the interactive identification of contextual interest points within volumetric data. 
Our results demonstrate how these techniques improve the efficiency to generate diagnostically relevant images and how contextual interest points can, e.g., facilitate the highlighting of relevant structures.", month = dec, number = "TR-186-2-08-14", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/TR-186-2-08-14/", } @inproceedings{haidacher-2008-vcbm, title = "Information-based Transfer Functions for Multimodal Visualization", author = "Martin Haidacher and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "Transfer functions are an essential part of volume visualization. In multimodal visualization at least two values exist at every sample point. Additionally, other parameters, such as gradient magnitude, are often retrieved for each sample point. To find a good transfer function for this high number of parameters is challenging because of the complexity of this task. In this paper we present a general information-based approach for transfer function design in multimodal visualization which is independent of the used modality types. Based on information theory, the complex multi-dimensional transfer function space is fused to allow utilization of a well-known 2D transfer function with a single value and gradient magnitude as parameters. Additionally, a quantity is introduced which enables better separation of regions with complementary information. The benefit of the new method in contrast to other techniques is a transfer function space which is easy to understand and which provides a better separation of different tissues. 
The usability of the new approach is shown on examples of different modalities.", month = oct, isbn = "978-3-905674-13-2", publisher = "Eurographics Association", location = "Delft", issn = "2070-5778", editor = "C. P. Botha and G. Kindlmann and W. J. Niessen and B. Preim", booktitle = "VCBM", pages = "101--108", keywords = "Multimodal Visualization, Transfer Function, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/haidacher-2008-vcbm/", } @article{Rautek-2008-IDS, title = "Interaction-Dependent Semantics for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2008", abstract = "In traditional illustration the choice of appropriate styles and rendering techniques is guided by the intention of the artist. For illustrative volume visualizations it is difficult to specify the mapping between the 3D data and the visual representation that preserves the intention of the user. The semantic layers concept establishes this mapping with a linguistic formulation of rules that directly map data features to rendering styles. With semantic layers fuzzy logic is used to evaluate the user defined illustration rules in a preprocessing step. In this paper we introduce interaction-dependent rules that are evaluated for each frame and are therefore computationally more expensive. Enabling interaction-dependent rules, however, allows the use of a new class of semantics, resulting in more expressive interactive illustrations. We show that the evaluation of the fuzzy logic can be done on the graphics hardware enabling the efficient use of interaction-dependent semantics. Further we introduce the flat rendering mode and discuss how different rendering parameters are influenced by the rule base. 
Our approach provides high quality illustrative volume renderings at interactive frame rates, guided by the specification of illustration rules.", month = may, journal = "Computer Graphics Forum", volume = "27", number = "3", pages = "847--854", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/", } @inproceedings{bruckner-2008-IVV, title = "Integrating Volume Visualization Techniques Into Medical Applications", author = "Stefan Bruckner and Peter Kohlmann and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "One of the main obstacles in integrating 3D volume visualization in the clinical workflow is the time-consuming process of adjusting parameters such as viewpoint, transfer functions, and clipping planes required to generate a diagnostically relevant image. Current applications therefore make scarce use of volume rendering and instead primarily employ 2D views generated through standard techniques such as multi-planar reconstruction (MPR). However, in many cases 3D renditions can supply additional useful information. This paper discusses ongoing work which aims to improve the integration of 3D visualization into the diagnostic workflow by automatically generating meaningful renditions based on minimal user interaction. 
A method for automatically generating 3D views for structures in 2D slices based on a single picking interaction is presented.", month = may, isbn = "978-1-4244-2002-5", location = "Paris, Frankreich", booktitle = "Proceedings of 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro", pages = "820--823", keywords = "viewpoint selection, medical visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IVV/", } @inproceedings{kohlmann-2008-lse, title = "LiveSync++: Enhancements of an Interaction Metaphor", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "The LiveSync interaction metaphor allows an efficient and non-intrusive integration of 2D and 3D visualizations in medical workstations. This is achieved by synchronizing the 2D slice view with the volumetric view. The synchronization is initiated by a simple picking on a structure of interest in the slice view. In this paper we present substantial enhancements of the existing concept to improve its usability. First, an efficient parametrization for the derived parameters is presented, which allows hierarchical refinement of the search space for good views. Second, the extraction of the feature of interest is performed in a way, which is adapting to the volumetric extent of the feature. The properties of the extracted features are utilized to adjust a predefined transfer function in a feature-enhancing manner. Third, a new interaction mode is presented, which allows the integration of more knowledge about the user-intended visualization, without increasing the interaction effort. 
Finally, a new clipping technique is integrated, which guarantees an unoccluded view on the structure of interest while keeping important contextual information.", month = may, location = "Windsor, Ontario, Canada", booktitle = "Proceedings of Graphics Interface 2008", pages = "81--88", keywords = "Viewpoint Selection, Linked Views, Medical Visualization, Smart Interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/kohlmann-2008-lse/", } @phdthesis{bruckner-2008-IIV, title = "Interactive Illustrative Volume Visualization", author = "Stefan Bruckner", year = "2008", abstract = "Illustrations are essential for the effective communication of complex subjects. Their production, however, is a difficult and expensive task. In recent years, three-dimensional imaging has become a vital tool not only in medical diagnosis and treatment planning, but also in many technical disciplines (e.g., material inspection), biology, and archeology. Modalities such as X-Ray Computed Tomography (CT) and Magnetic Resonance Imaging (MRI) produce high-resolution volumetric scans on a daily basis. It seems counter-intuitive that even though such a wealth of data is available, the production of an illustration should still require a mainly manual and time-consuming process. This thesis is devoted to the computer-assisted generation of illustrations directly from volumetric data using advanced visualization techniques. The concept of a direct volume illustration system is introduced for this purpose. Instead of requiring an additional modeling step, this system allows the designer of an illustration to work directly on the measured data. Abstraction, a key component of traditional illustrations, is used in order to reduce visual clutter, emphasize important structures, and reveal hidden detail. Low-level abstraction techniques are concerned with the appearance of objects and allow flexible artistic shading of structures in volumetric data sets. 
High-level abstraction techniques control which objects are visible. For this purpose, novel methods for the generation of ghosted and exploded views are introduced. The visualization techniques presented in this thesis employ the features of current graphics hardware to achieve interactive performance. The resulting system allows the generation of expressive illustrations directly from volumetric data with applications in medical training, patient education, and scientific communication.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IIV/", } @inproceedings{ruiz-2008-OVR, title = "Obscurance-based Volume Rendering Framework", author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Obscurances, from which ambient occlusion is a particular case, is a technology that produces natural-looking lighting effects in a faster way than global illumination. Its application in volume visualization is of special interest since it permits us to generate a high quality rendering at a low cost. In this paper, we propose an obscurance-based framework that allows us to obtain realistic and illustrative volume visualizations in an interactive manner. Obscurances can include color bleeding effects without additional cost. 
Moreover, we obtain a saliency map from the gradient of obscurances and we show its application to enhance volume visualization and to select the most salient views.", booktitle = "Proceedings of Volume Graphics 2008", keywords = "volume rendering, illustrative visualization, ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/", } @misc{Rautek-2008-VF, title = "Illustrative Visualization -- New Technology or Useless Tautology?", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Ivan Viola", year = "2008", abstract = "This article can be accessed online in the ACM SIGGRAPH, Computer Graphics Quarterly, Volume 42, Number 3: http://www.siggraph.org/publications/newsletter/volume-42-number-3/illustrative-visualization-2013-new-technology-or-useless-tautology", note = "online journal, without talk", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-VF/", } @inproceedings{ruiz-2008-SEV, title = "Similarity-based Exploded Views", author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Exploded views are often used in illustration to overcome the problem of occlusion when depicting complex structures. In this paper, we propose a volume visualization technique inspired by exploded views that partitions the volume into a number of parallel slabs and shows them apart from each other. The thickness of slabs is driven by the similarity between partitions. We use an information-theoretic technique for the generation of exploded views. First, the algorithm identifies the viewpoint from which the structure is the highest. Then, the partition of the volume into the most informative slabs for exploding is obtained using two complementary similarity-based strategies. 
The number of slabs and the similarity parameter are freely adjustable by the user.", booktitle = "Proceedings of Smart Graphics 2008", pages = "154--165", keywords = "volume visualization, illustrative visualization, exploded views", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/", } @article{kohlmann-2007-livesync, title = "LiveSync: Deformed Viewing Spheres for Knowledge-Based Navigation", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2007", abstract = "Although real-time interactive volume rendering is available even for very large data sets, this visualization method is used quite rarely in the clinical practice. We suspect this is because it is very complicated and time consuming to adjust the parameters to achieve meaningful results. The clinician has to take care of the appropriate viewpoint, zooming, transfer function setup, clipping planes and other parameters. Because of this, most often only 2D slices of the data set are examined. Our work introduces LiveSync, a new concept to synchronize 2D slice views and volumetric views of medical data sets. Through intuitive picking actions on the slice, the users define the anatomical structures they are interested in. The 3D volumetric view is updated automatically with the goal that the users are provided with expressive result images. To achieve this live synchronization we use a minimal set of derived information without the need for segmented data sets or data-specific pre-computations. The components we consider are the picked point, slice view zoom, patient orientation, viewpoint history, local object shape and visibility. We introduce deformed viewing spheres which encode the viewpoint quality for the components. A combination of these deformed viewing spheres is used to estimate a good viewpoint. 
Our system provides the physician with synchronized views which help to gain deeper insight into the medical data with minimal user interaction.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "to be presented at IEEE Visualization 2007", pages = "1544--1551", keywords = "linked views, interaction, medical visualization, navigation, viewpoint selection", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/kohlmann-2007-livesync/", } @article{Rautek-2007-SLI, title = "Semantic Layers for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Direct volume rendering techniques map volumetric attributes (e.g., density, gradient magnitude, etc.) to visual styles. Commonly this mapping is specified by a transfer function. The specification of transfer functions is a complex task and requires expert knowledge about the underlying rendering technique. In the case of multiple volumetric attributes and multiple visual styles the specification of the multi-dimensional transfer function becomes more challenging and non-intuitive. We present a novel methodology for the specification of a mapping from several volumetric attributes to multiple illustrative visual styles. We introduce semantic layers that allow a domain expert to specify the mapping in the natural language of the domain. A semantic layer defines the mapping of volumetric attributes to one visual style. Volumetric attributes and visual styles are represented as fuzzy sets. The mapping is specified by rules that are evaluated with fuzzy logic arithmetics. The user specifies the fuzzy sets and the rules without special knowledge about the underlying rendering technique. 
Semantic layers allow for a linguistic specification of the mapping from attributes to visual styles replacing the traditional transfer function specification.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "to be presented at IEEE Visualization 2007", pages = "1336--1343", keywords = "Illustrative Visualization, Volume Visualization, Focus+Context Techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/", } @article{bruckner-2007-EDF, title = "Enhancing Depth-Perception with Flexible Volumetric Halos", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Volumetric data commonly has high depth complexity which makes it difficult to judge spatial relationships accurately. There are many different ways to enhance depth perception, such as shading, contours, and shadows. Artists and illustrators frequently employ halos for this purpose. In this technique, regions surrounding the edges of certain structures are darkened or brightened which makes it easier to judge occlusion. Based on this concept, we present a flexible method for enhancing and highlighting structures of interest using GPU-based direct volume rendering. Our approach uses an interactively defined halo transfer function to classify structures of interest based on data value, direction, and position. A feature-preserving spreading algorithm is applied to distribute seed values to neighboring locations, generating a controllably smooth field of halo intensities. These halo intensities are then mapped to colors and opacities using a halo profile function. 
Our method can be used to annotate features at interactive frame rates.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", pages = "1344--1351", keywords = "volume rendering, illustrative visualization, halos", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-EDF/", } @article{bruckner-2007-STF, title = "Style Transfer Functions for Illustrative Volume Rendering", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Illustrative volume visualization frequently employs non-photorealistic rendering techniques to enhance important features or to suppress unwanted details. However, it is difficult to integrate multiple non-photorealistic rendering approaches into a single framework due to great differences in the individual methods and their parameters. In this paper, we present the concept of style transfer functions. Our approach enables flexible data-driven illumination which goes beyond using the transfer function to just assign colors and opacities. An image-based lighting model uses sphere maps to represent non-photorealistic rendering styles. Style transfer functions allow us to combine a multitude of different shading styles in a single rendering. We extend this concept with a technique for curvature-controlled style contours and an illustrative transparency model. 
Our implementation of the presented methods allows interactive generation of high-quality volumetric illustrations.", month = sep, journal = "Computer Graphics Forum", volume = "26", number = "3", note = "Eurographics 2007 3rd Best Paper Award", pages = "715--724", keywords = "illustrative visualization, transfer functions, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-STF/", } @article{Kohlmann-2007-EBV, title = "Evaluation of a Bricked Volume Layout for a Medical Workstation based on Java", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2007", abstract = "Volumes acquired for medical examination purposes are constantly increasing in size. For this reason, the computer’s memory is the limiting factor for visualizing the data. Bricking is a well-known concept used for rendering large data sets. The volume data is subdivided into smaller blocks to achieve better memory utilization. Until now, the vast majority of medical workstations use a linear volume layout. We implemented a bricked volume layout for such a workstation based on Java as required by our collaborative company partner to evaluate different common access patterns to the volume data. For rendering, we were mainly interested to see how the performance will differ from the traditional linear volume layout if we generate images of arbitrarily oriented slices via Multi-Planar Reformatting (MPR). Furthermore, we tested access patterns which are crucial for segmentation issues like a random access to data values and a simulated region growing. Our goal was to find out if it makes sense to change the volume layout of a medical workstation to benefit from bricking. We were also interested to identify the tasks where problems might occur if bricking is applied. 
Overall, our results show that it is feasible to use a bricked volume layout in the stringent context of a medical workstation implemented in Java.", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", issn = "1213-6972", pages = "83--90", keywords = "MPR, Bricked Volume Layout, Medical Visualization, Medical Workstation", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Kohlmann-2007-EBV/", } @misc{tut-vis-2007, title = "IEEE Visualization Tutorial on Illustrative Display and Interaction in Visualization", author = "Ivan Viola and Stefan Bruckner and Mario Costa Sousa and David Ebert and Carlos Correa", year = "2007", abstract = "The area of illustrative visualization is concerned with developing methods to enhance the depiction of scientific data based on principles founded in traditional illustration. The illustration community has century-long experience in adapting their techniques to human perceptual needs in order to generate an effective depiction which conveys the desired message. Thus, their methods can provide us with important insights into visualization problems. In this tutorial, the concepts in illustrative visualization are reviewed. An important aspect here is interaction: while traditional illustrations are commonly only presented as static images, computer-assisted visualization enables interactive exploration and manipulation of complex scientific data. Only by coupling illustrative visualization with effective interaction techniques its full potential can be exploited. The tutorial starts with a general introduction into the area of illustrative visualization. The concept of importance-driven visualization and its applications are presented. Then we proceed with a discussion how traditional abstraction techniques can be applied in an interactive context using importance-based methods. This ranges from low-level appearance to smart viewpoint-dependent visibility techniques such as cutaways or exploded views. 
Further advanced manipulation strategies are discussed in the third part. The use of deformations to enhance visibility of certain features while providing context or to abstract the structure of complex objects through direct interaction with the data is examined. As many of the presented methods rely on a separation of focus and context, i.e., the important structures in the data have been identified, the tutorial discusses approaches for selecting objects of interest in a three-dimensional environment using intuitive sketch-based interfaces. Since the effectiveness of a user-interface is heavily dependent on the previous knowledge of the user, the last part of the tutorial examines the concept of layering interfaces based on user expertise. Finally, the application of illustrative display and interaction techniques for non-traditional modalities such as mobile devices concludes the tutorial. IEEE Visualization 2007 Tutorial Page: http://vis.computer.org/vis2007/session/tutorials.html#t7 See also previous tutorials on Illustrative Visualization: IEEE Visualization 2006 Tutorial on Illustrative Visualization for Science and Medicine http://www.cg.tuwien.ac.at/research/publications/2006/tut-vis-2006/ Eurographics 2006 Tutorial on Illustrative Visualization for Science and Medicine http://www.cg.tuwien.ac.at/research/publications/2006/tut-eg-2006/ SIGGRAPH 2006 Course on Illustrative Visualization for Science and Medicine http://www.cg.tuwien.ac.at/research/publications/2006/tut-siggraph-2006/ IEEE Visualization 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/Viola-vistutillustrativevis/ Eurographics 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/eg-tut2005-iv/ ", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/tut-vis-2007/", } @article{bruckner-2006-ICE, title = "Illustrative Context-Preserving Exploration of Volume Data", author = "Stefan Bruckner and 
S\"{o}ren Grimm and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2006", abstract = "In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Highly transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration. It provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information.", month = nov, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "6", volume = "12", pages = "1559--1569", keywords = "focus+context techniques, volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-ICE/", } @article{bruckner-2006-EVV, title = "Exploded Views for Volume Data", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2006", abstract = "Exploded views are an illustration technique where an object is partitioned into several segments. These segments are displaced to reveal otherwise hidden detail. In this paper we apply the concept of exploded views to volumetric data in order to solve the general problem of occlusion. In many cases an object of interest is occluded by other structures. 
While transparency or cutaways can be used to reveal a focus object, these techniques remove parts of the context information. Exploded views, on the other hand, do not suffer from this drawback. Our approach employs a force-based model: the volume is divided into a part configuration controlled by a number of forces and constraints. The focus object exerts an explosion force causing the parts to arrange according to the given constraints. We show that this novel and flexible approach allows for a wide variety of explosion-based visualizations including view-dependent explosions. Furthermore, we present a high-quality GPU-based volume ray casting algorithm for exploded views which allows rendering and interaction at several frames per second.", month = sep, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "12", number = "5", issn = "1077-2626", pages = "1077--1084", keywords = "exploded views, illustrative visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-EVV/", } @inproceedings{RAUTEK06, title = "D²VR: High Quality Volume Rendering of Projection-based Volumetric Data", author = "Peter Rautek and Bal\'{a}zs Cs\'{e}bfalvi and S\"{o}ren Grimm and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2006", abstract = "Volume rendering techniques are conventionally classified as either direct or indirect methods. Indirect methods require to transform the initial volumetric model into an intermediate geometrical model in order to efficiently visualize it. In contrast, direct volume rendering (DVR) methods can directly process the volumetric data. Modern CT scanners usually provide data as a set of samples on a rectilinear grid, which is computed from the measured projections by discrete tomographic reconstruction. Therefore the rectilinear grid can already be considered as an intermediate volume representation. In this paper we introduce direct direct volume rendering (D²VR). 
D²VR does not require a rectilinear grid, since it is based on an immediate processing of the measured projections. Arbitrary samples for ray casting are reconstructed from the projections by using the Filtered Back-Projection algorithm. Our method removes a lossy resampling step from the classical volume rendering pipeline. It provides much higher accuracy than traditional grid-based resampling techniques do. Furthermore we also present a novel high-quality gradient estimation scheme, which is also based on the Filtered Back-Projection algorithm.", month = may, publisher = "IEEE CS", booktitle = "Proceedings of Eurographics / IEEE VGTC Symposium on Visualization", number = "In Proceedings of EuroVis", pages = "211--218", keywords = "Volume Rendering, Filtered Back-Projection, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/RAUTEK06/", } @misc{tut-siggraph-2006, title = "SIGGRAPH Course Illustrative Visualization for Medicine and Science", author = "Mario Costa Sousa and David Ebert and Bruce Gooch and Ivan Viola and Stefan Bruckner and Bill Andrews and Don Stredney and Nikolai Svakhine", year = "2006", abstract = "Research and recent development in computer-generated illustration techniques within non-photorealistic rendering. The course concentrates specifically on illustration methods for computer-generated technical, scientific, medical, and interactive illustrations of both surface and volumetric data. It also presents the perspective of two medical illustrators on computerized illustration. 
ACM SIGGRAPH 2006 course page: http://www.siggraph.org/s2006/main.php?f=conference&p=courses&s=6 Further Information: http://pages.cpsc.ucalgary.ca/~mario/webpage/publ/courses.htm See also previous tutorials on Illustrative Visualization: IEEE Visualization 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/Viola-vistutillustrativevis/ Eurographics 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/eg-tut2005-iv/ ", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/tut-siggraph-2006/", } @misc{tut-vis-2006, title = "IEEE Visualization Tutorial on Illustrative Visualization for Science and Medicine", author = "Ivan Viola and Mario Costa Sousa and David Ebert and Bill Andrews and Bruce Gooch and Stefan Bruckner and Bernhard Preim and Don Stredney and Nikolai Svakhine and Christian Tietjen", year = "2006", abstract = "This tutorial presents recent research and developments from academia in illustrative visualization focusing on its use for medical/science subjects. Lectures are organized within a comprehensive illustration framework, focusing on three main components: • Traditional and computerized illustration techniques and principles for technical and scientific subjects • Evaluation and practical use • Viewing & rendering Presentation of topics is balanced between descriptions of traditional methods and practices, practical implementation motivated approaches and evaluation, and detailed descriptions and analysis of illustrative techniques and algorithms. We begin in the morning with a lecture presenting an overview of traditional illustration for technical, scientific, and medical subjects. This is followed by a description of the main components in an illustrative visualization pipeline for developing systems to assist technical and scientific illustrators. 
The tutorial progresses with an overview of the techniques used in illustration as well as approaches to evaluate their use and effectiveness. The morning concludes with the start of the “viewing and rendering” section. The three lectures in this section describe the latest approaches in computerized illustration algorithms for scientific and medical data for both surface and volumetric data, covering techniques from silhouette enhancement to stippling, to cut-away viewing, labeling, and focus+context rendering. Each of the lectures also discusses practical issues in making these techniques interactive and their use for different application domains. The tutorial includes a trained medical illustrator discussing the principles/caveats/issues in using illustration techniques in real-world medical applications. This lecture will also describe an evaluation, from an illustrator’s point of view, of the use and quality of the techniques presented throughout the day. The tutorial concludes with discussion on specific medical case studies where illustrative visualization has been effectively applied. 
IEEE Visualization 2006 Tutorial Page: http://vis.computer.org/vis2006/session/tutorials.html#t3 See also previous tutorials on Illustrative Visualization: Eurographics 2006 Tutorial on Illustrative Visualization for Science and Medicine http://www.cg.tuwien.ac.at/research/publications/2006/tut-eg-2006/ SIGGRAPH 2006 Course on Illustrative Visualization for Science and Medicine http://www.cg.tuwien.ac.at/research/publications/2006/tut-siggraph-2006/ IEEE Visualization 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/Viola-vistutillustrativevis/ Eurographics 2005 Tutorial on Illustrative Visualization http://www.cg.tuwien.ac.at/research/publications/2005/eg-tut2005-iv/ ", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/tut-vis-2006/", } @misc{viola-popular-article-2006, title = "Explodierende K\"{o}rper", author = "Michael Krassnitzer and Ivan Viola and Stefan Bruckner", year = "2006", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/viola-popular-article-2006/", } @inproceedings{coto-2005-MAC, title = "MammoExplorer: An Advanced CAD Application for Breast DCE-MRI", author = "Ernesto Coto and S\"{o}ren Grimm and Stefan Bruckner and Eduard Gr\"{o}ller and Armin Kanitsar and Omaira Rodriguez", year = "2005", abstract = "Currently X-ray mammography is the most widely used method for early detection of breast cancer. However, the use of Dynamic Contrast Enhanced MRI (DCE-MRI) has gained wider attention, since it considerably improves tumor detection and classification by analyzing the flow of contrast agent within the breast tissue. In this paper we present MammoExplorer, a CAD application that combines advanced interaction, segmentation and visualization techniques to explore Breast DCE-MRI data. Our application uses Brushing and Linking, Two-level Volume Rendering, Importance-driven Volume Rendering, and False Color Maps. 
In addition, we present Enhancement Scatterplots, a novel graphical representation of DCE-MRI data, novel segmentation approaches, and a new way to explore time-varying CE-MRI data.", month = nov, isbn = "3898380688", location = "Erlangen, Germany", editor = "G. Greiner, J. Hornegger, H. Niemann, M. Stamminger", booktitle = "Proceedings of Vision, Modelling, and Visualization 2005", pages = "91--98", keywords = "CAD, Breast cancer, Contrast Enhanced MRI", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/coto-2005-MAC/", } @inproceedings{bruckner-2005-VIS, title = "VolumeShop: An Interactive System for Direct Volume Illustration", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2005", abstract = "Illustrations play a major role in the education process. Whether used to teach a surgical or radiologic procedure, to illustrate normal or aberrant anatomy, or to explain the functioning of a technical device, illustration significantly impacts learning. Although many specimens are readily available as volumetric data sets, particularly in medicine, illustrations are commonly produced manually as static images in a time-consuming process. Our goal is to create a fully dynamic three-dimensional illustration environment which directly operates on volume data. Single images have the aesthetic appeal of traditional illustrations, but can be interactively altered and explored. In this paper we present methods to realize such a system which combines artistic visual styles and expressive visualization techniques. We introduce a novel concept for direct multi-object volume visualization which allows control of the appearance of inter-penetrating objects via two-dimensional transfer functions. Furthermore, a unifying approach to efficiently integrate many non-photorealistic rendering models is presented. We discuss several illustrative concepts which can be realized by combining cutaways, ghosting, and selective deformation. 
Finally, we also propose a simple interface to specify objects of interest through three-dimensional volumetric painting. All presented methods are integrated into VolumeShop, an interactive hardware-accelerated application for direct volume illustration.", month = oct, isbn = "0780394623", location = "Minneapolis, USA", editor = "C. T. Silva, E. Gr\"{o}ller, H. Rushmeier", booktitle = "Proceedings of IEEE Visualization 2005", pages = "671--678", keywords = "focus+context techniques, illustrative visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-VIS/", } @misc{bruckner-2005-vid, title = "VolumeShop: Interactive Direct Volume Illustration", author = "Stefan Bruckner and Ivan Viola and Eduard Gr\"{o}ller", year = "2005", abstract = "Illustrations play a major role in the education process. Whether used to teach a surgical or radiologic procedure, to illustrate normal or aberrant anatomy, or to explain the functioning of a technical device, illustration significantly impacts learning. Many specimens are readily available as volumetric data sets, particularly in medicine. Illustrations, however, are commonly produced manually as static images in a time-consuming process. Our goal is to create a fully dynamic three-dimensional illustration environment which directly operates on volume data. Single images have the aesthetic appeal of traditional illustrations, but can be interactively altered and explored. We present methods to realize such a system which combines artistic visual styles and expressive visualization techniques. Our implementation exploits the latest generation of GPUs and, thus, is capable of handling commonly sized data sets at interactive frame rates. 
", month = aug, booktitle = "ACM Siggraph 2005 DVD Proceedings (Technical Sketch)", keywords = "focus+context techniques, volume rendering, illustrative techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-vid/", } @inproceedings{bruckner-2005-ICV, title = "Illustrative Context-Preserving Volume Rendering", author = "Stefan Bruckner and S\"{o}ren Grimm and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2005", abstract = "In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Very transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration that provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information. 
", month = may, booktitle = "Proceedings of EuroVis 2005", pages = "69--76", keywords = "non-photorealistic techniques, focus+context techniques, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ICV/", } @talk{bruckner-2005-AIV, title = "Applications of Illustrative Volume Visualization Techniques", author = "Stefan Bruckner", year = "2005", abstract = "Illustrative visualization deals with computer supported interactive and expressive visualizations through abstractions which are inspired by traditional illustrations. This talk covers two important aspects of illustrative visualization of volume data: exploration and communication. Exploration deals with techniques for the rapid visualization of data with limited or no prior knowledge about the nature and/or structure of the data. Communication, on the other hand, is concerned with conveying complex structures or relationships to individuals. The talk discusses recent approaches in both areas.", location = "Magdeburg, Germany", keywords = "exploration, illustrative visualization, communication", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-AIV/", } @talk{bruckner-2005-ATI, title = "Abstraction Techniques for Interactive Illustration", author = "Stefan Bruckner", year = "2005", abstract = "Illustrations play a major role in the education process. Whether used to teach a surgical or radiologic procedure, to illustrate normal or aberrant anatomy, or to explain the functioning of a technical device, illustration significantly impacts learning. One of the key concepts for creating an expressive illustration is abstraction. Abstraction introduces a distortion between the visualization and the underlying model according to the communicative intent of the illustration. Inspired by observations from hand-made illustrations, similar techniques for the generation of rendered images have been developed. 
These techniques work on different levels: low level abstraction techniques (stylized depiction methods) deal with how objects should be presented, while high level abstraction techniques (smart visibility approaches) are concerned with what should be visible and recognizable. We review several existing approaches from both categories and describe important concepts used in the design of a system for creating interactive illustrations directly from volumetric data.", event = "Dagstuhl Seminar on Scientific Visualization: Challenges for the Future", location = "Dagstuhl, Germany", keywords = "illustration, scientific visualization, abstraction", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ATI/", } @article{grimm-2004-arefined, title = "A Refined Data Addressing and Processing Scheme to Accelerate Volume Raycasting", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "Most volume rendering systems based on CPU volume raycasting still suffer from inefficient CPU utilization and high memory usage. To target these issues we present a new technique for efficient data addressing. Furthermore, we introduce a new processing scheme for volume raycasting which exploits thread-level parallelism—a technology now supported by commodity computer architectures.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", number = "5", volume = "28", booktitle = "Computer & Graphics, Vol. 
28 (5)", isbn = "0097-8493", pages = "719--729", keywords = "Volume Raycasting, Bricking, Parallel Co", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-arefined/", } @inproceedings{grimm-2004-memory, title = "Memory Efficient Acceleration Structures and Techniques for CPU-based Volume Raycasting of Large Data", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "Most CPU-based volume raycasting approaches achieve high performance by advanced memory layouts, space subdivision, and excessive pre-computing. Such approaches typically need an enormous amount of memory. They are limited to sizes which do not satisfy the medical data used in daily clinical routine. We present a new volume raycasting approach based on image-ordered raycasting with object-ordered processing, which is able to perform high-quality rendering of very large medical data in real-time on commodity computers. For large medical data such as computed tomographic (CT) angiography run-offs (512x512x1202) we achieve rendering times up to 2.5 fps on a commodity notebook. We achieve this by introducing a memory efficient acceleration technique for on-the-fly gradient estimation and a memory efficient hybrid removal and skipping technique of transparent regions. We employ quantized binary histograms, granular resolution octrees, and a cell invisibility cache. These acceleration structures require just a small extra storage of approximately 10%. ", month = oct, isbn = "0-7803-8781-3", editor = "D. Silver, T. Ertl, C. 
Silva", booktitle = "Proceedings IEEE/SIGGRAPH Symposium on Volume Visualization and Graphics", pages = "1--8", keywords = "Three-Dimensional Graphics and Realism,", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-memory/", } @inproceedings{GRIMM-2004-FDMX-P, title = "Flexible Direct Multi-Volume Rendering in Interactive Scenes", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "In this paper we describe methods to efficiently visualize multiple intersecting volumetric objects. We introduce the concept of V-Objects. V-Objects represent abstract properties of an object connected to a volumetric data source. We present a method to perform direct volume rendering of a scene comprised of an arbitrary number of possibly intersecting V-Objects. The idea of our approach is to distinguish between regions of intersection, which need costly multi-volume processing, and regions containing only one V-Object, which can be processed using a highly efficient brick-wise volume traversal scheme. Using this method, we achieve significant performance gains for multi-volume rendering. We show possible medical applications, such as surgical planning, diagnosis, and education.", month = oct, location = "Stanford, USA", booktitle = "Vision, Modeling, and Visualization (VMV)", pages = "379--386", keywords = "multi volume rendering, medical visualization, volume raycasting", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/GRIMM-2004-FDMX-P/", } @article{grimm-2004-volume, title = "VOTS: VOlume doTS as a Point-Based Representation of Volumetric Data", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "We present Volume dots (Vots), a new primitive for volumetric data modelling, processing, and rendering. Vots are a point-based representation of volumetric data. 
An individual Vot is specified by the coefficients of a Taylor series expansion, i.e. the function value and higher order derivatives at a specific point. A Vot does not only represent a single sample point, it represents the underlying function within a region. With the Vots representation we have a more intuitive and high-level description of the volume data. This allows direct analytical examination and manipulation of volumetric datasets. Vots enable the representation of the underlying scalar function with specified precision. User-centric importance sampling is also possible, i.e., unimportant volume parts are still present but represented with just very few Vots. As proof of concept, we show Maximum Intensity Projection based on Vots.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", issn = "0167-7055", pages = "661--668", keywords = "Graphics Data Structures and Data Types", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-volume/", } @mastersthesis{bruckner-2004-EVV, title = "Efficient Volume Visualization of Large Medical Datasets", author = "Stefan Bruckner", year = "2004", abstract = "The size of volumetric datasets used in medical environments is increasing at a rapid pace. Due to excessive pre-computation and memory demanding data structures, most current approaches for volume visualization do not meet the requirements of daily clinical routine. In this diploma thesis, an approach for interactive high-quality rendering of large medical data is presented. It is based on image-order raycasting with object-order data traversal, using an optimized cache coherent memory layout. New techniques and parallelization strategies for direct volume rendering of large data on commodity hardware are presented. 
By using new memory efficient acceleration data structures, high-quality direct volume rendering of several hundred megabyte sized datasets at sub-second frame rates on a commodity notebook is achieved.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "volume rendering, large data", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/bruckner-2004-EVV/", } @xmascard{xmas-2004, title = "X-Mas 2004", author = "Stefan Bruckner", year = "2004", abstract = "THE X-MAS GECKO The image was created in one pass using direct multi-volume rendering of a single scene consisting of three different datasets. Only the eyes were added using 2D image processing. The hat dataset is a distance field computed from a geometric model. The beard has been created by sculpting 3D Perlin noise. The gecko dataset is a X-ray computed tomography scan performed by the University of Veterinary Medicine Vienna. No animals were harmed during the production of this image. DER WEIHNACHTSGECKO Das Bild wurde mit Direct Multi-Volume Rendering einer Szene bestehend aus drei verschiedenen Datens\"{a}tzen erstellt. Nur die Augen wurden mit 2D Bildverarbeitung hinzugef\"{u}gt. Die M\"{u}tze ist ein Distanzfeld, welches aus einem geometrischen Modell berechnet wurde. Der Bart wurde durch Sculpting von 3D Perlin Noise erzeugt. Der Gecko Datensatz ist eine R\"{o}ntgencomputertomographie-Aufnahme, die von der Veterin\"{a}rmedizinische Universit\"{a}t Wien durchgef\"{u}hrt wurde. 
Bei der Erstellung dieses Bildes wurden keine Tiere verletzt.", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/xmas-2004/", } @inproceedings{Bruckner-2003-The, title = "The Inverse Warp: Non-Invasive Integration of Shear-Warp Volume Rendering into Polygon Rendering Pipelines", author = "Stefan Bruckner and Dieter Schmalstieg and Helwig Hauser and Eduard Gr\"{o}ller", year = "2003", abstract = "In this paper, a simple and efficient solution for combining shear-warp volume rendering and the hardware graphics pipeline is presented. The approach applies an inverse warp transformation to the Z-Buffer, containing the rendered geometry. This information is used for combining geometry and volume data during compositing. We present applications of this concept which include hybrid volume rendering, i.e., concurrent rendering of polygonal objects and volume data, and volume clipping on convex clipping regions. Furthermore, it can be used to efficiently define regions with different rendering modes and transfer functions for focus+context volume rendering. Empirical results show that the approach has very low impact on performance.", month = nov, isbn = "3898380483", publisher = "infix", editor = "T. Ertl and B. Girod and G. Greiner and H. Niemann and H.-P. Seidel and E. Steinbach and R. Westermann", booktitle = "Workshop on Vision, Modeling and Visualization", pages = "529--536", keywords = "focus+context techniques, clipping, hybrid volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Bruckner-2003-The/", }