@article{Labschuetz_Matthias_2016_JITT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem is the use of hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = jan, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Published in January 2016", number = "1", volume = "22", event = "IEEE SciVis 2015", location = "Chicago, IL, USA", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_JITT/", }
@article{karimov-2015-HD, title = "Guided Volume Editing based on Histogram Dissimilarity", author = "Alexey Karimov and Gabriel Mistelbauer and Thomas Auzinger and Stefan Bruckner", year = "2015", abstract = "Segmentation of volumetric data is an important part of many analysis pipelines, but frequently requires manual inspection and correction. While plenty of volume editing techniques exist, it remains cumbersome and error-prone for the user to find and select appropriate regions for editing. We propose an approach to improve volume editing by detecting potential segmentation defects while considering the underlying structure of the object of interest. Our method is based on a novel histogram dissimilarity measure between individual regions, derived from structural information extracted from the initial segmentation. Based on this information, our interactive system guides the user towards potential defects, provides integrated tools for their inspection, and automatically generates suggestions for their resolution. We demonstrate that our approach can reduce interaction effort and supports the user in a comprehensive investigation for high-quality segmentations.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "3", pages = "91--100", keywords = "Edge and feature detection, Image Processing and Computer Vision, Computer Graphics, Display algorithms, Picture/Image Generation, Segmentation, Methodology and Techniques, Interaction techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/karimov-2015-HD/", } @article{mindek-2014-mcs, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems.
We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "132--144", keywords = "annotations, spatial selections, visual analytics, interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-mcs/", } @article{Viola_Ivan_IIP, title = "Interactively illustrating polymerization using three-level model fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Background: Research in cell biology is steadily contributing new knowledge about many aspects of physiological processes, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. Results: In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach best suits the different involved levels of detail, and we additionally enable a form of interactive steering, while the process is illustrated. We demonstrate the suitability of our approach in the context of several polymerization processes and report from a first evaluation with domain experts. Conclusion: We conclude that our approach provides a new, hybrid modeling approach for illustrating the process of emergence in physiology, embedded in a densely filled environment. Our approach of a complementary fusion of three systems combines the strong points from the different modeling approaches and is capable of bridging different spatial and temporal scales.", month = oct, issn = "1471-2105", journal = "BMC Bioinformatics", number = "345", volume = "15", pages = "1--16", keywords = "Multi-agent modeling, L-system modeling, Biochemical visualization, Visualization of physiology, Polymerization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_IIP/", } @article{Groeller_2014_UPS, title = "Guest editorial—Uncertainty and parameter space analysis in visualization", author = "Christoph Heinzl and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2014", abstract = "Within the past decades visualization advanced to a powerful means of exploring and analyzing data. Recent developments in both hard- and software contributed to previously unthinkable evaluations and visualizations of data with strongly increasing sizes and levels of complexity. Providing just insight into available data of a problem seems not to be sufficient anymore: Uncertainty and parameter space analyses in visualization are becoming more prevalent and may be found in astronomic, (bio)-medical, industrial, and engineering applications. The major goal is to find out, at which stage of the pipeline - from data acquisition to the final rendering of the output image - how much uncertainty is introduced and consequently how the desired result (e.g., a dimensional measurement feature) is affected.
Therefore, domain specialists require effective methods and techniques which help to understand how data is generated, how reliable the generated data is, and where and why data is uncertain. Furthermore, as the problems to investigate are becoming increasingly complex, finding suitable algorithms that provide the desired solution also tends to be more difficult. Additional questions may arise, e.g., how does a slight parameter change modify the result, how stable is a parameter, in which range is a parameter stable or which parameter set is optimal for a specific problem. Metaphorically speaking, an algorithm for solving a problem may be seen as finding a path through some rugged terrain (the core problem) ranging from the high grounds of theory to the haunted swamps of heuristics. There are many different paths through this terrain with different levels of comfort, length, and stability. Finding all possible paths corresponds in our case to doing an analysis of all possible parameters of a problem solving algorithm, which yields a typically multi-dimensional parameter space. This parameter space allows for an analysis of the quality and stability of a specific parameter set. In many cases of conventional visualization approaches the issues of uncertainty and parameter space analyses are neglected. For a long time, uncertainty - if visualized at all - used to be depicted as blurred data. But in most cases the uncertainty in the base data is not considered at all and just the quantities of interest are calculated. And even to calculate these quantities of interest, too often an empirically found parameter set is used to parameterize the underlying algorithms without exploring its sensitivity to changes and without exploring the whole parameter space to find the global or a local optimum. This tutorial aims to open minds and to look at our data and the parameter sets of our algorithms with a healthy skepticism. In the tutorial we combine uncertainty visualization and parameter space analyses which we believe is essential for the acceptance and applicability of future algorithms and techniques. The tutorial provides six sessions starting with an overview of uncertainty visualization including a historical perspective, uncertainty modeling and statistical visualization. The second part of the tutorial will be dedicated to structural uncertainty, parameter space analysis, industrial applications of uncertainty visualization and an outlook in this domain.", month = jun, journal = "Computers & Graphics", volume = "41", pages = "A1--A2", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Groeller_2014_UPS/", } @article{Viola_Ivan_CLD, title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization", author = "Julius Parulek and Daniel J\"{o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola", year = "2014", abstract = "Molecular visualization is often challenged with rendering of large molecular structures in real time. We introduce a novel approach that enables us to show even large protein complexes. Our method is based on the level-of-detail concept, where we exploit three different abstractions combined in one visualization. Firstly, molecular surface abstraction exploits three different surfaces, solvent-excluded surface (SES), Gaussian kernels and van der Waals spheres, combined as one surface by linear interpolation.
Secondly, we introduce three shading abstraction levels and a method for creating seamless transitions between these representations. The SES representation with full shading and added contours stands in focus, while on the other side a sphere representation of a cluster of atoms with constant shading and without contours provides the context. Thirdly, we propose a hierarchical abstraction based on a set of clusters formed on molecular atoms. All three abstraction models are driven by one importance function classifying the scene into the near-, mid- and far-field. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on a series of molecules of varying atom counts.", month = may, issn = "0167-7055", journal = "Computer Graphics Forum", number = "6", volume = "33", pages = "276--287", keywords = "clustering, implicit surfaces, level of detail algorithms, scientific visualization, Computer Applications", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_CLD/", } @article{Rautek_Peter_2014_VSA, title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger", year = "2014", abstract = "Researchers from many domains use scientific visualization in their daily practice. Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as a software library or source code (low-level interface). In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang. We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "20", pages = "2388--2396", keywords = "Volume visualization framework, Volume visualization, Domain-specific languages", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_VSA/", } @article{vaico, title = "VAICo: Visual Analysis for Image Comparison", author = "Johanna Schmidt and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets.
In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. We demonstrate the flexibility of our approach by applying it to multiple distinct domains.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", note = "Demo: http://www.cg.tuwien.ac.at/~jschmidt/vaico/", pages = "2090--2099", keywords = "focus+context, image-set comparison, Comparative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/vaico/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. 
We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @article{mistelbauer-2013-cfa, title = "Vessel Visualization using Curvicircular Feature Aggregation", author = "Gabriel Mistelbauer and Anca Morar and Andrej Varchola and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Armin Kanitsar and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Radiological investigations are common medical practice for the diagnosis of peripheral vascular diseases. Existing visualization methods such as Curved Planar Reformation (CPR) depict calcifications on vessel walls to determine if blood is still able to flow. While it is possible with conventional CPR methods to examine the whole vessel lumen by rotating around the centerline of a vessel, we propose Curvicircular Feature Aggregation (CFA), which aggregates these rotated images into a single view. By eliminating the need for rotation, vessels can be investigated by inspecting only one image. This method can be used as a guidance and visual analysis tool for treatment planning. We present applications of this technique in the medical domain and give feedback from radiologists.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "231--240", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mistelbauer-2013-cfa/", } @article{karimov-2013-vivisection, title = "ViviSection: Skeleton-based Volume Editing", author = "Alexey Karimov and Gabriel Mistelbauer and Johanna Schmidt and Peter Mindek and Elisabeth Schmidt and Timur Sharipov and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Volume segmentation is important in many applications, particularly in the medical domain. Most segmentation techniques, however, work fully automatically only in very restricted scenarios and cumbersome manual editing of the results is a common task. In this paper, we introduce a novel approach for the editing of segmentation results. Our method exploits structural features of the segmented object to enable intuitive and robust correction and verification. We demonstrate that our new approach can significantly increase the segmentation quality even in difficult cases such as in the presence of severe pathologies.", month = jun, journal = "Computer Graphics Forum", volume = "32", number = "3", pages = "461--470", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/karimov-2013-vivisection/", } @article{mindek-2013-pel, title = "Visual Parameter Exploration in GPU Shader Space", author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Eduard Gr\"{o}ller", year = "2013", abstract = "The wide availability of high-performance GPUs has made the use of shader programs in visualization ubiquitous. Understanding shaders is a challenging task. Frequently it is difficult to mentally reconstruct the nature and types of transformations applied to the underlying data during the visualization process. We propose a method for the visual analysis of GPU shaders, which allows the flexible exploration and investigation of algorithms, parameters, and their effects.
We introduce a method for extracting feature vectors composed of several attributes of the shader, as well as a direct manipulation interface for assigning semantics to them. The user interactively classifies pixels of images which are rendered with the investigated shader. The two resulting classes, a positive class and a negative one, are employed to steer the visualization. Based on this information, we can extract a wide variety of additional attributes and visualize their relation to this classification. Our system allows an interactive exploration of shader space and we demonstrate its utility for several different applications.", journal = "Journal of WSCG", volume = "21", number = "3", issn = "1213-6972", pages = "225--234", keywords = "shader augmentation, parameter space exploration", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/", } @article{Ropinski-2012-UBT, title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering", author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Eduard Gr\"{o}ller", year = "2012", abstract = "In this paper, we describe a novel approach for applying texture mapping to volumetric data sets. In contrast to previous approaches, the presented technique enables a unified integration of 2D and 3D textures and thus allows to emphasize material boundaries as well as volumetric regions within a volumetric data set at the same time. One key contribution of this paper is a parametrization technique for volumetric data sets, which takes into account material boundaries and volumetric regions. Using this technique, the resulting parametrizations of volumetric data sets enable texturing effects which create a higher degree of realism in volume rendered images. We evaluate the quality of the parametrization and demonstrate the usefulness of the proposed concepts by combining volumetric texturing with volumetric lighting models to generate photorealistic volume renderings. Furthermore, we show the applicability in the area of illustrative visualization.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "11", volume = "18", pages = "1942--1955", keywords = "interactive volume rendering, volumetric texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures.
To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @article{Herghelegiu-2012-BPV, title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy", author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Biopsies involve taking samples from living tissue using a biopsy needle. In current clinical practice they are a first mandatory step before any further medical actions are planned. Performing a biopsy on a deep seated brain tumor requires considerable time for establishing and validating the desired biopsy needle pathway to avoid damage. In this paper, we present a system for the visualization, analysis, and validation of biopsy needle pathways. Our system uses a multi-level approach for identifying stable needle placements which minimize the risk of hitting blood vessels. This is one of the major dangers in this type of intervention. Our approach helps in identifying and visualizing the point on the pathway that is closest to a surrounding blood vessel, requiring a closer inspection by the neurosurgeon. An evaluation by medical experts is performed to demonstrate the utility of our system.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "1085--1094", keywords = "biopsy planning, medical visualization, visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/", } @article{haidacher-2011-VAM, title = "Volume Analysis Using Multimodal Surface Similarity", author = "Martin Haidacher and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2011", abstract = "The combination of volume data acquired by multiple modalities has been recognized as an important but challenging task. Modalities often differ in the structures they can delineate and their joint information can be used to extend the classification space. However, they frequently exhibit differing types of artifacts which makes the process of exploiting the additional information non-trivial. In this paper, we present a framework based on an information-theoretic measure of isosurface similarity between different modalities to overcome these problems. The resulting similarity space provides a concise overview of the differences between the two modalities, and also serves as the basis for an improved selection of features. Multimodal classification is expressed in terms of similarities and dissimilarities between the isosurfaces of individual modalities, instead of data value combinations. 
We demonstrate that our approach can be used to robustly extract features in applications such as dual energy computed tomography of parts in industrial manufacturing.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "17", number = "12", pages = "1969--1978", keywords = "surface similarity, volume visualization, multimodal data", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/haidacher-2011-VAM/", } @article{brucker-2010-RES, title = "Result-Driven Exploration of Simulation Parameter Spaces for Visual Effects Design", author = "Stefan Bruckner and Torsten M\"{o}ller", year = "2010", abstract = "Graphics artists commonly employ physically-based simulation for the generation of effects such as smoke, explosions, and similar phenomena. The task of finding the correct parameters for a desired result, however, is difficult and time-consuming as current tools provide little to no guidance. In this paper, we present a new approach for the visual exploration of such parameter spaces. Given a three-dimensional scene description, we utilize sampling and spatio-temporal clustering techniques to generate a concise overview of the achievable variations and their temporal evolution. Our visualization system then allows the user to explore the simulation space in a goal-oriented manner. Animation sequences with a set of desired characteristics can be composed using a novel search-by-example approach and interactive direct volume rendering is employed to provide instant visual feedback. A user study was performed to evaluate the applicability of our system in production use.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "16", number = "6", pages = "1467--1475", keywords = "visual exploration, visual effects, clustering, time-dependent volume data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/brucker-2010-RES/", } @article{bruckner-2010-ISM, title = "Isosurface Similarity Maps", author = "Stefan Bruckner and Torsten M\"{o}ller", year = "2010", abstract = "In this paper, we introduce the concept of isosurface similarity maps for the visualization of volume data. Isosurface similarity maps present structural information of a volume data set by depicting similarities between individual isosurfaces quantified by a robust information-theoretic measure. Unlike conventional histograms, they are not based on the frequency of isovalues and/or derivatives and therefore provide complementary information. We demonstrate that this new representation can be used to guide transfer function design and visualization parameter specification. Furthermore, we use isosurface similarity to develop an automatic parameter-free method for identifying representative isovalues. 
Using real-world data sets, we show that isosurface similarity maps can be a useful addition to conventional classification techniques.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", note = "EuroVis 2010 Best Paper Award", pages = "773--782", keywords = "isosurfaces, volume visualization, mutual information, histograms", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-ISM/", } @article{solteszova-2010-MOS, title = "A Multidirectional Occlusion Shading Model for Direct Volume Rendering", author = "Veronika Solteszova and Daniel Patel and Stefan Bruckner and Ivan Viola", year = "2010", abstract = "In this paper, we present a novel technique which simulates directional light scattering for more realistic interactive visualization of volume data. Our method extends the recent directional occlusion shading model by enabling light source positioning with practically no performance penalty. Light transport is approximated using a tilted cone-shaped function which leaves elliptic footprints in the opacity buffer during slice-based volume rendering. We perform an incremental blurring operation on the opacity buffer for each slice in front-to-back order. This buffer is then used to define the degree of occlusion for the subsequent slice. Our method is capable of generating high-quality soft shadowing effects, allows interactive modification of all illumination and rendering parameters, and requires no pre-computation.", month = jun, journal = "Computer Graphics Forum", volume = "29", number = "3", pages = "883--891", keywords = "global illumination, volume rendering, shadows, optical model", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/solteszova-2010-MOS/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, point-based representations, or others.
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers & Graphics", number = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @article{bruckner-2009-BVQ, title = "BrainGazer - Visual Queries for Neurobiology Research", author = "Stefan Bruckner and Veronika Solteszova and Eduard Gr\"{o}ller and Ji\v{r}\'{i} Hlad\r{u}vka and Katja B\"{u}hler and Jai Yu and Barry Dickson", year = "2009", abstract = "Neurobiology investigates how anatomical and physiological relationships in the nervous system mediate behavior. Molecular genetic techniques, applied to species such as the common fruit fly Drosophila melanogaster, have proven to be an important tool in this research. Large databases of transgenic specimens are being built and need to be analyzed to establish models of neural information processing. In this paper we present an approach for the exploration and analysis of neural circuits based on such a database. We have designed and implemented BrainGazer, a system which integrates visualization techniques for volume data acquired through confocal microscopy as well as annotated anatomical structures with an intuitive approach for accessing the available information. We focus on the ability to visually query the data based on semantic as well as spatial relationships. Additionally, we present visualization techniques for the concurrent depiction of neurobiological volume data and geometric objects which aim to reduce visual clutter. The described system is the result of an ongoing interdisciplinary collaboration between neurobiologists and visualization researchers.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "15", number = "6", pages = "1497--1504", keywords = "biomedical visualization, neurobiology, visual queries, volume visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/", } @article{bruckner-2009-IVV, title = "Instant Volume Visualization using Maximum Intensity Difference Accumulation", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2009", abstract = "It has long been recognized that transfer function setup for Direct Volume Rendering (DVR) is crucial to its usability. However, the task of finding an appropriate transfer function is complex and time-consuming even for experts. Thus, in many practical applications simpler techniques which do not rely on complex transfer functions are employed. One common example is Maximum Intensity Projection (MIP) which depicts the maximum value along each viewing ray. In this paper, we introduce Maximum Intensity Difference Accumulation (MIDA), a new approach which combines the advantages of DVR and MIP. Like MIP, MIDA exploits common data characteristics and hence does not require complex transfer functions to generate good visualization results. It does, however, feature occlusion and shape cues similar to DVR. Furthermore, we show that MIDA – in addition to being a useful technique in its own right – can be used to smoothly transition between DVR and MIP in an intuitive manner. 
MIDA can be easily implemented using volume raycasting and achieves real-time performance on current graphics hardware.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "3", issn = "0167-7055", pages = "775--782", keywords = "illustrative visualization, maximum intensity projection, direct volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-IVV/", } @article{Rautek-2008-IDS, title = "Interaction-Dependent Semantics for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2008", abstract = "In traditional illustration the choice of appropriate styles and rendering techniques is guided by the intention of the artist. For illustrative volume visualizations it is difficult to specify the mapping between the 3D data and the visual representation that preserves the intention of the user. The semantic layers concept establishes this mapping with a linguistic formulation of rules that directly map data features to rendering styles. With semantic layers fuzzy logic is used to evaluate the user defined illustration rules in a preprocessing step. In this paper we introduce interaction-dependent rules that are evaluated for each frame and are therefore computationally more expensive. Enabling interaction-dependent rules, however, allows the use of a new class of semantics, resulting in more expressive interactive illustrations. We show that the evaluation of the fuzzy logic can be done on the graphics hardware enabling the efficient use of interaction-dependent semantics. Further we introduce the flat rendering mode and discuss how different rendering parameters are influenced by the rule base. Our approach provides high quality illustrative volume renderings at interactive frame rates, guided by the specification of illustration rules.", month = may, journal = "Computer Graphics Forum", volume = "27", number = "3", pages = "847--854", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/", } @article{Rautek-2007-SLI, title = "Semantic Layers for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Direct volume rendering techniques map volumetric attributes (e.g., density, gradient magnitude, etc.) to visual styles. Commonly this mapping is specified by a transfer function. The specification of transfer functions is a complex task and requires expert knowledge about the underlying rendering technique. In the case of multiple volumetric attributes and multiple visual styles the specification of the multi-dimensional transfer function becomes more challenging and non-intuitive. We present a novel methodology for the specification of a mapping from several volumetric attributes to multiple illustrative visual styles. We introduce semantic layers that allow a domain expert to specify the mapping in the natural language of the domain. A semantic layer defines the mapping of volumetric attributes to one visual style. Volumetric attributes and visual styles are represented as fuzzy sets. The mapping is specified by rules that are evaluated with fuzzy logic arithmetics. The user specifies the fuzzy sets and the rules without special knowledge about the underlying rendering technique. 
Semantic layers allow for a linguistic specification of the mapping from attributes to visual styles replacing the traditional transfer function specification.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "to be presented at IEEE Visualization 2007", pages = "1336--1343", keywords = "Illustrative Visualization, Volume Visualization, Focus+Context Techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/", } @article{bruckner-2007-EDF, title = "Enhancing Depth-Perception with Flexible Volumetric Halos", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Volumetric data commonly has high depth complexity which makes it difficult to judge spatial relationships accurately. There are many different ways to enhance depth perception, such as shading, contours, and shadows. Artists and illustrators frequently employ halos for this purpose. In this technique, regions surrounding the edges of certain structures are darkened or brightened which makes it easier to judge occlusion. Based on this concept, we present a flexible method for enhancing and highlighting structures of interest using GPU-based direct volume rendering. Our approach uses an interactively defined halo transfer function to classify structures of interest based on data value, direction, and position. A feature-preserving spreading algorithm is applied to distribute seed values to neighboring locations, generating a controllably smooth field of halo intensities. These halo intensities are then mapped to colors and opacities using a halo profile function. Our method can be used to annotate features at interactive frame rates.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", pages = "1344--1351", keywords = "volume rendering, illustrative visualization, halos", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-EDF/", } @article{kohlmann-2007-livesync, title = "LiveSync: Deformed Viewing Spheres for Knowledge-Based Navigation", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2007", abstract = "Although real-time interactive volume rendering is available even for very large data sets, this visualization method is used quite rarely in the clinical practice. We suspect this is because it is very complicated and time consuming to adjust the parameters to achieve meaningful results. The clinician has to take care of the appropriate viewpoint, zooming, transfer function setup, clipping planes and other parameters. Because of this, most often only 2D slices of the data set are examined. Our work introduces LiveSync, a new concept to synchronize 2D slice views and volumetric views of medical data sets. Through intuitive picking actions on the slice, the users define the anatomical structures they are interested in. The 3D volumetric view is updated automatically with the goal that the users are provided with expressive result images. To achieve this live synchronization we use a minimal set of derived information without the need for segmented data sets or data-specific pre-computations. The components we consider are the picked point, slice view zoom, patient orientation, viewpoint history, local object shape and visibility. We introduce deformed viewing spheres which encode the viewpoint quality for the components. 
A combination of these deformed viewing spheres is used to estimate a good viewpoint. Our system provides the physician with synchronized views which help to gain deeper insight into the medical data with minimal user interaction.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "to be presented at IEEE Visualization 2007", pages = "1544--1551", keywords = "linked views, interaction, medical visualization, navigation, viewpoint selection", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/kohlmann-2007-livesync/", } @article{bruckner-2007-STF, title = "Style Transfer Functions for Illustrative Volume Rendering", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Illustrative volume visualization frequently employs non-photorealistic rendering techniques to enhance important features or to suppress unwanted details. However, it is difficult to integrate multiple non-photorealistic rendering approaches into a single framework due to great differences in the individual methods and their parameters. In this paper, we present the concept of style transfer functions. Our approach enables flexible data-driven illumination which goes beyond using the transfer function to just assign colors and opacities. An image-based lighting model uses sphere maps to represent non-photorealistic rendering styles. Style transfer functions allow us to combine a multitude of different shading styles in a single rendering. We extend this concept with a technique for curvature-controlled style contours and an illustrative transparency model. Our implementation of the presented methods allows interactive generation of high-quality volumetric illustrations.", month = sep, journal = "Computer Graphics Forum", volume = "26", number = "3", note = "Eurographics 2007 3rd Best Paper Award", pages = "715--724", keywords = "illustrative visualization, transfer functions, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-STF/", } @article{Kohlmann-2007-EBV, title = "Evaluation of a Bricked Volume Layout for a Medical Workstation based on Java", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2007", abstract = "Volumes acquired for medical examination purposes are constantly increasing in size. For this reason, the computer’s memory is the limiting factor for visualizing the data. Bricking is a well-known concept used for rendering large data sets. The volume data is subdivided into smaller blocks to achieve better memory utilization. Until now, the vast majority of medical workstations use a linear volume layout. We implemented a bricked volume layout for such a workstation based on Java as required by our collaborative company partner to evaluate different common access patterns to the volume data. For rendering, we were mainly interested to see how the performance will differ from the traditional linear volume layout if we generate images of arbitrarily oriented slices via Multi-Planar Reformatting (MPR). Furthermore, we tested access patterns which are crucial for segmentation issues like a random access to data values and a simulated region growing. Our goal was to find out if it makes sense to change the volume layout of a medical workstation to benefit from bricking. We were also interested to identify the tasks where problems might occur if bricking is applied. 
Overall, our results show that it is feasible to use a bricked volume layout in the stringent context of a medical workstation implemented in Java.", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", issn = "1213-6972", pages = "83--90", keywords = "MPR, Bricked Volume Layout, Medical Visualization, Medical Workstation", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Kohlmann-2007-EBV/", } @article{bruckner-2006-ICE, title = "Illustrative Context-Preserving Exploration of Volume Data", author = "Stefan Bruckner and S\"{o}ren Grimm and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2006", abstract = "In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Highly transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration. It provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information.", month = nov, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "6", volume = "12", pages = "1559--1569", keywords = "focus+context techniques, volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-ICE/", } @article{bruckner-2006-EVV, title = "Exploded Views for Volume Data", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2006", abstract = "Exploded views are an illustration technique where an object is partitioned into several segments. These segments are displaced to reveal otherwise hidden detail. In this paper we apply the concept of exploded views to volumetric data in order to solve the general problem of occlusion. In many cases an object of interest is occluded by other structures. While transparency or cutaways can be used to reveal a focus object, these techniques remove parts of the context information. Exploded views, on the other hand, do not suffer from this drawback. Our approach employs a force-based model: the volume is divided into a part configuration controlled by a number of forces and constraints. The focus object exerts an explosion force causing the parts to arrange according to the given constraints. We show that this novel and flexible approach allows for a wide variety of explosion-based visualizations including view-dependent explosions. 
Furthermore, we present a high-quality GPU-based volume ray casting algorithm for exploded views which allows rendering and interaction at several frames per second.", month = sep, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "12", number = "5", issn = "1077-2626", pages = "1077--1084", keywords = "exploded views, illustrative visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-EVV/", } @article{grimm-2004-arefined, title = "A Refined Data Addressing and Processing Scheme to Accelerate Volume Raycasting", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "Most volume rendering systems based on CPU volume raycasting still suffer from inefficient CPU utilization and high memory usage. To target these issues we present a new technique for efficient data addressing. Furthermore, we introduce a new processing scheme for volume raycasting which exploits thread-level parallelism—a technology now supported by commodity computer architectures.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", number = "5", volume = "28", pages = "719--729", keywords = "Volume Raycasting, Bricking, Parallel Co", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-arefined/", } @article{grimm-2004-volume, title = "VOTS: VOlume doTS as a Point-Based Representation of Volumetric Data", author = "S\"{o}ren Grimm and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2004", abstract = "We present Volume dots (Vots), a new primitive for volumetric data modelling, processing, and rendering. Vots are a point-based representation of volumetric data. An individual Vot is specified by the coefficients of a Taylor series expansion, i.e. the function value and higher order derivatives at a specific point. A Vot does not only represent a single sample point, it represents the underlying function within a region. With the Vots representation we have a more intuitive and high-level description of the volume data. This allows direct analytical examination and manipulation of volumetric datasets. Vots enable the representation of the underlying scalar function with specified precision. User-centric importance sampling is also possible, i.e., unimportant volume parts are still present but represented with just very few Vots. As proof of concept, we show Maximum Intensity Projection based on Vots.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", issn = "0167-7055", pages = "661--668", keywords = "Graphics Data Structures and Data Types", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-volume/", }