@inproceedings{Bruckner_Stefan_2015_VAS, title = "Visual Analysis of Spatio-Temporal Data: Applications in Weather Forecasting", author = "Alexandra Diehl and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "Weather conditions affect multiple aspects of human life such as economy, safety, security, and social activities. For this reason, weather forecast plays a major role in society. Currently weather forecasts are based on Numerical Weather Prediction (NWP) models that generate a representation of the atmospheric flow. Interactive visualization of geo-spatial data has been widely used in order to facilitate the analysis of NWP models. This paper presents a visualization system for the analysis of spatio-temporal patterns in short-term weather forecasts. For this purpose, we provide an interactive visualization interface that guides users from simple visual overviews to more advanced visualization techniques. Our solution presents multiple views that include a timeline with geo-referenced maps, an integrated webmap view, a forecast operation tool, a curve-pattern selector, spatial filters, and a linked meteogram. Two key contributions of this work are the timeline with geo-referenced maps and the curve-pattern selector. The latter provides novel functionality that allows users to specify and search for meaningful patterns in the data. 
The visual interface of our solution allows users to detect both possible weather trends and errors in the weather forecast model.We illustrate the usage of our solution with a series of case studies that were designed and validated in collaboration with domain experts.", month = may, location = "Cagliari, Sardinia, Italy", booktitle = "Computer Graphic Forum", pages = "381--390", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Bruckner_Stefan_2015_VAS/", } @inproceedings{mindek-2015-mc, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek and Ladislav \v{C}mol\'{i}k and Ivan Viola and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. 
Our compositing approach supports insets of camera views to account for several important cameras simultaneously. Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", month = apr, isbn = "978-80-223-3844-8", publisher = "Comenius University, Bratislava", location = "Smolenice, Slovakia", editor = "Joaquim Jorge, Luis Paulo Santos, Roman Durikovic", booktitle = "Proceedings of Spring Conference on Computer Graphics 2015", pages = "93--100", keywords = "storytelling, game visualization, animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/", } @inproceedings{ymca, title = "YMCA - Your Mesh Comparison Application", author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Polygonal meshes can be created in several different ways. In this paper we focus on the reconstruction of meshes from point clouds, which are sets of points in 3D. Several algorithms that tackle this task already exist, but they have different benefits and drawbacks, which leads to a large number of possible reconstruction results (i.e., meshes). The evaluation of those techniques requires extensive comparisons between different meshes which is up to now done by either placing images of rendered meshes side-by-side, or by encoding differences by heat maps. A major drawback of both approaches is that they do not scale well with the number of meshes. This paper introduces a new comparative visual analysis technique for 3D meshes which enables the simultaneous comparison of several meshes and allows for the interactive exploration of their differences. Our approach gives an overview of the differences of the input meshes in a 2D view. 
By selecting certain areas of interest, the user can switch to a 3D representation and explore the spatial differences in detail. To inspect local variations, we provide a magic lens tool in 3D. The location and size of the lens provide further information on the variations of the reconstructions in the selected area. With our comparative visualization approach, differences between several mesh reconstruction algorithms can be easily localized and inspected.", month = nov, series = "VAST ", publisher = "IEEE Computer Society", note = "http://dx.doi.org/10.1109/VAST.2014.7042491", location = "Paris, France", booktitle = "IEEE Visual Analytics Science and Technology", keywords = "mesh comparison, 3D data exploration, focus+context, comparative visualization, Visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ymca/", } @inproceedings{waldner-2014-ghi, title = "Graphical Histories of Information Foraging", author = "Manuela Waldner and Stefan Bruckner and Ivan Viola", year = "2014", abstract = "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users’ desktop work. This representation makes key elements of the users’ tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. 
In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.", month = oct, isbn = "978-1-4503-2542-4", publisher = "ACM", organization = "NordiCHI’14 - Nordic Conference on Human-Computer Interaction", location = "Helsinki, Finland", booktitle = "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational ", pages = "295--304", keywords = "Graph visualization, Interaction history, Provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/", } @inproceedings{Viola_Ivan_VDP, title = "Visibility-Driven Processing of Streaming Volume Data", author = "Veronika Solteszova and {\AA}smund Birkeland and Ivan Viola and Stefan Bruckner", year = "2014", abstract = "In real-time volume data acquisition, such as 4D ultrasound, the raw data is challenging to visualize directly without additional processing. Noise removal and feature detection are common operations, but many methods are too costly to compute over the whole volume when dealing with live streamed data. In this paper, we propose a visibility-driven processing scheme for handling costly on-the-fly processing of volumetric data in real-time. In contrast to the traditional visualization pipeline, our scheme utilizes a fast computation of the potentially visible subset of voxels which significantly reduces the amount of data required to process. As filtering operations modify the data values which may affect their visibility, our method for visibility-mask generation ensures that the set of elements deemed visible does not change after processing. Our approach also exploits the visibility information for the storage of intermediate values when multiple operations are performed in sequence, and can therefore significantly reduce the memory overhead of longer filter pipelines. 
We provide a thorough technical evaluation of the approach and demonstrate it on several typical scenarios where on-the-fly processing is required.", month = sep, isbn = "978-3-905674-62-0", publisher = "Eurographics Association", location = "Vienna, Austria", issn = "2070-5778", event = "4th EG Workshop on Visual Computing and Biology Medicine", editor = "Ivan Viola and Katja Buehler and Timo Ropinski", booktitle = "Proceedings of EG VCBM 2014", pages = "127--136", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_VDP/", } @inproceedings{kolesar-ivan-2014-polymers, title = "Illustrating Polymerization using Three-level Model Fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Research in cell biology is steadily contributing new knowledge about many di?erent aspects of physiological processes like polymerization, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at di?erent time scales. We integrate physical and empirical modeling, according to which approach suits the di?erent involved levels of detail best, and we additionally enable a simple form of interactive steering while the process is illustrated. 
We demonstrate the suitability of our approach in the context of several polymerization processes and report from a ?rst evaluation with domain experts.", month = jul, publisher = "IEEE Digital Library", organization = "4th Symposium on Biological Data Visualization (in Conjunction with the International Conference on Intelligent Systems for Molecular Biology (ISMB 2014)) ", location = "Boston, USA", booktitle = "Proceedings of IEEE BioVis 2014", pages = "1--22", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/kolesar-ivan-2014-polymers/", } @inproceedings{mindek-2013-csl, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections are often dependent on other parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can be also used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with welldefined interfaces for integration into existing systems. 
We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data and the analysis of historical documents.", month = may, series = "SCCG ", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 29th Spring Conference on Computer Graphics", keywords = "spatial selections, annotations, linked views, provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/", } @inproceedings{Csebfalvi-2012-IOM, title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering", author = "Bal\'{a}zs Cs\'{e}bfalvi and Bal\'{a}zs T\'{o}th and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Using classical volume visualization, typically a couple of isosurface layers are rendered semi-transparently to show the internal structures contained in the data. However, the opacity transfer function is often difficult to specify such that all the isosurfaces are of high contrast and sufficiently perceivable. In this paper, we propose a volumerendering technique which ensures that the different layers contribute to fairly different regions of the image space. Since the overlapping between the effected regions is reduced, an outer translucent isosurface does not decrease significantly the contrast of a partially hidden inner isosurface. Therefore, the layers of the data become visually well separated. Traditional transfer functions assign color and opacity values to the voxels depending on the density and the gradient. In contrast, we assign also different illumination directions to different materials, and modulate the opacities view-dependently based on the surface normals and the directions of the light sources, which are fixed to the viewing angle. 
We will demonstrate that this model allows an expressive visualization of volumetric data.", month = nov, location = "Magdeburg, Germany", booktitle = "Proceedings of Vision, Modeling & Visualization 2012", pages = "103--109", keywords = "illustrative visualization, illumination, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/", } @inproceedings{mistelbauer-2012-ssv, title = "Smart Super Views - A Knowledge-Assisted Interface for Medical Visualization", author = "Gabriel Mistelbauer and Hamed Bouzari and R\"{u}diger Schernthaner and Ivan Baclija and Arnold K\"{o}chl and Stefan Bruckner and Milo\v{s} \v{S}r\'{a}mek and Eduard Gr\"{o}ller", year = "2012", abstract = "Due to the ever growing volume of acquired data and information, users have to be constantly aware of the methods for their exploration and for interaction. Of these, not each might be applicable to the data at hand or might reveal the desired result. Owing to this, innovations may be used inappropriately and users may become skeptical. In this paper we propose a knowledge-assisted interface for medical visualization, which reduces the necessary effort to use new visualization methods, by providing only the most relevant ones in a smart way. Consequently, we are able to expand such a system with innovations without the users to worry about when, where, and especially how they may or should use them. 
We present an application of our system in the medical domain and give qualitative feedback from domain experts.", month = oct, publisher = "IEEE Computer Society", location = "Seattle, WA, USA", booktitle = "IEEE Conference on Visual Analytics Science and Technology (IEEE VAST) 2012", pages = "163--172", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/mistelbauer-2012-ssv/", } @inproceedings{Ford-2012-HRV, title = "HeartPad: Real-Time Visual Guidance for Cardiac Ultrasound", author = "Steven Ford and Gabriel Kiss and Ivan Viola and Stefan Bruckner and Hans Torp", year = "2012", abstract = "Medical ultrasound is a challenging modality when it comes to image interpretation. The goal we address in this work is to assist the ultrasound examiner and partially alleviate the burden of interpretation. We propose to address this goal with visualization that provides clear cues on the orientation and the correspondence between anatomy and the data being imaged. Our system analyzes the stream of 3D ultrasound data and in real-time identifies distinct features that are basis for a dynamically deformed mesh model of the heart. The heart mesh is composited with the original ultrasound data to create the data-to-anatomy correspondence. The visualization is broadcasted over the internet allowing, among other opportunities, a direct visualization on the patient on a tablet computer. The examiner interacts with the transducer and with the visualization parameters on the tablet. 
Our system has been characterized by domain specialist as useful in medical training and for navigating occasional ultrasound users.", booktitle = "Proceedings of the Workshop at SIGGRAPH Asia 2012", keywords = "medical visualization, ultrasound", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ford-2012-HRV/", } @inproceedings{patel-2011-PEA, title = "PhD Education Through Apprenticeship", author = "Daniel Patel and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2011", abstract = "We describe and analyze the PhD education in the visualization group at the Vienna University of Technology and set the education in a larger perspective. Four central mechanisms drive the PhD education in Vienna. They are: to require an article-based PhD; to give the student freedom to choose research direction; to let students work in shared offices towards joint deadlines; and to involve students in reviewing articles. This paper describes these mechanisms in detail and illustrates their effect.", month = apr, location = "Llandudno, United Kingdom", editor = "S. Maddock, J. Jorge", booktitle = "Proceedings of Eurographics 2011 - Education Papers", pages = "23--28", keywords = "meister, education, visualization, apprenticeship", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/patel-2011-PEA/", } @inproceedings{sikachev-2010-DFC, title = "Dynamic Focus+Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2010", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. 
We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = nov, location = "Siegen, Germany", address = "University of Siegen, Siegen, Germany", booktitle = "Proceedings of Vision, Modeling and Visualization 2010", pages = "331--338", keywords = "focus+contex, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/", } @inproceedings{patel-2010-SVV, title = "Seismic Volume Visualization for Horizon Extraction", author = "Daniel Patel and Stefan Bruckner and Ivan Viola and Eduard Gr\"{o}ller", year = "2010", abstract = "Seismic horizons indicate change in rock properties and are central in geoscience interpretation. Traditional interpretation systems involve time consuming and repetitive manual volumetric seeding for horizon growing. We present a novel system for rapidly interpreting and visualizing seismic volumetric data. First we extract horizon surface-parts by preprocessing the seismic data. Then during interaction the user can assemble in realtime the horizon parts into horizons. Traditional interpretation systems use gradient-based illumination models in the rendering of the seismic volume and polygon rendering of horizon surfaces. We employ realtime gradientfree forward-scattering in the rendering of seismic volumes yielding results similar to high-quality global illumination. We use an implicit surface representation of horizons allowing for a seamless integration of horizon rendering and volume rendering. 
We present a collection of novel techniques constituting an interpretation and visualization system highly tailored to seismic data interpretation.", month = mar, location = "Taipei, Taiwan", booktitle = "Proceedings of IEEE Pacific Visualization 2010", pages = "73--80", keywords = "volume visualization, horizon extraction, seismic data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/patel-2010-SVV/", } @inproceedings{haidacher_2010_statTF, title = "Volume Visualization based on Statistical Transfer-Function Spaces", author = "Martin Haidacher and Daniel Patel and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2010", abstract = "It is a difficult task to design transfer functions for noisy data. In traditional transfer-function spaces, data values of different materials overlap. In this paper we introduce a novel statistical transfer-function space which in the presence of noise, separates different materials in volume data sets. Our method adaptively estimates statistical properties, i.e. the mean value and the standard deviation, of the data values in the neighborhood of each sample point. These properties are used to define a transfer-function space which enables the distinction of different materials. Additionally, we present a novel approach for interacting with our new transfer-function space which enables the design of transfer functions based on statistical properties. Furthermore, we demonstrate that statistical information can be applied to enhance visual appearance in the rendering process. 
We compare the new method with 1D, 2D, and LH transfer functions to demonstrate its usefulness.", month = mar, booktitle = "Proceedings of the IEEE Pacific Visualization 2010", pages = "17--24", keywords = "transfer function, statistics, shading, noisy data, classification", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/haidacher_2010_statTF/", } @inproceedings{kohlmann-2009-cp, title = "Contextual Picking of Volumetric Structures", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2009", month = may, isbn = "978-1-4244-4404-5", location = "Peking, China", editor = "Peter Eades, Thomas Ertl, Han-Wei Shen", booktitle = "Proceedings of the IEEE Pacific Visualization Symposium 2009", pages = "185--192", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-cp/", } @inproceedings{haidacher-2008-vcbm, title = "Information-based Transfer Functions for Multimodal Visualization", author = "Martin Haidacher and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "Transfer functions are an essential part of volume visualization. In multimodal visualization at least two values exist at every sample point. Additionally, other parameters, such as gradient magnitude, are often retrieved for each sample point. To find a good transfer function for this high number of parameters is challenging because of the complexity of this task. In this paper we present a general information-based approach for transfer function design in multimodal visualization which is independent of the used modality types. Based on information theory, the complex multi-dimensional transfer function space is fused to allow utilization of a well-known 2D transfer function with a single value and gradient magnitude as parameters. Additionally, a quantity is introduced which enables better separation of regions with complementary information. 
The benefit of the new method in contrast to other techniques is a transfer function space which is easy to understand and which provides a better separation of different tissues. The usability of the new approach is shown on examples of different modalities.", month = oct, isbn = "978-3-905674-13-2", publisher = "Eurographics Association", location = "Delft", issn = "2070-5778", editor = "C.P Botha, G. Kindlmann, W.J. Niessen, and B. Preim", booktitle = "VCBM ", pages = "101--108", keywords = "Multimodal Visualization, Transfer Function, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/haidacher-2008-vcbm/", } @inproceedings{bruckner-2008-IVV, title = "Integrating Volume Visualization Techniques Into Medical Applications", author = "Stefan Bruckner and Peter Kohlmann and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "One of the main obstacles in integrating 3D volume visualization in the clinical workflow is the time-consuming process of adjusting parameters such as viewpoint, transfer functions, and clipping planes required to generate a diagnostically relevant image. Current applications therefore make scarce use of volume rendering and instead primarily employ 2D views generated through standard techniques such as multi-planar reconstruction (MPR). However, in many cases 3D renditions can supply additional useful information. This paper discusses ongoing work which aims to improve the integration of 3D visualization into the diagnostic workflow by automatically generating meaningful renditions based on minimal user interaction. 
A method for automatically generating 3D views for structures in 2D slices based on a single picking interaction is presented.", month = may, isbn = "978-1-4244-2002-5", location = "Paris, Frankreich", booktitle = "Proceedings of 5th IEEE International Symposium on Biomedical Imaging: From Nano to Macro", pages = "820--823", keywords = "viewpoint selection, medical visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IVV/", } @inproceedings{kohlmann-2008-lse, title = "LiveSync++: Enhancements of an Interaction Metaphor", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2008", abstract = "The LiveSync interaction metaphor allows an efficient and non-intrusive integration of 2D and 3D visualizations in medical workstations. This is achieved by synchronizing the 2D slice view with the volumetric view. The synchronization is initiated by a simple picking on a structure of interest in the slice view. In this paper we present substantial enhancements of the existing concept to improve its usability. First, an efficient parametrization for the derived parameters is presented, which allows hierarchical refinement of the search space for good views. Second, the extraction of the feature of interest is performed in a way, which is adapting to the volumetric extent of the feature. The properties of the extracted features are utilized to adjust a predefined transfer function in a feature-enhancing manner. Third, a new interaction mode is presented, which allows the integration of more knowledge about the user-intended visualization, without increasing the interaction effort. 
Finally, a new clipping technique is integrated, which guarantees an unoccluded view on the structure of interest while keeping important contextual information.", month = may, location = "Windsor, Ontario, Canada", booktitle = "Proceedings of Graphics Interface 2008", pages = "81--88", keywords = "Viewpoint Selection, Linked Views, Medical Visualization, Smart Interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/kohlmann-2008-lse/", } @inproceedings{ruiz-2008-OVR, title = "Obscurance-based Volume Rendering Framework", author = "Marc Ruiz and Imma Boada and Ivan Viola and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Obscurances, from which ambient occlusion is a particular case, is a technology that produces natural-looking lighting effects in a faster way than global illumination. Its application in volume visualization is of special interest since it permits us to generate a high quality rendering at a low cost. In this paper, we propose an obscurance-based framework that allows us to obtain realistic and illustrative volume visualizations in an interactive manner. Obscurances can include color bleeding effects without additional cost. Moreover, we obtain a saliency map from the gradient of obscurances and we show its application to enhance volume visualization and to select the most salient views.", booktitle = "Proceedings of Volume Graphics 2008", keywords = "volume rendering, illustrative visualization, ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-OVR/", } @inproceedings{ruiz-2008-SEV, title = "Similarity-based Exploded Views", author = "Marc Ruiz and Ivan Viola and Imma Boada and Stefan Bruckner and Miquel Feixas and Mateu Sbert", year = "2008", abstract = "Exploded views are often used in illustration to overcome the problem of occlusion when depicting complex structures. 
In this paper, we propose a volume visualization technique inspired by exploded views that partitions the volume into a number of parallel slabs and shows them apart from each other. The thickness of slabs is driven by the similarity between partitions. We use an information-theoretic technique for the generation of exploded views. First, the algorithm identifies the viewpoint from which the structure is the highest. Then, the partition of the volume into the most informative slabs for exploding is obtained using two complementary similarity-based strategies. The number of slabs and the similarity parameter are freely adjustable by the user.", booktitle = "Proceedings of Smart Graphics 2008", pages = "154--165", keywords = "volume visualization, illustrative visualization, exploded views", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/ruiz-2008-SEV/", }

@inproceedings{RAUTEK06,
  title     = {{D$^2$VR}: High Quality Volume Rendering of Projection-based Volumetric Data},
  author    = {Rautek, Peter and Cs{\'e}bfalvi, Bal{\'a}zs and Grimm, S{\"o}ren and Bruckner, Stefan and Gr{\"o}ller, Eduard},
  year      = {2006},
  month     = may,
  abstract  = {Volume rendering techniques are conventionally classified as either direct or indirect methods. Indirect methods require to transform the initial volumetric model into an intermediate geometrical model in order to efficiently visualize it. In contrast, direct volume rendering (DVR) methods can directly process the volumetric data. Modern CT scanners usually provide data as a set of samples on a rectilinear grid, which is computed from the measured projections by discrete tomographic reconstruction. Therefore the rectilinear grid can already be considered as an intermediate volume representation. In this paper we introduce direct direct volume rendering (D$^2$VR). D$^2$VR does not require a rectilinear grid, since it is based on an immediate processing of the measured projections. Arbitrary samples for ray casting are reconstructed from the projections by using the Filtered Back-Projection algorithm. Our method removes a lossy resampling step from the classical volume rendering pipeline. It provides much higher accuracy than traditional grid-based resampling techniques do. Furthermore we also present a novel high-quality gradient estimation scheme, which is also based on the Filtered Back-Projection algorithm.},
  publisher = {IEEE CS},
  booktitle = {Proceedings of Eurographics / IEEE VGTC Symposium on Visualization},
  note      = {In Proceedings of EuroVis},
  pages     = {211--218},
  keywords  = {Volume Rendering, Filtered Back-Projection, Reconstruction},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2006/RAUTEK06/},
}

@inproceedings{coto-2005-MAC,
  title     = {{MammoExplorer}: An Advanced {CAD} Application for Breast {DCE-MRI}},
  author    = {Coto, Ernesto and Grimm, S{\"o}ren and Bruckner, Stefan and Gr{\"o}ller, Eduard and Kanitsar, Armin and Rodriguez, Omaira},
  year      = {2005},
  month     = nov,
  abstract  = {Currently X-ray mammography is the most widely used method for early detection of breast cancer. However, the use of Dynamic Contrast Enhanced MRI (DCE-MRI) has gained wider attention, since it considerably improves tumor detection and classification by analyzing the flow of contrast agent within the breast tissue. In this paper we present MammoExplorer, a CAD application that combines advanced interaction, segmentation and visualization techniques to explore Breast DCE-MRI data. Our application uses Brushing and Linking, Two-level Volume Rendering, Importance-driven Volume Rendering, and False Color Maps. In addition, we present Enhancement Scatterplots, a novel graphical representation of DCE-MRI data, novel segmentation approaches, and a new way to explore time-varying CE-MRI data.},
  isbn      = {3898380688},
  location  = {Erlangen, Germany},
  editor    = {Greiner, G. and Hornegger, J. and Niemann, H. and Stamminger, M.},
  booktitle = {Proceedings of Vision, Modelling, and Visualization 2005},
  pages     = {91--98},
  keywords  = {CAD, Breast cancer, Contrast Enhanced MRI},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2005/coto-2005-MAC/},
}

@inproceedings{bruckner-2005-VIS,
  title     = {{VolumeShop}: An Interactive System for Direct Volume Illustration},
  author    = {Bruckner, Stefan and Gr{\"o}ller, Eduard},
  year      = {2005},
  month     = oct,
  abstract  = {Illustrations play a major role in the education process. Whether used to teach a surgical or radiologic procedure, to illustrate normal or aberrant anatomy, or to explain the functioning of a technical device, illustration significantly impacts learning. Although many specimens are readily available as volumetric data sets, particularly in medicine, illustrations are commonly produced manually as static images in a time-consuming process. Our goal is to create a fully dynamic three-dimensional illustration environment which directly operates on volume data. Single images have the aesthetic appeal of traditional illustrations, but can be interactively altered and explored. In this paper we present methods to realize such a system which combines artistic visual styles and expressive visualization techniques. We introduce a novel concept for direct multi-object volume visualization which allows control of the appearance of inter-penetrating objects via two-dimensional transfer functions. Furthermore, a unifying approach to efficiently integrate many non-photorealistic rendering models is presented. We discuss several illustrative concepts which can be realized by combining cutaways, ghosting, and selective deformation. Finally, we also propose a simple interface to specify objects of interest through three-dimensional volumetric painting. All presented methods are integrated into VolumeShop, an interactive hardware-accelerated application for direct volume illustration.},
  isbn      = {0780394623},
  location  = {Minneapolis, USA},
  editor    = {Silva, C. T. and Gr{\"o}ller, E. and Rushmeier, H.},
  booktitle = {Proceedings of IEEE Visualization 2005},
  pages     = {671--678},
  keywords  = {focus+context techniques, illustrative visualization, volume rendering},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-VIS/},
}

@inproceedings{bruckner-2005-ICV,
  title     = {Illustrative Context-Preserving Volume Rendering},
  author    = {Bruckner, Stefan and Grimm, S{\"o}ren and Kanitsar, Armin and Gr{\"o}ller, Eduard},
  year      = {2005},
  month     = may,
  abstract  = {In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Very transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration that provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information.},
  booktitle = {Proceedings of EuroVis 2005},
  pages     = {69--76},
  keywords  = {non-photorealistic techniques, focus+context techniques, volume rendering},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ICV/},
}

@inproceedings{GRIMM-2004-FDMX-P,
  title     = {Flexible Direct Multi-Volume Rendering in Interactive Scenes},
  author    = {Grimm, S{\"o}ren and Bruckner, Stefan and Kanitsar, Armin and Gr{\"o}ller, Eduard},
  year      = {2004},
  month     = oct,
  abstract  = {In this paper we describe methods to efficiently visualize multiple intersecting volumetric objects. We introduce the concept of V-Objects. V-Objects represent abstract properties of an object connected to a volumetric data source. We present a method to perform direct volume rendering of a scene comprised of an arbitrary number of possibly intersecting V-Objects. The idea of our approach is to distinguish between regions of intersection, which need costly multi-volume processing, and regions containing only one V-Object, which can be processed using a highly efficient brick-wise volume traversal scheme. Using this method, we achieve significant performance gains for multi-volume rendering. We show possible medical applications, such as surgical planning, diagnosis, and education.},
  location  = {Stanford, USA},
  booktitle = {Vision, Modeling, and Visualization (VMV)},
  pages     = {379--386},
  keywords  = {multi volume rendering, medical visualization, volume raycasting},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2004/GRIMM-2004-FDMX-P/},
}

@inproceedings{grimm-2004-memory,
  title     = {Memory Efficient Acceleration Structures and Techniques for {CPU}-based Volume Raycasting of Large Data},
  author    = {Grimm, S{\"o}ren and Bruckner, Stefan and Kanitsar, Armin and Gr{\"o}ller, Eduard},
  year      = {2004},
  month     = oct,
  abstract  = {Most CPU-based volume raycasting approaches achieve high performance by advanced memory layouts, space subdivision, and excessive pre-computing. Such approaches typically need an enormous amount of memory. They are limited to sizes which do not satisfy the medical data used in daily clinical routine. We present a new volume raycasting approach based on image-ordered raycasting with object-ordered processing, which is able to perform high-quality rendering of very large medical data in real-time on commodity computers. For large medical data such as computed tomographic (CT) angiography run-offs (512x512x1202) we achieve rendering times up to 2.5 fps on a commodity notebook. We achieve this by introducing a memory efficient acceleration technique for on-the-fly gradient estimation and a memory efficient hybrid removal and skipping technique of transparent regions. We employ quantized binary histograms, granular resolution octrees, and a cell invisibility cache. These acceleration structures require just a small extra storage of approximately 10\%.},
  isbn      = {0-7803-8781-3},
  editor    = {Silver, D. and Ertl, T. and Silva, C.},
  booktitle = {Proceedings IEEE/SIGGRAPH Symposium on Volume Visualization and Graphics},
  pages     = {1--8},
  keywords  = {Three-Dimensional Graphics and Realism},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2004/grimm-2004-memory/},
}

@inproceedings{Bruckner-2003-The,
  title     = {The Inverse Warp: Non-Invasive Integration of Shear-Warp Volume Rendering into Polygon Rendering Pipelines},
  author    = {Bruckner, Stefan and Schmalstieg, Dieter and Hauser, Helwig and Gr{\"o}ller, Eduard},
  year      = {2003},
  month     = nov,
  abstract  = {In this paper, a simple and efficient solution for combining shear-warp volume rendering and the hardware graphics pipeline is presented. The approach applies an inverse warp transformation to the Z-Buffer, containing the rendered geometry. This information is used for combining geometry and volume data during compositing. We present applications of this concept which include hybrid volume rendering, i.e., concurrent rendering of polygonal objects and volume data, and volume clipping on convex clipping regions. Furthermore, it can be used to efficiently define regions with different rendering modes and transfer functions for focus+context volume rendering. Empirical results show that the approach has very low impact on performance.},
  isbn      = {3898380483},
  publisher = {infix},
  editor    = {Ertl, T. and Girod, B. and Greiner, G. and Niemann, H. and Seidel, H.-P. and Steinbach, E. and Westermann, R.},
  booktitle = {Workshop on Vision, Modeling and Visualization},
  pages     = {529--536},
  keywords  = {focus+context techniques, clipping, hybrid volume rendering},
  url       = {https://www.cg.tuwien.ac.at/research/publications/2003/Bruckner-2003-The/},
}