@inproceedings{sorger_2017_metamorphers, title = "Metamorphers: Storytelling Templates For Illustrative Animated Transitions in Molecular Visualization", author = "Johannes Sorger and Peter Mindek and Peter Rautek and Eduard Gr\"{o}ller and Graham Johnson and Ivan Viola", year = "2017", abstract = "In molecular biology, illustrative animations are used to convey complex biological phenomena to broad audiences. However, such animations have to be manually authored in 3D modeling software, a time consuming task that has to be repeated from scratch for every new data set, and requires a high level of expertise in illustration, animation, and biology. We therefore propose metamorphers: a set of operations for defining animation states as well as the transitions to them in the form of re-usable story telling templates. The re-usability is two-fold. Firstly, due to their modular nature, metamorphers can be re-used in different combinations to create a wide range of animations. Secondly, due to their abstract nature, metamorphers can be re-used to re-create an intended animation for a wide range of compatible data sets. 
Metamorphers thereby mask the low level complexity of explicit animation specifications by exploiting the inherent properties of the molecular data, such as the position, size, and hierarchy level of a semantic data subset.", month = may, location = "Mikulov, Czech Republic", booktitle = "Proceedings of the Spring Conference on Computer Graphics 2017", pages = "27--36", keywords = "animated transitions, storytelling, molecular visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/sorger_2017_metamorphers/", } @misc{klein-2016-WCL, title = "Towards Interactive Visual Exploration of Parallel Programs using a Domain-Specific Language", author = "Tobias Klein and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "The use of GPUs and the massively parallel computing paradigm have become wide-spread. We describe a framework for the interactive visualization and visual analysis of the run-time behavior of massively parallel programs, especially OpenCL kernels. This facilitates understanding a program's function and structure, finding the causes of possible slowdowns, locating program bugs, and interactively exploring and visually comparing different code variants in order to improve performance and correctness. Our approach enables very specific, user-centered analysis, both in terms of the recording of the run-time behavior and the visualization itself. Instead of having to manually write instrumented code to record data, simple code annotations tell the source-to-source compiler which code instrumentation to generate automatically. 
The visualization part of our framework then enables the interactive analysis of kernel run-time behavior in a way that can be very specific to a particular problem or optimization goal, such as analyzing the causes of memory bank conflicts or understanding an entire parallel algorithm.", month = apr, publisher = "ACM", location = "Vienna, Austria", event = "4th International Workshop on OpenCL (IWOCL '16)", note = "Poster presented at the 4th International Workshop on OpenCL (IWOCL '16)", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/klein-2016-WCL/", } @article{Labschuetz_Matthias_2016_JITT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. 
Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = jan, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Published in January 2016", number = "1", volume = "22", event = "IEEE SciVis 2015", location = "Chicago, IL, USA", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_JITT/", } @inproceedings{Waldin_Nicholas_2016_Individualization, title = "Individualization of 2D Color Maps for People with Color Vision Deficiencies", author = "Nicholas Waldin and Matthias Bernhard and Peter Rautek and Ivan Viola", year = "2016", location = "Slomenice, Slovakia", booktitle = "Proceedings of the 32Nd Spring Conference on Computer Graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Individualization/", } @article{Labschuetz_Matthias_2015_JIT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2015", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. 
One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "22", number = "1", note = "Published in January 2016", issn = "1077-2626", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Labschuetz_Matthias_2015_JIT/", } @WorkshopTalk{Vad_Viktor_2015_RVV, title = "Reproducibility, Verification, and Validation of Experiments on the Marschner-Lobb Test Signal", author = "Viktor Vad and Bal\'{a}zs Cs\'{e}bfalvi and Peter Rautek and Eduard Gr\"{o}ller", year = "2015", abstract = "The Marschner-Lobb (ML) test signal has been used for two decades to evaluate the visual quality of different volumetric reconstruction schemes. Previously, the reproduction of these experiments was very simple, as the ML signal was used to evaluate only compact filters applied on the traditional Cartesian lattice. 
As the Cartesian lattice is separable, it is easy to implement these filters as separable tensor-product extensions of well-known 1D filter kernels. Recently, however, non-separable reconstruction filters have received increased attention that are much more difficult to implement than the traditional tensor-product filters. Even if these are piecewise polynomial filters, the space partitions of the polynomial pieces are geometrically rather complicated. Therefore, the reproduction of the ML experiments is getting more and more difficult. Recently, we reproduced a previously published ML experiment for comparing Cartesian Cubic (CC), Body-Centered Cubic (BCC), and Face-Centered Cubic (FCC) lattices in terms of prealiasing. We recognized that the previously applied settings were biased and gave an undue advantage to the FCC-sampled ML representation. This result clearly shows that reproducibility, verification, and validation of the ML experiments is of crucial importance as the ML signal is the most frequently used benchmark for demonstrating the superiority of a reconstruction scheme or volume representations on non-Cartesian lattices.", month = may, event = "EuroVis Workshop on Reproducibility, Verification, and Validation in Visualization (EuroRV3)", location = "Cagliari, Sardinia, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Vad_Viktor_2015_RVV/", } @article{Rautek_Peter_2014_TUC, title = "Towards an Unbiased Comparison of CC, BCC, and FCC Lattices in Terms of Prealiasing", author = "Viktor Vad and Bal\'{a}zs Cs\'{e}bfalvi and Peter Rautek and Eduard Gr\"{o}ller", year = "2014", abstract = "In the literature on optimal regular volume sampling, the Body-Centered Cubic (BCC) lattice has been proven to be optimal for sampling spherically band-limited signals above the Nyquist limit. 
On the other hand, if the sampling frequency is below the Nyquist limit, the Face-Centered Cubic (FCC) lattice was demonstrated to be optimal in reducing the prealiasing effect. In this paper, we confirm that the FCC lattice is indeed optimal in this sense in a certain interval of the sampling frequency. By theoretically estimating the prealiasing error in a realistic range of the sampling frequency, we show that in other frequency intervals, the BCC lattice and even the traditional Cartesian Cubic (CC) lattice are expected to minimize the prealiasing. The BCC lattice is superior over the FCC lattice if the sampling frequency is not significantly below the Nyquist limit. Interestingly, if the original signal is drastically undersampled, the CC lattice is expected to provide the lowest prealiasing error. Additionally, we give a comprehensible clarification that the sampling efficiency of the FCC lattice is lower than that of the BCC lattice. Although this is a well-known fact, the exact percentage has been erroneously reported in the literature. Furthermore, for the sake of an unbiased comparison, we propose to rotate the Marschner-Lobb test signal such that an undue advantage is not given to either lattice.", month = jun, journal = "Computer Graphics Forum", volume = "33", number = "3", pages = "81--90", keywords = "Image representation---Volumetric, Picture/Image Generation---Display algorithms, Categories and Subject Descriptors", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_TUC/", } @article{Rautek_Peter_2014_VSA, title = "ViSlang: A System for Interpreted Domain-Specific Languages for Scientific Visualization", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger", year = "2014", abstract = "Researchers from many domains use scientific visualization in their daily practice. 
Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as software library or source code (low-level interface). In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang. We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "20", pages = "2388--2396", keywords = " Volume visualization framework , Volume visualization, Domain-specific languages", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Rautek_Peter_2014_VSA/", } @article{mindek-2013-pel, title = "Visual Parameter Exploration in GPU Shader Space", author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Eduard Gr\"{o}ller", year = "2013", abstract = "The wide availability of high-performance GPUs has made the use of shader programs in visualization ubiquitous. Understanding shaders is a challenging task. 
Frequently it is difficult to mentally reconstruct the nature and types of transformations applied to the underlying data during the visualization process. We propose a method for the visual analysis of GPU shaders, which allows the flexible exploration and investigation of algorithms, parameters, and their effects. We introduce a method for extracting feature vectors composed of several attributes of the shader, as well as a direct manipulation interface for assigning semantics to them. The user interactively classifies pixels of images which are rendered with the investigated shader. The two resulting classes, a positive class and a negative one, are employed to steer the visualization. Based on this information, we can extract a wide variety of additional attributes and visualize their relation to this classification. Our system allows an interactive exploration of shader space and we demonstrate its utility for several different applications.", journal = "Journal of WSCG", volume = "21", number = "3", issn = "1213-6972", pages = "225--234", keywords = "shader augmentation, parameter space exploration", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/", } @article{Peter_2012_AIV, title = "Semantics by Analogy for Illustrative Volume Visualization", author = "Moritz Gerl and Peter Rautek and Tobias Isenberg and Eduard Gr\"{o}ller", year = "2012", abstract = "We present an interactive graphical approach for the explicit specification of semantics for volume visualization. This explicit and graphical specification of semantics for volumetric features allows us to visually assign meaning to both input and output parameters of the visualization mapping. This is in contrast to the implicit way of specifying semantics using transfer functions. In particular, we demonstrate how to realize a dynamic specification of semantics which allows to flexibly explore a wide range of mappings. Our approach is based on three concepts. 
First, we use semantic shader augmentation to automatically add rule-based rendering functionality to static visualization mappings in a shader program, while preserving the visual abstraction that the initial shader encodes. With this technique we extend recent developments that define a mapping between data attributes and visual attributes with rules, which are evaluated using fuzzy logic. Second, we let users define the semantics by analogy through brushing on renderings of the data attributes of interest. Third, the rules are specified graphically in an interface that provides visual clues for potential modifications. Together, the presented methods offer a high degree of freedom in the specification and exploration of rule-based mappings and avoid the limitations of a linguistic rule formulation.", month = may, journal = "Computers \& Graphics", number = "3", volume = "36", pages = "201--213", keywords = "shader augmentation, semantic visualization mapping, illustrative visualization, Volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Peter_2012_AIV/", } @inproceedings{Soros_AVN_2011, title = "Augmented Visualization with Natural Feature Tracking", author = "G\'{a}bor S\"{o}r\"{o}s and Peter Rautek and Hartmut Seichter and Eduard Gr\"{o}ller", year = "2011", abstract = "Visualization systems often require large monitors or projection screens to display complex information. Even very sophisticated systems that exhibit complex user interfaces do usually not exploit advanced input and output devices. One of the reasons for that is the high cost of special hardware. This paper introduces Augmented Visualization, an interaction method for projection walls as well as monitors using affordable and widely available hardware such as mobile phones or tablets. The main technical challenge is the tracking of the users' devices without any special equipment or fiducial markers in the working area. 
We propose to track natural features of the display content with the built-in camera of mobile devices. Tracking the visualized scene allows pose estimation of the mobile devices with six degrees of freedom. The position and orientation information is then used for advanced interaction metaphors like magic lenses. For a group of experts who are analyzing the data in front of the same screen, a personal augmented view of the visualized scene is presented, for each user on his/her personal device. The prototype Augmented Visualization System achieves interactive frame rates and may lead to a greatly enhanced user experience. The paper discusses the design and implementation questions and illustrates potential application scenarios.", month = dec, isbn = "978-1-4503-1096-3", publisher = "ACM", location = "Beijing, China", event = "10th International Conference on Mobile and Ubiquitous Multimedia (MUM 2011) ", booktitle = "Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia (MUM 2011) ", pages = "4--12", keywords = "human computer interaction, Handheld augmented reality, natural feature tracking, interactive visualization systems", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Soros_AVN_2011/", } @WorkshopTalk{sikachev_peter-2011-dfc, title = "Dynamic Focus + Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2011", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. 
We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = jun, event = "Austrian-Russian Joint Seminar", location = "VRVis, Vienna, Austria", keywords = "focus + context, visualization, volume rendering, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/sikachev_peter-2011-dfc/", } @WorkshopTalk{rautek2010, title = "Continuous Integration", author = "Peter Rautek", year = "2010", month = nov, event = "Software Engineering Seminar", location = "Vienna", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/rautek2010/", } @inproceedings{sikachev-2010-DFC, title = "Dynamic Focus+Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2010", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. 
Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = nov, location = "Siegen, Germany", address = "University of Siegen, Siegen, Germany", booktitle = "Proceedings of Vision, Modeling and Visualization 2010", pages = "331--338", keywords = "focus+context, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, point-based representations, or others. 
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers \& Graphics", number = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @phdthesis{rautek-2009-vmv, title = "Semantic Visualization Mapping for Volume Illustration", author = "Peter Rautek", year = "2009", abstract = "Scientific visualization is the discipline of automatically rendering images from scientific data. Adequate visual abstractions are important to show relevant information in the data. Visual abstractions are a trade-off between showing detailed information and preventing visual overload. To use visual abstractions for the depiction of data, a mapping from data attributes to visual abstractions is needed. This mapping is called the visualization mapping. This thesis reviews the history of visual abstractions and visualization mapping in the context of scientific visualization. Later a novel visual abstraction method called caricaturistic visualization is presented. The concept of exaggeration is the visual abstraction used for caricaturistic visualization. Principles from traditional caricatures are used to accentuate salient details of data while sparsely sketching the context. The visual abstractions described in this thesis are inspired by visual art and mostly by traditional illustration techniques. To make effective use of the recently developed visualization methods, that imitate illustration techniques, an expressive visualization mapping approach is required. In this thesis a visualization mapping method is investigated that makes explicit use of semantics to describe mappings from data attributes to visual abstractions. The semantic visualization mapping explicitly uses domain semantics and visual abstraction semantics to specify visualization rules. 
Illustrative visualization results are shown that are achieved with the semantic visualization mapping. The behavior of the automatically rendered interactive illustrations is specified using interaction-dependent visualization rules. Interactions like the change of the viewpoint, or the manipulation of a slicing plane are state of the art in volume visualization. In this thesis a method for more elaborate interaction techniques is presented. The behavior of the illustrations is specified with interaction-dependent rules that are integrated in the semantic visualization mapping approach.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/rautek-2009-vmv/", } @article{Rautek-2008-IDS, title = "Interaction-Dependent Semantics for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2008", abstract = "In traditional illustration the choice of appropriate styles and rendering techniques is guided by the intention of the artist. For illustrative volume visualizations it is difficult to specify the mapping between the 3D data and the visual representation that preserves the intention of the user. The semantic layers concept establishes this mapping with a linguistic formulation of rules that directly map data features to rendering styles. With semantic layers fuzzy logic is used to evaluate the user defined illustration rules in a preprocessing step. In this paper we introduce interaction-dependent rules that are evaluated for each frame and are therefore computationally more expensive. Enabling interaction-dependent rules, however, allows the use of a new class of semantics, resulting in more expressive interactive illustrations. 
We show that the evaluation of the fuzzy logic can be done on the graphics hardware enabling the efficient use of interaction-dependent semantics. Further we introduce the flat rendering mode and discuss how different rendering parameters are influenced by the rule base. Our approach provides high quality illustrative volume renderings at interactive frame rates, guided by the specification of illustration rules.", month = may, journal = "Computer Graphics Forum", volume = "27", number = "3", pages = "847--854", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/", } @misc{Rautek-2008-VF, title = "Illustrative Visualization -- New Technology or Useless Tautology?", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller and Ivan Viola", year = "2008", abstract = "This article can be accessed online in the ACM SIGGRAPH, Computer Graphics Quarterly, Volume 42, Number 3: http://www.siggraph.org/publications/newsletter/volume-42-number-3/illustrative-visualization-2013-new-technology-or-useless-tautology", note = "online journal, without talk", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-VF/", } @misc{Rautek-2008-kav, title = "Visual Abstractions and Interaction Metaphors for Knowledge Assisted Volume Visualization", author = "Peter Rautek and Ivan Viola", year = "2008", abstract = "Extended abstract published at Knowledge-assisted Visualization Workshop (colocated with the IEEE Visualization 2008 conference): http://kav.cs.wright.edu/", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-kav/", } @xmascard{Rau08, title = "X-Mas 2008", author = "Peter Rautek", year = "2008", abstract = "THE X-MAS MOSAIC The mosaic of a Christmas tree on the front was created using images from our publication database: http://www.cg.tuwien.ac.at/research/publications/ The mosaic on the backside of the card consists of the Christmas cards of the previous 15 years and of this year's motif. 
DAS WEIHNACHTSMOSAIK Das Mosaik auf der Vorderseite zeigt einen Weihnachtsbaum, der aus Bildern unserer Publikationsdatenbank zusammengesetzt ist: http://www.cg.tuwien.ac.at/research/publications/ Das Mosaik auf der R\"{u}ckseite setzt sich aus den Weihnachtskarten der vergangenen 15 Jahre und aus dem diesj\"{a}hrigen Motiv zusammen.", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rau08/", } @article{Rautek-2007-SLI, title = "Semantic Layers for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Direct volume rendering techniques map volumetric attributes (e.g., density, gradient magnitude, etc.) to visual styles. Commonly this mapping is specified by a transfer function. The specification of transfer functions is a complex task and requires expert knowledge about the underlying rendering technique. In the case of multiple volumetric attributes and multiple visual styles the specification of the multi-dimensional transfer function becomes more challenging and non-intuitive. We present a novel methodology for the specification of a mapping from several volumetric attributes to multiple illustrative visual styles. We introduce semantic layers that allow a domain expert to specify the mapping in the natural language of the domain. A semantic layer defines the mapping of volumetric attributes to one visual style. Volumetric attributes and visual styles are represented as fuzzy sets. The mapping is specified by rules that are evaluated with fuzzy logic arithmetics. The user specifies the fuzzy sets and the rules without special knowledge about the underlying rendering technique. 
Semantic layers allow for a linguistic specification of the mapping from attributes to visual styles replacing the traditional transfer function specification.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "to be presented at IEEE Visualization 2007", pages = "1336--1343", keywords = "Illustrative Visualization, Volume Visualization, Focus+Context Techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/", } @misc{Rautek-2007-O3D, title = "Caricaturistic Visualization of Deformation Data Based on High Density Point Clouds", author = "Peter Rautek and Alexander Reiterer and Eduard Gr\"{o}ller", year = "2007", abstract = "Modern deformation monitoring techniques offer possibilities to measure and analyze deformation processes in detail. There are various different sensor systems on the market that can be used in these application fields, each having specific features in terms of accuracy, robustness, user interaction, operational range, measurement speed and frequency, resolution, or other relevant parameters. The selection of an appropriate sensor system for a particular application is not trivial. In recent years research on image-based measurement systems and laser scanners has gained increasing interest – in many cases a combination of different sensors has advantages over a single-sensor system (e.g. different accuracy and reliability classes, different measurement range, etc.). The point clouds produced by such systems potentially consist of a vast number of points. One of the main problems concerning the analysis and interpretation of deformation measurements is the visualization of the data respectively of the underlying deformation. In this paper we present the application of the recently developed caricaturistic visualization method to deformation data based on high density point clouds. 
Caricaturistic visualization depicts the deformation data in an exaggerated way. The exaggeration of the deformation accents subtle deviations and supports the viewer for the correct interpretation of the underlying deformation. We show results for facade deformation data as well as for landslide data. ", booktitle = "8th Conference on Optical 3-D Measurement Techniques, Zurich, Switzerland", note = "Poster presented at (2007-07-09--2007-07-12)", keywords = "Point Cloud, Deformation Measurement, Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-O3D/", } @article{Rautek06Vis, title = "Caricaturistic Visualization", author = "Peter Rautek and Ivan Viola and Eduard Gr{\"o}ller", year = "2006", abstract = "Caricatures are pieces of art depicting persons or sociological conditions in a non-veridical way. In both cases caricatures are referring to a reference model. The deviations from the reference model are the characteristic features of the depicted subject. Good caricatures exaggerate the characteristics of a subject in order to accent them. The concept of caricaturistic visualization is based on the caricature metaphor. The aim of caricaturistic visualization is an illustrative depiction of characteristics of a given dataset by exaggerating deviations from the reference model. We present the general concept of caricaturistic visualization as well as a variety of examples. We investigate different visual representations for the depiction of caricatures. 
Further, we present the caricature matrix, a technique to make differences between datasets easily identifiable.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "12", number = "5", issn = "1077-2626", pages = "1085--1092", keywords = "Focus+Context Techniques, Volume Visualization, Illustrative Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Rautek06Vis/", } @inproceedings{RAUTEK06, title = "D{$^2$}VR: High Quality Volume Rendering of Projection-based Volumetric Data", author = "Peter Rautek and Bal{\'a}zs Cs{\'e}bfalvi and S{\"o}ren Grimm and Stefan Bruckner and Eduard Gr{\"o}ller", year = "2006", abstract = "Volume rendering techniques are conventionally classified as either direct or indirect methods. Indirect methods require to transform the initial volumetric model into an intermediate geometrical model in order to efficiently visualize it. In contrast, direct volume rendering (DVR) methods can directly process the volumetric data. Modern CT scanners usually provide data as a set of samples on a rectilinear grid, which is computed from the measured projections by discrete tomographic reconstruction. Therefore the rectilinear grid can already be considered as an intermediate volume representation. In this paper we introduce direct direct volume rendering (D$^2$VR). D$^2$VR does not require a rectilinear grid, since it is based on an immediate processing of the measured projections. Arbitrary samples for ray casting are reconstructed from the projections by using the Filtered Back-Projection algorithm. Our method removes a lossy resampling step from the classical volume rendering pipeline. It provides much higher accuracy than traditional grid-based resampling techniques do. 
Furthermore we also present a novel high-quality gradient estimation scheme, which is also based on the Filtered Back-Projection algorithm.", month = may, publisher = "IEEE CS", booktitle = "Proceedings of Eurographics / IEEE VGTC Symposium on Visualization", note = "In Proceedings of EuroVis", pages = "211--218", keywords = "Volume Rendering, Filtered Back-Projection, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/RAUTEK06/", } @misc{Rautek06VMLS, title = "Caricaturistic Visualization", author = "Peter Rautek", year = "2006", abstract = "For many applications of medicine and life science, data is gathered or measured to find and to analyze the characteristics of the investigated object. Characteristics of a dataset can be expressed as the deviations from the norm. These deviations traditionally are found and classified using statistical methods. In many cases the statistical models do not appropriately describe the underlying phenomenon. They are therefore unsuitable for the data of interest. In this case visualization can replace the statistical methods. Expressive visualizations guide the user to find characteristics. Further the user is enabled to analyze the deviations of a given dataset. Caricaturistic visualization is an expressive method tailored to depict the deviations in an exaggerated way. It is guided by the idea of caricatures which exaggerate the outstanding features of an object. A method for caricaturistic visualization is presented and its power is shown on different examples. Caricaturistic visualization assumes the existence of a reference model. In many applications an explicit reference model is not available. To overcome this limitation different datasets are compared to each other. This results in the Caricature matrix, a 2D matrix of caricaturistic visualizations. 
", event = "Workshop on Visualization in Medicine and Life Sciences", location = "R{\"u}gen, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Rautek06VMLS/", } @misc{Rautek-2006, title = "Vis T-Shirt 2006", author = "Peter Rautek", year = "2006", abstract = "The T-Shirt that was produced for the IEEE Visualization conference 2006.", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Rautek-2006/", } @mastersthesis{rautek-2005-dvr, title = "D{$^2$}VR High-Quality Volume Rendering of Projection-based Volumetric Data", author = "Peter Rautek", year = "2005", abstract = "Volume rendering techniques are conventionally classified into two categories represented by direct and indirect methods. Indirect methods require to transform the initial volumetric model into an intermediate geometrical model in order to efficiently visualize it. In contrast, direct volume-rendering (DVR) methods can directly process the volumetric data. Modern 3D scanning technologies, like CT or MRI, usually provide data as a set of samples on rectilinear grid points, which are computed from the measured projections by discrete tomographic reconstruction. Therefore the set of these reconstructed samples can already be considered as an intermediate volume representation. In this diploma thesis a new paradigm for direct direct volume rendering (D2VR) is introduced, which does not even require a rectilinear grid, since it is based on an immediate processing of the measured projections. Arbitrary samples for ray casting are reconstructed from the projections by using the Filtered Back-Projection algorithm. The method presented in this thesis removes an unnecessary and lossy resampling step from the classical volume rendering pipeline. Thus, it provides much higher accuracy than traditional grid-based resampling techniques do. Furthermore a novel high-quality gradient estimation scheme, which is also based on the Filtered Back-Projection algorithm is presented. 
Finally we introduce a hierarchical space partitioning approach for projection-based volumetric data, which is used to accelerate D$^2$VR.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/rautek-2005-dvr/", }