@phdthesis{schmidt-phd, title = "Scalable Comparative Visualization", author = "Johanna Schmidt", year = "2016", abstract = "The comparison of two or more objects is getting an increasingly important task in data analysis. Visualization systems successively have to move from representing one phenomenon to allowing users to analyze several datasets at once. Visualization systems can support the users in several ways. Firstly, comparison tasks can be supported in a very intuitive way by allowing users to place objects that should be compared in an appropriate context. Secondly, visualization systems can explicitly compute differences among the datasets and present the results to the user. In comparative visualization, researchers are working on new approaches for computer-supported techniques that provide data comparison functionality. Techniques from this research field can be used to compare two objects with each other, but often reach their limits if a multitude of objects (i.e., 100 or more) have to be compared. Large data collections that contain a lot of individual, but related, datasets with slightly different characteristics can be called ensembles. The individual datasets being part of an ensemble are called the ensemble members. Ensembles have been created in the simulation domain, especially for weather and climate research, for already quite some time. These domains were greatly driving the development of ensemble visualization techniques. Due to the availability of affordable computing resources and the multitude of different analysis algorithms (e.g., for segmentation), other domains nowadays also face similar problems. All together, this shows a great need for ensemble visualization techniques in various domains. Ensembles can either be analyzed in a feature-based or in a location-based way. In the case of a location-based analysis, the ensemble members are compared based on certain spatial data positions of interest. 
For such an analysis, local selection and analysis techniques for ensembles are needed. In the course of this thesis different visual analytics techniques for the comparative visualization of datasets have been researched. A special focus has been set on providing scalable techniques, which makes them also suitable for ensemble datasets. The proposed techniques operate on different dataset types in 2D and 3D. In the first part of the thesis, a visual analytics approach for the analysis of 2D image datasets is introduced. The technique analyzes localized differences in 2D images. The approach not only identifies differences in the data, but also provides a technique to quickly find out what the differences are, and judge upon the underlying data. This way patterns can be found in the data, and outliers can be identified very quickly. As a second part of the thesis, a scalable application for the comparison of several similar 3D mesh datasets is described. Such meshes may be, for example, created by point-cloud reconstruction algorithms, using different parameter settings. Similar to the proposed technique for the comparison of 2D images, this application is also scalable to a large number of individual datasets. The application enables the automatic comparison of the meshes, searches interesting regions in the data, and allows users to also concentrate on local regions of interest. The analysis of the local regions is in this case done in 3D. The application provides the possibility to arrange local regions in a parallel coordinates plot. The regions are represented by the axes in the plot, and the input meshes are depicted as polylines. This way it can be very quickly spotted whether meshes produce good/bad results in a certain local region. In the third and last part of the thesis, a technique for the interactive analysis of local regions in a volume ensemble dataset is introduced. 
Users can pick regions of interest, and these regions can be arranged in a graph according to their similarity. The graph can then be used to detect similar regions with a similar data distribution within the ensemble, and to compare individual ensemble members against the rest of the ensemble. All proposed techniques and applications have been tested with real-world datasets from different domains. The results clearly show the usefulness of the techniques for the comparative analysis of ensembles.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/schmidt-phd/", } @article{Labschuetz_Matthias_2016_JITT, title = "JiTTree: A Just-in-Time Compiled Sparse GPU Volume Data Structure", author = "Matthias Labsch\"{u}tz and Stefan Bruckner and Eduard Gr\"{o}ller and Markus Hadwiger and Peter Rautek", year = "2016", abstract = "Sparse volume data structures enable the efficient representation of large but sparse volumes in GPU memory for computation and visualization. However, the choice of a specific data structure for a given data set depends on several factors, such as the memory budget, the sparsity of the data, and data access patterns. In general, there is no single optimal sparse data structure, but a set of several candidates with individual strengths and drawbacks. One solution to this problem are hybrid data structures which locally adapt themselves to the sparsity. However, they typically suffer from increased traversal overhead which limits their utility in many applications. This paper presents JiTTree, a novel sparse hybrid volume data structure that uses just-in-time compilation to overcome these problems. By combining multiple sparse data structures and reducing traversal overhead we leverage their individual advantages. 
We demonstrate that hybrid data structures adapt well to a large range of data sets. They are especially superior to other sparse data structures for data sets that locally vary in sparsity. Possible optimization criteria are memory, performance and a combination thereof. Through just-in-time (JIT) compilation, JiTTree reduces the traversal overhead of the resulting optimal data structure. As a result, our hybrid volume data structure enables efficient computations on the GPU, while being superior in terms of memory usage when compared to non-hybrid data structures.", month = jan, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", note = "Published in January 2016", number = "1", volume = "22", event = "IEEE SciVis 2015", location = "Chicago, IL, USA", pages = "1025--1034", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Labschuetz_Matthias_2016_JITT/", } @talk{mindek-2015-mctalk, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. 
The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. Our compositing approach supports insets of camera views to account for several important cameras simultaneously. Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", event = "Numerical Geometry Seminar", location = "Comenius University in Bratislava", keywords = "Animation, Storytelling, Game Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mctalk/", } @phdthesis{mindek-thesis, title = "Interactive Integrated Exploration and Management of Visualization Parameters", author = "Peter Mindek", year = "2015", abstract = "Visualization algorithms are parameterized to offer universality in terms of handling various data types, showing different aspects of the visualized data, or producing results useful for domain experts from different fields. Hence, input parameters are an important aspect of the visualization process. Their exploration and management are tasks which enable the visualization reusability, portability, and interdisciplinary communication. With increasing availability of visualization systems, which are suitable for a great variety of tasks, their complexity increases as well. This usually involves many input parameters necessary for the meaningful visualization of data. Multiple input parameters form parameter spaces which are too large to be explored by brute-force. Knowing the properties of a parameter space is often beneficial for improving data visualization. 
Therefore, it is important for domain experts utilizing data visualization to have tools for automatic parameter specification and for aiding the manual parameter setting. In this thesis, we review existing approaches for parameter-space visualization, exploration, and management. These approaches are used with a great variety of underlying algorithms. We focus on their applicability to visualization algorithms. We propose three methods solving specific problems arising from the fact that the output of a visualization algorithm is an image, which is challenging to process automatically and often needs to be analyzed by a human. First, we propose a method for the exploration of parameter-spaces of visualization algorithms. The method is used to understand effects of combinations of parameters and parts of the internal structure of the visualization algorithms on the final image result. The exploration is carried out by specifying semantics for localized parts of the visualization images in the form of positive and negative examples influenced by a set of input parameters or parts of the visualization algorithm itself. After specifying the localized semantics, global effects of the specified components of the visualization algorithm can be observed. The method itself is independent from the underlying algorithm. Subsequently, we present a method for managing image-space selections in visualizations and automatically link them with the context in which they were created. The context is described by the values of the visualization parameters influencing the output image. The method contains a mechanism for linking additional views to the selections, allowing the user an effective management of the visualization parameters whose effects are localized to certain areas of the visualizations. We present various applications for the method, as well as an implementation in the form of a library, which is ready to be used in existing visualization systems. 
Our third method is designed to integrate dynamic parameters stored during a multiplayer video game session by the individual participating players. For each player, the changing parameter values of the game describe their view of the gameplay. Integrating these multiple views into a single continuous visual narrative provides means for effective summarization of gameplays, useful for entertainment, or even gameplay analysis purposes by semi-professional or professional players. We demonstrate the utility of our approach on an existing video game by producing a gameplay summary of a multiplayer game session. The proposed method opens possibilities for further research in the areas of storytelling, or at a more abstract level, parameter integration for visual computing algorithms. ", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-thesis/", } @article{beham-2014-cupid, title = "Cupid: Cluster-based Exploration of Geometry Generators with Parallel Coordinates and Radial Trees", author = "Michael Beham and Wolfgang Herzner and Eduard Gr\"{o}ller and Johannes Kehrer", year = "2014", abstract = "Geometry generators are commonly used in video games and evaluation systems for computer vision to create geometric shapes such as terrains, vegetation or airplanes. The parameters of the generator are often sampled automatically which can lead to many similar or unwanted geometric shapes. In this paper, we propose a novel visual exploration approach that combines the abstract parameter space of the geometry generator with the resulting 3D shapes in a composite visualization. Similar geometric shapes are first grouped using hierarchical clustering and then nested within an illustrative parallel coordinates visualization. 
This helps the user to study the sensitivity of the generator with respect to its parameter space and to identify invalid parameter settings. Starting from a compact overview representation, the user can iteratively drill-down into local shape differences by clicking on the respective clusters. Additionally, a linked radial tree gives an overview of the cluster hierarchy and enables the user to manually split or merge clusters. We evaluate our approach by exploring the parameter space of a cup generator and provide feedback from domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "20", number = "12", issn = "1077-2626", pages = "1693--1702 ", keywords = "3D shape analysis, radial trees, hierarchical clustering, illustrative parallel coordinates, composite visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/beham-2014-cupid/", } @article{mindek-2014-mcs, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. 
We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "132--144", keywords = "annotations, spatial selections, visual analytics, interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-mcs/", } @inproceedings{ymca, title = "YMCA - Your Mesh Comparison Application", author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Polygonal meshes can be created in several different ways. In this paper we focus on the reconstruction of meshes from point clouds, which are sets of points in 3D. Several algorithms that tackle this task already exist, but they have different benefits and drawbacks, which leads to a large number of possible reconstruction results (i.e., meshes). The evaluation of those techniques requires extensive comparisons between different meshes which is up to now done by either placing images of rendered meshes side-by-side, or by encoding differences by heat maps. A major drawback of both approaches is that they do not scale well with the number of meshes. This paper introduces a new comparative visual analysis technique for 3D meshes which enables the simultaneous comparison of several meshes and allows for the interactive exploration of their differences. Our approach gives an overview of the differences of the input meshes in a 2D view. By selecting certain areas of interest, the user can switch to a 3D representation and explore the spatial differences in detail. To inspect local variations, we provide a magic lens tool in 3D. 
The location and size of the lens provide further information on the variations of the reconstructions in the selected area. With our comparative visualization approach, differences between several mesh reconstruction algorithms can be easily localized and inspected.", month = nov, series = "VAST ", publisher = "IEEE Computer Society", note = "http://dx.doi.org/10.1109/VAST.2014.7042491", location = "Paris, France", booktitle = "IEEE Visual Analytics Science and Technology", keywords = "mesh comparison, 3D data exploration, focus+context, comparative visualization, Visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ymca/", } @talk{Kehrer-2014-CSD, title = "Interactive Visual Analysis of Complex Scientific Data", author = "Johannes Kehrer", year = "2014", event = "TU M\"{u}nchen", location = "Munich, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Kehrer-2014-CSD/", } @talk{mindek-2014-vivi_cvut, title = "ViviSection: Skeleton-based Volume Editing", author = "Peter Mindek", year = "2014", event = "Scientific meeting of Department of Computer Graphics and Interaction", location = "Czech Technical University in Prague", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek-2014-vivi_cvut/", } @talk{mindek_peter-2014-cs_kaust, title = "Managing Spatial Selections with Contextual Snapshots", author = "Peter Mindek", year = "2014", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections often depend on specific parameter settings and it can be difficult to reconstruct them without additional information. In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. 
Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data, the analysis of historical documents, and the display of anatomical data.", event = "Scientific meeting of Visual Computing Center", location = "King Abdullah University of Science and Technology, Saudi Arabia", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/mindek_peter-2014-cs_kaust/", } @article{kehrer-2013-SBC, title = "A Model for Structure-based Comparison of Many Categories in Small-Multiple Displays", author = "Johannes Kehrer and Harald Piringer and Wolfgang Berger and Eduard Gr\"{o}ller", year = "2013", abstract = "Many application domains deal with multi-variate data that consists of both categorical and numerical information. Small-multiple displays are a powerful concept for comparing such data by juxtaposition. For comparison by overlay or by explicit encoding of computed differences, however, a specification of references is necessary. In this paper, we present a formal model for defining semantically meaningful comparisons between many categories in a small-multiple display. Based on pivotized data that are hierarchically partitioned by the categories assigned to the x and y axis of the display, we propose two alternatives for structure-based comparison within this hierarchy. With an absolute reference specification, categories are compared to a fixed reference category. With a relative reference specification, in contrast, a semantic ordering of the categories is considered when comparing them either to the previous or subsequent category each. 
Both reference specifications can be defined at multiple levels of the hierarchy (including aggregated summaries), enabling a multitude of useful comparisons. We demonstrate the general applicability of our model in several application examples using different visualizations that compare data by overlay or explicit encoding of differences.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", pages = "2287--2296", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/kehrer-2013-SBC/", } @article{vaico, title = "VAICo: Visual Analysis for Image Comparison", author = "Johanna Schmidt and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Scientists, engineers, and analysts are confronted with ever larger and more complex sets of data, whose analysis poses special challenges. In many situations it is necessary to compare two or more datasets. Hence there is a need for comparative visualization tools to help analyze differences or similarities among datasets. In this paper an approach for comparative visualization for sets of images is presented. Well-established techniques for comparing images frequently place them side-by-side. A major drawback of such approaches is that they do not scale well. Other image comparison methods encode differences in images by abstract parameters like color. In this case information about the underlying image data gets lost. This paper introduces a new method for visualizing differences and similarities in large sets of images which preserves contextual information, but also allows the detailed analysis of subtle variations. Our approach identifies local changes and applies cluster analysis techniques to embed them in a hierarchy. The results of this process are then presented in an interactive web application which allows users to rapidly explore the space of differences and drill-down on particular features. 
We demonstrate the flexibility of our approach by applying it to multiple distinct domains.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "19", number = "12", note = "Demo: http://www.cg.tuwien.ac.at/~jschmidt/vaico/", pages = "2090--2099", keywords = "focus+context, image-set comparison, Comparative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/vaico/", } @WorkshopTalk{oeltze-2013-tut, title = "IEEE VIS Tutorial on Interactive Visual Analysis of Scientific Data", author = "Steffen Oeltze and Helwig Hauser and Johannes Kehrer", year = "2013", abstract = "In a growing number of application areas, a subject or phenomenon is investigated by means of multiple datasets being acquired over time (spatiotemporal), comprising several attributes per data point (multi-variate), stemming from different data sources (multi-modal) or multiple simulation runs (multi-run/ensemble). Interactive visual analysis (IVA) comprises concepts and techniques for a user-guided knowledge discovery in such complex data. Through a tight feedback loop of computation, visualization and user interaction, it provides new insight into the data and serves as a vehicle for hypotheses generation or validation. It is often implemented via a multiple coordinated view framework where each view is equipped with interactive drill-down operations for focusing on data features. Two classes of views are integrated: physical views, such as direct volume rendering, show information in the context of the spatiotemporal observation space while attribute views, such as scatter plots and parallel coordinates, show relationships between multiple data attributes. The user may drill-down the data by selecting interesting regions of the observation space or attribute ranges leading to a consistent highlighting of this selection in all other views (brushing-and-linking). 
Three patterns of explorative/analytical procedures may be accomplished by doing so. In a feature localization, the user searches for places in the 3D/4D observation space where certain attribute values are present. In a multi-variate analysis, relations between data attributes are investigated, e.g., by searching for correlations. In a local investigation, the user inspects the values of selected attributes with respect to certain spatiotemporal subsets of the observation space. In this tutorial, we discuss examples for successful applications of IVA to scientific data from various fields: climate research, medicine, epidemiology, and flow simulation / computation, in particular for automotive engineering. We base our discussions on a theoretical foundation of IVA which helps the tutorial attendees in transferring the subject matter to their own data and application area. In the course of the tutorial, the attendees will become acquainted with techniques from statistics and knowledge discovery, which proved to be particularly useful for a specific IVA application. The tutorial further comprises an overview of off-the-shelf IVA solutions, which may be particularly interesting for visualization practitioners. It is concluded by a summary of the gained knowledge and a discussion of open problems in IVA of scientific data. The tutorial slides will be available at: http://tinyurl.com/SciDataIVA13", month = oct, event = "IEEE VisWeek", location = "Atlanta, Georgia, USA", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/oeltze-2013-tut/", } @article{borgo-2013-gly, title = "Glyph-based Visualization: Foundations, Design Guidelines, Techniques and Applications", author = "Rita Borgo and Johannes Kehrer and David H.S. Chung and Eamonn Maguire and Robert S. 
Laramee and Helwig Hauser and Matthew Ward and Min Chen", year = "2013", abstract = "This state of the art report focuses on glyph-based visualization, a common form of visual design where a data set is depicted by a collection of visual objects referred to as glyphs. Its major strength is that patterns of multivariate data involving more than two attribute dimensions can often be more readily perceived in the context of a spatial relationship, whereas many techniques for spatial data such as direct volume rendering find difficult to depict with multivariate or multi-field data, and many techniques for non-spatial data such as parallel coordinates are less able to convey spatial relationships encoded in the data. This report fills several major gaps in the literature, drawing the link between the fundamental concepts in semiotics and the broad spectrum of glyph-based visualization, reviewing existing design guidelines and implementation techniques, and surveying the use of glyph-based visualization in many applications.", month = may, journal = "Eurographics State of the Art Reports", note = "http://diglib.eg.org/EG/DL/conf/EG2013/stars/039-063.pdf", publisher = "Eurographics Association", series = "EG STARs", pages = "39--63", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/borgo-2013-gly/", } @inproceedings{mindek-2013-csl, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2013", abstract = "Spatial selections are a ubiquitous concept in visualization. By localizing particular features, they can be analyzed and compared in different views. However, the semantics of such selections are often dependent on other parameter settings and it can be difficult to reconstruct them without additional information. 
In this paper, we present the concept of contextual snapshots as an effective means for managing spatial selections in visualized data. The selections are automatically associated with the context in which they have been created. Contextual snapshots can also be used as the basis for interactive integrated and linked views, which enable in-place investigation and comparison of multiple visual representations of data. Our approach is implemented as a flexible toolkit with well-defined interfaces for integration into existing systems. We demonstrate the power and generality of our techniques by applying them to several distinct scenarios such as the visualization of simulation data and the analysis of historical documents.", month = may, series = "SCCG", location = "Smolenice, Slovakia", booktitle = "Proceedings of the 29th Spring Conference on Computer Graphics", keywords = "spatial selections, annotations, linked views, provenance", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-csl/", } @talk{mindek-2013-cs_cvut, title = "Contextual Snapshots: Enriched Visualization with Interactive Spatial Annotations", author = "Peter Mindek", year = "2013", event = "Scientific meeting of Department of Computer Graphics and Interaction", location = "Czech Technical University in Prague", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-cs_cvut/", } @talk{kehrer-2013-IVA, title = "Visual Analysis of Multi-faceted Scientific Data: Challenges and Trends", author = "Johannes Kehrer", year = "2013", event = "Karlsruhe Institute of Technology", location = "Karlsruhe, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/kehrer-2013-IVA/", } @article{mindek-2013-pel, title = "Visual Parameter Exploration in GPU Shader Space", author = "Peter Mindek and Stefan Bruckner and Peter Rautek and Eduard Gr\"{o}ller", year = "2013", abstract = "The wide availability of high-performance GPUs has made the use of shader programs 
in visualization ubiquitous. Understanding shaders is a challenging task. Frequently it is difficult to mentally reconstruct the nature and types of transformations applied to the underlying data during the visualization process. We propose a method for the visual analysis of GPU shaders, which allows the flexible exploration and investigation of algorithms, parameters, and their effects. We introduce a method for extracting feature vectors composed of several attributes of the shader, as well as a direct manipulation interface for assigning semantics to them. The user interactively classifies pixels of images which are rendered with the investigated shader. The two resulting classes, a positive class and a negative one, are employed to steer the visualization. Based on this information, we can extract a wide variety of additional attributes and visualize their relation to this classification. Our system allows an interactive exploration of shader space and we demonstrate its utility for several different applications.", journal = "Journal of WSCG", volume = "21", number = "3", issn = "1213-6972", pages = "225--234", keywords = "shader augmentation, parameter space exploration", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/mindek-2013-pel/", } @inproceedings{Csebfalvi-2012-IOM, title = "Illumination-Driven Opacity Modulation for Expressive Volume Rendering", author = "Bal\'{a}zs Cs\'{e}bfalvi and Bal\'{a}zs T\'{o}th and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Using classical volume visualization, typically a couple of isosurface layers are rendered semi-transparently to show the internal structures contained in the data. However, the opacity transfer function is often difficult to specify such that all the isosurfaces are of high contrast and sufficiently perceivable. In this paper, we propose a volume rendering technique which ensures that the different layers contribute to fairly different regions of the image space. 
Since the overlapping between the affected regions is reduced, an outer translucent isosurface does not decrease significantly the contrast of a partially hidden inner isosurface. Therefore, the layers of the data become visually well separated. Traditional transfer functions assign color and opacity values to the voxels depending on the density and the gradient. In contrast, we assign also different illumination directions to different materials, and modulate the opacities view-dependently based on the surface normals and the directions of the light sources, which are fixed to the viewing angle. We will demonstrate that this model allows an expressive visualization of volumetric data.", month = nov, location = "Magdeburg, Germany", booktitle = "Proceedings of Vision, Modeling \& Visualization 2012", pages = "103--109", keywords = "illustrative visualization, illumination, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Csebfalvi-2012-IOM/", } @article{Ropinski-2012-UBT, title = "Unified Boundary-Aware Texturing for Interactive Volume Rendering", author = "Timo Ropinski and Stefan Diepenbrock and Stefan Bruckner and Klaus Hinrichs and Eduard Gr\"{o}ller", year = "2012", abstract = "In this paper, we describe a novel approach for applying texture mapping to volumetric data sets. In contrast to previous approaches, the presented technique enables a unified integration of 2D and 3D textures and thus allows to emphasize material boundaries as well as volumetric regions within a volumetric data set at the same time. One key contribution of this paper is a parametrization technique for volumetric data sets, which takes into account material boundaries and volumetric regions. Using this technique, the resulting parametrizations of volumetric data sets enable texturing effects which create a higher degree of realism in volume rendered images. 
We evaluate the quality of the parametrization and demonstrate the usefulness of the proposed concepts by combining volumetric texturing with volumetric lighting models to generate photorealistic volume renderings. Furthermore, we show the applicability in the area of illustrative visualization.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", number = "11", volume = "18", pages = "1942--1955", keywords = "interactive volume rendering, volumetric texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Ropinski-2012-UBT/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. 
For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @article{Herghelegiu-2012-BPV, title = "Biopsy Planner - Visual Analysis for Needle Pathway Planning in Deep Seated Brain Tumor Biopsy", author = "Paul Herghelegiu and Vasile Manta and Radu Perin and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2012", abstract = "Biopsies involve taking samples from living tissue using a biopsy needle. In current clinical practice they are a first mandatory step before any further medical actions are planned. Performing a biopsy on a deep seated brain tumor requires considerable time for establishing and validating the desired biopsy needle pathway to avoid damage. In this paper, we present a system for the visualization, analysis, and validation of biopsy needle pathways. Our system uses a multi-level approach for identifying stable needle placements which minimize the risk of hitting blood vessels. This is one of the major dangers in this type of intervention. Our approach helps in identifying and visualizing the point on the pathway that is closest to a surrounding blood vessel, requiring a closer inspection by the neurosurgeon. 
An evaluation by medical experts is performed to demonstrate the utility of our system.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "1085--1094", keywords = "biopsy planning, medical visualization, visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Herghelegiu-2012-BPV/", } @article{Peter_2012_AIV, title = "Semantics by Analogy for Illustrative Volume Visualization", author = "Moritz Gerl and Peter Rautek and Tobias Isenberg and Eduard Gr\"{o}ller", year = "2012", abstract = "We present an interactive graphical approach for the explicit specification of semantics for volume visualization. This explicit and graphical specification of semantics for volumetric features allows us to visually assign meaning to both input and output parameters of the visualization mapping. This is in contrast to the implicit way of specifying semantics using transfer functions. In particular, we demonstrate how to realize a dynamic specification of semantics which allows to flexibly explore a wide range of mappings. Our approach is based on three concepts. First, we use semantic shader augmentation to automatically add rule-based rendering functionality to static visualization mappings in a shader program, while preserving the visual abstraction that the initial shader encodes. With this technique we extend recent developments that define a mapping between data attributes and visual attributes with rules, which are evaluated using fuzzy logic. Second, we let users define the semantics by analogy through brushing on renderings of the data attributes of interest. Third, the rules are specified graphically in an interface that provides visual clues for potential modifications. 
Together, the presented methods offer a high degree of freedom in the specification and exploration of rule-based mappings and avoid the limitations of a linguistic rule formulation.", month = may, journal = "Computers \& Graphics", number = "3", volume = "36", pages = "201--213", keywords = "shader augmentation, semantic visualization mapping, illustrative visualization, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Peter_2012_AIV/", } @article{Groeller_2011_NR, title = "Nodes on Ropes: A Comprehensive Data and Control Flow for Steering Ensemble Simulations", author = "J\"{u}rgen Waser and Hrvoje Ribi\v{c}i\'{c} and Raphael Fuchs and Christian Hirsch and Benjamin Schindler and G\"{u}nter Bl\"{o}schl and Eduard Gr\"{o}ller", year = "2011", abstract = "Flood disasters are the most common natural risk and tremendous efforts are spent to improve their simulation and management. However, simulation-based investigation of actions that can be taken in case of flood emergencies is rarely done. This is in part due to the lack of a comprehensive framework which integrates and facilitates these efforts. In this paper, we tackle several problems which are related to steering a flood simulation. One issue is related to uncertainty. We need to account for uncertain knowledge about the environment, such as levee-breach locations. Furthermore, the steering process has to reveal how these uncertainties in the boundary conditions affect the confidence in the simulation outcome. Another important problem is that the simulation setup is often hidden in a black-box. We expose system internals and show that simulation steering can be comprehensible at the same time. This is important because the domain expert needs to be able to modify the simulation setup in order to include local knowledge and experience. In the proposed solution, users steer parameter studies through the World Lines interface to account for input uncertainties. 
The transport of steering information to the underlying data-flow components is handled by a novel meta-flow. The meta-flow is an extension to a standard data-flow network, comprising additional nodes and ropes to abstract parameter control. The meta-flow has a visual representation to inform the user about which control operations happen. Finally, we present the idea to use the data-flow diagram itself for visualizing steering information and simulation results. We discuss a case-study in collaboration with a domain expert who proposes different actions to protect a virtual city from imminent flooding. The key to choosing the best response strategy is the ability to compare different regions of the parameter space while retaining an understanding of what is happening inside the data-flow system.", month = dec, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "12", volume = "17", pages = "1872--1881", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Groeller_2011_NR/", } @inproceedings{Soros_AVN_2011, title = "Augmented Visualization with Natural Feature Tracking", author = "G\'{a}bor S\"{o}r\"{o}s and Peter Rautek and Hartmut Seichter and Eduard Gr\"{o}ller", year = "2011", abstract = "Visualization systems often require large monitors or projection screens to display complex information. Even very sophisticated systems that exhibit complex user interfaces do usually not exploit advanced input and output devices. One of the reasons for that is the high cost of special hardware. This paper introduces Augmented Visualization, an interaction method for projection walls as well as monitors using affordable and widely available hardware such as mobile phones or tablets. The main technical challenge is the tracking of the users' devices without any special equipment or fiducial markers in the working area. 
We propose to track natural features of the display content with the built-in camera of mobile devices. Tracking the visualized scene allows pose estimation of the mobile devices with six degrees of freedom. The position and orientation information is then used for advanced interaction metaphors like magic lenses. For a group of experts who are analyzing the data in front of the same screen, a personal augmented view of the visualized scene is presented, for each user on his/her personal device. The prototype Augmented Visualization System achieves interactive frame rates and may lead to a greatly enhanced user experience. The paper discusses the design and implementation questions and illustrates potential application scenarios.", month = dec, isbn = "978-1-4503-1096-3", publisher = "ACM", location = "Beijing, China", event = "10th International Conference on Mobile and Ubiquitous Multimedia (MUM 2011)", booktitle = "Proceedings of the 10th International Conference on Mobile and Ubiquitous Multimedia (MUM 2011)", pages = "4--12", keywords = "human computer interaction, handheld augmented reality, natural feature tracking, interactive visualization systems", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Soros_AVN_2011/", } @WorkshopTalk{sikachev_peter-2011-dfc, title = "Dynamic Focus + Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2011", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. 
We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = jun, event = "Austrian-Russian Joint Seminar", location = "VRVis, Vienna, Austria", keywords = "focus + context, visualization, volume rendering, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/sikachev_peter-2011-dfc/", } @WorkshopTalk{sikachev_peter-2011-protovis, title = "ProtoVis", author = "Peter Sikachev", year = "2011", month = may, event = "Software Seminar", location = "Vienna", keywords = "Qt, integration, ProtoVis, information visualization, software engineering, JavaScript, C++, VolumeShop", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/sikachev_peter-2011-protovis/", } @WorkshopTalk{rautek2010, title = "Continuous Integration", author = "Peter Rautek", year = "2010", month = nov, event = "Software Engineering Seminar", location = "Vienna", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/rautek2010/", } @inproceedings{sikachev-2010-DFC, title = "Dynamic Focus+Context for Volume Rendering", author = "Peter Sikachev and Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2010", abstract = "Interactive visualization is widely used in many applications for efficient representation of complex data. Many techniques make use of the focus+context approach in a static manner. These techniques do not fully make use of the interaction semantics. In this paper we present a dynamic focus+context approach that highlights salient features during user interaction. 
We explore rotation, panning, and zooming interaction semantics and propose several methods of changing visual representations, based on a suggested engagement-estimation method. We use DVR-MIP interpolation and a radial opacity-change approach, exploring rotation, panning, and zooming semantics. Our approach adds short animations during user interaction that help to explore the data efficiently and aid the user in the detection of unknown features.", month = nov, location = "Siegen, Germany", address = "University of Siegen, Siegen, Germany", booktitle = "Proceedings of Vision, Modeling and Visualization 2010", pages = "331--338", keywords = "focus+context, volume rendering, view-dependent visualization, level-of-detail techniques, nonphotorealistic techniques, user interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-DFC/", } @WorkshopTalk{sikachev_peter_2010_STLCI, title = "STL Containers Inside", author = "Peter Sikachev", year = "2010", abstract = "STL containers are discussed in general. 
Such topics as performance, safety and memory consumption are discussed on the example of the most popular containers (vector, list, set, map)", month = may, event = "Software Engineering Seminar", location = "TUW", keywords = "software engineering, STL, containers", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev_peter_2010_STLCI/", } @talk{sikachev-2010-ill_vis_vol_ren, title = "Illustrative Visualization and Volume Rendering", author = "Peter Sikachev", year = "2010", event = "-", location = "Moscow, Russia", keywords = "tuwien, volume rendering, vis-group, illustrative visualization, ViMaL", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/sikachev-2010-ill_vis_vol_ren/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, pointbased representations, or others. 
Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers \& Graphics", volume = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", }