@inproceedings{sorger_2017_metamorphers, title = "Metamorphers: Storytelling Templates for Illustrative Animated Transitions in Molecular Visualization", author = "Johannes Sorger and Peter Mindek and Peter Rautek and Eduard Gr\"{o}ller and Graham Johnson and Ivan Viola", year = "2017", abstract = "In molecular biology, illustrative animations are used to convey complex biological phenomena to broad audiences. However, such animations have to be manually authored in 3D modeling software, a time-consuming task that has to be repeated from scratch for every new data set and requires a high level of expertise in illustration, animation, and biology. We therefore propose metamorphers: a set of operations for defining animation states as well as the transitions to them in the form of re-usable storytelling templates. The re-usability is two-fold. Firstly, due to their modular nature, metamorphers can be re-used in different combinations to create a wide range of animations. Secondly, due to their abstract nature, metamorphers can be re-used to re-create an intended animation for a wide range of compatible data sets. Metamorphers thereby mask the low-level complexity of explicit animation specifications by exploiting the inherent properties of the molecular data, such as the position, size, and hierarchy level of a semantic data subset.", month = may, location = "Mikulov, Czech Republic", booktitle = "Proceedings of the Spring Conference on Computer Graphics 2017", pages = "27--36", keywords = "animated transitions, storytelling, molecular visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/sorger_2017_metamorphers/", } @article{mindek-2017-marion, title = "Visualization Multi-Pipeline for Communicating Biology", author = "Peter Mindek and David Kou\v{r}il and Johannes Sorger and David Toloudis and Blair Lyons and Graham Johnson and Eduard Gr\"{o}ller and Ivan Viola", year = "2017", abstract = "We propose a system to facilitate biology communication by developing a pipeline to support the instructional visualization of heterogeneous biological data on heterogeneous user devices. Discoveries and concepts in biology are typically summarized with illustrations assembled manually from the interpretation and application of heterogeneous data. The creation of such illustrations is time-consuming, which makes it incompatible with frequent updates to the measured data as new discoveries are made. Illustrations are typically non-interactive, and when an illustration is updated, it still has to reach the user. Our system is designed to overcome these three obstacles. It supports the integration of heterogeneous datasets, reflecting the knowledge that is gained from different data sources in biology. After pre-processing the datasets, the system transforms them into visual representations inspired by scientific illustrations. As opposed to traditional scientific illustration, these representations are generated in real time; they are interactive. The code generating the visualizations can be embedded in various software environments. To demonstrate this, we implemented both a desktop application and a remote-rendering server in which the pipeline is embedded. The remote-rendering server supports multi-threaded rendering and is able to handle multiple users simultaneously. This scalability to different hardware environments, including multi-GPU setups, makes our system useful for the efficient public dissemination of biological discoveries.",
", journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "24", number = "1", keywords = "Biological visualization, remote rendering, public dissemination", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-marion/", } @phdthesis{LeMuzic_2016_PhD, title = "From Atoms to Cells: Interactive and Illustrative Visualization of Digitally Reproduced Lifeforms", author = "Mathieu Le Muzic", year = "2016", abstract = "Macromolecules, such as proteins, are the building blocks of the machinery of life, and therefore are essential to the comprehension of physiological processes. In physiology, illustrations and animations are often utilized as a mean of communication because they can easily be understood with little background knowledge. However, their realization requires numerous months of manual work, which is both expensive and time consuming. Computational biology experts produce everyday large amount of data that is publicly available and that contains valuable information about the structure and also the function of these macromolecules. Instead of relying on manual work to generate illustrative visualizations of the cell biology, we envision a solution that would utilize all the data already available in order to streamline the creation process. In this thesis are presented several contributions that aim at enabling our vision. First, a novel GPU-based rendering pipeline that allows interactive visualization of realistic molecular datasets comprising up to hundreds of millions of macromolecules. The rendering pipeline is embedded into a popular game engine and well known computer graphics optimizations were adapted to support this type of data, such as level-of-detail, instancing and occlusion queries. Secondly, a new method for authoring cutaway views and improving spatial exploration of crowded molecular landscapes. The system relies on the use of clipping objects that are manually placed in the scene and on visibility equalizers that allows fine tuning of the visibility of each species present in the scene. Agent-based modeling produces trajectory data that can also be combined with structural information in order to animate these landscapes. The snapshots of the trajectories are often played in fast-forward to shorten the length of the visualized sequences, which also renders potentially interesting events occurring at a higher temporal resolution invisible. The third contribution is a solution to visualize time-lapse of agent-based simulations that also reveals hidden information that is only observable at higher temporal resolutions. And finally, a new type of particle-system that utilize quantitative models as input and generate missing spatial information to enable the visualization of molecular trajectories and interactions. 
The particle system produces a visual output similar to that of traditional agent-based modeling tools at a much lower computational footprint and allows interactive changing of the simulation parameters, which was not achievable with previous methods.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/LeMuzic_2016_PhD/", } @inproceedings{sorger-2016-fowardabstraction, title = "Illustrative Transitions in Molecular Visualization via Forward and Inverse Abstraction Transform", author = "Johannes Sorger and Peter Mindek and Tobias Klein and Graham Johnson and Ivan Viola", year = "2016", abstract = "A challenging problem in biology is the incompleteness of acquired information when visualizing biological phenomena. Structural biology generates detailed models of viruses or bacteria at different development stages, while the processes that relate one stage to another are often not clear. Similarly, the entire life cycle of a biological entity might be available as a quantitative model, while only one structural model is available. If the relation between two models is specified at a lower level of detail than the actual models themselves, the two models cannot be interpolated correctly. We propose a method that deals with the visualization of incomplete data information in the developmental or evolutionary states of biological mesoscale models, such as viruses or microorganisms. The central tool in our approach is visual abstraction. Instead of directly interpolating between two models that show different states of an organism, we gradually forward transform the models into a level of visual abstraction that matches the level of detail of the modeled relation between them. At this level, the models can be interpolated without conveying false information. After the interpolation to the new state, we apply the inverse transformation to the model's original level of abstraction. To show the flexibility of our approach, we demonstrate our method on the basis of molecular data, in particular data of the HIV virion and the mycoplasma bacterium.", month = sep, organization = "Eurographics", location = "Bergen", editor = "S. Bruckner, B. Preim, and A. Vilanova", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM)", pages = "21--30", keywords = "I.3.3 [Computer Graphics]: Picture/Image Generation-Display algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/sorger-2016-fowardabstraction/", } @article{Reichinger_2016, title = "Gesture-Based Interactive Audio Guide on Tactile Reliefs", author = "Andreas Reichinger and Stefan Maierhofer and Anton Fuhrmann and Werner Purgathofer", year = "2016", abstract = "For blind and visually impaired people, tactile reliefs offer many benefits over the more classic raised line drawings or tactile diagrams, as depth, 3D shape and surface textures are directly perceivable. However, without proper guidance some reliefs are still difficult to explore autonomously. In this work, we present a gesture-controlled interactive audio guide (IAG) based on recent low-cost depth cameras that operates directly on relief surfaces.
The interactively explorable, location-dependent verbal descriptions promise rapid tactile accessibility to 2.5D spatial information in a home or education setting, to on-line resources, or as a kiosk installation in public places. We present a working prototype, discuss design decisions and present the results of two evaluation sessions with a total of 20 visually impaired test users.", month = oct, journal = "Proceedings of the 18th International ACM SIGACCESS Conference on Computers & Accessibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger_2016/", } @inproceedings{Reisacher2016, title = "CellPathway: A Simulation Tool for Illustrative Visualization of Biochemical Networks", author = "Matthias Reisacher and Mathieu Le Muzic and Ivan Viola", year = "2016", abstract = "The molecular knowledge about complex biochemical reaction networks in biotechnology is crucial and has received a lot of attention lately. As a consequence, multiple visualization programs have already been developed to illustrate the anatomy of a cell. However, since a real cell performs millions of reactions every second to sustain life, it is necessary to move from anatomical to physiological illustrations to communicate knowledge about the behavior of a cell more accurately. In this work I propose a reaction system including a collision detection algorithm, which is able to work at the level of single atoms, to enable precise simulation of molecular interactions. To visually explain molecular activities during the simulation process, a real-time glow effect in combination with a clipping object has been implemented. Since intracellular processes are performed with a set of chemical transformations, a hierarchical structure is used to illustrate the impact of one reaction on the entire simulation. The CellPathway system integrates acceleration techniques to render large datasets containing millions of atoms in real-time, while the reaction system is processed directly on the GPU to enable simulation with more than 1000 molecules. Furthermore, a graphical user interface has been implemented to allow the user to control parameters during simulation interactively.", location = "Pilsen, Czech Republic", booktitle = "Proceedings of WSCG", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reisacher2016/", } @article{Waldin_Nicholas_2016_Colormaps, title = "Personalized 2D color maps", author = "Nicholas Waldin and Matthias Bernhard and Ivan Viola", year = "2016", abstract = "2D color maps are often used to visually encode complex data characteristics such as heat or height. The comprehension of color maps in visualization is affected by the display (e.g., a monitor) and the perceptual abilities of the viewer. In this paper we present a novel method to measure a user's ability to distinguish colors of a two-dimensional color map on a given monitor. We show how to adapt the color map to the user and display to optimally compensate for the measured deficiencies. Furthermore, we improve user acceptance of the calibration procedure by transforming the calibration into a game. The user has to sort colors along a line in a 3D color space in a competitive fashion.
The errors the user makes in sorting these lines are used to adapt the color map to his perceptual capabilities.", issn = "0097-8493", journal = "Computers & Graphics", volume = "59", pages = "143--150", keywords = "Color, Perception, Color vision deficiency", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Colormaps/", } @article{lemuzic-mindek-2016-viseq, title = "Visibility Equalizer: Cutaway Visualization of Mesoscopic Biological Models", author = "Mathieu Le Muzic and Peter Mindek and Johannes Sorger and Ludovic Autin and David Goodsell and Ivan Viola", year = "2016", abstract = "In scientific illustrations and visualization, cutaway views are often employed as an effective technique for occlusion management in densely packed scenes. We propose a novel method for authoring cutaway illustrations of mesoscopic biological models. In contrast to the existing cutaway algorithms, we take advantage of the specific nature of the biological models. These models consist of thousands of instances with a comparably smaller number of different types. Our method constitutes a two-stage process. In the first step, clipping objects are placed in the scene, creating a cutaway visualization of the model. During this process, a hierarchical list of stacked bars informs the user about the instance visibility distribution of each individual molecular type in the scene. In the second step, the visibility of each molecular type is fine-tuned through these bars, which at this point act as interactive visibility equalizers. An evaluation of our technique with domain experts confirmed that our equalizer-based approach for visibility specification is valuable and effective for both scientific and educational purposes.", journal = "Computer Graphics Forum", volume = "35", number = "3", keywords = "molecular visualization, visibility, occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/lemuzic-mindek-2016-viseq/", } @inproceedings{Reichinger_Fuhrmann_2016, title = "A Concept for Re-Useable Interactive Tactile Reliefs", author = "Andreas Reichinger and Anton Fuhrmann and Stefan Maierhofer and Werner Purgathofer", year = "2016", abstract = "We introduce a concept for a relief-printer, a novel production method for tactile reliefs that allows the reproduction of bas-reliefs with several centimeters of height difference. In contrast to available methods, this printer will have a much shorter preparation time, and will neither consume material nor produce waste, since it is based on a re-usable medium, suitable for temporary printouts. Second, we sketch a concept for the autonomous, interactive exploration of tactile reliefs, in the form of a gesture-controlled audio guide, based on recent depth cameras.
In particular, the combination of both approaches promises rapid tactile accessibility to 2.5D spatial information in a home or education setting, to on-line resources, or as a kiosk installation in museums.", booktitle = "ICCHP 2016, Part II", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Reichinger_Fuhrmann_2016/", } @habilthesis{viola-evr, title = "Effective Visual Representations", author = "Ivan Viola", year = "2016", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/viola-evr/", } @inproceedings{cellVIEW_2015, title = "cellVIEW: a Tool for Illustrative and Multi-Scale Rendering of Large Biomolecular Datasets", author = "Mathieu Le Muzic and Ludovic Autin and Julius Parulek and Ivan Viola", year = "2015", abstract = "In this article we introduce cellVIEW, a new system to interactively visualize large biomolecular datasets on the atomic level. Our tool is unique and has been specifically designed to match the ambitions of our domain experts to model and interactively visualize structures comprised of several billion atoms. The cellVIEW system integrates acceleration techniques to allow for real-time graphics performance at a 60 Hz display rate on datasets representing large viruses and bacterial organisms. Inspired by the work of scientific illustrators, we propose a level-of-detail scheme whose purpose is two-fold: accelerating the rendering and reducing visual clutter. The main part of our datasets is made out of macromolecules, but it also comprises nucleic acid strands which are stored as sets of control points. For that specific case, we extend our rendering method to support the dynamic generation of DNA strands directly on the GPU. It is noteworthy that our tool has been directly implemented inside a game engine. We chose to rely on a third-party engine to reduce the software development workload and to make bleeding-edge graphics techniques more accessible to the end-users. To our knowledge, cellVIEW is the only suitable solution for interactive visualization of large biomolecular landscapes on the atomic level and is freely available to use and extend.", month = sep, isbn = "978-3-905674-82-8", publisher = "The Eurographics Association", organization = "EG Digital Library", location = "Chester, United Kingdom", issn = "2070-5786", editor = "Katja B\"{u}hler and Lars Linsen and Nigel W. John", booktitle = "Eurographics Workshop on Visual Computing for Biology and Medicine", pages = "61--70", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/cellVIEW_2015/", } @inproceedings{lemuzic_2015_timelapse, title = "Illustrative Timelapse: A Technique for Illustrative Visualization of Particle Simulations on the Mesoscale Level", author = "Mathieu Le Muzic and Manuela Waldner and Julius Parulek and Ivan Viola", year = "2015", abstract = "Animated movies are a popular way to communicate complex phenomena in cell biology to a broad audience. Animation artists apply sophisticated illustration techniques to communicate a story, while trying to maintain a realistic representation of a complex dynamic environment. Since such hand-crafted animations are time-consuming and cost-intensive to create, our goal is to formalize illustration techniques used by artists to facilitate the automatic creation of visualizations generated from mesoscale particle-based molecular simulations.
Our technique Illustrative Timelapse supports visual exploration of complex biochemical processes in dynamic environments by (1) seamless temporal zooming to observe phenomena in different temporal resolutions, (2) visual abstraction of molecular trajectories to ensure that observers are able to visually follow the main actors, (3) increased visual focus on events of interest, and (4) lens effects to preserve a realistic representation of the environment in the context. Results from a first user study indicate that visual abstraction of trajectories improves the ability to follow a story and is also appreciated by users. Lens effects increased the perceived amount of molecular motion in the environment while trading off traceability of individual molecules.", month = apr, publisher = "IEEE", organization = "8th IEEE Pacific Visualization Symposium (PacificVis 2015)", location = "Zijingang Campus, Zhejiang University, Hangzhou, China", booktitle = "Visualization Symposium (PacificVis), 2015 IEEE Pacific", pages = "247--254", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/", } @inproceedings{mindek-2015-mc, title = "Automatized Summarization of Multiplayer Games", author = "Peter Mindek and Ladislav \v{C}mol\'{i}k and Ivan Viola and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2015", abstract = "We present a novel method for creating automatized gameplay dramatization of multiplayer video games. The dramatization serves as a visual form of guidance through dynamic 3D scenes with multiple foci, typical for such games. Our goal is to convey interesting aspects of the gameplay by animated sequences creating a summary of events which occurred during the game. Our technique is based on processing many cameras, which we refer to as a flock of cameras, and events captured during the gameplay, which we organize into a so-called event graph. Each camera has a lifespan with a certain time interval and its parameters such as position or look-up vector are changing over time. Additionally, during its lifespan each camera is assigned an importance function, which is dependent on the significance of the structures that are being captured by the camera. The images captured by the cameras are composed into a single continuous video using a set of operators based on cinematographic effects. The sequence of operators is selected by traversing the event graph and looking for specific patterns corresponding to the respective operators. In this way, a large number of cameras can be processed to generate an informative visual story presenting the gameplay. Our compositing approach supports insets of camera views to account for several important cameras simultaneously. 
Additionally, we create seamless transitions between individual selected camera views in order to preserve temporal continuity, which helps the user to follow the virtual story of the gameplay.", month = apr, isbn = "978-80-223-3844-8", publisher = "Comenius University, Bratislava", location = "Smolenice, Slovakia", editor = "Joaquim Jorge, Luis Paulo Santos, Roman Durikovic", booktitle = "Proceedings of Spring Conference on Computer Graphics 2015", pages = "93--100", keywords = "storytelling, game visualization, animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/mindek-2015-mc/", } @article{Viola_Ivan_IIP, title = "Interactively illustrating polymerization using three-level model fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Background: Research in cell biology is steadily contributing new knowledge about many aspects of physiological processes, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. Results: In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales. We integrate physical and empirical modeling, according to which approach best suits the different involved levels of detail, and we additionally enable a form of interactive steering, while the process is illustrated. We demonstrate the suitability of our approach in the context of several polymerization processes and report on a first evaluation with domain experts. Conclusion: We conclude that our approach provides a new, hybrid modeling approach for illustrating the process of emergence in physiology, embedded in a densely filled environment. Our approach of a complementary fusion of three systems combines the strong points from the different modeling approaches and is capable of bridging different spatial and temporal scales.", month = oct, issn = "1471-2105", journal = "BMC Bioinformatics", number = "345", volume = "15", pages = "1--16", keywords = "Multi-agent modeling, L-system modeling, Biochemical visualization, Visualization of physiology, Polymerization", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_IIP/", } @inproceedings{kolesar-ivan-2014-polymers, title = "Illustrating Polymerization using Three-level Model Fusion", author = "Ivan Koles\'{a}r and Julius Parulek and Ivan Viola and Stefan Bruckner and Anne-Kristin Stavrum and Helwig Hauser", year = "2014", abstract = "Research in cell biology is steadily contributing new knowledge about many different aspects of physiological processes like polymerization, both with respect to the involved molecular structures as well as their related function. Illustrations of the spatio-temporal development of such processes are not only used in biomedical education, but also can serve scientists as an additional platform for in-silico experiments. In this paper, we contribute a new, three-level modeling approach to illustrate physiological processes from the class of polymerization at different time scales.
We integrate physical and empirical modeling, according to which approach suits the different involved levels of detail best, and we additionally enable a simple form of interactive steering while the process is illustrated. We demonstrate the suitability of our approach in the context of several polymerization processes and report on a first evaluation with domain experts.", month = jul, publisher = "IEEE Digital Library", organization = "4th Symposium on Biological Data Visualization (in Conjunction with the International Conference on Intelligent Systems for Molecular Biology (ISMB 2014))", location = "Boston, USA", booktitle = "Proceedings of IEEE BioVis 2014", pages = "1--22", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/kolesar-ivan-2014-polymers/", } @article{lemuzic-2014-ivm, title = "Illustrative Visualization of Molecular Reactions using Omniscient Intelligence and Passive Agents", author = "Mathieu Le Muzic and Julius Parulek and Anne-Kristin Stavrum and Ivan Viola", year = "2014", abstract = "In this paper we propose a new type of particle system, tailored for illustrative visualization purposes, in particular for visualizing molecular reactions in biological networks. Previous visualizations of biochemical processes exploited the results of agent-based modeling. Such modeling aims at accurately reproducing the stochastic nature of molecular interactions. However, it is impossible to expect events of interest to happen at a certain time and location, which is impractical for storytelling. To obtain the means of controlling molecular interactions, we propose to govern passive agents with an omniscient intelligence, instead of giving the agents the freedom to initiate reactions autonomously. This makes it possible to generate illustrative animated stories that communicate the functioning of the molecular machinery. The rendering delivers interactive framerates for massive amounts of data, based on the dynamic tessellation capabilities of modern graphics cards. Finally, we report on informal expert feedback obtained from potential users.", month = jun, journal = "Computer Graphics Forum", volume = "33", number = "3", note = "Article first published online: 12 JUL 2014", pages = "141--150", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic-2014-ivm/", } @article{Viola_Ivan_CLD, title = "Continuous Levels-of-Detail and Visual Abstraction for Seamless Molecular Visualization", author = "Julius Parulek and Daniel J\"{o}nsson and Timo Ropinski and Stefan Bruckner and Anders Ynnerman and Ivan Viola", year = "2014", abstract = "Molecular visualization is often challenged with rendering of large molecular structures in real time. We introduce a novel approach that enables us to show even large protein complexes. Our method is based on the level-of-detail concept, where we exploit three different abstractions combined in one visualization. Firstly, molecular surface abstraction exploits three different surfaces, solvent-excluded surface (SES), Gaussian kernels and van der Waals spheres, combined as one surface by linear interpolation. Secondly, we introduce three shading abstraction levels and a method for creating seamless transitions between these representations. The SES representation with full shading and added contours stands in focus, while a sphere representation of a cluster of atoms with constant shading and without contours provides the context.
Thirdly, we propose a hierarchical abstraction based on a set of clusters formed on molecular atoms. All three abstraction models are driven by one importance function classifying the scene into the near-, mid- and far-field. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on a series of molecules of varying atom counts.", month = may, issn = "0167-7055", journal = "Computer Graphics Forum", number = "6", volume = "33", pages = "276--287", keywords = "clustering, implicit surfaces, level of detail algorithms, scientific visualization, Computer Applications", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Viola_Ivan_CLD/", } @misc{lemuzic_2014_ipv, title = "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales", author = "Mathieu Le Muzic and Julius Parulek and Manuela Waldner and Ivan Viola", year = "2014", abstract = "Scientific illustrators commonly use structural descriptions of molecular compounds when depicting complex biochemical processes. However, computational biology also provides procedural models describing the function of biological processes which are not currently used in the production pipeline. Instead, animators utilize scientific knowledge to manually animate and reproduce the functioning of cellular biology. We would like to explore the use of such models in order to generate explanatory illustrations that would show how molecular machinery works. Particle-based simulations provide the means for spatially representing the dynamics of biochemical processes. They compute the positions of each single particle and are supposed to mimic a realistic behaviour of the metabolites. Current mesoscale visualization also allows directly showing the results of such simulations by mapping the positions of particles in a virtual 3D environment. Nevertheless, some biochemical processes, like DNA repair, for instance, exhibit temporal multiscale aspects because they comprise diffusion rates which are much greater in comparison with reaction rates. As a result, it is challenging to produce a clear and coherent visualization out of this type of simulation. Indeed, when viewing the process at the pace which would let us see the reactions, it becomes impossible for the human eye to keep track of individual elements because of the very large diffusion displacements. On the other hand, if one plays back the simulation slowly enough to see a steady motion of individual elements, then only very few reactions would occur in a reasonable amount of time. In this work we propose to solve the problem associated with multiple temporal scales by providing means for spatial. With this approach we aim at showing the two different temporal scales at the same time by using an advanced trajectory smoothing mechanism. This would allow us to see individual elements while showing a world full of reactions, hence enabling us to communicate complex biological processes and molecular machineries in a comprehensive way.",
", event = "Eurographics Workshop on Visual Computing for Biology", Conference date = "Poster presented at Eurographics Workshop on Visual Computing for Biology (2014-09-04--2014-09-05)", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/", } @inproceedings{Viola_Ivan_2013_SVA, title = "Seamless Visual Abstraction of Molecular Surfaces", author = "Julius Parulek and Timo Ropinski and Ivan Viola", year = "2013", abstract = "Molecular visualization is often challenged with rendering of large sequences of molecular simulations in real time. We introduce a novel approach that enables us to show even large protein complexes over time in real-time. Our method is based on the level-ofdetail concept, where we exploit three different molecular surface models, solvent excluded surface (SES), Gaussian kernels and van der Waals spheres combined in one visualization. We introduce three shading levels that correspond to their geometric counterparts and a method for creating seamless transition between these representations. The SES representation with full shading and added contours stands in focus while on the other side a sphere representation with constant shading and without contours provide the context. Moreover, we introduce a methodology to render the entire molecule directly using the A-buffer technique, which further improves the performance. The rendering performance is evaluated on series of molecules of varying atom counts.", month = may, isbn = "978-80-223-3377-1", series = " SCCG '13", publisher = "ACM Publishing House", organization = "Comenius University, Bratislava", location = "Smolenice, Slovak Republic", editor = "Roman Durikovi\v{c}, Holly Rushmeier", booktitle = "SCCG 2013 - 29th Proceedings Spring conference on Computer Graphics", pages = "120--127", keywords = "Implicit Surfaces, Level-of-detail, Visualization of Molecular Surfaces", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Viola_Ivan_2013_SVA/", } @article{Birkeland-2012-IMC, title = "Illustrative Membrane Clipping", author = "{\AA}smund Birkeland and Stefan Bruckner and Andrea Brambilla and Ivan Viola", year = "2012", abstract = "Clipping is a fast, common technique for resolving occlusions. It only requires simple interaction, is easily understandable, and thus has been very popular for volume exploration. However, a drawback of clipping is that the technique indiscriminately cuts through features. Illustrators, for example, consider the structures in the vicinity of the cut when visualizing complex spatial data and make sure that smaller structures near the clipping plane are kept in the image and not cut into fragments. In this paper we present a new technique, which combines the simple clipping interaction with automated selective feature preservation using an elastic membrane. In order to prevent cutting objects near the clipping plane, the deformable membrane uses underlying data properties to adjust itself to salient structures. To achieve this behaviour, we translate data attributes into a potential field which acts on the membrane, thus moving the problem of deformation into the soft-body dynamics domain. This allows us to exploit existing GPU-based physics libraries which achieve interactive frame rates. For manual adjustment, the user can insert additional potential fields, as well as pinning the membrane to interesting areas. 
We demonstrate that our method can act as a flexible and non-invasive replacement of traditional clipping planes.", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "3", note = "presented at EuroVis 2012", pages = "905--914", keywords = "illustrative visualization, volume rendering, clipping", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Birkeland-2012-IMC/", } @article{Peter_2012_AIV, title = "Semantics by Analogy for Illustrative Volume Visualization", author = "Moritz Gerl and Peter Rautek and Tobias Isenberg and Eduard Gr\"{o}ller", year = "2012", abstract = "We present an interactive graphical approach for the explicit specification of semantics for volume visualization. This explicit and graphical specification of semantics for volumetric features allows us to visually assign meaning to both input and output parameters of the visualization mapping. This is in contrast to the implicit way of specifying semantics using transfer functions. In particular, we demonstrate how to realize a dynamic specification of semantics which allows flexible exploration of a wide range of mappings. Our approach is based on three concepts. First, we use semantic shader augmentation to automatically add rule-based rendering functionality to static visualization mappings in a shader program, while preserving the visual abstraction that the initial shader encodes. With this technique we extend recent developments that define a mapping between data attributes and visual attributes with rules, which are evaluated using fuzzy logic. Second, we let users define the semantics by analogy through brushing on renderings of the data attributes of interest. Third, the rules are specified graphically in an interface that provides visual clues for potential modifications. Together, the presented methods offer a high degree of freedom in the specification and exploration of rule-based mappings and avoid the limitations of a linguistic rule formulation.", month = may, journal = "Computers & Graphics", number = "3", volume = "36", pages = "201--213", keywords = "shader augmentation, semantic visualization mapping, illustrative visualization, Volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Peter_2012_AIV/", } @misc{Groeller_2011_IPV, title = "Illustrative Particle Visualization of 4D MRI Blood-Flow Data", author = "Roy van Pelt and Eduard Gr\"{o}ller and Bart ter Haar Romenij and Anna Vilanova i Bartroli", year = "2011", month = may, location = "Bergen, Norway", event = "EuroVis 2011", booktitle = "Biomedical Image Analysis", Conference date = "Poster presented at EuroVis 2011 (2011-05-31--2011-06-03)", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Groeller_2011_IPV/", } @inproceedings{Balabanian-2010-IIV, title = "Interactive Illustrative Visualization of Hierarchical Volume Data", author = "Jean-Paul Balabanian and Ivan Viola and Eduard Gr\"{o}ller", year = "2010", abstract = "In scientific visualization the underlying data often has an inherent abstract and hierarchical structure. Therefore, the same dataset can simultaneously be studied with respect to its characteristics in the three-dimensional space and in the hierarchy space. Often both characteristics are equally important to convey. For such scenarios we explore the combination of hierarchy visualization and scientific visualization, where both data spaces are effectively integrated.
We have been inspired by illustrations of species evolution where hierarchical information is often present. Motivated by these traditional illustrations, we introduce integrated visualizations for hierarchically organized volumetric datasets. The hierarchy data is displayed as a graph, whose nodes are visually augmented to depict the corresponding 3D information. These augmentations include images due to volume raycasting, slicing of 3D structures, and indicators of structure visibility from occlusion testing. New interaction metaphors are presented that extend visualizations and interactions, typical for one visualization space, to control visualization parameters of the other space. Interaction on a node in the hierarchy influences visual representations of 3D structures and vice versa. We integrate both the abstract and the scientific visualizations into one view which avoids frequent refocusing typical for interaction with linked-view layouts. We demonstrate our approach on different volumetric datasets enhanced with hierarchical information.", month = jun, location = "Ottawa, Ontario, Canada", booktitle = "Proceedings of Graphics Interface 2010", pages = "137--144", keywords = "visualization, volume data, hierarchical", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Balabanian-2010-IIV/", } @article{bruckner-2010-HVC, title = "Hybrid Visibility Compositing and Masking for Illustrative Rendering", author = "Stefan Bruckner and Peter Rautek and Ivan Viola and Mike Roberts and Mario Costa Sousa and Eduard Gr\"{o}ller", year = "2010", abstract = "In this paper, we introduce a novel framework for the compositing of interactively rendered 3D layers tailored to the needs of scientific illustration. Currently, traditional scientific illustrations are produced in a series of composition stages, combining different pictorial elements using 2D digital layering. Our approach extends the layer metaphor into 3D without giving up the advantages of 2D methods. The new compositing approach allows for effects such as selective transparency, occlusion overrides, and soft depth buffering. Furthermore, we show how common manipulation techniques such as masking can be integrated into this concept. These tools behave just like in 2D, but their influence extends beyond a single viewpoint. Since the presented approach makes no assumptions about the underlying rendering algorithms, layers can be generated based on polygonal geometry, volumetric data, point-based representations, or others. Our implementation exploits current graphics hardware and permits real-time interaction and rendering.", journal = "Computers & Graphics", number = "34", pages = "361--369", keywords = "compositing, masking, illustration", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-HVC/", } @incollection{bruckner-2010-IFC, title = "Illustrative Focus+Context Approaches in Interactive Volume Visualization", author = "Stefan Bruckner and Eduard Gr\"{o}ller and Klaus Mueller and Bernhard Preim and Deborah Silver", year = "2010", abstract = "Illustrative techniques are a new and exciting direction in visualization research. Traditional techniques which have been used by scientific illustrators for centuries are re-examined in the light of modern computer technology. In this paper, we discuss the use of the focus+context concept for the illustrative visualization of volumetric data.
We give an overview of the state-of-the-art and discuss recent approaches which employ this concept in novel ways.", booktitle = "Scientific Visualization: Advanced Concepts", chapter = "10", editor = "Hans Hagen", isbn = "978-3-939897-19-4", note = "The article was originally written in 2005 after the Dagstuhl Seminar on Scientific Visualization and reflects the state-of-the-art at that time.", series = "Dagstuhl Follow-Ups", keywords = "Illustrative Visualization, Volumetric Data", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bruckner-2010-IFC/", } @article{Rautek-2008-IDS, title = "Interaction-Dependent Semantics for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2008", abstract = "In traditional illustration the choice of appropriate styles and rendering techniques is guided by the intention of the artist. For illustrative volume visualizations it is difficult to specify the mapping between the 3D data and the visual representation that preserves the intention of the user. The semantic layers concept establishes this mapping with a linguistic formulation of rules that directly map data features to rendering styles. With semantic layers fuzzy logic is used to evaluate the user defined illustration rules in a preprocessing step. In this paper we introduce interaction-dependent rules that are evaluated for each frame and are therefore computationally more expensive. Enabling interaction-dependent rules, however, allows the use of a new class of semantics, resulting in more expressive interactive illustrations. We show that the evaluation of the fuzzy logic can be done on the graphics hardware enabling the efficient use of interaction-dependent semantics. Further we introduce the flat rendering mode and discuss how different rendering parameters are influenced by the rule base. Our approach provides high quality illustrative volume renderings at interactive frame rates, guided by the specification of illustration rules.", month = may, journal = "Computer Graphics Forum", volume = "27", number = "3", pages = "847--854", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Rautek-2008-IDS/", } @phdthesis{bruckner-2008-IIV, title = "Interactive Illustrative Volume Visualization", author = "Stefan Bruckner", year = "2008", abstract = "Illustrations are essential for the effective communication of complex subjects. Their production, however, is a difficult and expensive task. In recent years, three-dimensional imaging has become a vital tool not only in medical diagnosis and treatment planning, but also in many technical disciplines (e.g., material inspection), biology, and archeology. Modalities such as X-Ray Computed Tomography (CT) and Magnetic Resonance Imaging (MRI) produce high-resolution volumetric scans on a daily basis. It seems counter-intuitive that even though such a wealth of data is available, the production of an illustration should still require a mainly manual and time-consuming process. This thesis is devoted to the computer-assisted generation of illustrations directly from volumetric data using advanced visualization techniques. The concept of a direct volume illustration system is introduced for this purpose. Instead of requiring an additional modeling step, this system allows the designer of an illustration to work directly on the measured data. 
Abstraction, a key component of traditional illustrations, is used in order to reduce visual clutter, emphasize important structures, and reveal hidden detail. Low-level abstraction techniques are concerned with the appearance of objects and allow flexible artistic shading of structures in volumetric data sets. High-level abstraction techniques control which objects are visible. For this purpose, novel methods for the generation of ghosted and exploded views are introduced. The visualization techniques presented in this thesis employ the features of current graphics hardware to achieve interactive performance. The resulting system allows the generation of expressive illustrations directly from volumetric data with applications in medical training, patient education, and scientific communication.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/bruckner-2008-IIV/", } @inproceedings{patel_daniel_2007_IRSD, title = "Illustrative Rendering of Seismic Data", author = "Daniel Patel and Christopher Giertsen and John Thurmond and Eduard Gr\"{o}ller", year = "2007", abstract = "In our work we present techniques for illustrative rendering of interpreted seismic volume data by adopting elements from geology book illustrations. We also introduce combined visualization techniques of interpreted and uninterpreted data for validation, comparison and interdisciplinary communication reasons. We introduce the concept of smooth transitions between these two semantic levels. To achieve this we present transfer functions that map seismic volume attributes to 2D textures that flow according to a deformation volume describing the buckling and discontinuities of the layers of the seismic data.", month = nov, location = "Saarbr\"{u}cken, Germany", editor = "Hendrik Lensch, Bodo Rosenhahn, H.P. Seidel", booktitle = "Proceedings of Vision, Modeling, and Visualization 2007", pages = "13--22", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/patel_daniel_2007_IRSD/", } @article{Rautek-2007-SLI, title = "Semantic Layers for Illustrative Volume Rendering", author = "Peter Rautek and Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Direct volume rendering techniques map volumetric attributes (e.g., density, gradient magnitude, etc.) to visual styles. Commonly this mapping is specified by a transfer function. The specification of transfer functions is a complex task and requires expert knowledge about the underlying rendering technique. In the case of multiple volumetric attributes and multiple visual styles the specification of the multi-dimensional transfer function becomes more challenging and non-intuitive. We present a novel methodology for the specification of a mapping from several volumetric attributes to multiple illustrative visual styles. We introduce semantic layers that allow a domain expert to specify the mapping in the natural language of the domain. A semantic layer defines the mapping of volumetric attributes to one visual style. Volumetric attributes and visual styles are represented as fuzzy sets. The mapping is specified by rules that are evaluated with fuzzy logic arithmetic.
The user specifies the fuzzy sets and the rules without special knowledge about the underlying rendering technique. Semantic layers allow for a linguistic specification of the mapping from attributes to visual styles, replacing the traditional transfer function specification.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "13", number = "6", note = "presented at IEEE Visualization 2007", pages = "1336--1343", keywords = "Illustrative Visualization, Volume Visualization, Focus+Context Techniques", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Rautek-2007-SLI/", } @article{bruckner-2007-STF, title = "Style Transfer Functions for Illustrative Volume Rendering", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2007", abstract = "Illustrative volume visualization frequently employs non-photorealistic rendering techniques to enhance important features or to suppress unwanted details. However, it is difficult to integrate multiple non-photorealistic rendering approaches into a single framework due to great differences in the individual methods and their parameters. In this paper, we present the concept of style transfer functions. Our approach enables flexible data-driven illumination which goes beyond using the transfer function to just assign colors and opacities. An image-based lighting model uses sphere maps to represent non-photorealistic rendering styles. Style transfer functions allow us to combine a multitude of different shading styles in a single rendering. We extend this concept with a technique for curvature-controlled style contours and an illustrative transparency model. Our implementation of the presented methods allows interactive generation of high-quality volumetric illustrations.", month = sep, journal = "Computer Graphics Forum", volume = "26", number = "3", note = "Eurographics 2007 3rd Best Paper Award", pages = "715--724", keywords = "illustrative visualization, transfer functions, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/bruckner-2007-STF/", } @techreport{TR-186-2-07-08, title = "Illustrative rendering of seismic data", author = "Daniel Patel and Christopher Giertsen and John Thurmond and Eduard Gr\"{o}ller", year = "2007", abstract = "We present multi-attribute texture transfer functions for the generation of seismic illustrations. We render seismic data in the style of geological textbook illustrations by combining illustratively rendered axis-aligned slices with volume rendering. We have extended the transfer function concept to map volume attributes to 2D textures that flow according to a deformation volume describing the buckling and discontinuities of the layers of the seismic data. Faults in the seismic layers are represented by texture disruptions.
", month = may, number = "TR-186-2-07-08", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "Seismic illustration, Texture transfer function, Texture mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-08/", } @article{bruckner-2006-ICE, title = "Illustrative Context-Preserving Exploration of Volume Data", author = "Stefan Bruckner and S\"{o}ren Grimm and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2006", abstract = "In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Highly transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration. It provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information.", month = nov, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "6", volume = "12", pages = "1559--1569", keywords = "focus+context techniques, volume rendering, illustrative visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/bruckner-2006-ICE/", } @inproceedings{bruckner-2005-ICV, title = "Illustrative Context-Preserving Volume Rendering", author = "Stefan Bruckner and S\"{o}ren Grimm and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2005", abstract = "In volume rendering it is very difficult to simultaneously visualize interior and exterior structures while preserving clear shape cues. Very transparent transfer functions produce cluttered images with many overlapping structures, while clipping techniques completely remove possibly important context information. In this paper we present a new model for volume rendering, inspired by techniques from illustration that provides a means of interactively inspecting the interior of a volumetric data set in a feature-driven way which retains context information. The context-preserving volume rendering model uses a function of shading intensity, gradient magnitude, distance to the eye point, and previously accumulated opacity to selectively reduce the opacity in less important data regions. It is controlled by two user-specified parameters. This new method represents an alternative to conventional clipping techniques, shares their easy and intuitive user control, but does not suffer from the drawback of missing context information. ", month = may, booktitle = "Proceedings of EuroVis 2005", pages = "69--76", keywords = "non-photorealistic techniques, focus+context techniques, volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bruckner-2005-ICV/", }