@WorkshopTalk{scherzer2009c, title = "Casting Shadows in Real Time", author = "Michael Schwarz and Elmar Eisemann and Ulf Assarsson and Daniel Scherzer", year = "2009", abstract = "Shadows are crucial for enhancing realism, and they provide important visual cues. In recent years, many important contributions have been made in representation of both hard shadows and soft shadows. With the tremendous increase of computational power and capabilities of graphics hardware, high-quality real-time shadows are now a reachable goal. But with the growing volume of available choices, it is particularly difficult to pick the right solution and assess product shortcomings. Because currently there is no ideal approach available, algorithms should be selected in accordance with the context in which shadows are produced. The possibilities range across a wide spectrum, from very approximate but really efficient to slower but accurate, adapted only to smaller or only to larger sources, addressing directional lights or positional lights, or involving GPU or CPU-heavy computations. This course is a guide to better understanding of the limitations and failure cases, advantages and disadvantages, and suitability of the algorithms for different application scenarios. It focuses on real-time, interactive solutions but also discusses offline approaches.", month = dec, event = "SIGGRAPH Asia Course", location = "Yokohama, Japan", keywords = "shadows, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/scherzer2009c/", } @inproceedings{SSMW09, title = "Real-Time Soft Shadows Using Temporal Coherence", author = "Daniel Scherzer and Michael Schw{\"a}rzler and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "A vast amount of soft shadow map algorithms have been presented in recent years. Most use a single sample hard shadow map together with some clever filtering technique to calculate perceptually or even physically plausible soft shadows. 
On the other hand there is the class of much slower algorithms that calculate physically correct soft shadows by taking and combining many samples of the light. In this paper we present a new soft shadow method that combines the benefits of these approaches. It samples the light source over multiple frames instead of a single frame, creating only a single shadow map each frame. Where temporal coherence is low we use spatial filtering to estimate additional samples to create correct and very fast soft shadows. ", month = dec, isbn = "978-3642103308", series = "Lecture Notes in Computer Science", publisher = "Springer", location = "Las Vegas, Nevada, USA", editor = "Bebis, G. and Boyle, R. and Parvin, B. and Koracin, D. and Kuno, Y. and Wang, J. and Pajarola, R. and Lindstrom, P. and Hinkenjann, A. and Encarnacao, M. and Silva, C. and Coming, D.", booktitle = "Advances in Visual Computing: 5th International Symposium on Visual Computing (ISVC 2009)", pages = "13--24", keywords = "real-time rendering, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SSMW09/", } @phdthesis{malik-thesis, title = "Feature Centric Volume Visualization", author = "Muhammad Muddassir Malik", year = "2009", abstract = "This thesis presents techniques and algorithms for the effective exploration of volumetric datasets. The Visualization techniques are designed to focus on user specified features of interest. The proposed techniques are grouped into four chapters namely feature peeling, computation and visualization of fabrication artifacts, locally adaptive marching cubes, and comparative visualization for parameter studies of dataset series. The presented methods enable the user to efficiently explore the volumetric dataset for features of interest. Feature peeling is a novel rendering algorithm that analyzes ray profiles along lines of sight. The profiles are subdivided according to encountered peaks and valleys at so called transition points. 
The sensitivity of these transition points is calibrated via two thresholds. The slope threshold is based on the magnitude of a peak following a valley, while the peeling threshold measures the depth of the transition point relative to the neighboring rays. This technique separates the dataset into a number of feature layers. Fabrication artifacts are of prime importance for quality control engineers for first part inspection of industrial components. Techniques are presented in this thesis to measure fabrication artifacts through direct comparison of a reference CAD model with the corresponding industrial 3D X-ray computed tomography volume. Information from the CAD model is used to locate corresponding points in the volume data. Then various comparison metrics are computed to measure differences (fabrication artifacts) between the CAD model and the volumetric dataset. The comparison metrics are classified as either geometry-driven comparison techniques or visual-driven comparison techniques. The locally adaptive marching cubes algorithm is a modification of the marching cubes algorithm where instead of a global iso-value, each grid point has its own iso-value. This defines an iso-value field, which modifies the case identification process in the algorithm. An iso-value field enables the algorithm to correct biases within the dataset like low frequency noise, contrast drifts, local density variations, and other artifacts introduced by the measurement process. It can also be used for blending between different iso-surfaces (e.g., skin, and bone in a medical dataset). Comparative visualization techniques are proposed to carry out parameter studies for the special application area of dimensional measurement using industrial 3D X-ray computed tomography. A dataset series is generated by scanning a specimen multiple times by varying parameters of the scanning device. A high resolution series is explored using a planar reformatting based visualization system. 
A multi-image view and an edge explorer are proposed for comparing and visualizing gray values and edges of several datasets simultaneously. For fast data retrieval and convenient usability the datasets are bricked and efficient data structures are used.", month = dec, pages = "105", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "marching cubes, feature peeling, difference measurement, multiple datasets, parameter visualization, comparative visualization, industrial computed tomography, volume visualization, fabrication artifacts, magnetic resonance imaging", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/malik-thesis/", } @mastersthesis{grosse-2009-avp, title = "Audio-Visual Perception in Interactive Virtual Environments", author = "Karl Grosse", year = "2009", abstract = "Interactive virtual environments (VEs) are gaining more and more fidelity. Their high quality stimuli undoubtedly increase the feeling of presence and immersion as “being in the world”, but maybe they also affect user’s performance on specific tasks. Vision and spatial hearing are the main contributors of our perception. Sight dominates clearly and has been in the focus of research for a long time, but maybe it is the audiovisual combination which facilitates the user in his decision making and in completing a task. Mere identification of the task is not enough. Of course one could find dozens of problems where spatial sound reproduction has a practical relevance. More interesting are those which reside on a high cognitive level. Tasks that combine visual stimuli and auditive perception with movement provide a wide field of activity like for example crossing a busy road, an every day task that contains a high information density and demands fast processing by the brain. But how does hearing have an impact on this? 
Does spatial audio lead to better performance? Can one adjust naturalistic, spatialized hearing virtually? This diploma thesis assesses the effect of spatial sound reproduction compared to conventional stereo sound or no sound at all. Within the scope of the practical part, a simulator was implemented to produce a virtual street crossing experiment. It was later used to perform a study with volunteer participants. The results give evidence that there is a statistically significant difference between spatialized sound rendering compared to stereo sound or no sound. In the future this can not be used solely to boost the naturalistic fidelity and authenticity of a virtual environment but also as a user supportive measure.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/grosse-2009-avp/", } @mastersthesis{recheis-2009-arr, title = "Automatic Recognition of Repeating Patterns in Rectified Facade Images", author = "Meinrad Recheis", year = "2009", abstract = "Building facades typically consist of multiple similar tiles which are arranged quite strictly in grid-like structures. The proposed method takes advantage of translational symmetries and is able to analyze and segment facades into tiles assuming that there are horizontal and vertical repetitions of similar tiles. In order to solve this quite complex computer vision task efficiently a Monte Carlo approach is presented which samples only selected image features. This method, which is meant to be a preprocessing step for more sophisticated tile segmentation and window identification in urban reconstruction tasks, is able to robustly identify orthogonal repetitive patterns on rectified facade images even if they are partially occluded, shadowed, blurry or otherwise damaged. 
Additionally, the algorithm is very running time efficient because neither quality of results nor the computational complexity are significantly depending on the image size.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/recheis-2009-arr/", } @article{jeschke-09-solver, title = "A GPU Laplacian Solver for Diffusion Curves and Poisson Image Editing", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "We present a new Laplacian solver for minimal surfaces—surfaces having a mean curvature of zero everywhere except at some fixed (Dirichlet) boundary conditions. Our solution has two main contributions: First, we provide a robust rasterization technique to transform continuous boundary values (diffusion curves) to a discrete domain. Second, we define a variable stencil size diffusion solver that solves the minimal surface problem. We prove that the solver converges to the right solution, and demonstrate that it is at least as fast as commonly proposed multigrid solvers, but much simpler to implement. It also works for arbitrary image resolutions, as well as 8 bit data. We show examples of robust diffusion curve rendering where our curve rasterization and diffusion solver eliminate the strobing artifacts present in previous methods. 
We also show results for real-time seamless cloning and stitching of large image panoramas.", month = dec, journal = "Transactions on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Poisson equation, Line and Curve rendering , Diffusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-solver/", } @phdthesis{heinzl-2008-thesis, title = "Analysis and Visualization of Industrial CT Data", author = "Christoph Heinzl", year = "2009", abstract = "Industrial X-Ray 3D computed tomography (3DCT) is on the edge of advancing from a non destructive testing method to a fully standardized means of dimensional measurement for every day industrial use. Currently 3DCT has drawn attention especially in the area of first part inspections of new components, mainly in order to overcome limitations and drawbacks of common methods. Yet an increasing number of companies is benefitting from industrial 3DCT and sporadically the first pioneers start using industrial 3DCT for quality control in the production phase of a component. As 3DCT is still a very young technology of industrial quality control, this method also faces severe problems, which seriously affect measurement results. Some of the major drawbacks for quality control are the following: Artefacts modify the spatial greyvalues, generating artificial structures in the datasets, which do not correspond to reality. Discrete sampling introduces further irregularities due to the Nyquist-Shannon sampling theorem. Uncertainty information is missing when extracting dimensional measurement features. Specifications and limitations of the components and the special setup of a 3DCT constrain the best achievable measurement precision. 
This thesis contributes to the state of the art by algorithmic evaluation of typical industrial tasks in the area of dimensional measurement using 3DCT. The main focus lies in the development and implementation of novel pipelines for everyday industrial use including comparisons to common methods. Convenient and easy to understand means of visualization are evaluated and used to provide insight into the generated results. In particular three pipelines are introduced, which cover some of the major aspects concerning metrology using industrial 3DCT. The considered aspects are robust surface extraction, artefact reduction via dual energy CT, local surface extraction of multi-material components, and statistical analysis of multi-material components. The generated results of each pipeline are demonstrated and verified using test specimens as well as real world components.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/heinzl-2008-thesis/", } @article{jeschke-09-rendering, title = "Rendering Surface Details with Diffusion Curves", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. This paper extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically reallocates texture space so that object parts that appear large on screen get more texture for increased detail. 
The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. Our results show high quality renderings of diffusion curve textures, displacements, and geometry images, all rendered interactively.", month = dec, journal = "Transactions on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Geometry images, Displacement mapping, Diffusion curves, Line and Curve rendering ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-rendering/", } @inproceedings{LIPP-2009-PGL, title = "Parallel Generation of L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2009", abstract = "This paper introduces a solution to compute L-systems on parallel architectures like GPUs and multi-core CPUs. Our solution can split the derivation of the L-system as well as the interpretation and geometry generation into thousands of threads running in parallel. We introduce a highly parallel algorithm for L-system evaluation that works on arbitrary L-systems, including parametric productions, context sensitive productions, stochastic production selection, and productions with side effects. Further we directly interpret the productions defined in plain-text, without requiring any compilation or transformation step (e.g., into shaders). 
Our algorithm is efficient in the sense that it requires no explicit inter-thread communication or atomic operations, and is thus completely lock free.", month = nov, isbn = "978-3980487481", location = "Braunschweig", editor = "Marcus Magnor and Bodo Rosenhahn and Holger Theisel", booktitle = "Vision, Modeling, and Visualization Workshop (VMV) 2009 ", pages = "205--214", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LIPP-2009-PGL/", } @phdthesis{vucini-2009-phd, title = "On Visualization and Reconstruction from Non-uniform Point Sets", author = "Erald Vucini", year = "2009", abstract = "Technological and research advances in both acquisition and simulation devices provide continuously increasing high-resolution volumetric data that by far exceed today's graphical and display capabilities. Non-uniform representations offer a way of balancing this deluge of data by adaptively measuring (sampling) according to the importance (variance) of the data. Also, in many real-life situations the data are known only on a non-uniform representation. Processing of non-uniform data is a non-trivial task and hence more difficult when compared to processing of regular data. Transforming from non-uniform to uniform representations is a well-accepted paradigm in the signal processing community. In this thesis we advocate such a concept. The main motivation for adopting this paradigm is that most of the techniques and methods related to signal processing, data mining and data exploration are well-defined and stable for Cartesian data, but generally are non-trivial to apply to non-uniform data. Among other things, this will allow us to better exploit the capabilities of modern GPUs. In non-uniform representations sampling rates can vary drastically even by several orders of magnitude, making the decision on a target resolution a non-trivial trade-off between accuracy and efficiency. 
In several cases the points are spread non-uniformly with similar density across the volume, while in other cases the points have an enormous variance in distribution. In this thesis we present solutions to both cases. For the first case we suggest computing reconstructions of the same volume in different resolutions based on the level of detail we are interested in. The second case scenario is the main motivation for proposing a multi-resolution scheme, where the scale of reconstruction is decided adaptively based on the number of points in each subregion of the whole volume. We introduce a novel framework for 3D reconstruction and visualization from non-uniform scalar and vector data. We adopt a variational reconstruction approach. In this method non-uniform point sets are transformed to a uniform representation consisting of B-spline coefficients that are attached to the grid. With these coefficients we can define a C2 continuous function across the whole volume. Several testings were performed in order to analyze and fine-tune our framework. All the testings and the results of this thesis offer a view from a new and different perspective to the visualization and reconstruction from non-uniform point sets.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/vucini-2009-phd/", } @article{bruckner-2009-BVQ, title = "BrainGazer - Visual Queries for Neurobiology Research", author = "Stefan Bruckner and Veronika Solteszova and Eduard Gr{\"o}ller and Ji{\v{r}}{\'i} Hlad{\r{u}}vka and Katja B{\"u}hler and Jai Yu and Barry Dickson", year = "2009", abstract = "Neurobiology investigates how anatomical and physiological relationships in the nervous system mediate behavior. 
Molecular genetic techniques, applied to species such as the common fruit fly Drosophila melanogaster, have proven to be an important tool in this research. Large databases of transgenic specimens are being built and need to be analyzed to establish models of neural information processing. In this paper we present an approach for the exploration and analysis of neural circuits based on such a database. We have designed and implemented BrainGazer, a system which integrates visualization techniques for volume data acquired through confocal microscopy as well as annotated anatomical structures with an intuitive approach for accessing the available information. We focus on the ability to visually query the data based on semantic as well as spatial relationships. Additionally, we present visualization techniques for the concurrent depiction of neurobiological volume data and geometric objects which aim to reduce visual clutter. The described system is the result of an ongoing interdisciplinary collaboration between neurobiologists and visualization researchers.", month = nov, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "15", number = "6", pages = "1497--1504", keywords = "biomedical visualization, neurobiology, visual queries, volume visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-BVQ/", } @inproceedings{musialski-2009-sbfr, title = "Symmetry-Based Facade Repair", author = "Przemyslaw Musialski and Peter Wonka and Meinrad Recheis and Stefan Maierhofer and Werner Purgathofer", year = "2009", abstract = "In this paper we address the problem of removing unwanted image content in a single orthographic facade image. We exploit the regular structure present in building facades and introduce propagation process that is guided by the symmetry prevalent in the image. 
It removes larger unwanted image objects such as traffic lights, street signs, or cables as well as smaller noise, such as reflections in the windows. The output is intended as source for textures in urban reconstruction projects.", month = nov, isbn = "978-3980487481", location = "Braunschweig", editor = "Marcus Magnor and Bodo Rosenhahn and Holger Theisel ", booktitle = "Vision, Modeling, and Visualization Workshop 2009", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/musialski-2009-sbfr/", } @WorkshopTalk{Purgathofer-2009-rus, title = "Interactive Visual Editing of Grammars for Procedural Architecture", author = "Werner Purgathofer and Markus Lipp", year = "2009", month = oct, event = "Visual Computing in Fundamental, Academic and Applied Science and Research", location = "Moscow, Russia", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Purgathofer-2009-rus/", } @article{fuchs_vhml, title = "Visual Human+Machine Learning", author = "Raphael Fuchs and J{\"u}rgen Waser and Eduard Gr{\"o}ller", year = "2009", abstract = "In this paper we describe a novel method to integrate interactive visual analysis and machine learning to support the insight generation of the user. The suggested approach combines the vast search and processing power of the computer with the superior reasoning and pattern recognition capabilities of the human user. An evolutionary search algorithm has been adapted to assist in the fuzzy logic formalization of hypotheses that aim at explaining features inside multivariate, volumetric data. Up to now, users solely rely on their knowledge and expertise when looking for explanatory theories. However, it often remains unclear whether the selected attribute ranges represent the real explanation for the feature of interest. Other selections hidden in the large number of data variables could potentially lead to similar features. 
Moreover, as simulation complexity grows, users are confronted with huge multidimensional data sets making it almost impossible to find meaningful hypotheses at all. We propose an interactive cycle of knowledge-based analysis and automatic hypothesis generation. Starting from initial hypotheses, created with linking and brushing, the user steers a heuristic search algorithm to look for alternative or related hypotheses. The results are analyzed in information visualization views that are linked to the volume rendering. Individual properties as well as global aggregates are visually presented to provide insight into the most relevant aspects of the generated hypotheses. This novel approach becomes computationally feasible due to a GPU implementation of the time-critical parts in the algorithm. A thorough evaluation of search times and noise sensitivity as well as a case study on data from the automotive domain substantiate the usefulness of the suggested approach.", month = oct, journal = "IEEE TVCG", volume = "15", number = "6", issn = "1077-2626", pages = "1327--1334", keywords = "Volumetric Data, Interactive Visual Analysis, Knowledge Discovery, Genetic Algorithm, Curse of Dimensionality, Predictive Analysis, Computer-assisted Multivariate Data Expl, Multiple Competing Hypotheses", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fuchs_vhml/", } @phdthesis{beyer-2009-gpu, title = "GPU-based Multi-Volume Rendering of Complex Data in Neuroscience and Neurosurgery", author = "Johanna Beyer", year = "2009", abstract = "Recent advances in image acquisition technology and its availability in the medical and bio-medical fields have lead to an unprecedented amount of high-resolution imaging data. However, the inherent complexity of this data, caused by its tremendous size, complex structure or multi-modality poses several challenges for current visualization tools. 
Recent developments in graphics hardware architecture have increased the versatility and processing power of today’s GPUs to the point where GPUs can be considered parallel scientific computing devices. The work in this thesis builds on the current progress in image acquisition techniques and graphics hardware architecture to develop novel 3D visualization methods for the fields of neurosurgery and neuroscience. The first part of this thesis presents an application and framework for planning of neurosurgical interventions. Concurrent GPU-based multi-volume rendering is used to visualize multiple radiological imaging modalities, delineating the patient’s anatomy, neurological function, and metabolic processes. Additionally, novel interaction metaphors are introduced, allowing the surgeon to plan and simulate the surgical approach to the brain based on the individual patient anatomy. The second part of this thesis focuses on GPU-based volume rendering techniques for large and complex EM data, as required in the field of neuroscience. A new mixed-resolution volume ray-casting approach is presented, which circumvents artifacts at block boundaries of different resolutions. NeuroTrace is introduced, an application for interactive segmentation and visualization of neural processes in EM data. EM data is extremely dense, heavily textured and exhibits a complex structure of interconnected nerve cells, making it difficult to achieve high-quality volume renderings. Therefore, this thesis presents a novel on-demand nonlinear noise removal and edge detection method which allows to enhance important structures (e.g., myelinated axons) while de-emphasizing less important regions of the data. 
In addition to the methods and concepts described above, this thesis tries to bridge the gap between state-of-the-art visualization research and the use of those visualization methods in actual medical and bio-medical applications.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/beyer-2009-gpu/", } @article{fritz-2009-ava, title = "A Visual Approach to Efficient Analysis and Quantification of Ductile Iron and Reinforced Sprayed Concrete", author = "Laura Fritz and Markus Hadwiger and Georg Geier and Gerhard Pittino and Eduard Gr{\"o}ller", year = "2009", month = oct, journal = "IEEE TVCG", volume = "15", number = "6", issn = "1077-2626", pages = "1343--1350", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fritz-2009-ava/", } @article{fuchs-pcs-2009, title = "Predictor-Corrector Schemes for Visualization of Smoothed Particle Hydrodynamics Data", author = "Benjamin Schindler and Raphael Fuchs and John Biddiscombe and Ronald Peikert", year = "2009", month = oct, journal = "IEEE TVCG", volume = "15", number = "6", issn = "1077-2626", pages = "1243--1250", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fuchs-pcs-2009/", } @mastersthesis{opitz-2009-cvv, title = "Classification and Visualization of Volume Data using Clustering", author = "Andreas Opitz", year = "2009", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/opitz-2009-cvv/", } @phdthesis{patel-2009-evr, title = "Expressive Visualization and Rapid Interpretation of Seismic Volumes", author = "Daniel Patel", year = "2009", abstract = "One of the most important resources in the world today is energy. 
Oil and gas provide two thirds of the world energy consumption, making the world completely dependent on it. Locating and recovering the remaining oil and gas reserves will be of high focus in society until competitive energy sources are found. The search for hydrocarbons is broadly speaking the topic of this thesis. Seismic measurements of the subsurface are collected to discover oil and gas trapped in the ground. Identifying oil and gas in the seismic measurements requires visualization and interpretation. Visualization is needed to present the data for further analysis. Interpretation is performed to identify important structures. Visualization is again required for presenting these structures to the user. This thesis investigates how computer assistance in producing high-quality visualizations and in interpretation can result in expressive visualization and rapid interpretation of seismic volumes. Expressive visualizations represent the seismic data in an easily digestible, intuitive and pedagogic form. This enables rapid interpretation which accelerates the finding of important structures.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/patel-2009-evr/", } @WorkshopTalk{traxler-2009-dhm, title = "Design of the Harris Matrix Composer, a new tool to create and edit Harris Matrices ", author = "Wolfgang Neubauer and Christoph Traxler", year = "2009", abstract = "The Harris Matrix - formulated by Dr. Edward C. Harris in 1973 - is the established way of representing the archaeological stratigraphy of an excavation. The Harris Matrix is a sequential diagram defining stratigraphic relations between stratigraphic units. It is an important method to document the stratification that is destroyed by the excavation process and hence a vital tool for analysis. 
The theory was recently extended to distinguish between two types of stratigraphic units, deposits and surfaces. Deposits represent layers that are dug away and hence are volumetric data, whereas surfaces represent immaterial interfaces between layers of stratigraphy. Based on the analysis of finds and samples temporal relations, like “later” or “contemporary” can be set to supplement stratigraphic ones. The analysis also results in a division of stratigraphic units into phases and periods. Phases are structural entities, like post-holes of an ancient dwelling. Periods represent a certain historical epoch. Since this extension of the theory is not considered by any tool, we decided to design and develop a new one, the Harris Matrix Composer (HMC). With the HMC a Harris Matrix can be created and edited by drawing its graph structure with an intuitive and easy to use GUI. A layout mechanism arranges units into a top-down structure that reflects their stratigraphic and temporal relations. The validity checker helps users to avoid errors. The HMC will have an interface to a GIS system to access digital archaeological data by selecting units, phases or periods in the Harris Matrix and hence in a meaningful way for analysis. 
In that respect it will close a gap in the archaeological toolbox.", month = sep, event = "``35 Jahre Harris Matrix'' - International Conference on Archaeological Stratigraphy", location = "Vienna, Austria", address = "Austrian Academy of Sciences, Sonnenfelsgasse 19 , A-1010 Vienna ", organization = "Austrian Academy of Sciences, VIAS - Vienna Institute for Archaeological Science, University of Vienna and the Bermuda Maritime Museum.", publisher = "Austrian Academy of Sciences", keywords = "GUI Design, Usability, Archaeological Stratigraphy, Harris Matrix", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/traxler-2009-dhm/", } @inproceedings{SCHEIBLAUER-2009-IDCE, title = "Interactive Domitilla Catacomb Exploration", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2009", abstract = "In this paper we present an approach for interactive visualization and manipulation of huge point clouds. Archaeological monuments like the Domitilla Catacomb in Rome lead to data sets surpassing 1 Billion points or 20GB of storage space, which makes standard techniques like mesh conversion or in-core point-based rendering infeasible. Our system uses an out-of-core octree structure and a number of interactive editing tools to enable many archaeological tasks to be carried out on the whole point cloud that would not be possible using traditional methods. We allow fast selection, insertion and deletion of points, and through out-of-core rendering, the frame rate always stays above 20 frames per second on a fast workstation. 
To the best of our knowledge, this is the first interactive visualization of the complete data set of a large subterranean catacomb, and we show that direct point cloud visualization on the complete data set of a scan campaign is an important tool in archaeological practice.", month = sep, isbn = "978-3-905674-18-7", publisher = "Eurographics Association", location = "Malta", booktitle = "10th VAST International Symposium on Virtual Reality, Archaeology and Cultural Heritage (VAST09)", pages = "65--72", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SCHEIBLAUER-2009-IDCE/", } @misc{Habel-09-RAT, title = "Real-Time Rendering and Animation of Trees", author = "Ralf Habel and Alexander Kusternig", year = "2009", abstract = "This demonstration combines novel methods for physically accurate yet efficient rendering and animation of trees under dynamic lighting conditions. A new leaf shading method is used that models the high-frequency structures such as veins and bulges to reproduce all important lighting attributes on a physical basis. Those structures are also used to calculate the translucency of leaves, which is modeled with physically based subsurface scattering, incorporating self-shadowing, thickness variations and varying albedo. This allows consistent reflective and translucent shading without constraining lighting or animation for close-up views. The individual deformation and animation of leaves and branches is defined by their physical properties such as shape and elasticity. A structural mechanics model is solved and combined with a length correction to achieve a physically plausible bending. To model the tree-wind interaction, a spectral approach is applied that allows for a physically guided animation as well as a high level of control. The applied methods have been published in "Physically Based Real-time Translucency for Leaves (EGSR 2007)" and "Physically Guided Animation of Trees (Eurographics 2009)". 
", month = aug, keywords = "Real-Time Rendering, Trees, Animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel-09-RAT/", } @misc{LIPP-2009-PGL2, title = "Parallel Generation of L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2009", abstract = "In this work we investigate whether it is possible to efficiently evaluate one of the most classical procedural modeling primitives, L-systems, directly on parallel architectures, exemplified by current GPUs and multi-core CPUs. The main motivation is to enable interactive editing of large L-systems by designers, therefore it is important to speed up the computation of L-systems in order to achieve low response times.", month = aug, location = "New Orleans, LA", event = "High-Performance Graphics 2009", conferencedate = "Poster presented at High-Performance Graphics 2009 (2009-08-01--2009-08-03)", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LIPP-2009-PGL2/", } @article{BITTNER-2009-AGVS, title = "Adaptive Global Visibility Sampling", author = "Jir\'{i} Bittner and Oliver Mattausch and Peter Wonka and Vlastimil Havran and Michael Wimmer", year = "2009", abstract = "In this paper we propose a global visibility algorithm which computes from-region visibility for all view cells simultaneously in a progressive manner. We cast rays to sample visibility interactions and use the information carried by a ray for all view cells it intersects. The main contribution of the paper is a set of adaptive sampling strategies based on ray mutations that exploit the spatial coherence of visibility. Our method achieves more than an order of magnitude speedup compared to per-view cell sampling. This provides a practical solution to visibility preprocessing and also enables a new type of interactive visibility analysis application, where it is possible to quickly inspect and modify a coarse global visibility solution that is constantly refined. 
", month = aug, journal = "ACM Transactions on Graphics", volume = "28", number = "3", issn = "0730-0301", pages = "94:1--94:10", keywords = "occlusion culling, visibility sampling, visibility, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-AGVS/", } @mastersthesis{LUKSCH-2009-BBC, title = "Implementation of an Improved Billboard Cloud Algorithm", author = "Christian Luksch", year = "2009", abstract = "Real-time rendering is a huge field with many applications. Large scale urban visualization or outdoor game environments set highest demands on the visual complexity of the virtual world. The frame rate on the other hand has to stay above 60 frames per seconds, otherwise movements and animations are not perceived smoothly anymore. Even though hardware gets faster every year, geometry simplification is essential to distribute the resources for optimal quality and performance. This thesis gives an overview of geometry simplification algorithms based on the billboard cloud principle. The problematic of the summarized algorithms are discussed and several improvements and possible modifications are described. The focus is on reducing simplification artifacts and improving the performance without affecting the visual quality. Finally, the results will be presented in an extensive evaluation. 
From the gained insight rules and parameters for an automatic geometry simplification process will be derived.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Geometry Simplification, Billboard Clouds, Real-Time Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LUKSCH-2009-BBC/", } @mastersthesis{knecht-2009-MKN, title = "Real-Time Global Illumination Using Temporal Coherence", author = "Martin Knecht", year = "2009", abstract = "The goal of this thesis is to produce plausible global illumination in real time using temporal coherence. While direct illumination combined with precomputed static global illumination is widely used in today’s computer games and 3D applications, real-time global illumination that supports arbitrary dynamic scenes and light setups is still an active area of research. This master thesis gives an introduction to global illumination and discusses various methods that have been developed. However, since most of the existing methods need some kind of precomputation to calculate global illumination in real time, they also introduce limitations like static light, scenes or view points. Furthermore other algorithms are not suitable for the capabilities of current graphics hardware or are simply fake approaches. The core of this thesis is an improved version of the instant radiosity and imperfect shadow maps algorithm that reuses illumination information from previous frames. The previous approaches needed a high number of so called virtual point lights to get convincing results, whereas our method achieves visually pleasing results with only a few virtual point lights. As a second extension to the base algorithms we introduce a new method to compute multiple light bounces. 
In this method the fill rate is drastically reduced and therefore computation time is much lower than in previous approaches.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Real Time Rendering, Global Illumination, Instant Radiosity, Temporal Coherence", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/knecht-2009-MKN/", } @mastersthesis{alsallakh-2009-iva, title = "Interactive Visual Analysis of Relational Data and Applications in Event-Based Business Analytics", author = "Bilal Alsallakh", year = "2009", abstract = "In this work, a framework for interactive visual analysis of attributed graphs has been developed. An attributed graph is an extension of the standard graph of a binary relation, which attaches a set of attributes to the nodes and edges. The implemented visual analysis techniques aim at the local level at enabling an intuitive navigation in the graph which reveals both the structure of the selected part of the graph and the attributes of the nodes and edges in this part. At the global level these techniques aim at understanding the distributions of the attributes in the graph as a whole or in specific parts in it and at spotting meaningful associations between the attributes and the relations. The work presents several extensions to the attributes such as graph‐theoretic features, values aggregated over the relations, and hierarchical grouping. All attributes are treated in a unified manner which helps performing elaborate analysis tasks using the existing tools. Additionally, novel graph drawing techniques are proposed. They are designed to understand attribute distributions and associations in the graph. These techniques can be additionally used to visualize results of queries in the data, which can be also visually defined using the attribute analysis tools. 
Finally, the work addresses several types of association analysis in relational data, along with visual analysis methods for them. It presents a perceptual enhancement for the well‐known parallel sets technique for association analysis in categorical data, and proposes extensions for employing it in relational data. Also, novel methods for other types of association analysis are introduced. The relational data in this work were defined upon typed events in an event‐based system, which offers a flexible architecture for real‐time analysis. Nevertheless, the presented analysis methods are generic and have been tested on two real‐world datasets. In the first dataset, entities for customers and products are derived from the purchase events, and various meaningful associations were found between the attributes and the relation (for example, which types of products the female customers bought more frequently, or at which age customers have higher interest for books). In the second dataset, events in an issue‐tracking system are analyzed to find out ticket assignment patterns and forwarding patterns between the support offices.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/alsallakh-2009-iva/", } @mastersthesis{brandorff-2009-erv, title = "Enhancement, Registration, and Visualization of High Resolution Episcopic Microscopy Data", author = "Clemens Brandorff", year = "2009", abstract = "Weninger et al. [25] developed a novel methodology for rapid 2D and 3D computer analysis and visualization of gene expression patterns. The data is generated by staining a specimen followed by an iterating process of cutting thin slices and capturing them with an episcopic microscope. The result is a high resolution 3D dataset. 
One channel contains anatomical information and a second channel contains the gene expression patterns. In this thesis we examine methods for enhancing, registrating and visualizing this novel kind of data. We address the uneven illumination of slices that are introduced by the methodology. We developed an algorithm to fit a quadric surface through the background pixels to estimate the illumination situation over the whole slice. This estimate is used to correct the slices of one dataset. Further, an extension of this methodology was researched. Recycling the already cut sections for staining them a second time allows the medical domain scientists to augment their technique with additional information. The result of the second data generation phase is a stack of unaligned slices. The manual processing of the sections introduces non-linear deformations. We explored several registration algorithms to align the two image stacks. We found a two step registration approach to yield the best results. In the first step a coarse affine registration is used to approximately align the datasets. The result of the first step is inspected and if necessary corrected by the user. In the second step a b-spline registration is used that compensates for the non-linear deformations of the 2D slices. For the visual inspection of the registration results and to present an overview of the datasets we implemented two visualization approaches. 
A checkerboard view is used to compare 2D slices, and a three dimensional approach based on direct volume rendering incorporates surface enhancement by gradient magnitude opacity modulation to emphasize the alignment of tissue boundaries.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/brandorff-2009-erv/", } @mastersthesis{puehringer-2009-sbm, title = "Sketch-based Modelling for Volume Visualization", author = "Nicolas P\"{u}hringer", year = "2009", abstract = "In the recent years the use of touch-sensitive input devices (e.g., tablet devices) heavily increased. Especially for drawing or sketching tasks, these devices - in combination with new user interface approaches - yield many possibilities to optimize traditional workflows. This thesis provides an approach for integrating this new user interfaces techniques into a volume visualization framework. The main goal is to account for the frequently encountered task of selecting specific structures of interest in a volume dataset which can not be separated by a transfer function setup. First, a gesture-based user interface is incorporated to build up a fluid and intuitive workflow without disrupting the user’s attention from the main working area. Further, a sketch-based modelling approach allows the user to easily generate 3D models out of 2D input sketches. These models serve as an initial selection on a structure of interest within a volume dataset. To automatically fit the generated models on the volume features, we present an algorithm for automatic deformation of mesh models on volume structures, resulting in a good approximation of the specific area. This flexible combination of techniques allows the user to achieve selections in a volume dataset within minutes of work. 
These can subsequently be used for masking, cropping or segmentation operations on the volume.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/puehringer-2009-sbm/", } @article{vucini_2009, title = "On Visualization and Reconstruction from Non-Uniform Point Sets using B-splines", author = "Erald Vucini and Torsten M\"{o}ller and Eduard Gr\"{o}ller", year = "2009", abstract = "In this paper we present a novel framework for the visualization and reconstruction from non-uniform point sets. We adopt a variational method for the reconstruction of 3D non-uniform data to a uniform grid of chosen resolution. We will extend this reconstruction to an efficient multi-resolution uniform representation of the underlying data. Our multi-resolution representation includes a traditional bottom-up multi-resolution approach and a novel top-down hierarchy for adaptive hierarchical reconstruction. Using a hybrid regularization functional we can improve the reconstruction results. Finally, we discuss further application scenarios and show rendering results to emphasize the effectiveness and quality of our proposed framework. 
By means of qualitative results and error comparisons we demonstrate superiority of our method compared to competing methods", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "3", note = "2nd Best Paper Award", issn = "0167-7055", pages = "1007--1014", keywords = "B-splines, Image Processing and Computer Vision, Non-uniform data, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/vucini_2009/", } @article{wilkie-2009-cc, title = "A Robust Illumination Estimate for Chromatic Adaptation in Rendered Images", author = "Alexander Wilkie and Andrea Weidlich", year = "2009", abstract = "We propose a method that improves automatic colour correction operations for rendered images. In particular, we propose a robust technique for estimating the visible and pertinent illumination in a given scene. We do this at very low computational cost by mostly re-using information that is already being computed during the image synthesis process. Conventional illuminant estimations either operate only on 2D image data, or, if they do go beyond pure image analysis, only use information on the luminaires found in the scene. The latter is usually done with little or no regard for how the light sources actually affect the part of the scene that is being viewed. Our technique goes beyond that, and also takes object reflectance into account, as well as the incident light that is actually responsible for the colour of the objects that one sees. It is therefore able to cope with difficult cases, such as scenes with mixed illuminants, complex scenes with many light sources of varying colour, or strongly coloured indirect illumination. 
", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1101--1109", keywords = "chromatic adaptation, predicitve rendering, colour constancy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/wilkie-2009-cc/", } @article{cline-09-poisson, title = "Dart Throwing on Surfaces", author = "David Cline and Stefan Jeschke and Anshuman Razdan and Kenric White and Peter Wonka", year = "2009", abstract = "In this paper we present dart throwing algorithms to generate maximal Poisson disk point sets directly on 3D surfaces. We optimize dart throwing by efficiently excluding areas of the domain that are already covered by existing darts. In the case of triangle meshes, our algorithm shows dramatic speed improvement over comparable sampling methods. The simplicity of our basic algorithm naturally extends to the sampling of other surface types, including spheres, NURBS, subdivision surfaces, and implicits. We further extend the method to handle variable density points, and the placement of arbitrary ellipsoids without overlap. Finally, we demonstrate how to adapt our algorithm to work with geodesic instead of Euclidean distance. Applications for our method include fur modeling, the placement of mosaic tiles and polygon remeshing.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1217--1226", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/cline-09-poisson/", } @article{bruckner-2009-IVV, title = "Instant Volume Visualization using Maximum Intensity Difference Accumulation", author = "Stefan Bruckner and Eduard Gr\"{o}ller", year = "2009", abstract = "It has long been recognized that transfer function setup for Direct Volume Rendering (DVR) is crucial to its usability. However, the task of finding an appropriate transfer function is complex and time-consuming even for experts. 
Thus, in many practical applications simpler techniques which do not rely on complex transfer functions are employed. One common example is Maximum Intensity Projection (MIP) which depicts the maximum value along each viewing ray. In this paper, we introduce Maximum Intensity Difference Accumulation (MIDA), a new approach which combines the advantages of DVR and MIP. Like MIP, MIDA exploits common data characteristics and hence does not require complex transfer functions to generate good visualization results. It does, however, feature occlusion and shape cues similar to DVR. Furthermore, we show that MIDA – in addition to being a useful technique in its own right – can be used to smoothly transition between DVR and MIP in an intuitive manner. MIDA can be easily implemented using volume raycasting and achieves real-time performance on current graphics hardware.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "3", issn = "0167-7055", pages = "775--782", keywords = "illustrative visualization, maximum intensity projection, direct volume rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bruckner-2009-IVV/", } @inproceedings{piringer-2009-hds, title = "Hierarchical Difference Scatterplots - Interactive Visual Analysis of Data Cubes", author = "Harald Piringer and M. Buchetics and Helwig Hauser and Eduard Gr\"{o}ller", year = "2009", abstract = "Data cubes as employed by On-Line Analytical Processing (OLAP) play a key role in many application domains. The analysis typically involves to compare categories of different hierarchy levels with respect to size and pivoted values. Most existing visualization methods for pivoted values, however, are limited to single hierarchy levels. The main contribution of this paper is an approach called Hierarchical Difference Scatterplot (HDS). 
A HDS allows for relating multiple hierarchy levels and explicitly visualizes differences between them in the context of the absolute position of pivoted values. We discuss concepts of tightly coupling HDS to other types of tree visualizations and propose the integration in a setup of multiple views, which are linked by interactive queries on the data. We evaluate our approaches by analyzing social survey data in collaboration with a domain expert.", month = jun, isbn = "978-1-60558-670-0", publisher = "ACM", location = "Paris, France", editor = "Kai Puolam\"{a}ki", booktitle = "Proceedings of the ACM SIGKDD Workshop on Visual Analytics and Knowledge Discovery (VAKD)", pages = "56--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/piringer-2009-hds/", } @article{weidlich-2009-dispersion, title = "Anomalous Dispersion in Predictive Rendering", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "In coloured media, the index of refraction does not decrease monotonically with increasing wavelength, but behaves in a quite non-monotonical way. This behaviour is called anomalous dispersion and results from the fact that the absorption of a material influences its index of refraction. So far, this interesting fact has not been widely acknowledged by the graphics community. In this paper, we demonstrate how to calculate the correct refractive index for a material based on its absorption spectrum with the Kramers-Kronig relation, and we discuss for which types of objects this effect is relevant in practice. 
", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1065--1072", keywords = "Predictive rendering, Spectral Rendering, Dispersion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-dispersion/", } @inproceedings{Reiter_2009_IXIA, title = "Improvement of X-Ray image acquisition using a GPU based 3DCT simulation tool", author = "Michael Reiter and Muhammad Muddassir Malik and Christoph Heinzl and Dietmar Salaberger and Eduard Gr\"{o}ller and Hubert Lettenbauer and Johann Kastner", year = "2009", abstract = "This paper presents a simulation tool for industrial X-Ray computed tomography (CT) systems which is able to predict the results of real measurements. Such a prediction helps the technician in measurement technology to minimize artefacts by using optimal measurement parameters and therefore it helps to get more accurate results. The presented simulation software offers an implementation for CPU’s and GPU’s. The performance difference between these implementa-tions is shown, for a specific test part. Furthermore a parameter variation has been carried out, to illustrate the influence of the acquisition settings. 
We use a multi-image view tool to compare and evaluate the acquired dataset series which contains CT data gained with different X-Ray source voltages and a different number of projections.", month = may, note = "not peer reviewed, will appear", location = "Wels, Austria", booktitle = "International Conference on Quality Control by Artificial Vision", keywords = "Computed tomography, CT simulation, Industrial X-Ray Imaging", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Reiter_2009_IXIA/", } @inproceedings{kohlmann-2009-cp, title = "Contextual Picking of Volumetric Structures", author = "Peter Kohlmann and Stefan Bruckner and Armin Kanitsar and Eduard Gr\"{o}ller", year = "2009", month = may, isbn = "978-1-4244-4404-5", location = "Peking, China", editor = "Peter Eades, Thomas Ertl, Han-Wei Shen", booktitle = "Proceedings of the IEEE Pacific Visualization Symposium 2009", pages = "185--192", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-cp/", } @inproceedings{weidlich_2009_REL, title = "Rendering the Effect of Labradorescence", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "Labradorescence is a complex optical phenomenon that can be found in certain minerals, such as Labradorite or Spectrolite. Because of their unique colour properties these minerals are often used as gemstones and decorative objects. Since the phenomenon is strongly orientation dependent, such minerals need a special cut to make the most of their unique type of colourful sheen, which makes it desirable to be able to predict the final appearance of a given stone prior to the cutting process. Also, the peculiar properties of the effect make a believable replication with an ad-hoc shader difficult even for normal, non-predictive rendering purposes. We provide a reflectance model for labradorescence that is directly derived from the physical characteristics of such materials. 
Due to its inherent accuracy, it can be used for predictive rendering purposes, but also for generic rendering applications. ", month = may, isbn = "978-1-56881-470-4", publisher = "ACM", location = "Kelowna, British Columbia, Canada ", booktitle = "Proceedings of Graphics Interface 2009", pages = "79--85", keywords = "Predictive Rendering, Surface, Crystals", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich_2009_REL/", } @mastersthesis{kusternig-2009-rtr, title = "Real-Time Rendering of Dynamic Vegetation", author = "Alexander Kusternig", year = "2009", abstract = "Plants are present in almost any type of interactive virtual environment like video games, movie pre-visualization or architectural or urban walkthroughs. The simulation complexity of plants increases with the evolution of graphics hardware, but rendering of plants still poses a lot of challenges. This is due to both the inherent geometric complexity of an individual tree having thousands of branches and tens of thousands of leaves, and the complex light interactions between the plant and sunlight. A portion of incoming light is transmitted through leaves, resulting in the bright translucency effect observed when looking at a leaf against the sun. Animating plants is another challenge, as thousands of interconnected branches and individual leaves have to react to turbulent wind moving through the treetop. All this should be performed at more than 60 frames per second for real-time interactive applications. This thesis presents novel algorithms to render leaves at very high detail with a physically based translucency model and to animate branches and leaves using a stochastic approach based on their physical properties. Both algorithms are executed entirely on the GPU in vertex and pixel shaders, so they can be easily integrated into any modern rendering pipeline. 
The efficiency of the algorithms allows rendering and animating highly detailed plants with thousands of branches and tens of thousands of leaves at a frame rate of 60 frames per second.", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kusternig-2009-rtr/", } @mastersthesis{grasberger_2009_CSB, title = "CSB: Combining traditional CSG with Blobs", author = "Herbert Grasberger", year = "2009", month = may, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/grasberger_2009_CSB/", } @inproceedings{patel_2009_MC, title = "Moment Curves", author = "Daniel Patel and Martin Haidacher and Jean-Paul Balabanian and Eduard Gr\"{o}ller", year = "2009", abstract = "We define a transfer function based on the first and second statistical moments. We consider the evolution of the mean and variance with respect to a growing neighborhood around a voxel. This evolution defines a curve in 3D for which we identify important trends and project it back to 2D. The resulting 2D projection can be brushed for easy and robust classification of materials and material borders. 
The transfer function is applied to both CT and MR data.", month = apr, isbn = "978-1-4244-4404-5", location = "Peking, China", editor = "Peter Eades, Thomas Ertl, Han-Wei Shen", booktitle = "Proceedings of the IEEE Pacific Visualization Symposium 2009", pages = "201--208", keywords = "Statistical Moments, Volume Classification, Statistics", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/patel_2009_MC/", } @inproceedings{eibner-2009-gpc, title = "Generating Predictable and Convincing Folds for Leather Seat Design", author = "Gottfried Eibner and Anton Fuhrmann and Werner Purgathofer", year = "2009", month = apr, location = "Budmerice, Slowakei", booktitle = "Proceedings of the 25th Spring Conference on Computer Graphics (SCCG)", pages = "93--96", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/eibner-2009-gpc/", } @phdthesis{zambal-2009-ami, title = "Anatomical Modeling for Image Analysis in Cardiology", author = "Sebastian Zambal", year = "2009", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/zambal-2009-ami/", } @misc{Konyha_2009_survey, title = "Interactive Visual Analysis in Engineering: A Survey", author = "Zoltan Konyha and Kresimir Matkovic and Helwig Hauser", year = "2009", abstract = "Interactive visual analysis has become a very popular research field. There is a significant body of literature on making sense of massive data sets, on visualization and interaction techniques as well as on analysis concepts. However, surveying how those results can be applied to actual engineering problems, including both product and manufacturing design as well as evaluation of simulation and measurement data, has not been discussed sufficiently to date. 
In this paper we provide a selection of demonstration cases that document the potential benefits of using interactive visual analysis in a wide range of engineering domains, including the investigation of flow and particle dynamics, automotive engine design tasks and change management in the product design process. We attempt to identify some of the proven technological details such as the linking of space-time and attribute views through an application-wide coherent selection mechanism. This paper might be an interesting survey for readers with a relation to the engineering sector, both reflecting on available technological building blocks for interactive visual data analysis as well as exemplifying the potential benefits on behalf of the application side.", month = apr, publisher = "ACM", location = "Budmerice, Slowakei", issn = "ISSN 1335-5694", booktitle = "Posters at SCCG 2009", Conference date = "Poster presented at (2009-04-23--2009-04-25)", note = "31--38", pages = "31 – 38", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Konyha_2009_survey/", } @inproceedings{solteszova-avp-2009, title = "Advanced Volume Painting with Game Controllers", author = "Veronika Solteszova and Maurice Termeer and Eduard Gr\"{o}ller", year = "2009", month = apr, location = "Budmerice, Slowakei", booktitle = "Proceedings of the 25th Spring Conference on Computer Graphics (SCCG)", pages = "125--132", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/solteszova-avp-2009/", } @article{Habel_09_PGT, title = "Physically Guided Animation of Trees", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2009", abstract = "This paper presents a new method to animate the interaction of a tree with wind both realistically and in real time. The main idea is to combine statistical observations with physical properties in two major parts of tree animation. 
First, the interaction of a single branch with the forces applied to it is approximated by a novel efficient two step nonlinear deformation method, allowing arbitrary continuous deformations and circumventing the need to segment a branch to model its deformation behavior. Second, the interaction of wind with the dynamic system representing a tree is statistically modeled. By precomputing the response function of branches to turbulent wind in frequency space, the motion of a branch can be synthesized efficiently by sampling a 2D motion texture. Using a hierarchical form of vertex displacement, both methods can be combined in a single vertex shader, fully leveraging the power of modern GPUs to realistically animate thousands of branches and ten thousands of leaves at practically no cost.", month = mar, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2009)", volume = "28", number = "2", issn = "0167-7055", pages = "523--532", keywords = "Animation, Physically Guided animation, Vegetation, Trees", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_09_PGT/", } @misc{kim_2009_iPhone, title = "iPhone/iPod Touch as Input Devices for Navigation in Immersive Virtual Environments", author = "Ji-Sun Kim and Denis Gracanin and Kresimir Matkovic and Francis Quek", year = "2009", abstract = "iPhone and iPod Touch are multi-touch handheld devices that provide new possibilities for interaction techniques. We describe iPhone/iPod Touch implementation of a navigation interaction technique originally developed for a larger multi-touch device (i.e. Lemur). The interaction technique implemented on an iPhone/iPod Touch was used for navigation tasks in a CAVE virtual environment. We performed a pilot study to measure the control accuracy and to observe how human subjects respond to the interaction technique on the iPhone and iPod Touch devices. 
We used the preliminary results to improve the design of the interaction technique.", month = mar, publisher = "IEEE", location = "Lafayette, Louisiana, USA", isbn = "978-1-4244-3943-0", event = "Virtual Reality Conference, 2009", editor = "Anthony Steed, Dirk Reiners, Robert W. Lindeman", booktitle = "Proceedings of IEEE VR 2009", Conference date = "Poster presented at Virtual Reality Conference, 2009 (2009-03-14--2009-03-18)", note = "261--262", pages = "261 – 262", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kim_2009_iPhone/", } @mastersthesis{mueller-2009-ivs, title = "Interaktive Visualisierung Semantischer Graphen", author = "Stefan M\"{u}ller", year = "2009", abstract = "Wissensbasierte Systeme (WBS) stellen f\"{u}r viele Anwendungen einen immer wichtigeren Bereich dar. Aus Anwendersicht erlauben es WBS neuesWissen aus derWissensbasis zu folgern und bestehende Daten effizient zu verwalten. Aus Entwicklersicht wird die schnelle Anpassung an einen Problembereich erm\"{o}glicht, da Wissen von Gesch\"{a}fts-Logik getrennt wird. Das m2n Intelligence Management Framework stellt eine M\"{o}glichkeit dar, WBS, die komplett durch einen semantischen Graphen beschrieben sind, im Rapid-Prototyping-Verfahren zu erstellen. Dieser Graph wird mit Hilfes des Resource Description Framework (RDF) definiert. In dieser Arbeit werden M\"{o}glichkeiten aufgezeigt den semantischen Graphen einer m2n Anwendung zu visualisieren und interaktiv zu manipulieren. Dazu wurden existierendeWerkzeuge zur Visualisierung von RDF- und OWL-Daten sowie Technologien aus dem Bereich der Informations-Visualisierung untersucht. Darauf aufbauend wurde die bestehende Visualisierung \"{u}berarbeitet und Techniken zur direkten Manipulation, zur Fokus & Kontext Visualisierung und zum Linking & Brushing integriert. Die entwickelte Komponente kann auf beliebige Anwendungsf\"{a}lle, wie z.B. 
die Exploration und Modellierung von Ontologie-, Gesch\"{a}ftsprozess- und Instanz-Daten, angepasst werden. Um die Entwicklung zu evaluieren, wurde ein Thinking Aloud Test mit acht Nutzern des Systems durchgef\"{u}hrt. Die Ergebnisse des Tests flie{\ss}en in die Weiterentwicklung der Komponente ein.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/mueller-2009-ivs/", } @incollection{BITTNER-2009-GEFOC, title = "Game-Engine-Friendly Occlusion Culling", author = "Jir\'{i} Bittner and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "This article presents a method which minimizes the overhead associated with occlusion queries. The method reduces the number of required state changes and should integrate easily with most game engines. The key ideas are batching of the queries and interfacing with the game engine using a dedicated render queue. We also present some additional optimizations which reduce the number of queries issued as well as the number of rendered primitives. The algorithm is based on the well-known Coherent Hierarchical Culling algorithm.", month = mar, booktitle = "SHADERX7: Advanced Rendering Techniques", chapter = "8.3", editor = "Wolfang Engel", isbn = "1-58450-598-2", publisher = "Charles River Media", volume = "7", keywords = "real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-GEFOC/", } @phdthesis{Habel_2009_PhD, title = "Real-time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. 
Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display dense and short grass is introduced. In contrast to previous methods, the new approach is based on ray tracing to avoid the massive overdraw of billboard or explicit geometry representation techniques, achieving independence of the complexity of the grass without losing the visual characteristics of grass such as parallax and occlusion effects as the viewpoint moves. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering and special attention is given to the translucency of leaves, an integral part of leaf shading. The light transport through a leaf is precomputed and can be easily evaluated at runtime, making it possible to shade a massive amount of leaves while including the effects that occur due to the leaf structure such as varying albedo and thickness variations or self shadowing. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. This model does not require the branches to be segmented by joints as other methods, achieving smooth and accurate bending, and can be executed fully on a GPU. To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. This allows animating a highly detailed tree with thousands of branches and ten thousands of leaves efficiently. 
Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing to change the skylight parameters in real time at no considerable cost and memory footprint.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Animation, Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_2009_PhD/", } @article{malik-2009-CVFA, title = "Computation and Visualization of Fabrication Artifacts", author = "Muhammad Muddassir Malik and Christoph Heinzl and Eduard Gr\"{o}ller", year = "2009", abstract = "This paper proposes a novel technique to measure fabrication artifacts through direct comparison of a reference surface model with the corresponding industrial CT volume. Our technique uses the information from the surface model to locate corresponding points in the CT dataset. We then compute various comparison metrics to measure differences (fabrication artifacts) between the two datasets. The differences are presented to the user both visually as well as quantitatively. Our comparison techniques are divided into two groups, namely geometry-driven comparison techniques and visual-driven comparison techniques. 
The geometry-driven techniques provide an overview, while the visual-driven techniques can be used for a localized examination.", month = feb, journal = "Journal of WSCG", volume = "17", number = "1", issn = "Online: 1213-6964 (printed: 1213 – 6972)", pages = "17--24", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/malik-2009-CVFA/", } @inproceedings{GRELAUD-2009-EPA, title = "Efficient and Practical Audio-Visual Rendering for Games using Crossmodal Perception", author = "David Grelaud and Nicolas Bonneel and Michael Wimmer and Manuel Asselot and George Drettakis", year = "2009", abstract = "Interactive applications such as computer games, are inherently audio visual, requiring high-quality rendering of complex 3D audio soundscapes and graphics environments. A frequent source of audio events is impact sounds, typically generated with physics engines. In this paper, we first present an optimization allowing efficient usage of impact sounds in a unified audio rendering pipeline, also including prerecorded sounds. We also exploit a recent result on audio-visual crossmodal perception to introduce a new level-of-detail selection algorithm, which jointly chooses the quality level of audio and graphics rendering. 
We have integrated these two techniques as a comprehensive crossmodal audio-visual rendering pipeline in a home-grown game engine, thus demonstrating the potential utility of our approach.", month = feb, isbn = "978-1-60558-429-4", publisher = "ACM", location = "Boston, Massachusetts", address = "New York, NY, USA", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2009", pages = "177--182", keywords = "audio-visual rendering, crossmodal perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/GRELAUD-2009-EPA/", } @inproceedings{glanznig-2009-LAMC, title = "Locally Adaptive Marching Cubes through Iso-value Variation", author = "Michael Glanznig and Muhammad Muddassir Malik and Eduard Gr\"{o}ller", year = "2009", abstract = "We present a locally adaptive marching cubes algorithm. It is a modification of the marching cubes algorithm where instead of a global iso-value each grid point has its own iso-value. This defines an iso-value field, which modifies the case identification process in the algorithm. The marching cubes algorithm uses linear interpolation to compute intersections of the surface with the cell edges. Our modification computes the intersection of two general line segments, because there is no longer a constant iso-value at each cube vertex. An iso-value field enables the algorithm to correct biases within the dataset like low frequency noise, contrast drifts, local density variations and other artefacts introduced by the measurement process. 
It can also be used for blending between different isosurfaces (e.g., skin, veins and bone in a medical dataset).", month = feb, isbn = "978-80-86943-93-0", location = "Plzen, Tschechien", editor = "Vaclav Skala", booktitle = "Proceedings of the International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision", pages = "33--40", keywords = "isosurface correction, iso-value field, contouring, marching cubes, blending between isosurfaces", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/glanznig-2009-LAMC/", } @mastersthesis{froschauer-2009-iod, title = "Interactive Optimization, Distance Computation and Data Estimation in Parallel Coordinates", author = "Matthias Froschauer", year = "2009", abstract = "The field of information visualization tries to find graphical representations of data to explore regions of interest in potentially large data sets. Additionally, the use of algorithms to obtain exact solutions, which cannot be provided by basic visualization techniques, is a common approach in data analysis. This work focuses on optimization, distance computation and data estimation algorithms in the context of information visualization. Furthermore, information visualization is closely connected to interaction. To involve human abilities in the computation process, the goal is to embed these algorithms into an interactive environment. In an analysis dialog, the user observes the current solution, interprets the results and then formulates a strategy of how to proceed. This forms a tight loop of interaction, which uses human evaluation to improve the quality of the results. Optimization is a crucial approach in decision making. This work presents an interactive optimization approach, exemplified by parallel coordinates, which are a common visualization technique when dealing with multi-dimensional problems. 
According to this goal-based approach, multi-dimensional distance computation is discussed as well as a data estimation approach with the objective of approximating simulations by the analysis of existing values. All these approaches are integrated in an existing visual analysis framework and deal with multi-dimensional goals, which can be defined and modified interactively by the user. The goal of this work is to support decision makers to extract useful information from large data sets.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/froschauer-2009-iod/", } @mastersthesis{schwaerzler-2009-ass, title = "Accurate Soft Shadows in Real-Time Applications", author = "Michael Schw\"{a}rzler", year = "2009", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/schwaerzler-2009-ass/", } @mastersthesis{fritz-2009-ieq, title = "Interactive Exploration and Quantification of Industrial CT Data", author = "Laura Fritz", year = "2009", abstract = "Non-destructive testing (NDT) is a key aspect of present day engineering and development which examines the internal structures of industrial components such as machine parts, pipes and ropes without destroying them. Industrial pieces require critical inspection before they are assembled into a finished product in order to ensure safety, stability, and usefulness of the finished object. 
Therefore, the goal of this thesis is to explore industrial Computed Tomography (CT) volumes, with the goal to facilitate the whole quantification approach of the components at hand by bridging the gap between visualization on the one hand, and interactive quantification of features or defects on the other one. The standard approach for defect detection in industrial CT builds on region growing, which requires manually tuning parameters such as target ranges for density and size, variance, as well as sometimes also the specification of seed points. To circumvent repeating the whole process if the region growing results are not satisfactory, the method presented in this thesis allows interactive exploration of the parameter space. The exploration process is completely separated from region growing in an unattended pre-processing stage where the seeds are set automatically. The pre-computation results in a feature volume that tracks a feature size curve for each voxel over time, which is identified with the main region growing parameter such as variance. Additionally, a novel 3D transfer function domain over (density, feature size, time) is presented which allows for interactive exploration of feature classes. Features and feature size curves can also be explored individually, which helps with transfer function specification and allows coloring individual features and disabling features resulting from CT artifacts. Based on the classification obtained through exploration, the classified features can be quantified immediately. 
The visualization and quantification results of this thesis are demonstrated on different real-world industrial CT data sets.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fritz-2009-ieq/", } @misc{termeer-2009-scmr, title = "Patient-Specific Coronary Artery Supply Territory AHA Diagrams", author = "Maurice Termeer and Javier Oliv\'{a}n Besc\'{o}s and Marcel Breeuwer and Anna Vilanova i Bartroli and Frans Gerritsen and Eduard Gr\"{o}ller and Eike Nagel", year = "2009", abstract = "Introduction: The American Heart Association proposed a 17-segment model for the segmentation of the left ventricle together with a mapping from each segment to a supplying coronary artery. This proposal is based on population averages. Several studies have confirmed the inaccuracy of this mapping due to large anatomical variations of the coronary arteries among individuals. Several proposals have been made for a different mapping between the 17 segments and the coronary arteries. Purpose: Due to the large variation in coronary anatomy there is a need for a patient-specific assignment of ventricular segments to supplying coronary arteries. We propose to use a segmentation of the coronary arteries and the ventricular epicardium to compute this patient-specific mapping. Methods: The three primary coronary arteries (LAD, LCX and RCA) and the left ventricle are segmented in a whole-heart MRI (SSFP) or CT scan of at least 150 slices. For the coronary arteries we employ a semi-automatic vessel tracking algorithm. The left ventricle is segmented using a fully automatic approach. The epicardial surface of the resulting segmentation is represented as a quadrilateral mesh. The centerlines of the coronary arteries are projected on the epicardial surface. 
A Voronoi diagram of the projected arteries is computed using a geodesic distance metric. The patient-specific coronary supply territories are computed using a modified marching squares algorithm. The examples given here consist of three territories, but our approach is flexible enough to handle any amount of territories. Both the coronary supply territories and the coronary arteries are projected onto a bull’s eye plot using a parameterization of the left ventricle based on cylindrical coordinates, using the cardiac long axis as the primary axis of the cylinder (Figure 1a). The continuous nature of the epicardial surface is preserved in this projection. This means that the bull’s eye plot does not consist of rings representing slices, but that the distance to the center is proportional to the distance to the apex. This bull’s eye plot can for example be used as an overlay for the analysis of viability (Figure 1b). Figure 1. (a) Bull’s eye plot showing patient-specific coronary supply territories. The dotted lines represent the 17-segment model. (b) Patient-specific coronary supply territories as an overlay on a bull’s eye plot of a late enhancement scan. Results: We evaluated our method on image data from five patients. For each patient we produced both a standard 17-segment diagram and a diagram with the projection of the patient-specific coronary supply territories resulting from our approach. In both diagrams a projection of the segmented coronary arteries was shown. We then asked an experienced clinician to judge the correspondence between the coronary arteries and the suggested coronary supply territories for both diagrams. It was judged that our patient-specific coronary supply territories provide a better correlation with the position of the coronary arteries. The clinician expressed a preference to our method as compared to the standard 17-segment model. 
The continuous relation between the distance to the center of the bull’s eye plot and the distance to the apex caused some confusion with our clinician. Especially in combination with CMR data consisting of relatively few slices this relation should be clarified. Conclusion: With our method the relation between coronary arteries and areas supplied by these arteries is better visualized. This will help to better correlate the location of infarcted or ischemic areas to the coronaries that have caused the respective infarction or ischemia.", month = jan, journal = "Journal of Cardiovascular Magnetic Resonance", volume = "11", series = "1", location = "Orlando, Florida", issn = "1532-429X ", event = "SCMR 2009", booktitle = "Abstracts of the 12th Annual SCMR Scientific Sessions - 2009", Conference date = "Poster presented at SCMR 2009 (2009-01-29--2009-02-01)", note = "164--165", pages = "164 – 165", keywords = "coronary supply territories, patient-specific bulls eye plot", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/termeer-2009-scmr/", } @phdthesis{weidlich-2009-thesis, title = "Pseudochromatic Colourisation of Crystals in Predictive Image Synthesis", author = "Andrea Weidlich", year = "2009", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Predictive rendering, Crystal rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-thesis/", } @book{Habel-09-THB, title = "Real-time Rendering and Animation of Vegetation: Advances in displaying vegetation for interactive applications", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. 
Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display grass is introduced. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering and special attention is given to the translucency of leaves, an integral part of leaf shading. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing to change the skylight parameters in real time. ", isbn = "978-3838104997", pages = "140", publisher = "Suedwestdeutscher Verlag fuer Hochschulschriften", keywords = "Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel-09-THB/", } @studentproject{Kinkelin_2009, title = "GPU Volume Raycasting using Bounding Interval Hierarchies", author = "Martin Kinkelin", year = "2009", abstract = "Traditional Direct Volume Raycasting (DVR) on the GPU is limited to uniform voxel grids stored as 3D textures. This approach is not optimal for sparse data sets or data sets with highly varying frequencies because it requires a trade-off between data structure size and the maximum reproducible frequency and it lacks implicit empty space skipping during raycasting. 
In this paper we present another approach, applying the Bounding Interval Hierarchy (BIH), a hierarchical spatial subdivision of elements traditionally used to accelerate surface raytracing, to volume raycasting on the GPU. Although connectivity information between voxels is lost and the texture filtering power of GPUs cannot be exploited, we show that it may be a viable alternative for DVR and that the approach is generic, allowing all sorts of renderable voxels (not-overlapping finite volume elements/primitives such as cuboids, ellipsoids and truncated Radial Basis Functions) for different tasks like rendering point sets as particle systems (e.g. using spherical voxels) and rendering volumes derived from traditional uniform grids (with implicit empty space skipping and the option for different levels of detail).", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Kinkelin_2009/", } @inproceedings{wilkie-209.pr, title = "Predictive Rendering", author = "Alexander Wilkie and Andrea Weidlich and Marcus Magnor and A. Chalmers", year = "2009", abstract = "This course intends to serve two closely related purposes: to provide an accurate definition of the term predictive rendering, the sub-discipline of computer graphics that attempts to provide reliable predictions of object appearance, and to present the technological foundations on which one can currently conduct research in this area. The first goal of the course – a clear definition of what predictive rendering actually is – seems to be necessary due to the extreme prevalence of its antonym, believable rendering. Practically all contemporary production computer graphics, as well as most current graphics research efforts, fall into the latter category. Coupled with the fact that in the collective mindset of the graphics community, the distinction between these areas has, for a variety of reasons, been somewhat blurry so far, a precise clarifying statement appears to be in order. 
The second, much larger and technical part of the course then presents the foundations of current predictive rendering. Unlike believable rendering, where any technology that delivers visually convincing (as opposed to radiometrically correct) results is acceptable for a given task, a predictive pipeline suffers from the fundamental problem that all components – modeling, rendering, display – have to be of a uniformly high quality to ensure a predictive result. This fact has, over time, no doubt served to deter many researchers from entering this promising and interesting field, in which a large amount of work still lies ahead of us. In the second part, we cover an entire predictive rendering pipeline, and for each stage present those – in some cases surprisingly few – graphics technologies that can be used in such a context. This course should enable anyone with a solid background in graphics to bootstrap a basic predictive rendering environment, with which further research, or perhaps even specialised production work, can be conducted.", booktitle = "SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/wilkie-209.pr/", } @article{fuchs-2009-ncp, title = "Non-convex Polyhedral Volume of Interest Selection", author = "Raphael Fuchs and Volkmar Welker and Joachim Hornegger", year = "2009", abstract = "We introduce a novel approach to specify and edit volumes of interest (VOI for short) interactively. Enhancing the capabilities of standard systems we provide tools to edit the VOI by defining a not necessarily convex polyhedral bounding object. We suggest to use low-level editing interactions for moving, inserting and deleting vertices, edges and faces of the polyhedron. The low-level operations can be used as building blocks for more complex higher order operations fitting the application demands. 
Flexible initialization allows the user to select within a few clicks convex VOI that in the classical clipping plane model need the specification of a large number of cutting planes. In our model it is similarly simple to select non-convex VOI. Boolean combinations allow to select non-connected VOI of arbitrary complexity. The polyhedral VOI selection technique enables the user to define VOI with complex boundary structure interactively, in an easy to comprehend and predictable manner.", issn = "0895-6111", journal = "Journal of Computerized Medical Imaging and Graphics", number = "2", volume = "34", pages = "105--113", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fuchs-2009-ncp/", } @article{fuchs-vom-2009, title = "Visualization of Multi-variate Scientific Data", author = "Raphael Fuchs and Helwig Hauser", year = "2009", abstract = "In this state-of-the-art report we discuss relevant research works related to the visualization of complex, multivariate data. We discuss how different techniques take effect at specific stages of the visualization pipeline and how they apply to multi-variate data sets being composed of scalars, vectors and tensors. We also provide a categorization of these techniques with the aim for a better overview of related approaches. Based on this classification we highlight combinable and hybrid approaches and focus on techniques that potentially lead towards new directions in visualization research. 
In the second part of this paper we take a look at recent techniques that are useful for the visualization of complex data sets either because they are general purpose or because they can be adapted to specific problems.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "6", volume = "28", pages = "1670--1690", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fuchs-vom-2009/", } @article{patel-2009-kav, title = "Knowledge-assisted visualization of seismic data", author = "Daniel Patel and {\O}yvind Sture and Helwig Hauser and Christopher Giertsen and Eduard Gr\"{o}ller", year = "2009", issn = "0097-8493", journal = "Computers \& Graphics", note = "Publisher: Elsevier", number = "5", volume = "33", pages = "585--596", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/patel-2009-kav/", } @article{preim-2009-sve, title = "Survey of the Visual Exploration and Analysis of Perfusion Data", author = "Bernhard Preim and Steffen Oeltze and Matej Mlejnek and Eduard Gr\"{o}ller and Anja Hennemuth", year = "2009", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "2", volume = "15", pages = "205--220", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/preim-2009-sve/", } @studentproject{Fuerst-2009-dfr, title = "Dynamic Fur Rendering", author = "Rene F\"{u}rst", year = "2009", abstract = "Fur is hard to render by conventional means, so usually specialized techniques are necessary to render realistic fur in a reasonable amount of time. Interactive fur rendering techniques exist, but usually there is no implementation readily available for artists to use. The goal of this project was to implement a dynamic fur rendering method like the dynamic fur simulation of Tomohide Kano in the vvvv toolkit. 
This project will be used by FriendlyFire for an interactive demo application (friendlyfire.at).", keywords = "Hair, Friendly Fire, vvvv, Fur", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Fuerst-2009-dfr/", } @inproceedings{WIMMER-2009-CSR, title = "Casting Shadows in Real Time", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michael Wimmer", year = "2009", abstract = "Shadows are crucial for enhancing realism and provide important visual cues. In recent years, many important contributions have been made both for hard shadows and soft shadows. Often spurred by the tremendous increase of computational power and capabilities of graphics hardware, much progress has been made concerning visual quality and speed, making high-quality real-time shadows a reachable goal. But with the growing wealth of available choices, it is particularly difficult to pick the right solution and assess shortcomings. Because currently there is no ultimate approach available, algorithms should be selected in accordance to the context in which shadows are produced. The possibilities range across a wide spectrum; from very approximate but really efficient to slower but accurate, adapted only to smaller or only to larger sources, addressing directional lights or positional lights, or involving GPU- or CPU-heavy computations. This course tries to serve as a guide to better understand limitations and failure cases, advantages and disadvantages, and suitability of the algorithms for different application scenarios. 
We will focus on real-time to interactive solutions but also discuss offline approaches if needed for a better understanding.", booktitle = "ACM SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", publisher = "ACM", note = "Lecturer: Daniel Scherzer", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-CSR/", } @talk{WIMMER-2009-ITC, title = "IT in Computerspielen: Ausbildung und Forschung", author = "Michael Wimmer", year = "2009", event = "Veranstaltungsreihe Was IT alles kann", location = "Techgate Vienna", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ITC/", } @studentproject{knecht-2009-TAS, title = "Texture-atlases for large scenes", author = "Wolfgang Knecht", year = "2009", abstract = "This application generates texture-atlases for large scenes in order to create objects that are optimal for rendering with online occlusion culling", note = "1", keywords = "texture atlas, online occlusion culling, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/knecht-2009-TAS/", } @phdthesis{rautek-2009-vmv, title = "Semantic Visualization Mapping for Volume Illustration", author = "Peter Rautek", year = "2009", abstract = "Scientific visualization is the discipline of automatically rendering images from scientific data. Adequate visual abstractions are important to show relevant information in the data. Visual abstractions are a trade-off between showing detailed information and preventing visual overload. To use visual abstractions for the depiction of data, a mapping from data attributes to visual abstractions is needed. This mapping is called the visualization mapping. This thesis reviews the history of visual abstractions and visualization mapping in the context of scientific visualization. Later a novel visual abstraction method called caricaturistic visualization is presented. The concept of exaggeration is the visual abstraction used for caricaturistic visualization. 
Principles from traditional caricatures are used to accentuate salient details of data while sparsely sketching the context. The visual abstractions described in this thesis are inspired by visual art and mostly by traditional illustration techniques. To make effective use of the recently developed visualization methods, that imitate illustration techniques, an expressive visualization mapping approach is required. In this thesis a visualization mapping method is investigated that makes explicit use of semantics to describe mappings from data attributes to visual abstractions. The semantic visualization mapping explicitly uses domain semantics and visual abstraction semantics to specify visualization rules. Illustrative visualization results are shown that are achieved with the semantic visualization mapping. The behavior of the automatically rendered interactive illustrations is specified using interaction-dependent visualization rules. Interactions like the change of the viewpoint, or the manipulation of a slicing plane are state of the art in volume visualization. In this thesis a method for more elaborate interaction techniques is presented. The behavior of the illustrations is specified with interaction-dependent rules that are integrated in the semantic visualization mapping approach.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/rautek-2009-vmv/", } @phdthesis{scherzer-thesis, title = "Applications of temporal coherence in real-time rendering", author = "Daniel Scherzer", year = "2009", abstract = "Real-time rendering imposes the challenging task of creating a new rendering of an input scene at least 60 times a second. 
Although computer graphics hardware has made staggering advances in terms of speed and freedom of programmability, there still exist a number of algorithms that are too expensive to be calculated in this time budget, like exact shadows or an exact global illumination solution. One way to circumvent this hard time limit is to capitalize on temporal coherence to formulate algorithms incremental in time. The main thesis of this work is that temporal coherence is a characteristic of real-time graphics that can be used to redesign well-known rendering methods to become faster, while exhibiting better visual fidelity. To this end we present our adaptations of algorithms from the fields of exact hard shadows, physically correct soft shadows and fast discrete LOD blending, in which we have successfully incorporated temporal coherence. Additionally, we provide a detailed context of previous work not only in the field of temporal coherence, but also in the respective fields of the presented algorithms.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "shadows, lod, real-time, image-space", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/scherzer-thesis/", } @studentproject{BirsakHanzl-2009, title = "SolarTransporter", author = "Michael Birsak and Michael Hanzl", year = "2009", abstract = "The aim of this project was the development of the Android game SolarTransporter and its public release. SolarTransporter is a 3D game based on the Android plattform. The aim of the game is to transport a given number of passengers to their favoured positions in 3 minutes per level. There are 7 levels, 7 bonus levels, many objects like asteroids and 2 highscores. Use the accelerometer to control the transporter! 
The full documentation is available at: http://www.cg.tuwien.ac.at/courses/projekte_old/vis/finished/MBirsakMHanzl/", keywords = "Android, 3D Game, OpenGL ES", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BirsakHanzl-2009/", } @talk{WIMMER-2009-ARTR, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. 
I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "7th Eurographics Italian Chapter Conference 2009", location = "Verona, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR/", } @talk{WIMMER-2009-VCCG, title = "Visibility Computation in Computer Graphics", author = "Michael Wimmer", year = "2009", abstract = "Visibility computation is an essential part of many computer graphics applications, for example for real-time rendering of very large scenes. Visibility can either be precomputed offline, which is a good strategy for static scenes, or calculated at runtime, which avoids precomputation and works well for dynamic scenes. In this presentation, I will cover the latest advances in both of these principal directions. For visibility precomputation, we have shown that sampling is superior to full geometric approaches for practical applications, due to its efficiency and robustness. For online visibility culling, we show how to make the best possible use of hardware occlusion queries without introducing latency and overhead.", event = "14th Computer Vision Winter Workshop (CVWW2009)", location = "Eibiswald, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-VCCG/", } @studentproject{fiedler-2009-php, title = "Procedural Human Posing Using CGA Grammars", author = "Stefan Fiedler", year = "2009", abstract = "This report presents a grammar for the procedural modeling of humanoid characters and poses based on CGA shape, a shape grammar for computer generated architecture. Various models can be derived with the same set of parametrized rules for geometric operations, and include a skeletal system for posing. 
The main part of this report defines basic rules and their effect on shapes and skeletons and discusses the results of an actual implementation of the grammar with examples.", keywords = "shape grammars, computer animation, character modeling, procedural modeling, computational geometry", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/fiedler-2009-php/", } @phdthesis{termeer-2009-cvc, title = "Comprehensive Visualization of Cardiac MRI Data", author = "Maurice Termeer", year = "2009", abstract = "Coronary artery disease is one of the leading causes of death in the western world. The continuous improvements in magnetic resonance imaging technology facilitate more accurate diagnoses by providing increasingly more detailed information on the viability, functioning, perfusion, and anatomy of a patient’s heart. This increasing amount of information creates the need for more efficient and more effective means of processing these data. This thesis presents several novel techniques that facilitate a more comprehensive visualization of a patient’s heart to assist in the diagnosis of coronary artery disease using magnetic resonance imaging (MRI). The volumetric bull’s eye plot is introduced as an extension of an existing visualization technique used in clinical practice---the bull’s eye plot. This novel concept offers a more comprehensive view on the viability of a patient’s heart by providing detailed information on the transmurality of scar while not suffering from discontinuities. Anatomical context is often lost due to abstract representations of data, or may be scarce due to the nature of the scanning protocol. Several techniques to restore the relation to anatomy are presented. The primary coronary arteries are segmented in a whole heart scan and mapped onto a volumetric bull’s eye plot, adding anatomical context to an abstract representation. 
Similarly, segmented late enhancement data are rendered along with a three-dimensional segmentation of the patient-specific myocardial and coronary anatomy. Additionally, coronary supply territories are computed from patient-specific data as an improvement over models based on population averages. Information on the perfusion of the myocardium provided by MRI is typically of fairly low resolution. Using high-resolution anatomical data, an approach to visualize simulated myocardial perfusion is presented, taking full advantage of the detailed information on perfusion. Finally, a truly comprehensive visualization of a cardiac MRI exam is explored by combining whole heart, late enhancement, functional, and perfusion scans in a single visualization. The concepts introduced help to build a more comprehensive view of the patient and the additional information may prove to be beneficial for the diagnostic process.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Cardiac MRI Visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/termeer-2009-cvc/", } @studentproject{PREINER-2009-GIPC, title = "Global Illumination for Point Clouds", author = "Reinhold Preiner", year = "2009", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/PREINER-2009-GIPC/", } @talk{WIMMER-2009-ARTR2, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. 
While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects. In this talk, I will cover some of the recent advances we had in our group. First, we try to integrate procedural modeling techniques with the new parallel programming paradigms made commonly available through modern GPUs, and map L-system generation onto hardware to accelerate the generation of large L-systems. Then, I'll briefly show some results for really large scale visualization and editing of a huge point-based model consisting of over 1.2 Billion point samples of a Roman catacomb. Finally, I will treat a new approach to handle the classical visibility problem, where we show how to calculate visibility of a whole scene by exploiting the spatial coherence of visibility, thus accelerating the process so it becomes viable for interactive scene design. ", event = "University of Erlangen Research Seminar", location = "Erlangen, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR2/", } @talk{WIMMER-2009-ARTR3, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. 
In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "25th Spring Conference on Computer Graphics (SCCG2009)", location = "Budmerice, Slovakia", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR3/", } @article{bhagvat-09-frusta, title = "GPU Rendering of Relief Mapped Conical Frusta", author = "Deepali Bhagvat and Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "This paper proposes to use relief-mapped conical frusta (cones cut by planes) to skin skeletal objects. Based on this representation, current programmable graphics hardware can perform the rendering with only minimal communication between the CPU and GPU. A consistent definition of conical frusta including texture parametrization and a continuous surface normal is provided. Rendering is performed by analytical ray casting of the relief-mapped frusta directly on the GPU. 
We demonstrate both static and animated objects rendered using our technique and compare to polygonal renderings of similar quality.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "28", pages = "2131--2139", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bhagvat-09-frusta/", } @talk{groeller-cvc-2009, title = "Comprehensive Visualization of Cardiac MRI Data", author = "Eduard Gr\"{o}ller", year = "2009", event = "AMI-ARCS 2009, 5th Workshop on Augmented Environments for Medical Imaging including augmented Reality in Computer-Aided Surgery", location = "Imperial College London, UK", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/groeller-cvc-2009/", } @talk{groeller-scv-2009, title = "Scientific Visualization", author = "Eduard Gr\"{o}ller", year = "2009", event = "Dagstuhl Seminar 09251", location = "Schloss Dagstuhl, Deutschland", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/groeller-scv-2009/", } @talk{groeller-svo-2009, title = "(Scientific) Visualization: Overview and own Research Contributions", author = "Eduard Gr\"{o}ller", year = "2009", event = "Kolloquium an der Fakult\"{a}t f\"{u}r Informatik, Uni Wien", location = "Wien, \"{O}sterreich", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/groeller-svo-2009/", } @talk{groeller-vks-2009, title = "Visualization with Knowledge and Style", author = "Eduard Gr\"{o}ller", year = "2009", event = "Keynote talk at International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG)", location = "Plzen, Tschechien", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/groeller-vks-2009/", } @phdthesis{kohlmann-2009-lssl, title = "LiveSync: Smart Linking of 2D and 3D Views in Medical Applications", author = "Peter Kohlmann", year = "2009", abstract = "In this thesis two techniques for the smart linking of 2D and 3D views in medical applications are presented. 
Although real-time interactive 3D volume visualization is available even for very large data sets, it is used quite rarely in the clinical practice. A major obstacle for a better integration in the clinical workflow is the time-consuming process to adjust the parameters to generate diagnostically relevant images. The clinician has to take care of the appropriate viewpoint, zooming, transfer function setup, clipping planes, and other parameters. Because of this, current applications primarily employ 2D views generated through standard techniques such as multi-planar reformatting (MPR). The LiveSync interaction metaphor is a new concept to synchronize 2D slice views and 3D volumetric views of medical data sets. Through intuitive picking actions on the slice, the users define the anatomical structures they are interested in. The 3D volumetric view is updated automatically with the goal that the users are provided with diagnostically relevant images. To achieve this live synchronization a minimal set of derived information, without the need for segmented data sets or data-specific precomputations, is used. The presented system provides the physician with synchronized views which help to gain deeper insight into the medical data with minimal user interaction. Contextual picking is a novel method for the interactive identification of contextual interest points within volumetric data by picking on a direct volume rendered image. In clinical diagnostics the points of interest are often located in the center of anatomical structures. In order to derive the volumetric position, which allows a convenient examination of the intended structure, the system automatically extracts contextual meta information from the DICOM (Digital Imaging and Communications in Medicine) images and the setup of the medical workstation. Along a viewing ray for a volumetric picking, the ray profile is analyzed to detect structures which are similar to predefined templates from a knowledge base. 
It is demonstrated that the obtained position in 3D can be utilized to highlight a structure in 2D slice views, to interactively calculate approximate centerlines of tubular objects, or to place labels at contextually-defined 3D positions.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/kohlmann-2009-lssl/", } @studentproject{beham_2009.2009-EVS, title = "Enhancing Vienna Scene", author = "Michael Beham and Manuel Hochmayr", year = "2009", abstract = "Enhancing the Vienna scene for the FriendlyCulling occlusion culling application with dynamic objects.", keywords = "Occlusion Culling, Modelling, Maya", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/beham_2009.2009-EVS/", } @inproceedings{weidlich_2009_EPLBM, title = "Exploring the Potential of Layered BRDF Models", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "This course serves as a guide on the considerable potential of layered surface models. The key advantage of using such layered BRDFs over traditional, more general shading language constructs is that the end result is automatically highly physically plausible. In particular, we demonstrate on a simple layered surface model that combines several traditional BRDF components how a surprisingly large number of interesting and important surface types can be efficiently represented by using the same, not particularly complex, BRDF code. We also show how handy such an approach is for the eventual end user, whose main concern is the ease with which one can describe object appearance based only on a few intuitive parameters. We first discuss layered surface models in computer graphics and the constraints of modelling object appearance in a physically plausible fashion. 
We then demonstrate the techniques that can be used to efficiently evaluate layered BRDF models, give examples of the surface types that can be described in this way. We also go beyond plain surface models, and showcase how a texture-based combination of layered surface components can be used to describe highly complex object appearance attributes, while implicitly remaining physically plausible.", booktitle = "SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", publisher = "webpage: http://portal.acm.org/citation.cfm?id=1665824", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich_2009_EPLBM/", } @article{Karnik-2010-routevis, title = "Route Visualization using Detail Lenses", author = "Pushpak Karnik and David Cline and Stefan Jeschke and Anshuman Razdan and Peter Wonka", year = "2009", abstract = "We present a method designed to address some limitations of typical route map displays of driving directions. The main goal of our system is to generate a printable version of a route map that shows the overview and detail views of the route within a single, consistent visual frame. Our proposed visualization provides a more intuitive spatial context than a simple list of turns. We present a novel multi-focus technique to achieve this goal, where the foci are defined by points-of-interest (POI) along the route. A detail lens that encapsulates the POI at a finer geospatial scale is created for each focus. The lenses are laid out on the map to avoid occlusion with the route and each other, and to optimally utilize the free space around the route. We define a set of layout metrics to evaluate the quality of a lens layout for a given route map visualization. We compare standard lens layout methods to our proposed method and demonstrate the effectiveness of our method in generating aesthetically pleasing layouts. 
Finally, we perform a user study to evaluate the effectiveness of our layout choices.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "2", volume = "16", pages = "235--247", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Karnik-2010-routevis/", } @xmascard{SCHEIBLAUER-2009-xmas, title = "X-Mas Card", author = "Claus Scheiblauer", year = "2009", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SCHEIBLAUER-2009-xmas/", } @talk{jeschke-09-praguetalk, title = "Diffusion Curve Images--- Rendering in 2 and 3 Dimensions", author = "Stefan Jeschke", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. The first part of the talk presents a new Laplacian surface solver for a stable rendering of DCIs. It consists of a robust rasterization technique to transform the algebraic curves to the discrete image domain, and a variable stencil size diffusion solver that solves the minimal surface problem. The solver is proven to converge to the right solution, it is at least as fast as commonly used multigrid solvers, but much simpler to implement, works for arbitrary image resolutions, as well as 8 bit data. The second part of the talk extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically allocates more texture memory for details close to the observer. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. 
Our results show high quality renderings at interactive frame rates.", event = "Konversatorium Technical University of Prague ", location = "Prague", keywords = "Diffusion curves", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-praguetalk/", } @article{karnik-09-shapegrammar, title = "A Shape Grammar for Developing Glyph-based Visualizations", author = "Pushpak Karnik and Stefan Jeschke and David Cline and Anshuman Razdan and E. Wentz and Peter Wonka", year = "2009", abstract = "In this paper we address the question of how to quickly model glyph-based GIS visualizations. Our solution is based on using shape grammars to set up the different aspects of a visualization, including the geometric content of the visualization, methods for resolving layout conflicts and interaction methods. Our approach significantly increases modeling efficiency over similarly flexible systems currently in use.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "28", pages = "2176--2188", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/karnik-09-shapegrammar/", } @article{gavrilescu-2009-postrend, title = "Post-Rendering Enhancements of Volumes", author = "Marius Gavrilescu and Vasile Manta and Werner Purgathofer", year = "2009", abstract = "The paper presents an approach to visually enhance representations of volume data as a means to improve volume visualization. Direct volume rendering is employed to represent several volume data sets, using the popular Ray Casting algorithm. The result is rendered to a texture via an off-screen framebuffer, which then goes through a post-rendering processing stage. This stage involves the application of image enhancement techniques such as the use of spatial filters, to produce clearer, sharper, and less noisy images of the rendered volume. 
Depending on the specifics of the volumetric data set, post-rendering enhancement may bring forth more relevant visual information or otherwise improve the overall quality of the resulting images.", issn = "1220-2169", journal = "Buletinul Institutului Politehnic din Ia\c{s}i", pages = "43--54", keywords = "feature enhancement, Ray-Casting, image filtering, volume visualization, post-rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/gavrilescu-2009-postrend/", } @studentproject{MuehlbacherArbesser-2009-LMDLightcuts, title = "Lightcuts in CUDA", author = "Thomas M\"{u}hlbacher and Clemens Arbesser", year = "2009", abstract = "Instant Radiosity is a very flexible method for rendering global illumination. Lightcuts is a hierarchical version of Instant Radiosity. This project analyzes and compares a CUDA and a CPU implementation of Lightcuts.", note = "1", keywords = "CUDA, Lightcuts, Instant Radiosity, Lighting and Material Design, Global Illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/MuehlbacherArbesser-2009-LMDLightcuts/", }