@article{KOCH-2021-GVS, title = "Guided Visibility Sampling++", author = "Thomas Bernhard Koch and Michael Wimmer", year = "2021", abstract = "Visibility computation is a common problem in the field of computer graphics. Examples include occlusion culling, where parts of the scene are culled away, or global illumination simulations, which are based on the mutual visibility of pairs of points to calculate lighting. In this paper, an aggressive from-region visibility technique called Guided Visibility Sampling++ (GVS++) is presented. The proposed technique improves the Guided Visibility Sampling algorithm through improved sampling strategies, thus achieving low error rates on various scenes, and being over four orders of magnitude faster than the original CPU-based Guided Visibility Sampling implementation. We present sampling strategies that adaptively compute sample locations and use ray casting to determine a set of triangles visible from a flat or volumetric rectangular region in space. This set is called a potentially visible set (PVS). Based on initial random sampling, subsequent exploration phases progressively grow an intermediate solution. A termination criterion is used to end the PVS search. A modern implementation using the Vulkan graphics API and RTX ray tracing is discussed. Furthermore, we show optimizations that allow for an implementation that is over 20 times faster than a naive implementation.", month = apr, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "4", number = "1", issn = "2577-6193", doi = "10.1145/3451266", pages = "4:1--4:16", keywords = "visibility culling, real-time rendering, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/KOCH-2021-GVS/", } @article{SCHUETZ-2021-PCC, title = "Rendering Point Clouds with Compute Shaders and Vertex Order Optimization", author = "Markus Sch\"{u}tz and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "While commodity GPUs provide a continuously growing range of features and sophisticated methods for accelerating compute jobs, many state-of-the-art solutions for point cloud rendering still rely on the provided point primitives (GL_POINTS, POINTLIST, ...) of graphics APIs for image synthesis. In this paper, we present several compute-based point cloud rendering approaches that outperform the hardware pipeline by up to an order of magnitude and achieve significantly better frame times than previous compute-based methods. Beyond basic closest-point rendering, we also introduce a fast, high-quality variant to reduce aliasing. We present and evaluate several variants of our proposed methods with different flavors of optimization, in order to ensure their applicability and achieve optimal performance on a range of platforms and architectures with varying support for novel GPU hardware features. During our experiments, the observed peak performance was reached rendering 796 million points (12.7 GB) at rates of 62 to 64 frames per second (50 billion points per second, 802 GB/s) on an RTX 3090 without the use of level-of-detail structures. We further introduce an optimized vertex order for point clouds to boost the efficiency of GL_POINTS by a factor of 5x in cases where hardware rendering is compulsory. We compare different orderings and show that Morton-sorted buffers are faster for some viewpoints, while shuffled vertex buffers are faster in others.
In contrast, combining both approaches by first sorting according to Morton code and shuffling the resulting sequence in batches of 128 points leads to a vertex buffer layout with high rendering performance and low sensitivity to viewpoint changes. ", month = jul, journal = "Computer Graphics Forum", volume = "40", number = "4", issn = "1467-8659", doi = "10.1111/cgf.14345", publisher = "Eurographics Association", pages = "115--126", keywords = "point-based rendering, compute shader, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/SCHUETZ-2021-PCC/", } @inproceedings{cardoso-2021-cost, title = "Cost Volume Refinement for Depth Prediction", author = "Joao Afonso Cardoso and Nuno Goncalves and Michael Wimmer", year = "2021", abstract = "Light-field cameras are becoming more popular in the consumer market. Their data redundancy makes it possible, in theory, to accurately refocus images after acquisition and to predict the depth of each point visible from the camera. Combined, these two features allow for the generation of full-focus images, which is impossible in traditional cameras. Multiple methods for depth prediction from light fields (or stereo) have been proposed over the years. A large subset of these methods relies on cost-volume estimates – 3D objects where each layer represents a heuristic of whether each point in the image is at a certain distance from the camera. Generally, this volume is used to regress a depth map, which is then refined for better results. In this paper, we argue that refining the cost volumes is superior to refining the depth maps in order to further increase the accuracy of depth predictions. We propose a set of cost-volume refinement algorithms and show their effectiveness.", month = jan, isbn = "978-1-7281-8809-6", publisher = "IEEE", location = "Milan, Italy", event = "25th International Conference on Pattern Recognition (ICPR)", doi = "10.1109/ICPR48806.2021.9412730", booktitle = "Proceedings of the 25th International Conference on Pattern Recognition", pages = "354--361", keywords = "depth reconstruction, light fields, cost volumes", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/cardoso-2021-cost/", } @article{luidolt-2020-lightperceptionVR, title = "Gaze-Dependent Simulation of Light Perception in Virtual Reality", author = "Laura R. Luidolt and Michael Wimmer and Katharina Kr\"{o}sl", year = "2020", abstract = "The perception of light is inherently different inside a virtual reality (VR) or augmented reality (AR) simulation when compared to the real world. Conventional head-worn displays (HWDs) are not able to display the same high dynamic range of brightness and color as the human eye can perceive in the real world. To mimic the perception of real-world scenes in virtual scenes, it is crucial to reproduce the effects of incident light on the human visual system. In order to advance virtual simulations towards perceptual realism, we present an eye-tracked VR/AR simulation comprising effects for gaze-dependent temporal eye adaptation, perceptual glare, visual acuity reduction, and scotopic color vision. Our simulation is based on medical expert knowledge and medical studies of the healthy human eye. We conducted the first user study comparing the perception of light in a real-world low-light scene to a VR simulation.
Our results show that the proposed combination of simulated visual effects is well received by users and also indicate that an individual adaptation is necessary, because perception of light is highly subjective.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", number = "12", issn = "1077-2626", doi = "10.1109/TVCG.2020.3023604", pages = "3557--3567", keywords = "perception, virtual reality, user studies", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/", } @inproceedings{Kroesl_2020_11_09, title = "CatARact: Simulating Cataracts in Augmented Reality", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Laura R. Luidolt and Matthias H\"{u}rbe and Sonja Karst and Steven Feiner and Michael Wimmer", year = "2020", abstract = "For our society to be more inclusive and accessible, the more than 2.2 billion people worldwide with limited vision should be considered more frequently in design decisions, such as architectural planning. To help architects in evaluating their designs and give medical personnel some insight on how patients experience cataracts, we worked with ophthalmologists to develop the first medically informed, pilot-studied simulation of cataracts in eye-tracked augmented reality (AR). To test our methodology and simulation, we conducted a pilot study with cataract patients between surgeries of their two cataract-affected eyes. Participants compared the vision of their corrected eye, viewing through simulated cataracts, to that of their still affected eye, viewing an unmodified AR view. In addition, we conducted remote experiments via video call, live adjusting our simulation and comparing it to related work, with participants who had cataract surgery a few months before. We present our findings and insights from these experiments and outline avenues for future work.", month = nov, event = "IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", booktitle = "IEEE International Symposium on Mixed and Augmented Reality (ISMAR)", pages = "1--10", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Kroesl_2020_11_09/", } @article{SCHUETZ-2020-MPC, title = "Fast Out-of-Core Octree Generation for Massive Point Clouds", author = "Markus Sch\"{u}tz and Stefan Ohrhallinger and Michael Wimmer", year = "2020", abstract = "We propose an efficient out-of-core octree generation method for arbitrarily large point clouds. It utilizes a hierarchical counting sort to quickly split the point cloud into small chunks, which are then processed in parallel. Levels of detail are generated by subsampling the full data set bottom up using one of multiple exchangeable sampling strategies. We introduce a fast hierarchical approximate blue-noise strategy and compare it to a uniform random sampling strategy.
The throughput, including out-of-core access to disk, generating the octree, and writing the final result to disk, is about an order of magnitude faster than the state of the art, and reaches up to around 6 million points per second for the blue-noise approach and up to around 9 million points per second for the uniform random approach on modern SSDs.", month = nov, journal = "Computer Graphics Forum", volume = "39", number = "7", issn = "1467-8659", doi = "10.1111/cgf.14134", publisher = "John Wiley & Sons, Inc.", pages = "1--13", keywords = "point clouds, point-based rendering, level of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/SCHUETZ-2020-MPC/", } @inproceedings{erler-2020-p2s, title = "Points2Surf: Learning Implicit Surfaces from Point Clouds", author = "Philipp Erler and Paul Guerrero and Stefan Ohrhallinger and Michael Wimmer and Niloy Mitra", year = "2020", abstract = "A key step in any scanning-based asset creation workflow is to convert unordered point clouds to a surface. Classical methods (e.g., Poisson reconstruction) start to degrade in the presence of noisy and partial scans. Hence, deep learning based methods have recently been proposed to produce complete surfaces, even from partial scans. However, such data-driven methods struggle to generalize to new shapes with large geometric and topological variations. We present Points2Surf, a novel patch-based learning framework that produces accurate surfaces directly from raw scans without normals. Learning a prior over a combination of detailed local patches and coarse global information improves generalization performance and reconstruction accuracy. Our extensive comparison on both synthetic and real data demonstrates a clear advantage of our method over state-of-the-art alternatives on previously unseen classes (on average, Points2Surf brings down reconstruction error by 30% over SPR and by 270%+ over deep learning based SotA methods) at the cost of longer computation times and a slight increase in small-scale topological noise in some cases.
Our source code, pre-trained model, and dataset are available at: https://github.com/ErlerPhilipp/points2surf ", month = oct, isbn = "978-3-030-58558-7", series = "Lecture Notes in Computer Science", publisher = "Springer International Publishing", location = "Glasgow, UK (online)", address = "Cham", event = "ECCV 2020", editor = "Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael", doi = "10.1007/978-3-030-58558-7_7", booktitle = "Computer Vision -- ECCV 2020", volume = "12350", pages = "108--124", keywords = "surface reconstruction, implicit surfaces, point clouds, patch-based, local and global, deep learning, generalization", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/erler-2020-p2s/", } @inproceedings{honic-2020-w78, title = "Scan to BIM for the Semi-Automated Generation of a Material Passport for an Existing Building", author = "Meliha Honic and Iva Kovacic and Ildar Gilmutdinov and Michael Wimmer", year = "2020", month = aug, event = "37th International Conference of CIB W78, Sao Paulo (online)", booktitle = "Proceedings of the 37th International Conference of CIB W78", pages = "338--346", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/honic-2020-w78/", } @misc{brugger-2020-tdp, title = "Test Scene Design for Physically Based Rendering", author = "Elias Brugger and Christian Freude and Michael Wimmer", year = "2020", abstract = "Physically based rendering is a discipline in computer graphics which aims at reproducing certain light and material appearances that occur in the real world. Complex scenes can be difficult to compute for rendering algorithms. This paper introduces a new comprehensive test database of scenes that treat different light setups in conjunction with diverse materials and discusses its design principles. A lot of research is focused on the development of new algorithms that can deal with difficult light conditions and materials efficiently. This database delivers a comprehensive foundation for evaluating existing and newly developed rendering techniques. A final evaluation compares results of different rendering algorithms for all scenes.", month = aug, URL = "https://www.cg.tuwien.ac.at/research/publications/2020/brugger-2020-tdp/", } @techreport{freude_2020_rs, title = "R-Score: A Novel Approach to Compare Monte Carlo Renderings", author = "Christian Freude and Hiroyuki Sakai and Karoly Zsolnai-Feh\'{e}r and Michael Wimmer", year = "2020", abstract = "In this paper, we propose a new approach for the comparison and analysis of Monte Carlo (MC) rendering algorithms. It is based on a novel similarity measure called render score (RS) that is specifically designed for MC rendering, statistically motivated, and incorporates bias and variance. Additionally, we propose a comparison scheme that alleviates the need for practically converged reference images (RIs).
Our approach can be used to compare and analyze different rendering methods by revealing detailed (per-pixel) differences and subsequently potential conceptual or implementation-related issues, thereby offering a more informative and meaningful alternative to commonly used metrics.", month = aug, number = "TR-193-02-2020-4", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/freude_2020_rs/", } @article{OTEPKA-2020-PPC, title = "Efficient Loading and Visualization of Massive Feature-Rich Point Clouds Without Hierarchical Acceleration Structures", author = "Johannes Otepka and Gottfried Mandlburger and Markus Sch\"{u}tz and Norbert Pfeifer and Michael Wimmer", year = "2020", abstract = "Nowadays, point clouds are the standard product when capturing reality, independent of scale and measurement technique. In particular, Dense Image Matching (DIM) and Laser Scanning (LS) are state-of-the-art capturing methods for a great variety of applications, producing detailed point clouds with up to billions of points. In-depth analysis of such huge point clouds typically requires sophisticated spatial indexing structures to support potentially long-lasting automated non-interactive processing tasks like feature extraction, semantic labelling, surface generation, and the like. Nevertheless, a visual inspection of the point data is often necessary to obtain an impression of the scene and to roughly check for completeness, quality, and outlier rates of the captured data in advance. Also, intermediate processing results, containing additional per-point computed attributes, may require visual analyses to draw conclusions or to parameterize further processing. Over the last decades, a variety of commercial, free, and open source viewers have been developed that can visualise huge point clouds and colorize them based on available attributes. However, they either have poor loading and navigation performance, visualize only a subset of the points, or require the creation of spatial indexing structures in advance. In this paper, we evaluate a progressive method that is capable of rendering any point cloud that fits in GPU memory in real time, without the need for time-consuming hierarchical acceleration structure generation. In combination with our multi-threaded LAS and LAZ loaders, we achieve a load performance of up to 20 million points per second, display points already while loading, support flexible switching between different attributes, and render up to one billion points with visually appealing navigation behaviour.
Furthermore, loading times of different data sets for different open source and commercial software packages are analysed.", month = aug, journal = "ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences", volume = "XLIII-B2-2020", issn = "1682-1750", doi = "10.5194/isprs-archives-XLIII-B2-2020-293-2020", pages = "293--300", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/OTEPKA-2020-PPC/", } @misc{kerbl-2020-improvencoding, title = "Improved Triangle Encoding for Cached Adaptive Tessellation", author = "Linus Horvath and Bernhard Kerbl and Michael Wimmer", year = "2020", month = jul, location = "online", event = "HPG 2020", Conference date = "Poster presented at HPG 2020 (2020-05-01--2020-06-22)", keywords = "GPU, tessellation, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kerbl-2020-improvencoding/", } @article{zsolnaifeher-2020-pme, title = "Photorealistic Material Editing Through Direct Image Manipulation", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2020", abstract = "Creating photorealistic materials for light transport algorithms requires carefully fine-tuning a set of material properties to achieve a desired artistic effect. This is typically a lengthy process that involves a trained artist with specialized knowledge. In this work, we present a technique that aims to empower novice and intermediate-level users to synthesize high-quality photorealistic materials by only requiring basic image processing knowledge. In the proposed workflow, the user starts with an input image and applies a few intuitive transforms (e.g., colorization, image inpainting) within a 2D image editor of their choice, and in the next step, our technique produces a photorealistic result that approximates this target image. Our method combines the advantages of a neural network-augmented optimizer and an encoder neural network to produce high-quality output results within 30 seconds. We also demonstrate that it is resilient against poorly-edited target images and propose a simple extension to predict image sequences with a strict time budget of 1-2 seconds per image. Video: https://www.youtube.com/watch?v=8eNHEaxsj18", month = jun, journal = "Computer Graphics Forum", volume = "39", number = "4", issn = "1467-8659", doi = "10.1111/cgf.14057", pages = "107--120", keywords = "neural rendering, neural networks, photorealistic rendering, photorealistic material editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/zsolnaifeher-2020-pme/", } @article{luksch_2020, title = "Real-Time Approximation of Photometric Polygonal Lights", author = "Christian Luksch and Lukas Prost and Michael Wimmer", year = "2020", abstract = "We present a real-time rendering technique for photometric polygonal lights. Our method uses a numerical integration technique based on a triangulation to calculate noise-free diffuse shading. We include a dynamic point in the triangulation that provides a continuous near-field illumination resembling the shape of the light emitter and its characteristics. We evaluate the accuracy of our approach with a diverse selection of photometric measurement data sets in a comprehensive benchmark framework. Furthermore, we provide an extension for specular reflection on surfaces with arbitrary roughness that facilitates the use of existing real-time shading techniques.
Our technique is easy to integrate into real-time rendering systems and extends the range of possible applications with photometric area lights.", month = may, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "3", number = "1", issn = "2577-6193", doi = "10.1145/3384537", pages = "4:1--4:18", keywords = "area lights, photometric lights, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luksch_2020/", } @article{schuetz-2020-PPC, title = "Progressive Real-Time Rendering of One Billion Points Without Hierarchical Acceleration Structures", author = "Markus Sch\"{u}tz and Gottfried Mandlburger and Johannes Otepka and Michael Wimmer", year = "2020", abstract = "Research in rendering large point clouds traditionally focused on the generation and use of hierarchical acceleration structures that allow systems to load and render the smallest fraction of the data with the largest impact on the output. The generation of these structures is slow and time-consuming, however, and therefore ill-suited for tasks such as quickly looking at scan data stored in widely used unstructured file formats, or immediately displaying the results of point-cloud processing tasks. We propose a progressive method that is capable of rendering any point cloud that fits in GPU memory in real time, without the need to generate hierarchical acceleration structures in advance. Our method supports data sets with a large amount of attributes per point, achieves a load performance of up to 100 million points per second, displays already loaded data in real time while remaining data is still being loaded, and is capable of rendering up to one billion points using an on-the-fly generated shuffled vertex buffer as its data structure, instead of slow-to-generate hierarchical structures. Shuffling is done during loading in order to allow efficiently filling holes with random subsets, which leads to a higher quality convergence behavior.", month = may, journal = "Computer Graphics Forum", volume = "39", number = "2", issn = "1467-8659", doi = "10.1111/cgf.13911", publisher = "John Wiley & Sons Ltd.", pages = "51--64", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schuetz-2020-PPC/", } @inproceedings{unterguggenberger-2020-fmvr, title = "Fast Multi-View Rendering for Real-Time Applications", author = "Johannes Unterguggenberger and Bernhard Kerbl and Markus Steinberger and Dieter Schmalstieg and Michael Wimmer", year = "2020", abstract = "Efficient rendering of multiple views can be a critical performance factor for real-time rendering applications. Generating more than one view multiplies the amount of rendered geometry, which can cause a huge performance impact. Minimizing that impact has been a target of previous research and GPU manufacturers, who have started to equip devices with dedicated acceleration units. However, vendor-specific acceleration is not the only option to increase multi-view rendering (MVR) performance. Available graphics API features, shader stages and optimizations can be exploited for improved MVR performance, while generally offering more versatile pipeline configurations, including the preservation of custom tessellation and geometry shaders. In this paper, we present an exhaustive evaluation of MVR pipelines available on modern GPUs.
We provide a detailed analysis of previous techniques and hardware-accelerated MVR, and propose a novel method, leading to the creation of an MVR catalogue. Our analyses cover three distinct applications to help gain clarity on overall MVR performance characteristics. Our interpretation of the observed results provides a guideline for selecting the most appropriate technique for various use cases on different GPU architectures.", month = may, isbn = "978-3-03868-107-6", organization = "Eurographics", location = "online", event = "EGPGV 2020", editor = "Frey, Steffen and Huang, Jian and Sadlo, Filip", doi = "10.2312/pgv.20201071", booktitle = "Eurographics Symposium on Parallel Graphics and Visualization", pages = "13--23", keywords = "Real-Time Rendering, Rasterization, Multi-View, OVR_multiview, Geometry Shader, Evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/unterguggenberger-2020-fmvr/", } @inproceedings{kroesl-2020-XREye, title = "XREye: Simulating Visual Impairments in Eye-Tracked XR", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Steven Feiner and Michael Wimmer", year = "2020", abstract = "Many people suffer from visual impairments, which can be difficult for patients to describe and others to visualize. To aid in understanding what people with visual impairments experience, we demonstrate a set of medically informed simulations in eye-tracked XR of several common conditions that affect visual perception: refractive errors (myopia, hyperopia, and presbyopia), cornea disease, and age-related macular degeneration (wet and dry).", month = mar, booktitle = "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", location = "(Atlanta) online", publisher = "IEEE", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/", } @article{PUEYO-2019-SCL, title = "Shrinking City Layouts", author = "Oriol Pueyo and Albert Sabria and Xavier Pueyo and Michael Wimmer and Gustavo Patow", year = "2020", abstract = "One important use of realistic city environments is in the video game industry. When a company works on a game whose action occurs in a real-world environment, a team of designers usually creates a simplified model of the real city. In particular, the resulting city is desired to be smaller in extent to increase playability and fun, avoiding long walks and “boring” neighborhoods. This is manual work, usually started from scratch, where the first step is to take the original city map as input, and from it create the street network of the final city, removing insignificant streets and bringing important places closer together in the process. This first draft of the city street network is like a kind of skeleton with the most important places connected, from which the artist can (and should) start working until the desired result is obtained. In this paper, we propose a solution to automatically generate such a first simplified street network draft. This is achieved by using the well-established seam-carving technique applied to a skeleton of the city layout, built with the important landmarks and streets of the city. The output that our process provides is a street network that reduces the city area as much as the designer wants, preserving landmarks and key streets, while keeping the relative positions between them. For this, we run a shrinking process that reduces the area in an irregular way, prioritizing the removal of areas of less importance.
This way, we achieve a smaller city but retain the essence of the real-world one. To further help the designer, we also present an automatic filling algorithm that adds unimportant streets to the shrunken skeleton.", month = feb, doi = "10.1016/j.cag.2019.11.004", issn = "0097-8493", journal = "Computers & Graphics", volume = "86", pages = "15--26", keywords = "procedural modeling, computer games", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/PUEYO-2019-SCL/", } @misc{SCHUETZ-2019-PCC, title = "Rendering Point Clouds with Compute Shaders", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "We propose a compute-shader-based point cloud rasterizer with up to 10 times higher performance than classic point-based rendering with the GL_POINT primitive. In addition to that, our rasterizer offers 5-byte depth-buffer precision with uniform or customizable distribution, and we show that it is possible to implement a high-quality splatting method that blends together overlapping fragments while still maintaining higher frame rates than the traditional approach.", month = nov, isbn = "978-1-4503-6943-5/19/11", event = "SIGGRAPH Asia", Conference date = "Poster presented at SIGGRAPH Asia (2019-11)", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/SCHUETZ-2019-PCC/", } @inproceedings{Arleo-2019-vis, title = "Sabrina: Modeling and Visualization of Economy Data with Incremental Domain Knowledge", author = "Alessio Arleo and Christos Tsigkanos and Chao Jia and Roger Leite and Ilir Murturi and Manfred Klaffenb\"{o}ck and Schahram Dustdar and Silvia Miksch and Michael Wimmer and Johannes Sorger", year = "2019", abstract = "Investment planning requires knowledge of the financial landscape on a large scale, both in terms of geo-spatial and industry sector distribution. There is plenty of data available, but it is scattered across heterogeneous sources (newspapers, open data, etc.), which makes it difficult for financial analysts to understand the big picture. In this paper, we present Sabrina, a financial data analysis and visualization approach that incorporates a pipeline for the generation of firm-to-firm financial transaction networks. The pipeline is capable of fusing the ground truth on individual firms in a region with (incremental) domain knowledge on general macroscopic aspects of the economy. Sabrina unites these heterogeneous data sources within a uniform visual interface that enables the visual analysis process. In a user study with three domain experts, we illustrate the usefulness of Sabrina, which eases their analysis process.", month = oct, location = "Vancouver, British Columbia, Canada", event = "IEEE Visualization Conference (VIS)", booktitle = "IEEE VIS 2019", keywords = "Visualization, Visual Analytics", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Arleo-2019-vis/", } @talk{WIMMER-2019-CGSG, title = "Computer Graphics for Serious Games", author = "Michael Wimmer", year = "2019", abstract = "Ten years ago, the focus of computer graphics was mostly the quality and speed of image generation, and serious games set in realistic environments profited from these advances. Meanwhile, commercial rendering engines leave little to be desired, but computer graphics research has opened other doors which might be relevant for application in serious games. In this talk, I will present some of our latest advances in computer graphics in simulation, rendering and content generation.
I will show how we can now simulate visual impairments in virtual reality, which could be used in games to create empathy for people affected by these impairments. I will describe how we have advanced point-based rendering techniques to allow incorporating real environments into rendering applications with basically no preprocessing. On the other hand, virtual environments for serious games could be created efficiently by collaborative crowd-sourced procedural modeling. Finally, efficient simulations of floods and heavy rainfall may not only help experts, but might be the basis of serious games to increase public awareness of natural disasters and the effects of climate change.", month = sep, event = "11th International Conference on Virtual Worlds and Games for Serious Applications", location = "Vienna, Austria", keywords = "serious games", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/WIMMER-2019-CGSG/", } @article{Preiner_2019, title = "Gaussian-Product Subdivision Surfaces", author = "Reinhold Preiner and Tamy Boubekeur and Michael Wimmer", year = "2019", abstract = "Probabilistic distribution models like Gaussian mixtures have shown great potential for improving both the quality and speed of several geometric operators. This is largely due to their ability to model large fuzzy data using only a reduced set of atomic distributions, allowing for large compression rates at minimal information loss. We introduce a new surface model that utilizes these qualities of Gaussian mixtures for the definition and control of a parametric smooth surface. Our approach is based on an enriched mesh data structure, which describes the probability distribution of spatial surface locations around each vertex via a Gaussian covariance matrix. By incorporating this additional covariance information, we show how to define a smooth surface via a nonlinear probabilistic subdivision operator based on products of Gaussians, which is able to capture rich details at fixed control mesh resolution. This entails new applications in surface reconstruction, modeling, and geometric compression.", month = jul, journal = "ACM Transactions on Graphics", volume = "38", number = "4", issn = "0730-0301", doi = "10.1145/3306346.3323026", pages = "35:1--35:11", keywords = "Gaussian mixtures, surface reconstruction, subdivision surfaces", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Preiner_2019/", } @article{celarek_adam-2019-qelta, title = "Quantifying the Error of Light Transport Algorithms", author = "Adam Celarek and Wenzel Jakob and Michael Wimmer and Jaakko Lehtinen", year = "2019", abstract = "This paper proposes a new methodology for measuring the error of unbiased physically based rendering algorithms. The current state of the art includes mean squared error (MSE) based metrics and visual comparisons of equal-time renderings of competing algorithms. Neither is satisfying, as MSE does not describe behavior and can exhibit significant variance, and visual comparisons are inherently subjective. Our contribution is two-fold: First, we propose to compute many short renderings instead of a single long run and use the short renderings to estimate MSE expectation and variance as well as per-pixel standard deviation. An algorithm that achieves good results in most runs, but with occasional outliers, is essentially unreliable, which we wish to quantify numerically. We use per-pixel standard deviation to identify problematic lighting effects of rendering algorithms.
The second contribution is the error spectrum ensemble (ESE), a tool for measuring the distribution of error over frequencies. The ESE serves two purposes: It reveals correlation between pixels and can be used to detect outliers, which offset the amount of error substantially.", month = jul, journal = "Computer Graphics Forum", volume = "38", number = "4", doi = "10.1111/cgf.13775", publisher = "The Eurographics Association and John Wiley & Sons Ltd.", pages = "111--121", keywords = "measuring error, light transport, global illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/celarek_adam-2019-qelta/", } @article{CORNEL-2019-IVF, title = "Interactive Visualization of Flood and Heavy Rain Simulations", author = "Daniel Cornel and Andreas Buttinger-Kreuzhuber and Artem Konev and Zsolt Horvath and Michael Wimmer and Raimund Heidrich and J\"{u}rgen Waser", year = "2019", abstract = "In this paper, we present a real-time technique to visualize large-scale adaptive height fields with C1-continuous surface reconstruction. Grid-based shallow water simulation is an indispensable tool for interactive flood management applications. Height fields defined on adaptive grids are often the only viable option to store and process the massive simulation data. Their visualization requires the reconstruction of a continuous surface from the spatially discrete simulation data. For regular grids, fast linear and cubic interpolation are commonly used for surface reconstruction. For adaptive grids, however, there exists no higher-order interpolation technique fast enough for interactive applications. Our proposed technique bridges the gap between fast linear and expensive higher-order interpolation for adaptive surface reconstruction. During reconstruction, no matter if regular or adaptive, discretization and interpolation artifacts can occur, which domain experts consider misleading and unaesthetic. We take into account boundary conditions to eliminate these artifacts, which include water climbing uphill, diving towards walls, and leaking through thin objects. We apply realistic water shading with visual cues for depth perception and add waves and foam synthesized from the simulation data to emphasize flow directions. The versatility and performance of our technique are demonstrated in various real-world scenarios. A survey conducted with domain experts of different backgrounds and concerned citizens proves the usefulness and effectiveness of our technique.", month = jun, journal = "Computer Graphics Forum", volume = "38", number = "3", issn = "1467-8659", doi = "10.1111/cgf.13669", pages = "25--39", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/CORNEL-2019-IVF/", } @inproceedings{STEINLECHNER-2019-APS, title = "Adaptive Point-cloud Segmentation for Assisted Interactions", author = "Harald Steinlechner and Bernhard Rainer and Michael Schw\"{a}rzler and Georg Haaser and Attila Szabo and Stefan Maierhofer and Michael Wimmer", year = "2019", abstract = "In this work, we propose an interaction-driven approach streamlined to support and improve a wide range of real-time 2D interaction metaphors for arbitrarily large point clouds based on detected primitive shapes. Rather than performing shape detection as a costly pre-processing step on the entire point cloud at once, a user-controlled interaction determines the region that is to be segmented next.
By keeping the size of the region and the number of points small, the algorithm produces meaningful results and therefore feedback on the local geometry within a fraction of a second. We apply these findings for improved picking and selection metaphors in large point clouds, and propose further novel shape-assisted interactions that utilize this local semantic information to improve the user’s workflow.", month = may, isbn = "978-1-4503-6310-5", series = "I3D ’19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317023", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games", pages = "14:1--14:9", keywords = "Pointcloud Segmentation, Shape Detection, Interactive Editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/STEINLECHNER-2019-APS/", } @inproceedings{LUKSCH-2019-IGI, title = "Incrementally Baked Global Illumination", author = "Christian Luksch and Michael Wimmer and Michael Schw\"{a}rzler", year = "2019", abstract = "Global Illumination is affected by the slightest change in a 3D scene, requiring a complete reevaluation of the distributed light. In cases where real-time algorithms are not applicable due to high demands on the achievable accuracy, this recomputation from scratch results in artifacts like flickering or noise, disturbing the visual appearance and negatively affecting interactive lighting design workflows. We propose a novel system tackling this problem by providing incremental updates of a baked global illumination solution after scene modifications, and a re-convergence after a few seconds. Using specifically targeted incremental data structures and prioritization strategies in a many-light global illumination algorithm, we compute a differential update from one illumination state to another. We further demonstrate the use of a novel error balancing strategy making it possible to prioritize the illumination updates.", month = may, isbn = "978-1-4503-6310-5", series = "I3D ’19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317015", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", pages = "4:1--4:10", keywords = "Global Illumination, Instant Radiosity, Lightmaps", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/LUKSCH-2019-IGI/", } @misc{schuetz-2019-LCO, title = "Live Coding of a VR Render Engine in VR", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "Live coding in virtual reality allows users to create and modify their surroundings through code without the need to leave the virtual reality environment. Previous work focuses on modifying the scene. We propose an application that allows developers to modify virtually everything at runtime, including the scene but also the render engine, shader code and input handling, using standard desktop IDEs through a desktop mirror.
", month = mar, publisher = "IEEE", location = "Osaka", address = "http://ieeevr.org/2019/", event = "IEEE VR 2019", doi = "https://doi.org/10.1109/VR.2019.8797760", Conference date = "Poster presented at IEEE VR 2019 (2019-03)", note = "1150--1151", pages = "1150 – 1151", keywords = "virtual reality, live coding, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-LCO/", } @inproceedings{ZOTTI-2016-VAA, title = "Virtual Archaeoastronomy: Stellarium for Research and Outreach", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer and Wolfgang Neubauer", year = "2019", abstract = "In the last few years, the open-source desktop planetarium program Stellarium has become ever more popular for research and dissemination of results in Cultural Astronomy. In this time we have added significant capabilities for applications in cultural astronomy to the program. The latest addition allows its use in a multi-screen installation running both completely automated and manually controlled setups. During the development time, also the accuracy of astronomical simulation has been greatly improved.", month = mar, isbn = "978-3-319-97006-6", publisher = "Springer", location = "Milano, Italy", event = "SIA 2016 (16th Conference of the Italian Society for Archaeoastronomy)", booktitle = "Archaeoastronomy in the Roman World (Proceedings 16th Conference of the Italian Society for Archaeoastronomy)", pages = "187--205", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ZOTTI-2016-VAA/", } @incollection{BOKSANSKY-2019-RTS, title = "Ray Traced Shadows: Maintaining Real-Time Frame Rates", author = "Jakub Boksansky and Michael Wimmer and Jir\'{i} Bittner", year = "2019", abstract = "Efficient and accurate shadow computation is a long-standing problem in computer graphics. In real-time applications, shadows have traditionally been computed using the rasterization-based pipeline. With recent advances of graphics hardware, it is now possible to use ray tracing in real-time applications, making ray traced shadows a viable alternative to rasterization. While ray traced shadows avoid many problems inherent in rasterized shadows, tracing every shadow ray independently can become a bottleneck if the number of required rays rises, e.g., for high-resolution rendering, for scenes with multiple lights, or for area lights. Therefore, the computation should focus on image regions where shadows actually appear, in particular on the shadow boundaries. We present a practical method for ray traced shadows in real-time applications. Our method uses the standard rasterization pipeline for resolving primary-ray visibility and ray tracing for resolving visibility of light sources. We propose an adaptive sampling algorithm for shadow rays combined with an adaptive shadowfiltering method. These two techniques allow computing high-quality shadows with a limited number of shadow rays per pixel. 
We evaluate our method using a recent real-time ray tracing API (DirectX Raytracing) and compare the results with shadow mapping using cascaded shadow maps.", month = mar, address = "New York", booktitle = "Ray Tracing Gems: High-Quality and Real-Time Rendering with DXR and Other APIs", doi = "10.1007/978-1-4842-4427-2_13", editor = "Eric Haines and Tomas Akenine-M\"{o}ller", isbn = "978-1-4842-4426-5", publisher = "Springer", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/BOKSANSKY-2019-RTS/", } @inproceedings{schuetz-2019-CLOD, title = "Real-Time Continuous Level of Detail Rendering of Point Clouds", author = "Markus Sch\"{u}tz and Katharina Kr\"{o}sl and Michael Wimmer", year = "2019", abstract = "Real-time rendering of large point clouds requires acceleration structures that reduce the number of points drawn on screen. State-of-the-art algorithms group and render points in hierarchically organized chunks with varying extent and density, which results in sudden changes of density from one level of detail to another, as well as noticeable popping artifacts when additional chunks are blended in or out. These popping artifacts are especially noticeable at lower levels of detail, and consequently in virtual reality, where high performance requirements impose a reduction in detail. We propose a continuous level-of-detail method that exhibits gradual rather than sudden changes in density. Our method continuously recreates a down-sampled vertex buffer from the full point cloud, based on camera orientation, position, and distance to the camera, in a point-wise rather than chunk-wise fashion and at speeds of up to 17 million points per millisecond. As a result, additional details are blended in or out in a less noticeable and significantly less irritating manner as compared to the state of the art. The improved acceptance of our method was successfully evaluated in a user study.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798284", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "103--110", keywords = "point clouds, virtual reality, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-CLOD/", } @inproceedings{kroesl-2019-ICthroughVR, title = "ICthroughVR: Illuminating Cataracts through Virtual Reality", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Michael Wimmer and Steven Feiner", year = "2019", abstract = "Vision impairments, such as cataracts, affect the way many people interact with their environment, yet are rarely considered by architects and lighting designers because of a lack of design tools. To address this, we present a method to simulate vision impairments caused by cataracts in virtual reality (VR), using eye tracking for gaze-dependent effects. We conducted a user study to investigate how lighting affects visual perception for users with cataracts.
Unlike past approaches, we account for the user's vision and some constraints of VR headsets, allowing for calibration of our simulation to the same level of degraded vision for all participants.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798239", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "655--663", keywords = "vision impairments, cataracts, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/", } @article{ohrhallinger_stefan-2018-cgf, title = "FitConnect: Connecting Noisy 2D Samples by Fitted Neighborhoods", author = "Stefan Ohrhallinger and Michael Wimmer", year = "2019", abstract = "We propose a parameter-free method to recover manifold connectivity in unstructured 2D point clouds with high noise in terms of the local feature size. This enables us to capture the features which emerge out of the noise. To achieve this, we extend the reconstruction algorithm HNN-Crust, which connects samples to two (noise-free) neighbors and has been proven to output a manifold for a relaxed sampling condition. Applying this condition to noisy samples by projecting their k-nearest neighborhoods onto local circular fits leads to multiple candidate neighbor pairs and thus makes connecting them consistently an NP-hard problem. To solve this efficiently, we design an algorithm that searches that solution space iteratively on different scales of k. It achieves linear time complexity in terms of point count plus quadratic time in the size of noise clusters. Our algorithm FitConnect extends HNN-Crust seamlessly to connect both samples with and without noise, operates as locally as the recovered features and can output multiple open or closed piece-wise curves. Incidentally, our method simplifies the output geometry by eliminating all but a representative point from noisy clusters. Since local neighborhood fits overlap consistently, the resulting connectivity represents an ordering of the samples along a manifold. This permits us to simply blend the local fits for denoising with the locally estimated noise extent. Aside from applications like reconstructing silhouettes of noisy sensed data, this lays important groundwork to improve surface reconstruction in 3D. Our open-source algorithm is available online.", month = feb, journal = "Computer Graphics Forum", volume = "38", number = "1", issn = "1467-8659", doi = "10.1111/cgf.13395", pages = "126--137", keywords = "curve fitting, noisy samples, guarantees, curve reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ohrhallinger_stefan-2018-cgf/", } @inproceedings{ohrhallinger_stefan-2018-pg, title = "StretchDenoise: Parametric Curve Reconstruction with Guarantees by Separating Connectivity from Residual Uncertainty of Samples", author = "Stefan Ohrhallinger and Michael Wimmer", year = "2018", abstract = "We reconstruct a closed denoised curve from an unstructured and highly noisy 2D point cloud. Our proposed method uses a two-pass approach: Previously recovered manifold connectivity is used for ordering noisy samples along this manifold and expressing them as residuals in order to enable parametric denoising. This separates recovering low-frequency features from denoising high frequencies, which avoids over-smoothing.
The noise probability density functions (PDFs) at samples are either taken from sensor noise models or from estimates of the connectivity recovered in the first pass. The output curve balances the signed distances (inside/outside) to the samples. Additionally, the angles between edges of the polygon representing the connectivity become minimized in the least-squares sense. The movement of the polygon's vertices is restricted to their noise extent, i.e., a cut-off distance corresponding to a maximum variance of the PDFs. We approximate the resulting optimization model, which consists of higher-order functions, by a linear model with good correspondence. Our algorithm is parameter-free and operates fast on the local neighborhoods determined by the connectivity. We augment a least-squares solver constrained by a linear system to also handle bounds. This enables us to guarantee stochastic error bounds for sampled curves corrupted by noise, e.g., silhouettes from sensed data, and we improve on the reconstruction error from ground truth. Source code is available online. An extended version is available at: https://arxiv.org/abs/1808.07778", month = aug, isbn = "978-3-03868-073-4", location = "Hong Kong", event = "Pacific Graphics 2018", editor = "H. Fu, A. Ghosh, and J. Kopf (Guest Editors)", booktitle = "Proceedings of Pacific Graphics 2018", pages = "1--4", keywords = "Denoising, Curve reconstruction, Optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/ohrhallinger_stefan-2018-pg/", } @misc{schuetz-2018-PPC, title = "Progressive Real-Time Rendering of Unprocessed Point Clouds", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2018", abstract = "Rendering tens of millions of points in real time usually requires either high-end graphics cards or the use of spatial acceleration structures. We introduce a method to progressively display as many points as the GPU memory can hold in real time by reprojecting what was visible and randomly adding additional points to uniformly converge towards the full result within a few frames. Our method heavily limits the number of points that have to be rendered each frame and it converges quickly and in a visually pleasing way, which makes it suitable even for notebooks with low-end GPUs. The data structure consists of a randomly shuffled array of points that is incrementally generated on-the-fly while points are being loaded. Due to this, it can be used to directly view point clouds in common sequential formats such as LAS or LAZ while they are being loaded and without the need to generate spatial acceleration structures in advance, as long as the data fits into GPU memory.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0/18/08", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230816", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", pages = "Article 41", keywords = "point-based rendering, point cloud, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schuetz-2018-PPC/", } @misc{kroesl-2018-TVS, title = "The Virtual Schoolyard: Attention Training in Virtual Reality for Children with Attentional Disorders", author = "Katharina Kr\"{o}sl and Anna Felnhofer and Johanna X. Kafka and Laura Schuster and Alexandra Rinnerthaler and Michael Wimmer and Oswald D.
Kothgassner", year = "2018", abstract = "This work presents a virtual reality simulation for training different attentional abilities in children and adolescents. In an interdisciplinary project between psychology and computer science, we developed four mini-games that are used during therapy sessions to battle different aspects of attentional disorders. First experiments show that the immersive game-like application is well received by children. Our tool is also currently part of a treatment program in an ongoing clinical study.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230817", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", note = "Article 27--", pages = "Article 27 – ", keywords = "virtual reality, attentional disorders, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/", } @article{zsolnai-2018-gms, title = "Gaussian Material Synthesis", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2018", abstract = "We present a learning-based system for rapid mass-scale material synthesis that is useful for novice and expert users alike. The user preferences are learned via Gaussian Process Regression and can be easily sampled for new recommendations. Typically, each recommendation takes 40-60 seconds to render with global illumination, which makes this process impracticable for real-world workflows. Our neural network eliminates this bottleneck by providing high-quality image predictions in real time, after which it is possible to pick the desired materials from a gallery and assign them to a scene in an intuitive manner. Workflow timings against Disney’s “principled” shader reveal that our system scales well with the number of sought materials, thus empowering even novice users to generate hundreds of high-quality material models without any expertise in material modeling. Similarly, expert users experience a significant decrease in the total modeling time when populating a scene with materials. Furthermore, our proposed solution also offers controllable recommendations and a novel latent space variant generation step to enable the real-time fine-tuning of materials without requiring any domain expertise.", month = aug, journal = "ACM Transactions on Graphics (SIGGRAPH 2018)", volume = "37", number = "4", issn = "0730-0301", doi = "10.1145/3197517.3201307", pages = "76:1--76:14", keywords = "gaussian material synthesis, neural rendering, neural rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/zsolnai-2018-gms/", } @article{HECHER-2017-HDY, title = "How Do Users Map Points Between Dissimilar Shapes?", author = "Michael Hecher and Paul Guerrero and Peter Wonka and Michael Wimmer", year = "2018", abstract = "Finding similar points in globally or locally similar shapes has been studied extensively through the use of various point descriptors or shape-matching methods. However, little work exists on finding similar points in dissimilar shapes. In this paper, we present the results of a study where users were given two dissimilar two-dimensional shapes and asked to map a given point in the first shape to the point in the second shape they consider most similar. We find that user mappings in this study correlate strongly with simple geometric relationships between points and shapes. 
To predict the probability distribution of user mappings between any pair of simple two-dimensional shapes, two distinct statistical models are defined using these relationships. We perform a thorough validation of the accuracy of these predictions and compare our models qualitatively and quantitatively to well-known shape-matching methods. Using our predictive models, we propose an approach to map objects or procedural content between different shapes in different design scenarios.", month = aug, doi = "10.1109/TVCG.2017.2730877", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "8", volume = "24", pages = "2327--2338", keywords = "shape matching, transformations, shape similarity", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/HECHER-2017-HDY/", } @article{birsak-2017-dpe, title = "Dynamic Path Exploration on Mobile Devices", author = "Michael Birsak and Przemyslaw Musialski and Peter Wonka and Michael Wimmer", year = "2018", abstract = "We present a novel framework for visualizing routes on mobile devices. Our framework is suitable for helping users explore their environment. First, given a starting point and a maximum route length, the system retrieves nearby points of interest (POIs). Second, we automatically compute an attractive walking path through the environment trying to pass by as many highly ranked POIs as possible. Third, we automatically compute a route visualization that shows the current user position, POI locations via pins, and detail lenses for more information about the POIs. The visualization is an animation of an orthographic map view that follows the current user position. We propose an optimization based on a binary integer program (BIP) that models multiple requirements for an effective placement of detail lenses. We show that our path computation method outperforms recently proposed methods and we evaluate the overall impact of our framework in two user studies.", month = may, doi = "10.1109/TVCG.2017.2690294", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "5", volume = "24", pages = "1784--1798", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/birsak-2017-dpe/", } @article{Kathi-2018-VRB, title = "A VR-based user study on the effects of vision impairments on recognition distances of escape-route signs in buildings", author = "Katharina Kr\"{o}sl and Dominik Bauer and Michael Schw\"{a}rzler and Henry Fuchs and Michael Wimmer and Georg Suter", year = "2018", abstract = "In workplaces or publicly accessible buildings, escape routes are signposted according to official norms or international standards that specify distances, angles and areas of interest for the positioning of escape-route signs. In homes for the elderly, in which the residents commonly have degraded mobility and suffer from vision impairments caused by age or eye diseases, the specifications of current norms and standards may be insufficient. Quantifying the effect of symptoms of vision impairments like reduced visual acuity on recognition distances is challenging, as it is cumbersome to find a large number of user study participants who suffer from exactly the same form of vision impairments. Hence, we propose a new methodology for such user studies: By conducting a user study in virtual reality (VR), we are able to use participants with normal or corrected sight and simulate vision impairments graphically. 
The use of standardized medical eyesight tests in VR allows us to calibrate the visual acuity of all our participants to the same level, taking their respective visual acuity into account. Since we primarily focus on homes for the elderly, we accounted for their often limited mobility by implementing a wheelchair simulation for our VR application.", month = apr, journal = "The Visual Computer", volume = "34", number = "6-8", issn = "0178-2789", doi = "10.1007/s00371-018-1517-7", pages = "911--923", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/", } @article{santos-2017-dacge, title = "Distinctive Approaches to Computer Graphics Education", author = "Beatriz Sousa Santos and Jean-Michel Dischler and Valery Adzhiev and Eike Falk Anderson and Andrej Ferko and Oleg Fryazinov and Martin Il\v{c}\'{i}k and Ivana Il\v{c}\'{i}kov\'{a} and P. Slavik and Veronica Sundstedt and Lucie Svobodova and Michael Wimmer and Jiri Zara", year = "2018", abstract = "This paper presents the latest advances and research in Computer Graphics education in a nutshell. It is concerned with topics that were presented at the Education Track of the Eurographics Conference held in Lisbon in 2016. We describe works corresponding to approaches to Computer Graphics education that are unconventional in some way and attempt to tackle unsolved problems and challenges regarding the role of arts in computer graphics education, the role of research-oriented activities in undergraduate education and the interaction among different areas of Computer Graphics, as well as their application to courses or extra-curricular activities. We present related works addressing these topics and report experiences, successes and issues in implementing the approaches.", month = feb, doi = "10.1111/cgf.13305", issn = "1467-8659", journal = "Computer Graphics Forum", number = "1", volume = "37", pages = "403--412", keywords = "Computer Graphics Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/santos-2017-dacge/", } @article{steiner_2016_isad, title = "Integrated Structural-Architectural Design for Interactive Planning", author = "Bernhard Steiner and Elham Mousavian and Fatemeh Mehdizadeh Saradj and Michael Wimmer and Przemyslaw Musialski", year = "2017", abstract = "Traditionally, building floorplans are designed by architects with their usability, functionality, and architectural aesthetics in mind, however, the structural properties of the distribution of load-bearing walls and columns are usually not taken into account at this stage. In this paper we propose a novel approach for the design of architectural floorplans by integrating structural layout analysis directly into the planning process. In order to achieve this, we introduce a planning tool which interactively enforces checks for structural stability of the current design, and which on demand proposes how to stabilize it if necessary. Technically, our solution contains an interactive architectural modeling framework as well as a constrained optimization module where both are based on respective architectural rules. Using our tool, an architect can predict already in a very early planning stage which designs are structurally sound such that later changes due to stability reasons can be prevented. 
We compare manually computed solutions with optimal results of our proposed automated design process in order to show how much our proposed system can help architects to improve the process of laying out structural models optimally.", month = dec, doi = "10.1111/cgf.12996", issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "36", pages = "80--94", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/steiner_2016_isad/", } @article{leimer_2017_rbpesc, title = "Relation-Based Parametrization and Exploration of Shape Collections", author = "Kurt Leimer and Lukas Gersthofer and Michael Wimmer and Przemyslaw Musialski", year = "2017", abstract = "With online repositories for 3D models like 3D Warehouse becoming more prevalent and growing ever larger, new possibilities have emerged for both experienced and inexperienced users. These large collections of shapes can provide inspiration for designers or make it possible to synthesize new shapes by combining different parts from already existing shapes, which can be both easy to learn and a fast way of creating new shapes. But exploring large shape collections or searching for particular kinds of shapes can be difficult and time-consuming tasks as well, especially considering that online repositories are often disorganized. In our work, we propose a relation-based way to parametrize shape collections, allowing the user to explore the entire set of shapes based on the variability of spatial arrangements between pairs of parts. The way in which shapes differ from each other is captured automatically, resulting in a small number of exploration parameters. Furthermore, a copy-and-paste system for parts allows the user to change the structure of a shape, making it possible to explore the entire collection from any initial shape.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", volume = "67", pages = "127--137", keywords = "3d database exploration, Model variability, Shape analysis, Shape collections", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/leimer_2017_rbpesc/", } @article{HU-2017-ETM, title = "Efficient Tree Modeling from Airborne LiDAR Point Clouds", author = "Shaojun Hu and Zhengrong Li and Zhiyi Zhang and Dongjian He and Michael Wimmer", year = "2017", abstract = "Modeling real-world trees is important in many application areas, including computer graphics, botany and forestry. An example of a modeling method is reconstruction from light detection and ranging (LiDAR) scans. In contrast to terrestrial LiDAR systems, airborne LiDAR systems – even current high-resolution systems – capture only very few samples on tree branches, which makes the reconstruction of trees from airborne LiDAR a challenging task. In this paper, we present a new method to model plausible trees with fine details from airborne LiDAR point clouds. To reconstruct tree models, first, we use a normalized cut method to segment an individual tree point cloud. Then, trunk points are added to supplement the incomplete point cloud, and a connected graph is constructed by searching sufficient nearest neighbors for each point. Based on the observation of real-world trees, a direction field is created to restrict branch directions. Then, branch skeletons are constructed using a bottom-up greedy algorithm with a priority queue, and leaves are arranged according to phyllotaxis.
We demonstrate our method on a variety of examples and show that it can generate a plausible tree model in less than one second, in addition to preserving features of the original point cloud.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", volume = "67", pages = "1--13", keywords = "tree modeling, LIDAR, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/HU-2017-ETM/", } @article{ZOTTI-2017-BM, title = "Beyond 3D Models: Simulation of Temporally Evolving Models in Stellarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "In recent years, the interactive visual exploration and demonstration of three-dimensional virtual models of buildings or natural structures of archaeoastronomical interest under a simulated sky has become available for users of the open-source desktop planetarium program Stellarium [Zotti, 2015, 2016]. Users can load an architectural model in the well-known OBJ format and walk around to explore sight lines or light-and-shadow interaction in present and past times [Frischer et al., 2016]. However, until now, the model itself did not change in time, and loading models for various building phases (e.g., the assumed order of building the various standing stones, timber circles and stone circles of Stonehenge) always required a break in simulation and user interaction to load a model for the next phase. On the other hand, displaying a model under the sky of the wrong time may lead to inappropriate conclusions. Large-area models required considerable time to load, and loading caused a reset of location, so the user interested in changes in a certain viewing axis had to recreate that view again. Given that Stellarium is an “astronomical time machine”, nowadays capable of replaying sky vistas thousands of years ago with increasing accuracy [Zotti et al., submitted] and also for models with several million triangular faces, it seemed worth to explore possibilities to also show changes over time in the simulated buildings. The Scenery3D plugin of Stellarium is, however, not a complete game engine, and replicating the infrastructure found in such game engines like Unity3D – for example to interactively move game objects, or load small sub-components like standing stones and place them at arbitrary coordinates – seemed overkill. The solution introduced here is remarkably simple and should be easily adoptable for the casual model-making researcher: the MTL material description for the model, a simple plain-text file that describes colour, reflection behaviour, photo-texture or transparency of the various parts of the object, can be extended for our rendering system. Newly introduced values describe dates where parts of the model can appear and disappear (with transitional transparency to allow for archaeological dating uncertainties). The model parts with these enhanced, time-aware materials appear to fade in during the indicated time, will be fully visible in their “active” time, and will fade out again when Stellarium is set to simulate the sky when the real-world structures most likely have vanished. The only requirement for the model creator is now to separate objects so that they receive unique materials that can then be identified and augmented with these entries in the MTL text file. 
The advantages of this new feature should be clear: an observer can remain in a certain location in the virtual model and let the land- and skyscape change over decades or centuries, without the need to load new models. This allows the simulation of construction and reconstruction phases while still always keeping particularly interesting viewpoints unchanged, and will always show the matching sky for the most appropriate reconstruction phase of the model. ", month = sep, journal = "Mediterranean Archaeology and Archaeometry", volume = "18", number = "4", issn = "1108-9628", doi = "10.5281/zenodo.1477972", booktitle = "25th SEAC Conference", pages = "501--506", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-BM/", } @inproceedings{kroesl-2017-LiteMaker, title = "LiteMaker: Interactive Luminaire Development using Progressive Photon Tracing and Multi-Resolution Upsampling", author = "Katharina Kr\"{o}sl and Christian Luksch and Michael Schw\"{a}rzler and Michael Wimmer", year = "2017", abstract = "Industrial applications like luminaire development (the creation of a luminaire in terms of geometry and material) or lighting design (the efficient and aesthetic placement of luminaires in a virtual scene) rely heavily on high realism and physically correct simulations. Using typical approaches like CAD modeling and offline rendering, this requirement induces long processing times and therefore inflexible workflows. In this paper, we combine a GPU-based progressive photon-tracing algorithm to accurately simulate the light distribution of a luminaire with a novel multi-resolution image-filtering approach that produces visually meaningful intermediate results of the simulation process. By using this method in a 3D modeling environment, luminaire development is turned into an interactive process, allowing for real-time modifications and immediate feedback on the light distribution. Since the simulation results converge to a physically plausible solution that can be imported as a representation of a luminaire into a light-planning software, our work contributes to combining the two former decoupled workflows of luminaire development and lighting design, reducing the overall production time and cost for luminaire manufacturers. ", month = sep, isbn = "978-3-03868-049-9", publisher = "The Eurographics Association", location = "Bonn, Germany", event = "VMV 2017", editor = "Matthias Hullin and Reinhard Klein and Thomas Schultz and Angela Yao", doi = "10.2312/vmv.20171253", booktitle = "Vision, Modeling & Visualization", pages = "1--8", keywords = "Computing methodologies, Ray tracing, Image processing, Mesh geometry models", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/kroesl-2017-LiteMaker/", } @article{CORNEL-2017-FRS, title = "Forced Random Sampling: fast generation of importance-guided blue-noise samples", author = "Daniel Cornel and Hiroyuki Sakai and Christian Luksch and Michael Wimmer", year = "2017", abstract = "In computer graphics, stochastic sampling is frequently used to efficiently approximate complex functions and integrals. The error of approximation can be reduced by distributing samples according to an importance function, but cannot be eliminated completely. To avoid visible artifacts, sample distributions are sought to be random, but spatially uniform, which is called blue-noise sampling. The generation of unbiased, importance-guided blue-noise samples is expensive and not feasible for real-time applications. 
Sampling algorithms for these applications focus on runtime performance at the cost of having weak blue-noise properties. Blue-noise distributions have also been proposed for digital halftoning in the form of precomputed dither matrices. Ordered dithering with such matrices allows to distribute dots with blue-noise properties according to a grayscale image. By the nature of ordered dithering, this process can be parallelized easily. We introduce a novel sampling method called forced random sampling that is based on forced random dithering, a variant of ordered dithering with blue noise. By shifting the main computational effort into the generation of a precomputed dither matrix, our sampling method runs efficiently on GPUs and allows real-time importance sampling with blue noise for a finite number of samples. We demonstrate the quality of our method in two different rendering applications.", month = jun, journal = "The Visual Computer", volume = "33", number = "6", issn = "1432-2315", pages = "833--843", keywords = "blue-noise sampling, importance sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/CORNEL-2017-FRS/", } @inproceedings{Radwan-2017-Occ, title = "Cut and Paint: Occlusion-Aware Subset Selection for Surface Processing", author = "Mohamed Radwan and Stefan Ohrhallinger and Elmar Eisemann and Michael Wimmer", year = "2017", abstract = "User-guided surface selection operations are straightforward for visible regions on a convex model. However, concave surfaces present a challenge because self-occlusions require multiple camera positions to get unobstructed views. Therefore, users often have to locate and switch to new unobstructed views in order to continue the operation. Our novel approach enables operations like painting or cutting in a single view, even on the backside of objects and for arbitrary depth complexity, with interactive performance. Continuous projection of a curve drawn in screen space onto the mesh guarantees seamless brush strokes or manifold cuts, unaffected by any occlusions. Our occlusion-aware surface-processing method enables a number of applications in an easy way. As examples, we show continuous painting on the surface, selecting regions for texturing, creating illustrative cutaways from nested models and animation of cutaways.", month = may, publisher = "Canadian Human-Computer Communications Society / Soci{\'e}t{\'e} canadienne du dialogue humain-machine", location = "Edmonton, Alberta, CA", event = "Graphics Interface 2017", doi = "10.20380/GI2017.11", booktitle = "Proceedings of Graphics Interface 2017", pages = "82--89", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Radwan-2017-Occ/", } @inproceedings{SCHWAERZLER-2017-SBGM, title = "Sketch-based Guided Modeling of 3D Buildings from Oriented Photos", author = "Michael Schw\"{a}rzler and Lisa-Maria Kellner and Stefan Maierhofer and Michael Wimmer", year = "2017", abstract = "Capturing urban scenes using photogrammetric methods has become an interesting alternative to laser scanning in the past years. For the reconstruction of CAD-ready 3D models, two main types of interactive approaches have become prevalent: One uses the generated 3D point clouds to reconstruct polygonal surfaces, while the other focuses on 2D interaction in the photos to define edges and faces. We propose a novel interactive system that combines and enhances these approaches in order to optimize current reconstruction and modeling workflows. 
The photos are our main interaction target, allowing simple 2D interactions and edge-based snapping. We use the underlying segmented point cloud to define the 3D context in which the sketched polygons are projected whenever possible. An intuitive visual guiding interface gives the user feedback on the accuracy to expect with the current state of modeling to keep the necessary interactions at a minimum level.", month = feb, isbn = "978-1-4503-4886-7", publisher = "ACM", location = "San Francisco, CA", event = "I3D 2017", booktitle = "Proceedings of the 21st ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games", pages = "9:1--9:8", keywords = "3D modeling, guidance, photogrammetry", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/SCHWAERZLER-2017-SBGM/", } @inproceedings{JAHRMANN-2017-RRTG, title = "Responsive Real-Time Grass Rendering for General 3D Scenes", author = "Klemens Jahrmann and Michael Wimmer", year = "2017", abstract = "Grass plays an important role in most natural environments. Most interactive applications use image-based techniques to approximate fields of grass due to the high geometrical complexity, leading to visual artifacts. In this paper, we propose a grass-rendering technique that is capable of drawing each blade of grass as a geometrical object in real time. Accurate culling methods together with an adaptable rendering pipeline ensure that only the blades of grass that are important for the visual appearance of the field of grass are rendered. In addition, we introduce a physical model that is evaluated for each blade of grass. This enables a blade of grass to react to its environment by calculating the influence of gravity, wind and collisions. A major advantage of our approach is that it can render fields of grass of arbitrary shape and spatial alignment. Thus, in contrast to previous work, the blades of grass can be placed on any 3D model, which is not required to be a flat surface or a height map.", month = feb, isbn = "978-1-4503-4886-7", publisher = "ACM", location = "San Francisco, CA", event = "I3D 2017", booktitle = "Proceedings of the 21st ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games", pages = "6:1--6:10", keywords = "real-time rendering, grass rendering, hardware tessellation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/JAHRMANN-2017-RRTG/", } @article{ZOTTI-2017-TSP, title = "The Skyscape Planetarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "Communicating scientific topics in state-of-the-art exhibitions frequently involves the creation of impressive visual installations. In the exhibition “STONEHENGE. –A Hidden Landscape.” in the MAMUZ museum for prehistory in Mistelbach, Lower Austria, LBI ArchPro presents recent research results from the Stonehenge Hidden Landscape Project. A central element of the exhibition, which extends over two floors connected with open staircases, is an assembly of original-sized replicas of several stones of the central trilithon horseshoe which is seen from both floors. On the upper floor, visitors are at eye level with the lintels, and on a huge curved projection screen which extends along the long wall of the hall they can experience the view out over the Sarsen circle into the surrounding landscape.
This paper describes the planning and creation of this part of the exhibition, and some first impressions after opening.", journal = "Culture and Cosmos", volume = "21", number = "1", issn = "1368-6534", booktitle = "24th SEAC Conference", pages = "269--281", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-TSP/", } @incollection{SCHEIBLAUER-2015-WFC, title = "Workflow for Creating and Rendering Huge Point Models", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2017", booktitle = "Fundamentals of Virtual Archaeology: Theory and Practice", isbn = "9781466594760", note = "(to appear) 15.06.2017", publisher = "A K Peters/CRC Press", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/SCHEIBLAUER-2015-WFC/", } @inproceedings{WIMMER-2016-HARVEST4D, title = "Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds", author = "Tamy Boubekeur and Paolo Cignoni and Elmar Eisemann and Michael Goesele and Reinhard Klein and Stefan Roth and Michael Weinmann and Michael Wimmer", year = "2016", abstract = "The EU FP7 FET-Open project "Harvest4D: Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds" deals with the acquisition, processing, and display of dynamic 3D data. Technological progress is offering us a wide-spread availability of sensing devices that deliver different data streams, which can be easily deployed in the real world and produce streams of sampled data with increased density and easier iteration of the sampling process. These data need to be processed and displayed in a new way. The Harvest4D project proposes a radical change in acquisition and processing technology: instead of a goal-driven acquisition that determines the devices and sensors, its methods let the sensors and resulting available data determine the acquisition process. A variety of challenging problems need to be solved: huge data amounts, different modalities, varying scales, dynamic, noisy and colorful data. This short contribution presents a selection of the many scientific results produced by Harvest4D. We will focus on those results that could bring a major impact to the Cultural Heritage domain, namely facilitating the acquisition of the sampled data or providing advanced visual analysis capabilities.", month = oct, isbn = "978-3-03868-011-6", publisher = "Eurographics Association", location = "Genova, Italy", event = "GCH 2016", editor = "Chiara Eva Catalano and Livio De Luca", doi = "10.2312/gch.20161378", booktitle = "Proceedings of the 14th Eurographics Workshop on Graphics and Cultural Heritage", pages = "19--22", keywords = "acquisition, 3d scanning, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/WIMMER-2016-HARVEST4D/", } @misc{leimer-2016-rpe, title = "Relation-Based Parametrization and Exploration of Shape Collections", author = "Kurt Leimer and Michael Wimmer and Przemyslaw Musialski", year = "2016", abstract = "With online repositories for 3D models like 3D Warehouse becoming more prevalent and growing ever larger, new possibilities have opened up for both experienced and inexperienced users alike. These large collections of shapes can provide inspiration for designers or make it possible to synthesize new shapes by combining different parts from already existing shapes, which can be both easy to learn and a fast way of creating new shapes.
But exploring large shape collections or searching for particular kinds of shapes can be difficult and time-consuming tasks as well, especially considering that online repositories are often disorganized. In our work, we propose a relation-based way to parametrize shape collections, allowing the user to explore the entire set of shapes by controlling a small number of parameters.", month = jul, publisher = "ACM", location = "Anaheim, CA, USA", isbn = "978-1-4503-4371-8", event = "ACM SIGGRAPH 2016", booktitle = "ACM SIGGRAPH 2016 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2016 (2016-07-24--2016-07-28)", pages = "34:1--34:1", keywords = "3D database exploration, shape analysis, shape collections", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/leimer-2016-rpe/", } @inproceedings{ilcik-2016-cescg, title = "20 Years of the Central European Seminar on Computer Graphics", author = "Martin Il\v{c}\'{i}k and Ivana Il\v{c}\'{i}kov\'{a} and Andrej Ferko and Michael Wimmer", year = "2016", abstract = "The Central European Seminar on Computer Graphics is an annual scientific seminar for undergraduate students of computer graphics, vision and visual computing. Its main mission is to promote graphics research and to motivate students to pursue academic careers. An international committee of experts guides their research work for several months. At the end, students present their results at a three-day seminar to an audience of approx. 100 students and professors. All attendants actively participate in discussions and workshops focused on academic skills and career planning for young researchers. Interactive sessions on innovation help them to identify the value of their ideas and motivate them to continue in their work.", month = may, publisher = "The Eurographics Association", location = "Lisbon", issn = "1017-4656", editor = "Beatriz Sousa Santos and Jean-Michel Dischler", booktitle = "Eurographics 2016 Education Papers", pages = "25--30", keywords = "Promotion of undergraduate research, Student seminar", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ilcik-2016-cescg/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances of the acquisition technology over the last years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing step and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps.
In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images. At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @article{ohrhallinger-2016-sgp, title = "Curve Reconstruction with Many Fewer Samples", author = "Stefan Ohrhallinger and Scott A. Mitchell and Michael Wimmer", year = "2016", abstract = "We consider the problem of sampling points from a collection of smooth curves in the plane, such that the Crust family of proximity-based reconstruction algorithms can rebuild the curves. Reconstruction requires a dense sampling of local features, i.e., parts of the curve that are close in Euclidean distance but far apart geodesically. We show that epsilon<0.47-sampling is sufficient for our proposed HNN-CRUST variant, improving upon the state-of-the-art requirement of epsilon<1/3-sampling. Thus we may reconstruct curves with many fewer samples. We also present a new sampling scheme that reduces the required density even further than epsilon<0.47-sampling. We achieve this by better controlling the spacing between geodesically consecutive points. Our novel sampling condition is based on the reach, the minimum local feature size along intervals between samples. This is mathematically closer to the reconstruction density requirements, particularly near sharp-angled features. We prove lower and upper bounds on reach rho-sampling density in terms of lfs epsilon-sampling and demonstrate that we typically reduce the required number of samples for reconstruction by more than half. ", journal = "Computer Graphics Forum", volume = "35", number = "5", issn = "1467-8659", pages = "167--176", keywords = "sampling condition, curve reconstruction, curve sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ohrhallinger-2016-sgp/", } @inproceedings{ilcik-2016-cmssg, title = "Collaborative Modeling with Symbolic Shape Grammars", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2016", abstract = "Generative design based on symbolic grammars is oriented on individual artists. Team work is not supported since single scripts produced by various artists have to be linked and maintained manually with a lot of effort. The main motivation for a collaborative modeling framework was to reduce the script management required for large projects. We achieved even more by extending the design paradigm to a cloud environment where everyone is part of a huge virtual team. 
The main contribution of the presented work is a web-based modeling system with a specialized variant of a symbolic shape grammar.", location = "Oulu, Finland", booktitle = "Proceedings of eCAADe 2016", pages = "417--426", keywords = "collaboration, procedural modeling, procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ilcik-2016-cmssg/", } @article{musialski_2016_sosp, title = "Non-Linear Shape Optimization Using Local Subspace Projections", author = "Przemyslaw Musialski and Christian Hafner and Florian Rist and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2016", abstract = "In this paper we present a novel method for non-linear shape optimization of 3d objects given by their surface representation. Our method takes advantage of the fact that various shape properties of interest give rise to underdetermined design spaces implying the existence of many good solutions. Our algorithm exploits this by performing iterative projections of the problem to local subspaces where it can be solved much more efficiently using standard numerical routines. We demonstrate how this approach can be utilized for various shape optimization tasks using different shape parameterizations. In particular, we show how to efficiently optimize natural frequencies, mass properties, as well as the structural yield strength of a solid body. Our method is flexible, easy to implement, and very fast.", journal = "ACM Transactions on Graphics", volume = "35", number = "4", issn = "0730-0301", doi = "10.1145/2897824.2925886", pages = "87:1--87:13", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/musialski_2016_sosp/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @article{musialski-2015-souos, title = "Reduced-Order Shape Optimization Using Offset Surfaces", author = "Przemyslaw Musialski and Thomas Auzinger and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2015", abstract = "Given the 2-manifold surface of a 3d object, we propose a novel method for the computation of an offset surface with varying thickness such that the solid volume between the surface and its offset satisfies a set of prescribed constraints and at the same time minimizes a given objective functional. Since the constraints as well as the objective functional can easily be adjusted to specific application requirements, our method provides a flexible and powerful tool for shape optimization. We use manifold harmonics to derive a reduced-order formulation of the optimization problem, which guarantees a smooth offset surface and speeds up the computation independently from the input mesh resolution without affecting the quality of the result. 
The constrained optimization problem can be solved in a numerically robust manner with commodity solvers. Furthermore, the method allows simultaneously optimizing an inner and an outer offset in order to increase the degrees of freedom. We demonstrate our method in a number of examples where we control the physical mass properties of rigid objects for the purpose of 3d printing. ", month = aug, journal = "ACM Transactions on Graphics (ACM SIGGRAPH 2015)", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766955", pages = "102:1--102:9", keywords = "reduced-order models, shape optimization, computational geometry, geometry processing, physical mass properties", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-souos/", } @article{guerrero-2015-lsp, title = "Learning Shape Placements by Example", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2015", abstract = "We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.", month = aug, journal = "ACM Transactions on Graphics", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766933", pages = "108:1--108:13", keywords = "modeling by example, complex model generation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/guerrero-2015-lsp/", } @misc{hafner-2015-onff, title = "Optimization of Natural Frequencies for Fabrication-Aware Shape Modeling", author = "Christian Hafner and Przemyslaw Musialski and Thomas Auzinger and Michael Wimmer and Leif Kobbelt", year = "2015", abstract = "Given a target shape and a target frequency, we automatically synthesize a shape that exhibits this frequency as part of its natural spectrum, while resembling the target shape as closely as possible. We employ finite element modal analysis with thin-shell elements to accurately predict the acoustic behavior of 3d solids. Our optimization pipeline uses an input surface and automatically calculates an inner offset surface to describe a volumetric solid. The solid exhibits a sound with the desired pitch if fabricated from the targeted material. In order to validate our framework, we optimize the shape of a tin bell to exhibit a sound at 1760 Hz. We fabricate the bell by casting it from a mold and measure the frequency peaks in its natural ringing sound. The measured pitch agrees with our simulation to an accuracy of 2.5%. In contrast to previous method, we only use reference material parameters and require no manual tuning.", month = aug, publisher = "ACM", note = "Lecturer: P. 
Musialski", location = "Los Angeles, CA, USA", event = "ACM SIGGRAPH 2015", booktitle = "Proceedings of ACM SIGGRAPH 2015 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2015 (2015-08-09--2015-08-13)", keywords = "natural frequencies, modal analysis, shape optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/hafner-2015-onff/", } @article{Jimenez_SSS_2015, title = "Separable Subsurface Scattering", author = "Jorge Jimenez and Karoly Zsolnai-Feh\'{e}r and Adrian Jarabo and Christian Freude and Thomas Auzinger and Xian-Chun Wu and Javier van der Pahlen and Michael Wimmer and Diego Gutierrez", year = "2015", abstract = "In this paper we propose two real-time models for simulating subsurface scattering for a large variety of translucent materials, which need under 0.5 milliseconds per frame to execute. This makes them a practical option for real-time production scenarios. Current state-of-the-art, real-time approaches simulate subsurface light transport by approximating the radially symmetric non-separable diffusion kernel with a sum of separable Gaussians, which requires multiple (up to twelve) 1D convolutions. In this work we relax the requirement of radial symmetry to approximate a 2D diffuse reflectance profile by a single separable kernel. We first show that low-rank approximations based on matrix factorization outperform previous approaches, but they still need several passes to get good results. To solve this, we present two different separable models: the first one yields a high-quality diffusion simulation, while the second one offers an attractive trade-off between physical accuracy and artistic control. Both allow rendering subsurface scattering using only two 1D convolutions, reducing both execution time and memory consumption, while delivering results comparable to techniques with higher cost. Using our importance-sampling and jittering strategies, only seven samples per pixel are required. Our methods can be implemented as simple post-processing steps without intrusive changes to existing rendering pipelines. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "6", issn = "1467-8659", pages = "188--197", keywords = "separable, realtime rendering, subsurface scattering, filtering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Jimenez_SSS_2015/", } @article{Ilcik_2015_LAY, title = "Layer-Based Procedural Design of Facades", author = "Martin Il\v{c}\'{i}k and Przemyslaw Musialski and Thomas Auzinger and Michael Wimmer", year = "2015", abstract = "We present a novel procedural framework for interactively modeling building fa\c{c}ades. Common procedural approaches, such as shape grammars, assume that building fa\c{c}ades are organized in a tree structure, while in practice this is often not the case. Consequently, the complexity of their layout description becomes unmanageable for interactive editing. In contrast, we obtain a fa\c{c}ade by composing multiple overlapping layers, where each layer contains a single rectilinear grid of fa\c{c}ade elements described by two simple generator patterns. This way, the design process becomes more intuitive and the editing effort for complex layouts is significantly reduced. To achieve this, we present a method for the automated merging of different layers in the form of a mixed discrete and continuous optimization problem. 
Finally, we provide several modeling examples and a comparison to shape grammars in order to highlight the advantages of our method when designing realistic building fa\c{c}ades. You can find the paper video at https://vimeo.com/118400233 .", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "205--216", keywords = "procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ilcik_2015_LAY/", } @article{MATTAUSCH-2015-CHCRT, title = "CHC+RT: Coherent Hierarchical Culling for Ray Tracing", author = "Oliver Mattausch and Jir\'{i} Bittner and Alberto Jaspe and Enrico Gobbetti and Michael Wimmer and Renato Pajarola", year = "2015", abstract = "We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 × better than pure shader-based ray tracing.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "537--548", keywords = "occlusion culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MATTAUSCH-2015-CHCRT/", } @techreport{ROEGNER-2015-IBR, title = "Image-based Reprojection Using a Non-local Means Algorithm", author = "Clemens R\"{o}gner and Michael Wimmer and Johannes Hanika and Carsten Dachsbacher", year = "2015", abstract = "We introduce an image-based approach to increase the framerate of image sequences generated with offline rendering algorithms. Our method handles in most cases reflections and refractions better than existing image-based temporal coherence techniques. The proposed technique is also more accurate than some image-based upsampling methods, because it calculates an individual result for each pixel. Our proposed algorithm takes a pair of frames and generates motion vectors for each pixel. This allows for adding a new frame between that pair and thus increasing the framerate. To find the motion vectors, we utilize the non-local means denoising algorithm, which determines the similarity of two pixels by their surrounding and reinterpret that similarity as the likelihood of movement from one pixel to the other. This is similar to what it is done in video encoding to reduce file size, but in our case is done for each pixel individually instead of a block-wise approach, making our technique more accurate. Our method also improves on work in the field of real-time rendering. Such techniques use motion vectors, which are generated through knowledge about the movement of objects within the scene. 
This can lead to problems when the optical flow in an image sequence is not coherent with the objects' movement. Our method avoids those problems. Furthermore, previous work has shown that the non-local means algorithm can be optimized for parallel execution, which significantly reduces the time to execute our proposed technique as well.", month = apr, number = "TR-186-2-15-02", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "optical flow, offline rendering, image reprojection, temporal upsampling, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/ROEGNER-2015-IBR/", } @inproceedings{WEBER-2015-PRA, title = "Parallel Reyes-style Adaptive Subdivision with Bounded Memory Usage", author = "Thomas Weber and Michael Wimmer and John Owens", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = feb, isbn = "978-1-4503-3392-4", publisher = "ACM", organization = "ACM", location = "San Francisco, CA", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games (i3D 2015)", pages = "39--45", keywords = "micro-rasterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA/", } @misc{KREUZER-2015-DPA, title = "Depixelizing Pixel Art in Real-Time", author = "Felix Kreuzer and Johannes Kopf and Michael Wimmer", year = "2015", abstract = "Pixel art was frequently employed in games of the 90s and earlier. On today's large and high-resolution displays, pixel art looks blocky. Recently, an algorithm was introduced to create a smooth, resolution-independent vector representation from pixel art. However, the algorithm is far too slow for interactive use, for example in a game. This poster presents an efficient implementation of the algorithm on the GPU, so that it runs at real-time rates and can be incorporated into current game emulators.
Extended Abstract: http://dl.acm.org/citation.cfm?id=2721395", month = feb, publisher = "ACM New York, NY, USA", location = "San Francisco, CA", isbn = "978-1-4503-3392-4", event = "19th Symposium on Interactive 3D Graphics and Games", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games", Conference date = "Poster presented at 19th Symposium on Interactive 3D Graphics and Games (2015-02-27--2015-03-01)", note = "130--130", pages = "130 – 130", keywords = "image processing, depixelizing, pixel art", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/KREUZER-2015-DPA/", } @talk{WIMMER-2015-CGFC, title = "Real-Time Computer Graphics and the Future of CAAD", author = "Michael Wimmer", year = "2015", abstract = "The gap between real-time and non-real-time computer graphics is closing. Current approaches are able to render large amounts of geometry at interactive rates while at the same time providing an unprecedented level of feedback and support for the user. Given these developments, it will be possible to bridge the classical divide between CAAD modelling and visualization that is still pre-eminent in the architectural world. This keynote talk will take one step into this direction, presenting both recent research conducted at TU Wien's Rendering Group and the computer graphics community world-wide.", event = "33rd Annual eCAADe conference", location = "Vienna, Austria", keywords = "computer graphics", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WIMMER-2015-CGFC/", } @inproceedings{wallner-2015-ModelingRoutinization, title = "Modeling Routinization in Games: An Information Theory Approach", author = "Simon Wallner and Martin Pichlmair and Michael Hecher and Michael Wimmer", year = "2015", abstract = "Routinization is the result of practicing until an action stops being a goal-directed process. This paper formulates a definition of routinization in games based on prior research in the fields of activity theory and practice theory. Routinization is analyzed using the formal model of discrete-time, discrete-space Markov chains and information theory to measure the actual error between the dynamically trained models and the player interaction. Preliminary research supports the hypothesis that Markov chains can be effectively used to model routinization in games. A full study design is presented to further explore and verify this hypothesis.", isbn = "978-1-4503-3466-2", series = "CHI PLAY ", publisher = "ACM", location = "London, United Kingdom", booktitle = "Proceedings of the 2015 Annual Symposium on Computer-Human Interaction in Play", pages = "727--732", keywords = "Games, Routinization, Markov Chains, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/wallner-2015-ModelingRoutinization/", } @talk{WIMMER-2015-CGMC, title = "Computer Graphics Meets Computational Design", author = "Michael Wimmer", year = "2015", abstract = "In this talk, I will report on recent advancements in Computer Graphics, which will be of great interest for next-generation computational design tools. I will present methods for modeling from images, modeling by examples and multiple examples, but also procedural modeling, modeling of physical behavior and light transport, all recently developed in our group. 
The common rationale behind our research is that we exploit real-time processing power and computer graphics algorithms to enable interactive computational design tools that allow short feedback loops in design processes.", event = "VGS Invited Talks Series", location = "Brno University of Technology, Czech Republic", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WIMMER-2015-CGMC/", } @article{Guerrero-2014-TPS, title = "Partial Shape Matching using Transformation Parameter Similarity", author = "Paul Guerrero and Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2014", abstract = "In this paper, we present a method for non-rigid, partial shape matching in vector graphics. Given a user-specified query region in a 2D shape, similar regions are found, even if they are non-linearly distorted. Furthermore, a non-linear mapping is established between the query regions and these matches, which allows the automatic transfer of editing operations such as texturing. This is achieved by a two-step approach. First, point-wise correspondences between the query region and the whole shape are established. The transformation parameters of these correspondences are registered in an appropriate transformation space. For transformations between similar regions, these parameters form surfaces in transformation space, which are extracted in the second step of our method. The extracted regions may be related to the query region by a non-rigid transform, enabling non-rigid shape matching.", month = nov, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "1--14", keywords = "Shape Matching, Texture Transfer, Non-Rigid, Deformable, Edit Propagation, Partial", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-TPS/", } @inproceedings{ymca, title = "YMCA - Your Mesh Comparison Application", author = "Johanna Schmidt and Reinhold Preiner and Thomas Auzinger and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2014", abstract = "Polygonal meshes can be created in several different ways. In this paper we focus on the reconstruction of meshes from point clouds, which are sets of points in 3D. Several algorithms that tackle this task already exist, but they have different benefits and drawbacks, which leads to a large number of possible reconstruction results (i.e., meshes). The evaluation of those techniques requires extensive comparisons between different meshes which is up to now done by either placing images of rendered meshes side-by-side, or by encoding differences by heat maps. A major drawback of both approaches is that they do not scale well with the number of meshes. This paper introduces a new comparative visual analysis technique for 3D meshes which enables the simultaneous comparison of several meshes and allows for the interactive exploration of their differences. Our approach gives an overview of the differences of the input meshes in a 2D view. By selecting certain areas of interest, the user can switch to a 3D representation and explore the spatial differences in detail. To inspect local variations, we provide a magic lens tool in 3D. The location and size of the lens provide further information on the variations of the reconstructions in the selected area. 
With our comparative visualization approach, differences between several mesh reconstruction algorithms can be easily localized and inspected.", month = nov, series = "VAST ", publisher = "IEEE Computer Society", note = "http://dx.doi.org/10.1109/VAST.2014.7042491", location = "Paris, France", booktitle = "IEEE Visual Analytics Science and Technology", keywords = "mesh comparison, 3D data exploration, focus+context, comparative visualization, Visual analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/ymca/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments ", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions.
We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @article{preiner2014clop, title = "Continuous Projection for Fast L1 Reconstruction", author = "Reinhold Preiner and Oliver Mattausch and Murat Arikan and Renato Pajarola and Michael Wimmer", year = "2014", abstract = "With better and faster acquisition devices comes a demand for fast robust reconstruction algorithms, but no L1-based technique has been fast enough for online use so far. In this paper, we present a novel continuous formulation of the weighted locally optimal projection (WLOP) operator based on a Gaussian mixture describing the input point density. Our method is up to 7 times faster than an optimized GPU implementation of WLOP, and achieves interactive frame rates for moderately sized point clouds. We give a comprehensive quality analysis showing that our continuous operator achieves a generally higher reconstruction quality than its discrete counterpart. Additionally, we show how to apply our continuous formulation to spherical mixtures of normal directions, to also achieve a fast robust normal reconstruction. Project Page: https://www.cg.tuwien.ac.at/~preiner/projects/clop/", month = aug, journal = "ACM Transactions on Graphics (Proc. of ACM SIGGRAPH 2014)", volume = "33", number = "4", issn = "0730-0301", doi = "10.1145/2601097.2601172", pages = "47:1--47:13", keywords = "point set, Gaussian mixture, Hierarchical EM, upsampling, dynamic reconstruction, L1 reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/preiner2014clop/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms has been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game.
One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @article{LUKSCH-2014-RTR, title = "Real-Time Rendering of Glossy Materials with Regular Sampling", author = "Christian Luksch and Robert F. Tobler and Thomas M\"{u}hlbacher and Michael Schw\"{a}rzler and Michael Wimmer", year = "2014", abstract = "Rendering view-dependent, glossy surfaces to increase the realism in real-time applications is a computationally complex task that can only be performed by applying some approximations—especially when immediate changes in the scene in terms of material settings and object placement are a necessity. The use of environment maps is a common approach to this problem, but entails performance problems due to costly pre-filtering steps or expensive sampling. We, therefore, introduce a regular sampling scheme for environment maps that relies on an efficient MIP-map-based filtering step, and minimizes the number of necessary samples for creating a convincing real-time rendering of glossy BRDF materials.", month = jun, journal = "The Visual Computer", volume = "30", number = "6-8", issn = "0178-2789", pages = "717--727", keywords = "real-time rendering, BRDFs", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/LUKSCH-2014-RTR/", } @inproceedings{Radwan-2014-CDR, title = "Efficient Collision Detection While Rendering Dynamic Point Clouds", author = "Mohamed Radwan and Stefan Ohrhallinger and Michael Wimmer", year = "2014", abstract = "A recent trend in interactive environments is the use of unstructured and temporally varying point clouds. This is driven by both affordable depth cameras and augmented reality simulations. One research question is how to perform collision detection on such point clouds. State-of-the-art methods for collision detection create a spatial hierarchy in order to capture dynamic point cloud surfaces, but they require O(NlogN) time for N points. We propose a novel screen-space representation for point clouds which exploits the property of the underlying surface being 2D. To reduce dimensionality, a 3D point cloud is converted into a series of thickened layered depth images. This data structure can be constructed in O(N) time and allows for fast surface queries due to its increased compactness and memory coherency. On top of that, parts of its construction come for free since they are already handled by the rendering pipeline. As an application we demonstrate online collision detection between dynamic point clouds.
It shows superior accuracy when compared to other methods and robustness to sensor noise since uncertainty is hidden by the thickened boundary.", month = may, isbn = "978-1-4822-6003-8", publisher = "Canadian Information Processing Society", location = "Montreal, Quebec, Canada ", issn = "0713-5424", event = "Graphics Interface 2014", booktitle = "Proceedings of the 2014 Graphics Interface Conference", pages = "25--33", keywords = "bounding volumes, layered depth images, collision detection, point cloud, dynamic", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Radwan-2014-CDR/", } @article{birsak-2014-agtb, title = "Automatic Generation of Tourist Brochures", author = "Michael Birsak and Przemyslaw Musialski and Peter Wonka and Michael Wimmer", year = "2014", abstract = "We present a novel framework for the automatic generation of tourist brochures that include routing instructions and additional information presented in the form of so-called detail lenses. The first contribution of this paper is the automatic creation of layouts for the brochures. Our approach is based on the minimization of an energy function that combines multiple goals: positioning of the lenses as close as possible to the corresponding region shown in an overview map, keeping the number of lenses low, and an efficient numbering of the lenses. The second contribution is a route-aware simplification of the graph of streets used for traveling between the points of interest (POIs). This is done by reducing the graph consisting of all shortest paths through the minimization of an energy function. The output is a subset of street segments that enable traveling between all the POIs without considerable detours, while at the same time guaranteeing a clutter-free visualization. Video: http://www.youtube.com/watch?v=t3w7uxzSR-Y", month = apr, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2014)", volume = "33", number = "2", issn = "1467-8659", pages = "449--458", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birsak-2014-agtb/", } @inproceedings{bernhard-2014-EFD, title = "The Effects of Fast Disparity Adjustments in Gaze-Controlled Stereoscopic Applications", author = "Matthias Bernhard and Camillo Dellmour and Michael Hecher and Efstathios Stavrakis and Michael Wimmer", year = "2014", abstract = "With the emergence of affordable 3D displays, stereoscopy is becoming a commodity. However, often users report discomfort even after brief exposures to stereo content. One of the main reasons is the conflict between vergence and accommodation that is caused by 3D displays. We investigate dynamic adjustment of stereo parameters in a scene using gaze data in order to reduce discomfort. In a user study, we measured stereo fusion times after abrupt manipulation of disparities using gaze data. We found that gaze-controlled manipulation of disparities can lower fusion times for large disparities. In addition we found that gaze-controlled disparity adjustment should be applied in a personalized manner and ideally performed only at the extremities or outside the comfort zone of subjects. 
These results provide important insight on the problems associated with fast disparity manipulation and are essential for developing appealing gaze-contingent and gaze-controlled applications.", month = mar, isbn = "978-1-4503-2751-0", publisher = "ACM", location = "Safety Harbor, FL, USA", editor = "Pernilla Qvarfordt and Dan Witzner Hansen", booktitle = "Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA 2014)", pages = "111--118", keywords = "stereoscopic rendering, comfort models, fusion time, eye tracking", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/", } @article{Guerrero-2014-GRF, title = "Edit Propagation using Geometric Relationship Functions", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2014", abstract = "We propose a method for propagating edit operations in 2D vector graphics, based on geometric relationship functions. These functions quantify the geometric relationship of a point to a polygon, such as the distance to the boundary or the direction to the closest corner vertex. The level sets of the relationship functions describe points with the same relationship to a polygon. For a given query point we first determine a set of relationships to local features, construct all level sets for these relationships and accumulate them. The maxima of the resulting distribution are points with similar geometric relationships. We show extensions to handle mirror symmetries, and discuss the use of relationship functions as local coordinate systems. Our method can be applied for example to interactive floor-plan editing, and is especially useful for large layouts, where individual edits would be cumbersome. We demonstrate populating 2D layouts with tens to hundreds of objects by propagating relatively few edit operations.", month = mar, journal = "ACM Transactions on Graphics", volume = "33", number = "2", issn = "0730-0301", doi = "10.1145/2591010", pages = "15:1--15:15", keywords = "Shape Modeling, Floor Plans, Edit Propagation, Geometric Relationship Functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-GRF/", } @talk{WIMMER-2014-DWNT, title = "Do we need the full reconstruction pipeline?", author = "Michael Wimmer", year = "2014", abstract = "The traditional cultural heritage documentation pipeline from acquisition using a range scanner to interactive display to the user is a tedious and labor-intensive process. In particular, reconstructing high-quality meshes from large point clouds can be time consuming. In this talk, I will present shortcuts to this pipeline. The first idea is not to reconstruct a mesh at all, but keep the original point cloud as long as possible. I will discuss the challenges in maintaining interactivity and high quality when dealing with the display and manipulation of huge point clouds. The second idea is to reconstruct extremely simple models for regular and man-made structures, using shape analysis and user guidance. These models can be shown in end-user installations and require very few resources for display.
", event = "EU-Korea Conference on Science and Technology", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/WIMMER-2014-DWNT/", } @article{Auzinger_Mistelbauer_2013_CSR, title = "Vessel Visualization using Curved Surface Reformation", author = "Thomas Auzinger and Gabriel Mistelbauer and Ivan Baclija and R\"{u}diger Schernthaner and Arnold K\"{o}chl and Michael Wimmer and Eduard Gr\"{o}ller and Stefan Bruckner", year = "2013", abstract = "Visualizations of vascular structures are frequently used in radiological investigations to detect and analyze vascular diseases. Obstructions of the blood flow through a vessel are one of the main interests of physicians, and several methods have been proposed to aid the visual assessment of calcifications on vessel walls. Curved Planar Reformation (CPR) is a wide-spread method that is designed for peripheral arteries which exhibit one dominant direction. To analyze the lumen of arbitrarily oriented vessels, Centerline Reformation (CR) has been proposed. Both methods project the vascular structures into 2D image space in order to reconstruct the vessel lumen. In this paper, we propose Curved Surface Reformation (CSR), a technique that computes the vessel lumen fully in 3D. This offers high-quality interactive visualizations of vessel lumina and does not suffer from problems of earlier methods such as ambiguous visibility cues or premature discretization of centerline data. Our method maintains exact visibility information until the final query of the 3D lumina data. We also present feedback from several domain experts.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE Scientific Visualization 2013)", volume = "19", number = "12", pages = "2858--2867", keywords = "Surface Approximation, Vessel, Reformation, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_Mistelbauer_2013_CSR/", } @inproceedings{birsak-2013-sta, title = "Seamless Texturing of Archaeological Data", author = "Michael Birsak and Przemyslaw Musialski and Murat Arikan and Michael Wimmer", year = "2013", abstract = "In this paper we propose a framework for out-of-core real-time rendering of high-quality textured archaeological data-sets. Our input is a triangle mesh and a set of calibrated and registered photographs. Our system performs the actual mapping of the photos to the mesh for high-quality reconstructions, which is a task referred to as the labeling problem. Another problem of such mappings are seams that arise on junctions between triangles that contain information from different photos. These are are approached with blending methods, referred to as leveling. We address both problems and introduce a novel labeling approach based on occlusion detection using depth maps that prevents texturing of parts of the model with images that do not contain the expected region. Moreover, we propose an improved approach for seam-leveling that penalizes too large values and helps to keep the resulting colors in a valid range. For high-performance visualization of the 3D models with a huge amount of textures, we make use of virtual texturing, and present an application that generates the needed texture atlas in significantly less time than existing scripts. 
Finally, we show how the mentioned components are integrated into a visualization application for a digitized archaeological site.", month = oct, isbn = "978-1-4799-3168-2 ", publisher = "IEEE", note = "DOI: 10.1109/DigitalHeritage.2013.6743749", location = "Marseille, France", booktitle = "Digital Heritage International Congress (DigitalHeritage), 2013", pages = "265--272 ", keywords = "digital cultural heritage, out-of-core real-time rendering, seamless texturing, virtual texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/birsak-2013-sta/", } @article{ohrhallinger_stefan-2013-smi, title = "Minimizing Edge Length to Connect Sparsely Sampled Unorganized Point Sets", author = "Stefan Ohrhallinger and Sudhir Mudur and Michael Wimmer", year = "2013", abstract = "Most methods for interpolating unstructured point clouds handle densely sampled point sets quite well but get into trouble when the point set contains regions with much sparser sampling, a situation often encountered in practice. In this paper, we present a new method that provides a better interpolation of sparsely sampled features. We pose the surface construction problem as finding the triangle mesh which minimizes the sum of all triangles’ longest edge. The output is a closed manifold triangulated surface Bmin. Exact computation of Bmin for sparse sampling is most probably NP-hard, and therefore we introduce suitable heuristics for computing it. The algorithm first connects the points by triangles chosen in order of their longest edge and with the requirement that all edges must have at least 2 incident triangles. This yields a closed non-manifold shape which we call the Boundary Complex. Then we transform it into a manifold triangulation using topological operations. We show that in practice, runtime is linear to that of the Delaunay triangulation of the points.", month = oct, journal = "Computers & Graphics (Proceedings of Shape Modeling International 2013)", volume = "37", number = "6", issn = "0097-8493", pages = "645--658", keywords = "point cloud, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ohrhallinger_stefan-2013-smi/", } @inproceedings{Auzinger_2013_NSAA, title = "Non-Sampled Anti-Aliasing", author = "Thomas Auzinger and Przemyslaw Musialski and Reinhold Preiner and Michael Wimmer", year = "2013", abstract = "In this paper we present a parallel method for high-quality edge anti-aliasing. In contrast to traditional graphics hardware methods, which rely on massive oversampling to combat aliasing issues in the rasterization process, we evaluate a closed-form solution of the associated prefilter convolution. This enables the use of a wide range of filter functions with arbitrary kernel sizes, as well as general shading methods such as texture mapping or complex illumination models. Due to the use of analytic solutions, our results are exact in the mathematical sense and provide objective ground-truth for other anti-aliasing methods and enable the rigorous comparison of different models and filters.
An efficient implementation on general purpose graphics hardware is discussed and several comparisons to existing techniques and of various filter functions are given.", month = sep, isbn = "978-3-905674-51-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Lugano, Switzerland", event = "Vision, Modeling, Visualization (VMV)", editor = "Michael Bronstein and Jean Favre and Kai Hormann", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization (VMV 2013)", pages = "169--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_NSAA/", } @misc{Auzinger_2013_SAR, title = "Sampled and Analytic Rasterization", author = "Thomas Auzinger and Michael Wimmer", year = "2013", abstract = "In this poster we present an overview of exact anti-aliasing (AA) methods in rasterization. In contrast to the common supersampling approaches for visibility AA (e.g. MSAA) or both visibility and shading AA (e.g. SSAA, decoupled sampling), prefiltering provides the mathematically exact solution to the aliasing problem. Instead of averaging a set of supersamples, the input data is convolved with a suitable low-pass filter before sampling is applied. Recent work showed that for both visibility signals and simple shading models, a closed-form solution to the convolution integrals can be found. As our main contribution, we present a classification of both sample-based and analytic AA approaches for rasterization and analyse their strengths and weaknesses.", month = sep, series = "VMV ", publisher = "Eurographics Association", location = "Lugano, Switzerland", isbn = "978-3-905674-51-4", event = "VMV 2013", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization", Conference date = "Poster presented at VMV 2013 (2013-09-11--2013-09-13)", note = "223--224", pages = "223 – 224", keywords = "Anti-Aliasing, Rasterization, Sampling, Supersampling, Prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_SAR/", } @article{musialski-2013-surcgf, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2013", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still under very active research. The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field.
Further, we would like to encourage even more interdisciplinary work among these research communities, since the reconstruction problem itself is far from solved.", month = sep, issn = "1467-8659", journal = "Computer Graphics Forum", number = "6", volume = "32", pages = "146--177", keywords = "facade modeling, state-of-the-art report, multi-view stereo, structure from motion, urban modeling, urban reconstruction, inverse-procedural modeling, facade reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/musialski-2013-surcgf/", } @inproceedings{STURN-2013-LSI, title = "Landspotting: A Serious iPad Game for Improving Global Land Cover", author = "Tobias Sturn and Dietmar Pangerl and Linda See and Steffen Fritz and Michael Wimmer", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial research and policy-related questions, show huge disagreements. In this paper we present a serious game for the iPad with the purpose of improving global land cover data. We describe the game, discuss the design decisions made and outline the challenges faced while developing the game. We evaluate how well the players are able to annotate land cover by comparing the game against expert validations collected using the Geo-Wiki tool and provide evidence that games can be a useful way to increase the quality of global land cover.", month = jul, isbn = "978-3-87907-532-4", publisher = "Verlag der \"{O}sterreichischen Akademie der Wissenschaften Austrian Academy of Sciences Press ", organization = "Z_GIS - Department of Geoinformatics", location = "University of Salzburg", booktitle = "Proceedings of the GI-Forum 2013 -- Creating the GISociety", pages = "81--90", keywords = "Landspotting, Serious Game, Improving Global Land Cover", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LSI/", } @inproceedings{JAHRMANN-2013-IGR, title = "Interactive Grass Rendering Using Real-Time Tessellation", author = "Klemens Jahrmann and Michael Wimmer", year = "2013", abstract = "Grass rendering is needed for many outdoor scenes, but for real-time applications, rendering each blade of grass as geometry has been too expensive so far. This is why grass is most often drawn as a texture mapped onto the ground or grass patches rendered as transparent billboard quads. Recent approaches use geometry for blades that are near the camera and flat geometry for rendering further away. In this paper, we present a technique which is capable of rendering whole grass fields in real time as geometry by exploiting the capabilities of the tessellation shader. Each single blade of grass is rendered as a two-dimensional tessellated quad facing its own random direction. This enables each blade of grass to be influenced by wind and to interact with its environment.
In order to adapt the grass field to the current scene, special textures are developed which encode on the one hand the density and height of the grass and on the other hand its look and composition.", month = jun, isbn = "978-80-86943-74-9", location = "Plzen, CZ", editor = "Manuel Oliveira and Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "114--122", keywords = "grass rendering, real-time rendering, billboards", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/JAHRMANN-2013-IGR/", } @inproceedings{scheiblauer-2013-wscg, title = "Analysis of Interactive Editing Operations for Out-of-Core Point-Cloud Hierarchies", author = "Claus Scheiblauer and Michael Wimmer", year = "2013", abstract = "In this paper we compare the time and space complexity of editing operations on two data structures which are suitable for visualizing huge point clouds. The first data structure was introduced by Scheiblauer and Wimmer [SW11] and uses only the original points from a source data set for building a level-of-detail hierarchy that can be used for rendering points clouds. The second data structure introduced by Wand et al. [WBB+07] requires additional points for the level-of-detail hierarchy and therefore needs more memory when stored on disk. Both data structures are based on an octree hierarchy and allow for deleting and inserting points. Besides analyzing and comparing these two data structures we also introduce an improvement to the points deleting algorithm for the data structure of Wand et al. [WBB+07], which thus allows for a more efficient node loading strategy during rendering.", month = jun, isbn = "978-80-86943-74-9", publisher = "Union Agency", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "123--132", keywords = "complexity analysis, point clouds, data structures, viewing algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/scheiblauer-2013-wscg/", } @inproceedings{Musialski-2013-ipmum, title = "Inverse-Procedural Methods for Urban Models", author = "Przemyslaw Musialski and Michael Wimmer", year = "2013", abstract = "Procedural modeling is an elegant and fast way to generate huge complex and realistically looking urban sites. Due to its generative nature it can also be referred to as forward-procedural modeling. Its major drawback is the usually quite complicated way of control. To overcome this difficulty a novel modeling paradigm has been introduced: it is commonly referred to as inverse procedural modeling, and its goal is to generate compact procedural descriptions of existing models---in the best case in an automatic manner as possible. These compact procedural representations can be used as a source for the synthesis of identical or similar objects, applied in various simulations and other studies of urban environments. We believe that this technology is still a widely unexplored ground and that it will prove itself as a very important tool in the reconstruction process. In this paper we sketch how inverse procedural modeling can be applied in the urban modeling field.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "V. Tourre and G. 
Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "31--32", keywords = "inverse procedural modeling, urban modeling, urban reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Musialski-2013-ipmum/", } @inproceedings{STURN-2013-LGI, title = "Landspotting - Games for Improving Global Land Cover", author = "Tobias Sturn and Michael Wimmer and Peter Purgathofer and Steffen Fritz", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial questions, show huge disagreements. In this paper, we introduce four serious game prototypes - a Facebook strategy game played on Google Maps, a Facebook tagging game, a tower-defense game, and an aesthetic tile game for the iPad - with the purpose of improving global land cover data. We describe the games in detail and discuss the design decisions we made and challenges we faced while developing the games. We evaluate how much the players have already been able to improve global land cover data and provide evidence that games can be a useful way to increase the quality of this data. Finally, we discuss how the main game is being perceived by the players and what has to be further improved to attract a bigger audience.", month = may, location = "Chania, Greece", booktitle = "Proceedings of Foundations of Digital Games Conference 2013 (FDG 2013)", pages = "117--125", keywords = "Improving Global Land Cover, Serious Games, Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LGI/", } @inproceedings{ilcik-2013-cipmi, title = "Challenges and Ideas in Procedural Modeling of Interiors", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2013", abstract = "While the creation of convincing cityscapes from the outside is already possible, there is a lack of robust and efficient techniques for modeling the interior of buildings. In particular, we focus on challenges for the subdivision of the interior space into rooms and for placement of furniture in those rooms.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "Vincent Tourre and Gonzalo Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "29--30", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ilcik-2013-cipmi/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. 
A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, closed-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @article{MATTAUSCH-2013-FSBE, title = "Freeform Shadow Boundary Editing", author = "Oliver Mattausch and Takeo Igarashi and Michael Wimmer", year = "2013", abstract = "We present an algorithm for artistically modifying physically based shadows. With our tool, an artist can directly edit the shadow boundaries in the scene in an intuitive fashion similar to freeform curve editing. Our algorithm then makes these shadow edits consistent with respect to varying light directions and scene configurations, by creating a shadow mesh from the new silhouettes. The shadow mesh helps a modified shadow volume algorithm cast shadows that conform to the artistic shadow boundary edits, while providing plausible interaction with dynamic environments, including animation of both characters and light sources. Our algorithm provides significantly more fine-grained local and direct control than previous artistic light editing methods, which makes it simple to adjust the shadows in a scene to reach a particular effect, or to create interesting shadow shapes and shadow animations. All cases are handled with a single intuitive interface, be it soft shadows, or (self-)shadows on arbitrary receivers.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "0167-7055", pages = "175--184", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MATTAUSCH-2013-FSBE/", } @incollection{sundstedt-2013-vag, title = "Visual Attention and Gaze Behaviour in Games: An Object-Based Approach", author = "Veronica Sundstedt and Matthias Bernhard and Efstathios Stavrakis and Erik Reinhard and Michael Wimmer", year = "2013", abstract = "This chapter presents state-of-the-art methods that tap the potential of psychophysics for the purpose of understanding game players' behavior. Studying gaze behavior in gaming environments has recently gained momentum as it affords a better understanding of gamers' visual attention. However, while knowing where users are attending in a computer game would be useful at a basic level, it does not provide insight into what users are interested in, or why. An answer to these questions can be tremendously useful to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. Furthermore, this could be useful in verifying game mechanics, improving game AI and smart positioning of advertisements within games, all being applications widely desirable across the games industry. Techniques are outlined to collect gaze data, and map fixation points back to semantic objects in a gaming environment, enabling a deeper understanding of how players interact with games. ", month = apr, booktitle = "Game Analytics: Maximizing the Value of Player Data ", editor = "M. Seif El-Nasr, A. Drachen, A. Canossa, K.
Isbister,", isbn = "9781447147688", publisher = "Springer", keywords = "Eye Tracking, Visual Attention, Computer Games", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/sundstedt-2013-vag/", } @inproceedings{SCHWAERZLER-2013-FPCSS, title = "Fast Percentage Closer Soft Shadows using Temporal Coherence", author = "Michael Schw\"{a}rzler and Christian Luksch and Daniel Scherzer and Michael Wimmer", year = "2013", abstract = "We propose a novel way to efficiently calculate soft shadows in real-time applications by overcoming the high computational effort involved with the complex corresponding visibility estimation each frame: We exploit the temporal coherence prevalent in typical scene movement, making the estimation of a new shadow value only necessary whenever regions are newly disoccluded due to camera adjustment, or the shadow situation changes due to object movement. By extending the typical shadow mapping algorithm by an additional light-weight buffer for the tracking of dynamic scene objects, we can robustly and efficiently detect all screen space fragments that need to be updated, including not only the moving objects themselves, but also the soft shadows they cast. By applying this strategy to the popular Percentage Closer Soft Shadow algorithm (PCSS), we double rendering performance in scenes with both static and dynamic objects - as prevalent in various 3D game levels - while maintaining the visual quality of the original approach.", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", address = "New York, NY, USA", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "79--86", keywords = "real-time, temporal coherence, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/SCHWAERZLER-2013-FPCSS/", } @inproceedings{LUKSCH-2013-FLM, title = "Fast Light-Map Computation with Virtual Polygon Lights", author = "Christian Luksch and Robert F. Tobler and Ralf Habel and Michael Schw\"{a}rzler and Michael Wimmer", year = "2013", abstract = "We propose a new method for the fast computation of light maps using a many-light global-illumination solution. A complete scene can be light mapped on the order of seconds to minutes, allowing fast and consistent previews for editing or even generation at loading time. In our method, virtual point lights are clustered into a set of virtual polygon lights, which represent a compact description of the illumination in the scene. The actual light-map generation is performed directly on the GPU. Our approach degrades gracefully, avoiding objectionable artifacts even for very short computation times. ", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "87--94", keywords = "instant radiosity, global illumination, light-maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/LUKSCH-2013-FLM/", } @article{knecht_martin_2013_ReflRefrObjsMR, title = "Reflective and Refractive Objects for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Christoph Winklhofer and Michael Wimmer", year = "2013", abstract = "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. 
Such objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large number of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.", month = mar, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE VR 2013)", volume = "19", number = "4", issn = "1077-2626", pages = "576--582", keywords = "Mixed Reality, Caustics, Reflections, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_martin_2013_ReflRefrObjsMR/", } @incollection{schedl-2013-gP4, title = "Simulating partial occlusion in post-processing depth-of-field methods", author = "David Schedl and Michael Wimmer", year = "2013", abstract = "This chapter describes a method for simulating Depth of Field (DoF). In particular, we investigate the so-called partial occlusion effect: objects near the camera blurred due to DoF are actually semitransparent and therefore result in partially visible background objects. This effect is strongly apparent in miniature- and macro photography and in film making. Games and interactive applications are nowadays becoming more cinematic, including strong DoF effects, and therefore it is important to be able to convincingly approximate the partial-occlusion effect. We show how to do so in this chapter; with the proposed optimizations even in real time.", month = mar, booktitle = "GPU Pro 4: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "9781466567436", note = "to appear", publisher = "A K Peters", keywords = "depth of field, realtime, layers, blurring", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schedl-2013-gP4/", } @article{fink-2013-cag, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2013", abstract = "This paper presents the syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. Teaching shader programming in concert with the low-level graphics pipeline makes it easier for our students to learn modern OpenGL with shaders in our follow-up intermediate course.
We also show how to create attractive course material by using COLLADA, an open standard for 3D content exchange, and our approach to organizing the practical course.", month = feb, issn = "0097-8493", journal = "Computers & Graphics", number = "1--2", volume = "37", pages = "12--20", keywords = "teaching, programmable shading, CG education, course organization, COLLADA", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/fink-2013-cag/", } @article{arikan-2013-osn, title = "O-Snap: Optimization-Based Snapping for Modeling Architecture", author = "Murat Arikan and Michael Schw\"{a}rzler and Simon Fl\"{o}ry and Michael Wimmer and Stefan Maierhofer", year = "2013", abstract = "In this paper, we introduce a novel reconstruction and modeling pipeline to create polygonal models from unstructured point clouds. We propose an automatic polygonal reconstruction that can then be interactively refined by the user. An initial model is automatically created by extracting a set of RANSAC-based locally fitted planar primitives along with their boundary polygons, and then searching for local adjacency relations among parts of the polygons. The extracted set of adjacency relations is enforced to snap polygon elements together, while simultaneously fitting to the input point cloud and ensuring the planarity of the polygons. This optimization-based snapping algorithm may also be interleaved with user interaction. This allows the user to sketch modifications with coarse and loose 2D strokes, as the exact alignment of the polygons is automatically performed by the snapping. The generated models are coarse, offer simple editing possibilities by design and are suitable for interactive 3D applications like games, virtual environments etc. The main innovation in our approach lies in the tight coupling between interactive input and automatic optimization, as well as in an algorithm that robustly discovers the set of adjacency relations.", month = jan, journal = "ACM Transactions on Graphics", volume = "32", number = "1", issn = "0730-0301", doi = "10.1145/2421636.2421642", pages = "6:1--6:15", keywords = "interactive modeling, surface reconstruction, geometric optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/arikan-2013-osn/", } @inproceedings{EISEMANN-2013-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2013", abstract = "This course provides an overview of efficient, real-time shadow algorithms. It presents the theoretical background but also discusses implementation details for facilitating efficient realizations (hard and soft shadows, volumetric shadows, reconstruction techniques). These elements are of relevance to both experts and practitioners. The course also reviews budget considerations and analyzes performance trade-offs, using examples from various AAA game titles and film previsualization tools. While physical accuracy can sometimes be replaced by plausible shadows, especially for games, film production requires more precision, such as scalable solutions that can deal with highly detailed geometry. The course builds upon earlier SIGGRAPH courses as well as the recent book Real-Time Shadows (A K Peters, 2011) by four of the instructors (due to its success, a second edition is planned for 2014). 
And with two instructors who have worked on AAA game and movie titles, the course presents interesting behind-the-scenes information that illuminates key topics.", booktitle = "ACM SIGGRAPH 2013 Courses", isbn = "978-1-4503-2339-0", location = "Anaheim, CA", publisher = "ACM", pages = "18:1--18:54", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/EISEMANN-2013-ERT/", } @article{SCHERZER-2012-TCM, title = "Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2012", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the speed gain is easily counteracted by increasingly complex and sophisticated shading computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit temporal coherence (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this survey, we investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow incorporating more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a general approach, image-space reprojection, with several implementation algorithms that facilitate reusing shading information across adjacent frames. We also discuss data-reuse quality and performance related to reprojection techniques. Finally, in the second half of this survey, we demonstrate various applications that exploit TC in real-time rendering. ", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "31", pages = "2378--2408", keywords = "remote rendering, sampling, perception-based rendering, occlusion culling, non-photo-realistic rendering, level-of-detail, large data visualization, image-based rendering, global illumination, frame interpolation, anti-aliasing, shadows, streaming, temporal coherence, upsampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHERZER-2012-TCM/", } @inproceedings{scheiblauer-2012-chnt, title = "Graph-based Guidance in Huge Point Clouds", author = "Claus Scheiblauer and Michael Wimmer", year = "2012", abstract = "In recent years the use of remote sensing devices like laser scanners in the documentation of excavation sites or cultural heritage sites has led to huge point cloud models from these sites. These data sets may cover complete sites including galleries, corridors, halls, and open places.
Orienting oneself in the point cloud becomes a challenge, if one is not familiar with the layout of the site. Therefore we propose a graph-based guidance system to show tourists round the point cloud models. The tourists can navigate interactively through the point cloud, but they are tied to a predefined 3D graph which represents the possible ways, and which connects the points of interest.", month = nov, isbn = "978-3-200-03281-1", location = "Vienna, Austria", booktitle = "Proceedings of the 17th International Conference on Cultural Heritage and New Technologies", keywords = "user interface, navigation, point rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/scheiblauer-2012-chnt/", } @article{knecht_martin_2012_RSMR, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Michael Wimmer", year = "2012", abstract = "In this paper we present a novel plausible rendering method for mixed reality systems, which is useful for many real-life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates. The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. In addition, we avoid artifacts like double shadows or inconsistent color bleeding which appear in previous work. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. The scene gets illuminated by virtual point lights, which use imperfect shadow maps to calculate visibility. A sufficiently fast scene reconstruction is done at run-time with Microsoft's Kinect sensor. Thus a time-consuming manual pre-modeling step of the real scene is not necessary. Our results show that the presented method highly improves the illusion in mixed-reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = nov, issn = "0097-8493", journal = "Computers & Graphics", number = "7", volume = "36", pages = "846--856", keywords = "Differential rendering, Reconstruction, Instant radiosity, Microsoft Kinect, Real-time global illumination, Mixed reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_RSMR/", } @inproceedings{SCHWAERZLER-2012-FAS, title = "Fast Accurate Soft Shadows with Adaptive Light Source Sampling", author = "Michael Schw\"{a}rzler and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Physically accurate soft shadows in 3D applications can be simulated by taking multiple samples from all over the area light source and accumulating them. Due to the unpredictability of the size of the penumbra regions, the required sampling density has to be high in order to guarantee smooth shadow transitions in all cases. Hence, several hundreds of shadow maps have to be evaluated in any scene configuration, making the process computationally expensive. Thus, we suggest an adaptive light source subdivision approach to select the sampling points adaptively. 
The main idea is to start with a few samples on the area light, evaluating their differences using hardware occlusion queries, and adding more sampling points if necessary. Our method is capable of selecting and rendering only the samples which contribute to an improved shadow quality, and hence generates shadows of comparable quality and accuracy. Even though additional calculation time is needed for the comparison step, this method saves valuable rendering time and achieves interactive to real-time frame rates in many cases where a brute force sampling method does not. ", month = nov, isbn = "978-3-905673-95-1", publisher = "Eurographics Association", location = "Magdeburg, Germany", booktitle = "Proceedings of the 17th International Workshop on Vision, Modeling, and Visualization (VMV 2012)", pages = "39--46", keywords = "soft shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHWAERZLER-2012-FAS/", } @misc{Auzinger_2012_GeigerCam, title = "GeigerCam: Measuring Radioactivity with Webcams", author = "Thomas Auzinger and Ralf Habel and Andreas Musilek and Dieter Hainz and Michael Wimmer", year = "2012", abstract = "Measuring radioactivity is almost exclusively a professional task in the realms of science, industry and defense, but recent events spur the interest in low-cost consumer detection devices. We show that by using image processing techniques, a current, only slightly modified, off-the-shelf HD webcam can be used to measure alpha, beta as well as gamma radiation. In contrast to dedicated measurement devices such as Geiger counters, our framework can classify the type of radiation and can differentiate between various kinds of radioactive materials. By optically insulating the camera's imaging sensor, recordings at extreme exposure and gain values are possible, and the partly very faint signals become detectable. The camera is set to the longest exposure time possible and to a very high gain to detect even faint signals. During measurements, GPU-assisted real-time image processing of the direct video feed is used to treat the remaining noise by tracking the noise spectrum per pixel, incorporating not only spatial but also temporal variations due to temperature changes and spontaneous emissions. A confidence value per pixel based on event probabilities is calculated to identify potentially hit pixels. Finally, we use morphological clustering to group pixels into particle impact events and analyze their energies. Our approach results in a simple device that can be operated on any computer and costs only $20-30, an order of magnitude cheaper than entry-level nuclear radiation detectors.", month = aug, publisher = "ACM", location = "Los Angeles, CA", address = "New York, NY, USA", isbn = "978-1-4503-1682-8", event = "ACM SIGGRAPH 2012", editor = "Dan Wexler", booktitle = "ACM SIGGRAPH 2012 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2012 (2012-08-05--2012-08-09)", note = "40:1--40:1", pages = "40:1 – 40:1", keywords = "radioactivity, webcam, measurement", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_GeigerCam/", } @article{MATTAUSCH-2012-TIS, title = "Tessellation-Independent Smooth Shadow Boundaries", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer and Takeo Igarashi", year = "2012", abstract = "We propose an efficient and light-weight solution for rendering smooth shadow boundaries that do not reveal the tessellation of the shadow-casting geometry.
Our algorithm reconstructs the smooth contours of the underlying mesh and then extrudes shadow volumes from the smooth silhouettes to render the shadows. For this purpose, we propose an improved silhouette reconstruction using the vertex normals of the underlying smooth mesh. Then our method subdivides the silhouette loops until the contours are sufficiently smooth and project to smooth shadow boundaries. This approach decouples the shadow smoothness from the tessellation of the geometry and can be used to maintain equally high shadow quality for multiple LOD levels. It causes only a minimal change to the fill rate, which is the well-known bottleneck of shadow volumes, and hence incurs only a small overhead. ", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "4", issn = "1467-8659", pages = "1465--1470", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-TIS/", } @article{schedl-2012-dof, title = "A layered depth-of-field method for solving partial occlusion", author = "David Schedl and Michael Wimmer", year = "2012", abstract = "Depth of field (DoF) represents a distance range around a focal plane, where objects on an image are crisp. DoF is one of the effects which significantly contributes to the photorealism of images and therefore is often simulated in rendered images. Various methods for simulating DoF have been proposed so far, but few tackle the issue of partial occlusion: Blurry objects near the camera are semi-transparent and result in partially visible background objects. This effect is strongly apparent in miniature and macro photography. In this work, a DoF method is presented which simulates partial occlusion. The contribution of this work is a layered method where the scene is rendered into layers. Blurring is done efficiently with recursive Gaussian filters. Due to the use of Gaussian filters, large artifact-free blur radii can be simulated at reasonable cost.", month = jun, journal = "Journal of WSCG", volume = "20", number = "3", issn = "1213-6972", pages = "239--246", keywords = "realtime, rendering, depth-of-field, layers, depth peeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/schedl-2012-dof/", } @article{knecht_martin_2012_BRDFEstimation, title = "Interactive BRDF Estimation for Mixed-Reality Applications", author = "Martin Knecht and Georg Tanzmeister and Christoph Traxler and Michael Wimmer", year = "2012", abstract = "Recent methods in augmented reality allow simulating mutual light interactions between real and virtual objects. These methods are able to embed virtual objects in a more sophisticated way than previous methods. However, their main drawback is that they need a virtual representation of the real scene to be augmented in the form of geometry and material properties. In the past, this representation had to be modeled in advance, which is very time-consuming and only allows for static scenes. We propose a method that reconstructs the surrounding environment and estimates its Bidirectional Reflectance Distribution Function (BRDF) properties at runtime without any preprocessing. By using the Microsoft Kinect sensor and an optimized hybrid CPU & GPU-based BRDF estimation method, we are able to achieve interactive frame rates.
The proposed method was integrated into a differential instant radiosity rendering system to demonstrate its feasibility.", month = jun, journal = "Journal of WSCG", volume = "20", number = "1", issn = "1213-6972", pages = "47--56", keywords = "Augmented Reality, BRDF Estimation, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_BRDFEstimation/", } @article{musialski-2012-icb, title = "Interactive Coherence-Based Fa\c{c}ade Modeling", author = "Przemyslaw Musialski and Michael Wimmer and Peter Wonka", year = "2012", abstract = "We propose a novel interactive framework for modeling building fa\c{c}ades from images. Our method is based on the notion of coherence-based editing which allows exploiting partial symmetries across the fa\c{c}ade at any level of detail. The proposed workflow mixes manual interaction with automatic splitting and grouping operations based on unsupervised cluster analysis. In contrast to previous work, our approach leads to detailed 3d geometric models with up to several thousand regions per fa\c{c}ade. We compare our modeling scheme to others and evaluate our approach in a user study with an experienced user and several novice users.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "661--670", keywords = "facade modeling, urban modeling, facade reconstruction, image-based modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-icb/", } @article{Habel_2012_PSP, title = "Practical Spectral Photography", author = "Ralf Habel and Michael Kudenov and Michael Wimmer", year = "2012", abstract = "We introduce a low-cost and compact spectral imaging camera design based on unmodified consumer cameras and a custom camera objective. The device can be used in a high-resolution configuration that measures the spectrum of a column of an imaged scene with up to 0.8 nm spectral resolution, rivalling commercial non-imaging spectrometers, and a mid-resolution hyperspectral mode that allows the spectral measurement of a whole image, with up to 5 nm spectral resolution and 120x120 spatial resolution. We develop the necessary calibration methods based on halogen/fluorescent lamps and laser pointers to acquire all necessary information about the optical system. We also derive the mathematical methods to interpret and reconstruct spectra directly from the Bayer array images of a standard RGGB camera. This objective design introduces accurate spectral remote sensing to computational photography, with numerous applications in color theory, colorimetry, vision and rendering, making the acquisition of a spectral image as simple as taking a high-dynamic-range image.", month = may, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "449--458", keywords = "Computational Photography, Spectroscopy, Computed Tomography Imaging Spectrometer, Practical", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Habel_2012_PSP/", } @inproceedings{fink-2012-cg1, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2012", abstract = "Shaders are a fundamental pattern of the modern graphics pipeline. This paper presents a syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. 
We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. We also show how to create attractive course materials by using COLLADA, an open standard for 3D content exchange.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", issn = "1017-4656", event = "Eurographics 2012", editor = "Giovanni Gallo and Beatriz Sousa Santos", booktitle = "Eurographics 2012 -- Education Papers", pages = "73--80", keywords = "Education, Collada, Java, Introductory Computer Graphics, Software Rasterizer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/fink-2012-cg1/", } @inproceedings{musialski-2012-sur, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2012", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still an area of very active research. The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field. Furthermore, we would like to encourage even more interdisciplinary work between the mentioned research communities, since the reconstruction problem itself is far from solved. ", month = may, booktitle = "EUROGRAPHICS 2012 State of the Art Reports", location = "Cagliari, Sardinia, Italy", publisher = "Eurographics Association", series = "EG STARs", pages = "1--28", keywords = "facade modeling, structure from motion, multi-view stereo, urban reconstruction, inverse-procedural modeling, urban modeling, image-based modeling, city reconstruction, state-of-the-art report", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-sur/", } @inproceedings{preiner_2012_AS, title = "Auto Splats: Dynamic Point Cloud Visualization on the GPU", author = "Reinhold Preiner and Stefan Jeschke and Michael Wimmer", year = "2012", abstract = "Capturing real-world objects with laser-scanning technology has become an everyday task. Recently, the acquisition of dynamic scenes at interactive frame rates has become feasible. A high-quality visualization of the resulting point cloud stream would require a per-frame reconstruction of object surfaces. Unfortunately, reconstruction computations are still too time-consuming to be applied interactively. In this paper we present a local surface reconstruction and visualization technique that provides interactive feedback for reasonably sized point clouds, while achieving high image quality. Our method is performed entirely on the GPU and in screen space, exploiting the efficiency of the common rasterization pipeline. The approach is very general, as no assumption is made about point connectivity or sampling density. This naturally allows combining the outputs of multiple scanners in a single visualization, which is useful for many virtual and augmented reality applications.", month = may, isbn = "978-3-905674-35-4", organization = "Eurographics Association 2012", location = "Cagliari", editor = "H. Childs and T.
Kuhlen", booktitle = "Proceedings of Eurographics Symposium on Parallel Graphics and Visualization", pages = "139--148", keywords = "point clouds, surface reconstruction, point rendering, Auto Splats, KNN search, GPU rendering, point based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/preiner_2012_AS/", } @WorkshopTalk{Fritz-2012, title = "Landspotting: Social gaming to collect vast amounts of data for satellite validation", author = "Steffen Fritz and Peter Purgathofer and F. Kayali and M. Fellner and Michael Wimmer and Tobias Sturn and Josef Schuh and G. Triebnig and S. Krause and F. Schindler and M. Kollegger and C. Perger and M. D\"{u}rauer and W. Haberl and L. See and Ian McCallum", year = "2012", abstract = "At present there is no single satellite-derived global land cover product that is accurate enough to provide reliable estimates of forest or cropland area to determine, e.g., how much additional land is available to grow biofuels or to tackle problems of food security. The Landspotting Project aims to improve the quality of this land cover information by vastly increasing the amount of in-situ validation data available for calibration and validation of satellite-derived land cover. The Geo-Wiki (Geo-Wiki.org) system currently allows users to compare three satellite derived land cover products and validate them using Google Earth. However, there is presently no incentive for anyone to provide this data so the amount of validation through Geo-Wiki has been limited. However, recent competitions have proven that incentive driven campaigns can rapidly create large amounts of input. The LandSpotting Project is taking a truly innovative approach through the development of the Landspotting game. The game engages users whilst simultaneously collecting a large amount of in-situ land cover information. The development of the game is informed by the current raft of successful social gaming that is available on the internet and as mobile applications, many of which are geo-spatial in nature. Games that are integrated within a social networking site such as Facebook illustrate the power to reach and continually engage a large number of individuals. The number of active Facebook users is estimated to be greater than 400 million, where 100 million are accessing Facebook from mobile devices. The Landspotting Game has similar game mechanics as the famous strategy game "Civilization" (i.e. build, harvest, research, war, diplomacy, etc.). When a player wishes to make a settlement, they must first classify the land cover over the area they wish to settle. As the game is played on the earth surface with Google Maps, we are able to record and store this land cover/land use classification geographically. Every player can play the game for free (i.e. a massive multiplayer online game). Furthermore, it is a social game on Facebook (e.g. invite friends, send friends messages, purchase gifts, help friends, post messages onto the wall, etc). The game is played in a web browser, therefore it runs everywhere (where Flash is supported) without requiring the user to install anything additional. 
At the same time, the Geo-Wiki system will be modified to use the acquired in-situ validation information to create new outputs: a hybrid land cover map, which takes the best information from each individual product to create a single integrated version; a database of validation points that will be freely available to the land cover user community; and a facility that allows users to create a specific targeted validation area, which will then be provided to the crowdsourcing community for validation. These outputs will turn Geo-Wiki into a valuable system for Earth system scientists. ", month = apr, event = "European Geosciences Union General Assembly 2012", location = "Austria Center Vienna, Session ESSI2.9, room 7", keywords = "Social Games with Purpose, Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Fritz-2012/", } @techreport{TR-186-2-12-01, title = "Interactive Screen-Space Triangulation for High-Quality Rendering of Point Clouds", author = "Reinhold Preiner and Michael Wimmer", year = "2012", abstract = "This technical report documents work that is a precursor to the Auto Splatting technique. We present a rendering method that reconstructs high-quality images from unorganized colored point data. While previous real-time image reconstruction approaches for point clouds make use of preprocessed data like point radii or normal estimations, our algorithm only requires position and color data as input and produces a reconstructed color image, normal map, and depth map, which can instantly be used to apply further deferred lighting passes. Our method performs a world-space neighbor search and a subsequent normal estimation in screen-space, and uses the geometry shader to triangulate the color, normal and depth information of the points. To achieve correct visibility and closed surfaces in the projected image, a temporal coherence approach reuses triangulated depth information and provides adaptive neighbor search radii. Our algorithm is especially suitable for in-situ high-quality visualization of large datasets such as 3D scans, making otherwise time-consuming preprocessing steps to reconstruct surface normals or point radii dispensable.", month = apr, number = "TR-186-2-12-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "screen triangulation, point rendering, nearest neighbors, screen-space, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/TR-186-2-12-01/", } @incollection{MATTAUSCH-2012-EOV, title = "Efficient Online Visibility for Shadow Maps", author = "Oliver Mattausch and Jir\'{i} Bittner and Ari Silvennoinen and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Standard online occlusion culling is able to vastly improve the rasterization performance of walkthrough applications by identifying large parts of the scene as invisible from the camera and rendering only the visible geometry. However, it is of little use for the acceleration of shadow map generation (i.e., rasterizing the scene from the light view [Williams 78]), so that typically a high percentage of the geometry will be visible when rendering shadow maps. For example, in outdoor scenes typical viewpoints are near the ground and therefore have significant occlusion, while light viewpoints are higher up and see most of the geometry.
Our algorithm remedies this situation by quickly detecting and culling the geometry that does not contribute to the shadow in the final image.", month = feb, booktitle = "GPU Pro 3: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "978-1439887820", publisher = "CRC Press", keywords = "shadow maps, visibility culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-EOV/", } @inproceedings{EISEMANN-2012-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2012", abstract = "This course is a resource for applying efficient, real-time shadow algorithms. It builds on a solid foundation (previous courses at SIGGRAPH Asia 2009 and Eurographics 2010, including comprehensive course notes) and the 2011 book Real-Time Shadows (AK Peters) written by four of the presenters. The book is a compendium of many topics in the realm of shadow computation.", booktitle = "ACM SIGGRAPH 2012 Courses", isbn = "978-1-4503-1678-1", location = "Los Angeles, CA", publisher = "ACM", pages = "18:1--18:53", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/EISEMANN-2012-ERT/", } @article{bernhard-2011-bmtf, title = "Bi-modal Task Facilitation in a Virtual Traffic Scenario through Spatialized Sound Rendering", author = "Matthias Bernhard and Karl Grosse and Michael Wimmer", year = "2011", abstract = "Audio rendering is generally used to increase the realism of Virtual Environments (VE). In addition, audio rendering may also improve the performance in specific tasks carried out in interactive applications such as games or simulators. In this paper we investigate the effect of the quality of sound rendering on task performance in a task which is inherently vision-dominated. The task is a virtual traffic gap-crossing scenario with two elements: first, to discriminate crossable and uncrossable gaps in oncoming traffic, and second, to find the right timing to start crossing the street without an accident. A study was carried out with 48 participants in an immersive Virtual Environment setup with a large screen and headphones. Participants were grouped into three different conditions. In the first condition, spatialized audio rendering with head-related transfer function (HRTF) filtering was used. The second group was tested with conventional stereo rendering, and the remaining group ran the experiment in a mute condition. Our results give clear evidence that spatialized audio improves task performance compared to the unimodal mute condition. Since all task-relevant information was in the participants' field-of-view, we conclude that an enhancement of task performance results from a bimodal advantage due to the integration of visual and auditory spatial cues.", month = nov, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", note = "Article No.
24", number = "4", volume = "8", pages = "1--22", keywords = "bimodal task faciliation, pedestrian safety, virtual environments, audio-visual perception, head related transfer functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-bmtf/", } @inproceedings{knecht-2011-CBCM, title = "Adaptive Camera-Based Color Mapping For Mixed-Reality Applications", author = "Martin Knecht and Christoph Traxler and Werner Purgathofer and Michael Wimmer", year = "2011", abstract = "We present a novel adaptive color mapping method for virtual objects in mixed-reality environments. In several mixed-reality applications, added virtual objects should be visually indistinguishable from real objects. Recent mixed-reality methods use global-illumination algorithms to approach this goal. However, simulating the light distribution is not enough for visually plausible images. Since the observing camera has its very own transfer function from real-world radiance values to RGB colors, virtual objects look artificial just because their rendered colors do not match with those of the camera. Our approach combines an on-line camera characterization method with a heuristic to map colors of virtual objects to colors as they would be seen by the observing camera. Previous tone-mapping functions were not designed for use in mixed-reality systems and thus did not take the camera-specific behavior into account. In contrast, our method takes the camera into account and thus can also handle changes of its parameters during runtime. The results show that virtual objects look visually more plausible than by just applying tone-mapping operators.", month = oct, isbn = "978-1-4577-2183-0 ", publisher = "IEEE/IET Electronic Library (IEL), IEEE-Wiley eBooks Library, VDE VERLAG Conference Proceedings", note = "E-ISBN: 978-1-4577-2184-7", location = "Basel, Switzerland", booktitle = "Proceedings of the 2011 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2011)", pages = "165--168", keywords = "Color Matching, Differential Rendering, Mixed Reality, Tone Mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht-2011-CBCM/", } @inproceedings{bernhard-2011-maicg, title = "Manipulating Attention in Computer Games", author = "Matthias Bernhard and Le Zhang and Michael Wimmer", year = "2011", abstract = "In computer games, a user’s attention is focused on the current task, and task-irrelevant details remain unnoticed. This behavior, known as inattentional blindness, is a main problem for the optimal placement of information or advertisements. We propose a guiding principle based on Wolfe’s theory of Guided Search, which predicts the saliency of objects during a visual search task. Assuming that computer games elicit visual search tasks frequently, we applied this model in a “reverse” direction: Given a target item (e.g., advertisement) which should be noticed by the user, we choose a frequently searched game item and modify it so that it shares some perceptual features (e.g., color or orientation) with the target item. 
A memory experiment with 36 participants showed that in an action video game, advertisements were more noticeable to users when this method was applied.", month = jun, isbn = "9781457712852", publisher = "IEEE", location = "Ithaca, NY", booktitle = "Proceedings of the IEEE IVMSP Workshop on Perception and Visual Signal Analysis", pages = "153--158", keywords = "saliency, attention guidance, inattentional blindness, in-game advertising, guided search", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/", } @article{scheiblauer-2011-cag, title = "Out-of-Core Selection and Editing of Huge Point Clouds", author = "Claus Scheiblauer and Michael Wimmer", year = "2011", abstract = "In this paper we present an out-of-core editing system for point clouds, which allows selecting and modifying arbitrary parts of a huge point cloud interactively. We can use the selections to segment the point cloud, to delete points, or to render a preview of the model without the points in the selections. Furthermore, we allow for inserting points into an already existing point cloud. All operations are conducted on a rendering-optimized data structure that uses the raw point cloud from a laser scanner, and no additionally created points are needed for an efficient level-of-detail (LOD) representation using this data structure. We also propose an algorithm to alleviate the artifacts when rendering a point cloud with large discrepancies in density in different areas by estimating point sizes heuristically. These estimated point sizes can be used to mimic a closed surface on the raw point cloud, even when the point cloud is composed of several raw laser scans.", month = apr, issn = "0097-8493", journal = "Computers & Graphics", number = "2", volume = "35", pages = "342--351", keywords = "Graphics data structures and data types, Viewing algorithms, Point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scheiblauer-2011-cag/", } @article{lipp2011a, title = "Interactive Modeling of City Layouts using Layers of Procedural Content", author = "Markus Lipp and Daniel Scherzer and Peter Wonka and Michael Wimmer", year = "2011", abstract = "In this paper, we present new solutions for the interactive modeling of city layouts that combine the power of procedural modeling with the flexibility of manual modeling. Procedural modeling enables us to quickly generate large city layouts, while manual modeling allows us to hand-craft every aspect of a city. We introduce transformation and merging operators for both topology preserving and topology changing transformations based on graph cuts. In combination with a layering system, this allows intuitive manipulation of urban layouts using operations such as drag and drop, translation, rotation etc. In contrast to previous work, these operations always generate valid, i.e., intersection-free layouts. Furthermore, we introduce anchored assignments to make sure that modifications are persistent even if the whole urban layout is regenerated.
", month = apr, journal = "Computer Graphics Forum (Proceedings EG 2011)", volume = "30", number = "2", issn = "0167-7055", pages = "345--354", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/lipp2011a/", } @inproceedings{knecht_martin-2011-FPSPAR, title = "A Framework For Perceptual Studies In Photorealistic Augmented Reality", author = "Martin Knecht and Andreas D\"{u}nser and Christoph Traxler and Michael Wimmer and Raphael Grasset", year = "2011", abstract = "In photorealistic augmented reality virtual objects are integrated in the real world in a seamless visual manner. To obtain a perfect visual augmentation these objects must be rendered indistinguishable from real objects and should be perceived as such. In this paper we propose a research test bed framework to study the different unresolved perceptual issues in photorealistic augmented reality and its application to different disciplines. The framework computes a global illumination approximation in real-time and therefore leverages a new class of experimental research topics.", month = mar, location = "Singapore", editor = "Frank Steinicke, Pete Willemsen", booktitle = "Proceedings of the 3rd IEEE VR 2011 Workshop on Perceptual Illusions in Virtual Environments", pages = "27--32", keywords = "photorealistic augmented reality, real-time global illumination, human perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht_martin-2011-FPSPAR/", } @article{scherzer2011d, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2011", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~cite{HLHS03}, while the last survey for general shadow generation dates back to 1990~cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the readers with the means to choose the shadow algorithm best suited to their needs. 
", month = feb, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "30", pages = "169--186", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011d/", } @incollection{matt2011, title = "Temporal Screen-Space Ambient Occlusion", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", keywords = "ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/matt2011/", } @inproceedings{bittner-2011-scc, title = "Shadow Caster Culling for Efficient Shadow Mapping", author = "Jir\'{i} Bittner and Oliver Mattausch and Ari Silvennoinen and Michael Wimmer", year = "2011", abstract = "We propose a novel method for efficient construction of shadow maps by culling shadow casters which do not contribute to visible shadows. The method uses a mask of potential shadow receivers to cull shadow casters using a hierarchical occlusion culling algorithm. We propose several variants of the receiver mask implementations with different culling efficiency and computational costs. For scenes with statically focused shadow maps we designed an efficient strategy to incrementally update the shadow map, which comes close to the rendering performance for unshadowed scenes. We show that our method achieves 3x-10x speedup for rendering large city like scenes and 1.5x-2x speedup for rendering an actual game scene.", month = feb, isbn = "978-1-4503-0565-5", publisher = "ACM", organization = "ACM SIGGRAPH", location = "San Francisco", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2011", pages = "81--88", keywords = "occlusion culling, shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bittner-2011-scc/", } @incollection{Habel_LSN_2011, title = "Level-of-Detail and Streaming Optimized Irradiance Normal Mapping", author = "Ralf Habel and Anders Nilsson and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Habel_LSN_2011/", } @inproceedings{scherzer2011c, title = "A Survey on Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2011", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the processing gain is counteracted to a high degree by increasingly complex and sophisticated pixel computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit emph{temporal coherence} (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. 
In this STAR, we will investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow us to incorporate more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a key data structure, the so-called \emph{reprojection cache}, with several supporting algorithms that facilitate reusing shading information from previous frames. Its usefulness is illustrated in the second part of the STAR, where we present various applications. We illustrate how expensive pixel shaders, multi-pass shading effects, stereo rendering, shader antialiasing, shadow casting, and global-illumination effects can profit from pixel reuse. Furthermore, we will see that optimizations for visibility culling and object-space global illumination can also be achieved by exploiting TC. This STAR enables the reader to gain an overview of many techniques in this cutting-edge field and provides many insights into algorithmic choices and implementation issues. It delivers working knowledge of how various existing techniques are optimized via data reuse. Another goal of this STAR is to inspire the reader and to raise awareness for temporal coherence as an elegant tool that could be a crucial component to satisfy the recent need for higher resolution and more detailed content. ", booktitle = "EUROGRAPHICS 2011 State of the Art Reports", location = "Llandudno UK", publisher = "Eurographics Association", pages = "101--126", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011c/", } @book{EISEMANN-2011-RTS, title = "Real-Time Shadows", author = "Elmar Eisemann and Michael Schwarz and Ulf Assarsson and Michael Wimmer", year = "2011", abstract = "Important elements of games, movies, and other computer-generated content, shadows are crucial for enhancing realism and providing important visual cues. In recent years, there have been notable improvements in visual quality and speed, making high-quality realistic real-time shadows a reachable goal. Real-Time Shadows is a comprehensive guide to the theory and practice of real-time shadow techniques. It covers a large variety of different effects, including hard, soft, volumetric, and semi-transparent shadows. The book explains the basics as well as many advanced aspects related to the domain of shadow computation. It presents interactive solutions and practical details on shadow computation. The authors compare various algorithms for creating real-time shadows and illustrate how they are used in different situations. They explore the limitations and failure cases, advantages and disadvantages, and suitability of the algorithms in several applications. Source code, videos, tutorials, and more are available on the book’s website.", isbn = "978-1568814384", pages = "398", publisher = "A.K.
Peters", keywords = "computer games, real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/EISEMANN-2011-RTS/", } @article{mattausch-2010-tao, title = "High-Quality Screen-Space Ambient Occlusion using Temporal Coherence", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "Ambient occlusion is a cheap but effective approximation of global illumination. Recently, screen-space ambient occlusion (SSAO) methods, which sample the frame buffer as a discretization of the scene geometry, have become very popular for real-time rendering. We present temporal SSAO (TSSAO), a new algorithm which exploits temporal coherence to produce high-quality ambient occlusion in real time. Compared to conventional SSAO, our method reduces both noise as well as blurring artifacts due to strong spatial filtering, faithfully representing fine-grained geometric structures. Our algorithm caches and reuses previously computed SSAO samples, and adaptively applies more samples and spatial filtering only in regions that do not yet have enough information available from previous frames. The method works well for both static and dynamic scenes.", month = dec, issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "29", pages = "2492--2503", keywords = "temporal coherence, ambient occlusion, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/mattausch-2010-tao/", } @inproceedings{knecht_martin_2010_DIR, title = "Differential Instant Radiosity for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "In this paper we present a novel plausible realistic rendering method for mixed reality systems, which is useful for many real life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates (20-30fps). The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. We describe a new method to assign virtual point lights to multiple primary light sources, which can be real or virtual. We use imperfect shadow maps for calculating illumination from virtual point lights and have significantly improved their accuracy by taking the surface normal of a shadow caster into account. Temporal coherence is exploited to reduce flickering artifacts. 
Our results show that the presented method highly improves the illusion in mixed reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = oct, note = "Best Paper Award!", location = "Seoul", booktitle = "Proceedings of the 2010 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2010)", pages = "99--107", keywords = "Instant Radiosity, Differential Rendering, Real-time Global Illumination, Mixed Reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/knecht_martin_2010_DIR/", } @article{bernhard-2010-gph, title = "An Empirical Pipeline to Derive Gaze Prediction Heuristics for 3D Action Games", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Wimmer", year = "2010", abstract = "Gaze analysis and prediction in interactive virtual environments, such as games, is a challenging topic since the 3D perspective and variations of the viewpoint as well as the current task introduce many variables that affect the distribution of gaze. In this article, we present a novel pipeline to study eye-tracking data acquired from interactive 3D applications. The result of the pipeline is an importance map which scores the amount of gaze spent on each object. This importance map is then used as a heuristic to predict a user’s visual attention according to the object properties present at runtime. The novelty of this approach is that the analysis is performed in object space and the importance map is defined in the feature space of high-level properties. High-level properties are used to encode task relevance and other attributes, such as eccentricity, which may have an impact on gaze behavior. The pipeline has been tested with an exemplary study on a first-person shooter game. In particular, a protocol is presented describing the data acquisition procedure, the learning of different importance maps from the data, and finally an evaluation of the performance of the derived gaze predictors. A metric measuring the degree of correlation between attention predicted by the importance map and the actual gaze yielded clearly positive results. The correlation becomes particularly strong when the player is attentive to an in-game task.", month = oct, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "1", volume = "8", pages = "4:1--4:30", keywords = "gaze predictor, video games, virtual environments, eye-tracking, gaze analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bernhard-2010-gph/", } @article{LIPP-2010-PGMS, title = "Parallel Generation of Multiple L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2010", abstract = "This paper introduces a solution to compute L-systems on parallel architectures like GPUs and multi-core CPUs. Our solution can split the derivation of the L-system as well as the interpretation and geometry generation into thousands of threads running in parallel. We introduce a highly parallel algorithm for L-system evaluation that works on arbitrary L-systems, including parametric productions, context sensitive productions, stochastic production selection, and productions with side effects. This algorithm is further extended to allow evaluation of multiple independent L-systems in parallel. In contrast to previous work, we directly interpret the productions defined in plain-text, without requiring any compilation or transformation step (e.g., into shaders). 
Our algorithm is efficient in the sense that it requires no explicit inter-thread communication or atomic operations, and is thus completely lock free.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", number = "5", volume = "34", pages = "585--593", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/LIPP-2010-PGMS/", } @WorkshopTalk{ilcik-2011-pmous, title = "Procedural Modeling of Urbanistic Scenes", author = "Martin Il\v{c}\'{i}k and Markus Lipp and Johannes Scharl and Michael Wimmer", year = "2010", abstract = "The aim of the GameWorld project was the procedural generation of cities and buildings on different levels of control, from manual to fully automatic modeling. Target applications included mainly computer games, virtual reality applications and other digital content creation scenarios for entertainment industry, architectural, historical and archaeological visualizations, and much more.", month = jun, event = "Austrian-Russian Joint Seminar - Visual Computing in Fundamental, Academic and Applied Science and Research", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/ilcik-2011-pmous/", } @article{bagar2010, title = "A Layered Particle-Based Fluid Model for Real-Time Rendering of Water", author = "Florian Bagar and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "We present a physically based real-time water simulation and rendering method that brings volumetric foam to the real-time domain, significantly increasing the realism of dynamic fluids. We do this by combining a particle-based fluid model that is capable of accounting for the formation of foam with a layered rendering approach that is able to account for the volumetric properties of water and foam. Foam formation is simulated through Weber number thresholding. For rendering, we approximate the resulting water and foam volumes by storing their respective boundary surfaces in depth maps. This allows us to calculate the attenuation of light rays that pass through these volumes very efficiently. We also introduce an adaptive curvature flow filter that produces consistent fluid surfaces from particles independent of the viewing distance.", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2010)", volume = "29", number = "4", issn = "0167-7055", pages = "1383--1389", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bagar2010/", } @inproceedings{ilcik-2010-ps, title = "Procedural Skeletons: Kinematic Extensions to CGA-Shape Grammars", author = "Martin Il\v{c}\'{i}k and Stefan Fiedler and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "Procedural modeling for architectural scenes was as yet limited to static objects only. We introduce a novel extension layer for shape grammars which creates a skeletal system for posing and interactive manipulation of generated models. Various models can be derived with the same set of parametrized rules for geometric operations. Separation of geometry generation and pose synthesis improves design efficiency and reusability. 
Moreover, by formal analysis of production rules we show how to efficiently update complex kinematic hierarchies created by the skeletons, allowing state-of-the-art interactive visual rule editing.", month = may, isbn = "978-80-223-2644-5", publisher = "Comenius University, Bratislava", booktitle = "Proceedings of the Spring Conference on Computer Graphics 2010", pages = "177--184", keywords = "procedural modeling, shape grammars, architecture, skeletal animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/ilcik-2010-ps/", } @inproceedings{Habel-2010-EIN, title = "Efficient Irradiance Normal Mapping", author = "Ralf Habel and Michael Wimmer", year = "2010", abstract = "Irradiance normal mapping is a method to combine two popular techniques, light mapping and normal mapping, and is used in games such as Half-Life 2 or Halo 3. This combination allows using low-resolution light caching on surfaces with only a few coefficients which are evaluated by normal maps to render spatial high-frequency changes in the lighting. Though there are dedicated bases for this purpose such as the Half-Life 2 basis, higher order basis functions such as quadratic Spherical Harmonics are needed for an accurate representation. However, a full spherical basis is not needed since the irradiance is stored on the surface of a scene. In order to represent the irradiance signals efficiently, we propose a novel polynomial, hemispherically orthonormal basis function set that is specifically designed to carry a directional irradiance signal on the hemisphere and which makes optimal use of the number of coefficients. To compare our results with previous work, we analyze the relations and attributes of previously proposed basis systems and show that 6 coefficients are sufficient to accurately represent an irradiance signal on the hemisphere. To create the necessary irradiance signals, we use Spherical Harmonics as an intermediate basis due to their fast filtering capabilities.", month = feb, isbn = "978-1-60558-939-8", publisher = "ACM", location = "Washington D.C.", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2010", pages = "189--195", keywords = "irradiance, real-time rendering, normal mapping, lightmap", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel-2010-EIN/", } @article{preiner_2010_GIPC, title = "Real-Time Global Illumination for Point Cloud Scenes", author = "Reinhold Preiner and Michael Wimmer", year = "2010", abstract = "In this paper we present a real-time global illumination approach for illuminating scenes containing large point clouds. Our approach is based on the distribution of Virtual Point Lights (VPLs) in the scene, which are then used for the indirect illumination of the visible surfaces, using Imperfect Shadow Maps for visibility calculation of the VPLs. 
We are able to render multiple indirect light bounces, where each light bounce accounts for the transport of both the diffuse and the specular fraction of the reflected light.", journal = "Computer Graphics & Geometry", number = "1", volume = "12", pages = "2--16", keywords = "virtual point lights, imperfect shadow maps, point rendering, point clouds, global illumination, VPL, ISM", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/preiner_2010_GIPC/", } @inproceedings{scherzer_2010a, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2010", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~\cite{HLHS03} and the last survey for general shadow generation dates back to 1990~\cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive from this a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the reader with the means to choose the shadow algorithm best suited to her needs.", booktitle = "EUROGRAPHICS 2010 State of the Art Reports", location = "Norrk\"{o}ping, Sweden", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer_2010a/", } @inproceedings{SSMW09, title = "Real-Time Soft Shadows Using Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "A vast number of soft shadow map algorithms have been presented in recent years. Most use a single-sample hard shadow map together with some clever filtering technique to calculate perceptually or even physically plausible soft shadows. On the other hand, there is the class of much slower algorithms that calculate physically correct soft shadows by taking and combining many samples of the light.
In this paper we present a new soft shadow method that combines the benefits of these approaches. It samples the light source over multiple frames instead of a single frame, creating only a single shadow map each frame. Where temporal coherence is low we use spatial filtering to estimate additional samples to create correct and very fast soft shadows. ", month = dec, isbn = "978-3642103308", series = "Lecture Notes in Computer Science", publisher = "Springer", location = "Las Vegas, Nevada, USA", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Kuno, Y.; Wang, J.; Pajarola, R.; Lindstrom, P.; Hinkenjann, A.; Encarnacao, M.; Silva, C.; Coming, D.", booktitle = "Advances in Visual Computing: 5th International Symposium on Visual Computing (ISVC 2009)", pages = "13--24", keywords = "real-time rendering, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SSMW09/", } @inproceedings{LIPP-2009-PGL, title = "Parallel Generation of L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2009", abstract = "This paper introduces a solution to compute L-systems on parallel architectures like GPUs and multi-core CPUs. Our solution can split the derivation of the L-system as well as the interpretation and geometry generation into thousands of threads running in parallel. We introduce a highly parallel algorithm for L-system evaluation that works on arbitrary L-systems, including parametric productions, context sensitive productions, stochastic production selection, and productions with side effects. Further we directly interpret the productions defined in plain-text, without requiring any compilation or transformation step (e.g., into shaders). Our algorithm is efficient in the sense that it requires no explicit inter-thread communication or atomic operations, and is thus completely lock free.", month = nov, isbn = "978-3980487481", location = "Braunschweig", editor = "Marcus Magnor, Bodo Rosenhahn, Holger Theisel", booktitle = "Vision, Modeling, and Visualization Workshop (VMV) 2009 ", pages = "205--214", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LIPP-2009-PGL/", } @inproceedings{SCHEIBLAUER-2009-IDCE, title = "Interactive Domitilla Catacomb Exploration", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2009", abstract = "In this paper we present an approach for interactive visualization and manipulation of huge point clouds. Archaeological monuments like the Domitilla Catacomb in Rome lead to data sets surpassing 1 Billion points or 20GB of storage space, which makes standard techniques like mesh conversion or in-core point-based rendering infeasible. Our system uses an out-of-core octree structure and a number of interactive editing tools to enable many archaeological tasks to be carried out on the whole point cloud that would not be possible using traditional methods. We allow fast selection, insertion and deletion of points, and through out-of-core rendering, the frame rate always stays above 20 frames per second on a fast workstation. 
To the best of our knowledge, this is the first interactive visualization of the complete data set of a large subterranean catacomb, and we show that direct point cloud visualization on the complete data set of a scan campaign is an important tool in archaeological practice.", month = sep, isbn = "978-3-905674-18-7", publisher = "Eurographics Association", location = "Malta", booktitle = "10th VAST International Symposium on Virtual Reality, Archaeology and Cultural Heritage (VAST09)", pages = "65--72", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SCHEIBLAUER-2009-IDCE/", } @article{BITTNER-2009-AGVS, title = "Adaptive Global Visibility Sampling", author = "Jir\'{i} Bittner and Oliver Mattausch and Peter Wonka and Vlastimil Havran and Michael Wimmer", year = "2009", abstract = "In this paper we propose a global visibility algorithm which computes from-region visibility for all view cells simultaneously in a progressive manner. We cast rays to sample visibility interactions and use the information carried by a ray for all view cells it intersects. The main contribution of the paper is a set of adaptive sampling strategies based on ray mutations that exploit the spatial coherence of visibility. Our method achieves more than an order of magnitude speedup compared to per-view cell sampling. This provides a practical solution to visibility preprocessing and also enables a new type of interactive visibility analysis application, where it is possible to quickly inspect and modify a coarse global visibility solution that is constantly refined. ", month = aug, journal = "ACM Transactions on Graphics", volume = "28", number = "3", issn = "0730-0301", pages = "94:1--94:10", keywords = "occlusion culling, visibility sampling, visibility, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-AGVS/", } @misc{LIPP-2009-PGL2, title = "Parallel Generation of L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2009", abstract = "In this work we investigate whether it is possible to efficiently evaluate one of the most classical procedural modeling primitives, L-systems, directly on parallel architectures, exemplified by current GPUs and multi-core CPUs. The main motivation is to enable interactive editing of large L-systems by designers, therefore it is important to speed up the computation of L-systems in order to achieve low response times.", month = aug, location = "New Orleans, LA", event = "High-Performance Graphics 2009", Conference date = "Poster presented at High-Performance Graphics 2009 (2009-08-01--2009-08-03)", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LIPP-2009-PGL2/", } @incollection{BITTNER-2009-GEFOC, title = "Game-Engine-Friendly Occlusion Culling", author = "Jir\'{i} Bittner and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "This article presents a method which minimizes the overhead associated with occlusion queries. The method reduces the number of required state changes and should integrate easily with most game engines. The key ideas are batching of the queries and interfacing with the game engine using a dedicated render queue. We also present some additional optimizations which reduce the number of queries issued as well as the number of rendered primitives. 
The algorithm is based on the well-known Coherent Hierarchical Culling algorithm.", month = mar, booktitle = "SHADERX7: Advanced Rendering Techniques", chapter = "8.3", editor = "Wolfgang Engel", isbn = "1-58450-598-2", publisher = "Charles River Media", volume = "7", keywords = "real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-GEFOC/", } @article{Habel_09_PGT, title = "Physically Guided Animation of Trees", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2009", abstract = "This paper presents a new method to animate the interaction of a tree with wind both realistically and in real time. The main idea is to combine statistical observations with physical properties in two major parts of tree animation. First, the interaction of a single branch with the forces applied to it is approximated by a novel efficient two-step nonlinear deformation method, allowing arbitrary continuous deformations and circumventing the need to segment a branch to model its deformation behavior. Second, the interaction of wind with the dynamic system representing a tree is statistically modeled. By precomputing the response function of branches to turbulent wind in frequency space, the motion of a branch can be synthesized efficiently by sampling a 2D motion texture. Using a hierarchical form of vertex displacement, both methods can be combined in a single vertex shader, fully leveraging the power of modern GPUs to realistically animate thousands of branches and tens of thousands of leaves at practically no cost.", month = mar, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2009)", volume = "28", number = "2", issn = "0167-7055", pages = "523--532", keywords = "Animation, Physically Guided animation, Vegetation, Trees", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_09_PGT/", } @inproceedings{GRELAUD-2009-EPA, title = "Efficient and Practical Audio-Visual Rendering for Games using Crossmodal Perception", author = "David Grelaud and Nicolas Bonneel and Michael Wimmer and Manuel Asselot and George Drettakis", year = "2009", abstract = "Interactive applications such as computer games are inherently audio-visual, requiring high-quality rendering of complex 3D audio soundscapes and graphics environments. A frequent source of audio events is impact sounds, typically generated with physics engines. In this paper, we first present an optimization allowing efficient usage of impact sounds in a unified audio rendering pipeline, also including prerecorded sounds. We also exploit a recent result on audio-visual crossmodal perception to introduce a new level-of-detail selection algorithm, which jointly chooses the quality level of audio and graphics rendering. 
We have integrated these two techniques as a comprehensive crossmodal audio-visual rendering pipeline in a home-grown game engine, thus demonstrating the potential utility of our approach.", month = feb, isbn = "978-1-60558-429-4", publisher = "ACM", location = "Boston, Massachusetts", address = "New York, NY, USA", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2009", pages = "177--182", keywords = "audio-visual rendering, crossmodal perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/GRELAUD-2009-EPA/", } @talk{WIMMER-2009-ARTR2, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects. In this talk, I will cover some of the recent advances we had in our group. First, we try to integrate procedural modeling techniques with the new parallel programming paradigms made commonly available through modern GPUs, and map L-system generation onto hardware to accelerate the generation of large L-systems. Then, I'll briefly show some results for really large scale visualization and editing of a huge point-based model consisting of over 1.2 Billion point samples of a Roman catacomb. Finally, I will treat a new approach to handle the classical visibility problem, where we show how to calculate visibility of a whole scene by exploiting the spatial coherence of visibility, thus accelerating the process so it becomes viable for interactive scene design. ", event = "University of Erlangen Research Seminar", location = "Erlangen, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR2/", } @inproceedings{WIMMER-2009-CSR, title = "Casting Shadows in Real Time", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michael Wimmer", year = "2009", abstract = "Shadows are crucial for enhancing realism and provide important visual cues. In recent years, many important contributions have been made both for hard shadows and soft shadows. Often spurred by the tremendous increase of computational power and capabilities of graphics hardware, much progress has been made concerning visual quality and speed, making high-quality real-time shadows a reachable goal. But with the growing wealth of available choices, it is particularly difficult to pick the right solution and assess shortcomings. Because currently there is no ultimate approach available, algorithms should be selected in accordance with the context in which shadows are produced. The possibilities range across a wide spectrum: from very approximate but really efficient to slower but accurate, adapted only to smaller or only to larger sources, addressing directional lights or positional lights, or involving GPU- or CPU-heavy computations. This course tries to serve as a guide to better understand limitations and failure cases, advantages and disadvantages, and suitability of the algorithms for different application scenarios. 
We will focus on real-time to interactive solutions but also discuss offline approaches if needed for a better understanding.", booktitle = "ACM SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", publisher = "ACM", note = "Lecturer: Daniel Scherzer", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-CSR/", } @talk{WIMMER-2009-VCCG, title = "Visibility Computation in Computer Graphics", author = "Michael Wimmer", year = "2009", abstract = "Visibility computation is an essential part of many computer graphics applications, for example for real-time rendering of very large scenes. Visibility can either be precomputed offline, which is a good strategy for static scenes, or calculated at runtime, which avoids precomputation and works well for dynamic scenes. In this presentation, I will cover the latest advances in both of these principal directions. For visibility precomputation, we have shown that sampling is superior to full geometric approaches for practical applications, due to its efficiency and robustness. For online visibility culling, we show how to make the best possible use of hardware occlusion queries without introducing latency and overhead.", event = "14th Computer Vision Winter Workshop (CVWW2009)", location = "Eibiswald, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-VCCG/", } @talk{WIMMER-2009-ARTR, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "7th Eurographics Italian Chapter Conference 2009", location = "Verona, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR/", } @talk{WIMMER-2009-ARTR3, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. 
While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "25th Spring Conference on Computer Graphics (SCCG2009)", location = "Budmerice, Slovakia", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR3/", } @talk{WIMMER-2009-ITC, title = "IT in Computerspielen: Ausbildung und Forschung", author = "Michael Wimmer", year = "2009", event = "Veranstaltungsreihe Was IT alles kann ", location = "Techgate Vienna", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ITC/", } @article{guerrero-2008-sli, title = "Real-time Indirect Illumination and Soft Shadows in Dynamic Scenes Using Spherical Lights", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer", year = "2008", abstract = "We present a method for rendering approximate soft shadows and diffuse indirect illumination in dynamic scenes. The proposed method approximates the original scene geometry with a set of tightly fitting spheres. In previous work, such spheres have been used to dynamically evaluate the visibility function to render soft shadows. In this paper, each sphere also acts as a low-frequency secondary light source, thereby providing diffuse one-bounce indirect illumination. The method is completely dynamic and proceeds in two passes: In a first pass, the light intensity distribution on each sphere is updated based on sample points on the corresponding object surface and converted into the spherical harmonics basis. In a second pass, this radiance information and the visibility are accumulated to shade final image pixels. 
The sphere approximation allows us to compute visibility and diffuse reflections of an object at interactive frame rates of over 20 fps for moderately complex scenes.", month = oct, journal = "Computer Graphics Forum", number = "8", volume = "27", pages = "2154--2168", keywords = "global illumination, precomputed radiance transfer, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/guerrero-2008-sli/", } @inproceedings{SUNDSTEDT-2008-ASF, title = "A Psychophysical Study of Fixation Behavior in a Computer Game", author = "Veronica Sundstedt and Efstathios Stavrakis and Michael Wimmer and Erik Reinhard", year = "2008", abstract = "Prediction of gaze behavior in gaming environments can be a tremendously useful asset to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. The use of saliency maps is currently being advocated as the method of choice for predicting visual attention, crucially under the assumption that no specific task is present. This is achieved by analyzing images for low-level features such as motion, contrast, luminance, etc. However, the majority of computer games are designed to be easily understood and pose a task readily apparent to most players. Our psychophysical experiment shows that in a task-oriented context such as gaming, the predictive power of saliency maps at design time can be weak. Thus, we argue that a more involved protocol utilizing eye tracking, as part of the computer game design cycle, can be sufficiently robust to succeed in predicting fixation behavior of players.", month = aug, isbn = "978-1-59593-981-4", publisher = "ACM", location = "Los Angeles, California", editor = "Sarah Creem-Regehr and Karol Myszkowski", doi = "10.1145/1394281.1394288", booktitle = "ACM Symposium on Applied Perception in Graphics and Visualization 2008", pages = "43--50", keywords = "saliency, eye tracking, electronic games, visual attention, psychophysics", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SUNDSTEDT-2008-ASF/", } @article{LIPP-2008-IEV, title = "Interactive Visual Editing of Grammars for Procedural Architecture", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2008", abstract = "We introduce a real-time interactive visual editing paradigm for shape grammars, allowing the creation of rulebases from scratch without text file editing. In previous work, shape-grammar based procedural techniques were successfully applied to the creation of architectural models. However, those methods are text based, and may therefore be difficult to use for artists with little computer science background. Therefore the goal was to enable a visual workflow combining the power of shape grammars with traditional modeling techniques. We extend previous shape grammar approaches by providing direct and persistent local control over the generated instances, avoiding the combinatorial explosion of grammar rules for modifications that should not affect all instances. The resulting visual editor is flexible: All elements of a complex state-of-the-art grammar can be created and modified visually.", month = aug, journal = "ACM Transactions on Graphics", volume = "27", number = "3", note = "Article No. 
102", issn = "0730-0301", doi = "10.1145/1360612.1360701", pages = "102:1--10", keywords = "procedural modeling, shape grammars, architectural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/LIPP-2008-IEV/", } @article{SCHERZER-2008-FSR, title = "Frame Sequential Interpolation for Discrete Level-of-Detail Rendering", author = "Daniel Scherzer and Michael Wimmer", year = "2008", abstract = "In this paper we present a method for automatic interpolation between adjacent discrete levels of detail to achieve smooth LOD changes in image space. We achieve this by breaking the problem into two passes: We render the two LOD levels individually and combine them in a separate pass afterwards. The interpolation is formulated in a way that only one level has to be updated per frame and the other can be reused from the previous frame, thereby causing roughly the same render cost as with simple non interpolated discrete LOD rendering, only incurring the slight overhead of the final combination pass. Additionally we describe customized interpolation schemes using visibility textures. The method was designed with the ease of integration into existing engines in mind. It requires neither sorting nor blending of objects, nor does it introduce any constrains in the LOD used. The LODs can be coplanar, alpha masked, animated, impostors, and intersecting, while still interpolating smoothly. ", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2008)", volume = "27", number = "4", issn = "0167-7055", pages = "1175--1181", keywords = "LOD blending, real-time rendering, levels of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SCHERZER-2008-FSR/", } @article{CADIK-2008-EHD, title = "Evaluation of HDR Tone Mapping Methods Using Essential Perceptual Attributes", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2008", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color vision, physiological aspects, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this contribution, we present an overview about the effects of basic image attributes in HDR tone mapping. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical experiments that we have performed to prove the proposed relationship scheme. Moreover, we also present an evaluation of existing tone mapping methods (operators) with regard to these attributes. Finally, the execution of with-reference and without a real reference perceptual experiments gave us the opportunity to relate the obtained subjective results. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping method, but it also sets the stage for well-founded quality comparisons between tone mapping methods. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible. 
", month = jun, issn = "0097-8493", journal = "Computers & Graphics", number = "3", volume = "32", pages = "330--349", keywords = "high dynamic range, tone mapping operators, tone mapping evaluation, image attributes", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/CADIK-2008-EHD/", } @inproceedings{Habel_08_SSH, title = "Efficient Spherical Harmonics Lighting with the Preetham Skylight Model", author = "Ralf Habel and Bogdan Mustata and Michael Wimmer", year = "2008", abstract = "We present a fast and compact representation of a skylight model for spherical harmonics lighting, especially for outdoor scenes. This representation allows dynamically changing the sun position and weather conditions on a per frame basis. We chose the most used model in real-time graphics, the Preetham skylight model, because it can deliver both realistic colors and dynamic range and its extension into spherical harmonics can be used to realistically light a scene. We separate the parameters of the Preetham skylight models' spherical harmonics extension and perform a polynomial two-dimensional non-linear least squares fit for the principal parameters to achieve both negligible memory and computation costs. Additionally, we execute a domain specific Gibbs phenomena suppression to remove ringing artifacts.", month = apr, publisher = "Eurographics Association", location = "Crete, Greece", issn = "1017-4656", editor = "Katerina Mania and Erik Reinhard", booktitle = "Eurographics 2008 - Short Papers", pages = "119--122", keywords = "Natural Phenomena, Spherical Harmonics, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Habel_08_SSH/", } @article{mattausch-2008-CHC, title = "CHC++: Coherent Hierarchical Culling Revisited", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2008", abstract = "We present a new algorithm for efficient occlusion culling using hardware occlusion queries. The algorithm significantly improves on previous techniques by making better use of temporal and spatial coherence of visibility. This is achieved by using adaptive visibility prediction and query batching. As a result of the new optimizations the number of issued occlusion queries and the number of rendering state changes are significantly reduced. We also propose a simple method for determining tighter bounding volumes for occlusion queries and a method which further reduces the pipeline stalls. The proposed method provides up to an order of magnitude speedup over the previous state of the art. The new technique is simple to implement, does not rely on hardware calibration and integrates well with modern game engines.", month = apr, journal = "Computer Graphics Forum (Proceedings Eurographics 2008)", volume = "27", number = "2", issn = "0167-7055", pages = "221--230", keywords = "temporal coherence, dynamic occlusion culling, occlusion queries", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/mattausch-2008-CHC/", } @talk{WIMMER-2008-ART, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2008", abstract = "This talk will give a brief summary of recent research activities in the field of real-time rendering conducted by the real-time rendering group at the Vienna University of Technology. 
Highlights are interactive procedural architecture, physically based tree animation and visibility culling.", event = "INRIA Research Seminar", location = "INRIA, Grenoble, France", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-ART/", } @talk{WIMMER-2008-PVR, title = "Precomputing Visibility for Real-Time Rendering of Large Scenes", author = "Michael Wimmer", year = "2008", abstract = "Visibility computation is an essential part of any real-time rendering pipeline for large scenes. Visibility can either be precomputed offline, which is a good strategy for static scenes, or calculated at runtime, which avoids precomputation and works well for dynamic scenes. In this presentation, I will cover the latest advances in both of these principal directions. For visibility precomputation, we have shown that sampling is superior to full geometric approaches for practical applications, due to its efficiency and robustness. For online visibility culling, we show how to make the best possible use of hardware occlusion queries without introducing latency and overhead.", event = "Graphical Visionday 2008", location = "Kopenhagen, Denmark", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-PVR/", } @talk{WIMMER-2008-CAR, title = "Current Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2008", abstract = "This talk will give a brief summary of recent research activities in the field of real-time rendering conducted by the real-time rendering group at the Vienna University of Technology. Highlights are interactive procedural architecture, physically based tree animation and visibility culling.", event = "Graphics Lunch", location = "ETH Z\"{u}rich, Switzerland", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-CAR/", } @inproceedings{CHARALAMBOS-2007-HLOD, title = "Optimized HLOD Refinement Driven by Hardware Occlusion Queries", author = "Jean Pierre Charalambos and Jir\'{i} Bittner and Michael Wimmer and Eduardo Romero", year = "2007", abstract = "We present a new method for integrating hierarchical levels of detail (HLOD) with occlusion culling. The algorithm refines the HLOD hierarchy using geometric criteria as well as the occlusion information. For the refinement we use a simple model which takes into account the possible distribution of the visible pixels. The traversal of the HLOD hierarchy is optimized by a new algorithm which uses spatial and temporal coherence of visibility. We predict the HLOD refinement condition for the current frame based on the results from the last frame. This allows an efficient update of the front of termination nodes as well as an efficient scheduling of hardware occlusion queries. Compared to previous approaches, the new method improves on speed as well as image quality. 
The results indicate that the method is very close to the optimal scheduling of occlusion queries for driving the HLOD refinement.", month = nov, isbn = "978-3-540-76855-9", series = "Lecture Notes in Computer Science, volume 4841", publisher = "Springer", location = "Lake Tahoe, Nevada/California", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Paragios, N.; Tanveer, S.-M.; Ju, T.; Liu, Z.; Coquillart, S.; Cruz-Neira, C.; M\"{o}ller, T.; Malzbender, T.", booktitle = "Advances in Visual Computing (Third International Symposium on Visual Computing -- ISVC 2007)", pages = "106--117", keywords = "occlusion queries, levels of detail, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/CHARALAMBOS-2007-HLOD/", } @techreport{TR-186-2-07-09, title = "Rendering Imperfections: Dust, Scratches, Aging,...", author = "Michael Schw\"{a}rzler and Michael Wimmer", year = "2007", abstract = "In order to increase the realism of an image or a scene in a computer graphics application, so-called imperfections are often used during rendering. These are techniques which add details like dirt, scratches, dust or aging effects to the models and textures. Realism is improved through imperfections since computer-generated models are usually too perfect to be accepted as realistic by human observers. By making them, for example, dusty and scratched, people can imagine them being part of their real world much more easily. This article gives an overview of currently used imperfections techniques and algorithms. Topics like textures, scratches, aging, dust, weathering, lichen growth and terrain erosion are covered.", month = sep, number = "TR-186-2-07-09", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "scratches, dust, imperfections, aging", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-09/", } @inproceedings{Scherzer-2007-PCS, title = "Pixel-Correct Shadow Maps with Temporal Reprojection and Shadow Test Confidence", author = "Daniel Scherzer and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "Shadow mapping suffers from spatial aliasing (visible as blocky shadows) as well as temporal aliasing (visible as flickering). Several methods have already been proposed for reducing such artifacts, but so far none is able to provide satisfying results in real time. This paper extends shadow mapping by reusing information of previously rasterized images, stored efficiently in a so-called history buffer. This buffer is updated in every frame and then used for the shadow calculation. In combination with a special confidence-based method for the history buffer update (based on the current shadow map), temporal and spatial aliasing can be completely removed. The algorithm converges in about 10 to 60 frames and during convergence, shadow borders are sharpened over time. Consequently, in case of real-time frame rates, the temporal shadow adaption is practically imperceptible. The method is simple to implement and is as fast as uniform shadow mapping, incurring only the minor speed hit of the history buffer update. 
It works together with advanced filtering methods like percentage closer filtering and more advanced shadow mapping techniques like perspective or light space perspective shadow maps.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "45--50", keywords = "shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Scherzer-2007-PCS/", } @inproceedings{JESCHKE-2007-ISC, title = "Interactive Smooth and Curved Shell Mapping", author = "Stefan Jeschke and Stephan Mantler and Michael Wimmer", year = "2007", abstract = "Shell mapping is a technique to represent three-dimensional surface details. This is achieved by extruding the triangles of an existing mesh along their normals, and mapping a 3D function (e.g., a 3D texture) into the resulting prisms. Unfortunately, such a mapping is nonlinear. Previous approaches perform a piece-wise linear approximation by subdividing the prisms into tetrahedrons. However, such an approximation often leads to severe artifacts. In this paper we present a correct (i.e., smooth) mapping that does not rely on a decomposition into tetrahedrons. We present an efficient GPU ray casting algorithm which provides correct parallax, self-occlusion, and silhouettes, at the cost of longer rendering times. The new formulation also allows modeling shells with smooth curvatures using Coons patches within the prisms. Tangent continuity between adjacent prisms is guaranteed, while the mapping itself remains local, i.e. every curved prism content is modeled at runtime in the GPU without the need for any precomputation. This allows instantly replacing animated triangular meshes with prism-based shells.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "10", pages = "351--360", keywords = "Display algorithms, Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/JESCHKE-2007-ISC/", } @inproceedings{Habel_2007_RTT, title = "Physically Based Real-Time Translucency for Leaves", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2007", abstract = "This paper presents a new shading model for real-time rendering of plant leaves that reproduces all important attributes of a leaf and allows for a large number of leaves to be shaded. In particular, we use a physically based model for accurate subsurface scattering on the translucent side of directly lit leaves. For real-time rendering of this model, we formulate it as an image convolution process and express the result in an efficient directional basis that is fast to evaluate. 
We also propose a data acquisition method for leaves that uses off-the-shelf devices.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "253--263", keywords = "Realtime Rendering, Natural Scene Rendering, Physically Based Rendering, Natural Phenomena", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_RTT/", } @inproceedings{MATTAUSCH-2007-OSP, title = "Optimized Subdivisions for Preprocessed Visibility", author = "Oliver Mattausch and Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2007", abstract = "This paper describes a new tool for preprocessed visibility. It puts together view space and object space partitioning in order to control the render cost and memory cost of the visibility description generated by a visibility solver. The presented method progressively refines view space and object space subdivisions while minimizing the associated render and memory costs. Contrary to previous techniques, both subdivisions are driven by actual visibility information. We show that treating view space and object space together provides a powerful method for controlling the efficiency of the resulting visibility data structures.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "335--342", keywords = "visibility preprocessing, potentially visible sets, view cells", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MATTAUSCH-2007-OSP/", } @inproceedings{GIEGL-2007-FVS, title = "Fitted Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Too little shadow map resolution and resulting undersampling artifacts, perspective and projection aliasing, have long been a fundamental problem of shadowing scenes with shadow mapping. We present a new smart, real-time shadow mapping algorithm that virtually increases the resolution of the shadow map beyond the GPU hardware limit where needed. We first sample the scene from the eye-point on the GPU to get the needed shadow map resolution in different parts of the scene. We then process the resulting data on the CPU and finally arrive at a hierarchical grid structure, which we traverse in kd-tree fashion, shadowing the scene with shadow map tiles where needed. Shadow quality can be traded for speed through an intuitive parameter, with a homogeneous quality reduction in the whole scene, down to normal shadow mapping. This allows the algorithm to be used on a wide range of hardware.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. 
Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "159--168", keywords = "real-time shadowing, shadows, shadow maps, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-FVS/", } @habilthesis{WIMMER-2007-RTR, title = "Real-Time Rendering", author = "Michael Wimmer", year = "2007", abstract = "Real-time rendering is concerned with the display of computer-generated images at rates which let a human observer believe that she is looking at a smooth animation. This thesis deals with several contributions to the field of real-time rendering that improve either the performance of rendering algorithms or the quality of the displayed images. Light-Space Perspective Shadow Maps improve the quality of real-time rendering by providing better looking shadow rendering, one of the most popular research topics in real-time rendering. Conversely, Coherent Hierarchical Culling and Guided Visibility Sampling improve the performance of real-time rendering through visibility culling. One is designed for runtime computation and the other for preprocessing. Finally, real-time rendering is extended from traditional polygon rendering to a new type of dataset that has recently gained importance, namely point clouds, especially huge datasets that cannot be loaded into main memory. ", month = may, URL = "https://www.cg.tuwien.ac.at/research/publications/2007/WIMMER-2007-RTR/", } @misc{MANTLER-2007-DMBBC, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = apr, event = "Symposium on Interactive 3D Graphics and Games", Conference date = "Poster presented at Symposium on Interactive 3D Graphics and Games (2007-04-30--2007-05-02)", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MANTLER-2007-DMBBC/", } @inproceedings{GIEGL-2007-QV1, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Shadowing scenes by shadow mapping has long suffered from the fundamental problem of undersampling artifacts due to too low shadow map resolution, leading to so-called perspective and projection aliasing. 
In this paper we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = apr, isbn = "978-1-59593-628-8", publisher = "ACM Press", location = "Seattle, WA", address = "New York, NY, USA", booktitle = "Proceedings of ACM SIGGRAPH 2007 Symposium on Interactive 3D Graphics and Games", pages = "65--72", keywords = "shadow maps, shadows, real-time shadowing, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-QV1/", } @article{GIEGL-2007-UNP, title = "Unpopping: Solving the Image-Space Blend Problem for Smooth Discrete LOD Transitions", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "This paper presents a new, simple and practical algorithm to avoid artifacts when switching between discrete levels of detail (LOD) by smoothly blending LOD representations in image space. We analyze the alternatives of conventional alpha-blending and so-called late-switching (the switching of LODs ``far enough'' from the eye-point), widely thought to solve the LOD switching discontinuity problem, and conclude that they either do not work in practice, or defeat the concept of LODs. In contrast we show that our algorithm produces visually pleasing blends for static and animated discrete LODs, for discrete LODs with different types of LOD representations (e.g. billboards and meshes) and even to some extent totally different objects with similar spatial extent, with a very small runtime overhead.", month = mar, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "26", pages = "46--49", keywords = "popping, LOD switching, levels of detail, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-UNP/", } @techreport{TR-186-2-07-01, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = jan, number = "TR-186-2-07-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-01/", } @article{Habel_2007_IAG, title = "Instant Animated Grass", author = "Ralf Habel and Michael Wimmer and Stefan Jeschke", year = "2007", abstract = "This paper introduces a technique for rendering animated grass in real time. The technique uses front-to-back compositing of implicitly defined grass slices in a fragment shader and therefore significantly reduces the overhead associated with common vegetation rendering systems. We also introduce a texture-based animation scheme that combines global wind movements with local turbulences. Since the technique is confined to a fragment shader, it can be easily integrated into any rendering system and used as a material in existing scenes. ", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", note = "ISBN 978-80-86943-00-8", issn = "1213-6972", pages = "123--128", keywords = "Real-time Rendering, Natural Scene Rendering, Natural Phenomena, GPU Programming", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_IAG/", } @talk{WIMMER-2007-GAR, title = "Gametools: Advanced Rendering Effects for Next-Gen Engines", author = "Michael Wimmer", year = "2007", abstract = "The GameTools Project is an EU project from the 6th Framework Programme that brings together leading European computer graphics experts from universities in Austria, France, Hungary and Spain with European industrial partners from the fields of computer game development and virtual reality to create next generation real-time 3D libraries for Geometry, Visibility and Global Illumination for the PC platform, with an extension to consoles PS2, XBox, PS3, XBox 360 planned. With the project now completed after 3 years, this talk will introduce you to the advanced technology available partly as Open Source, partly under licensing. The project comprises technologies such as continuous multiresolution models for animated characters, massive tree rendering, robust PVS generation for visibility determination of arbitrarily large game levels, and real-time global illumination effects such as soft shadows, real-time radiosity, caustics, cloud rendering, and many more. The effects created in the GameTools project are available as plugins that can be incorporated into any game engine, and are demonstrated with the Open Source Ogre engine.", event = "Games Convention Developers Conference 2007", location = "Leipzig, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/WIMMER-2007-GAR/", } @incollection{GIEGL-2006-QVS, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2006", abstract = "In this article we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. 
We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = dec, booktitle = "ShaderX 5 -- Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "1-58450-499-4", publisher = "Charles River Media", series = "ShaderX", volume = "5", keywords = "shadows, shadow mapping, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/GIEGL-2006-QVS/", } @inproceedings{WIMMER-2006-DWN, title = "Do We Need Accurate Reconstruction?", author = "Michael Wimmer", year = "2006", abstract = "The accurate reconstruction of high-quality representations from range scanning devices for archaeology is very time consuming and costly. The objective of this paper is to show that this accurate reconstruction step can be avoided in many cases. Instead, we present a method to make range scanning data instantly available to archaeologists and other scientists, so that they can immediately experiment and work with the data.", month = oct, location = "Vienna City Hall, Vienna, Austria", booktitle = "Proceedings of 11th International Congress on Cultural Heritage and New Technologies", keywords = "point-based rendering, laser scanning, virtual reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-DWN/", } @inproceedings{CADIK-2006-IAQ, title = "Image Attributes and Quality for Evaluation of Tone Mapping Operators", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2006", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color science, physiology, neurology, psychology, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this paper, we present an overview of image quality attributes of different tone mapping methods. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical tests that we have performed to prove the proposed relationship scheme. We also present the evaluation of existing tone mapping methods with regard to these attributes. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping operator, but it also sets the stage for well-founded quality comparisons between tone mapping operators. 
By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible.", month = oct, publisher = "National Taiwan University Press", location = "Taipei, Taiwan", booktitle = "Proceedings of Pacific Graphics 2006 (14th Pacific Conference on Computer Graphics and Applications)", pages = "35--44", keywords = "tone mapping evaluation, tone mapping, high-dynamic range images", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/CADIK-2006-IAQ/", } @article{WONKA-2006-GVS, title = "Guided Visibility Sampling", author = "Peter Wonka and Michael Wimmer and Kaichi Zhou and Stefan Maierhofer and Gerd Hesina and Alexander Reshetov", year = "2006", abstract = "This paper addresses the problem of computing the triangles visible from a region in space. The proposed aggressive visibility solution is based on stochastic ray shooting and can take any triangular model as input. We do not rely on connectivity information, volumetric occluders, or the availability of large occluders, and can therefore process any given input scene. The proposed algorithm is practically memoryless, thereby alleviating the large memory consumption problems prevalent in several previous algorithms. The strategy of our algorithm is to use ray mutations in ray space to cast rays that are likely to sample new triangles. Our algorithm improves the sampling efficiency of previous work by over two orders of magnitude.", month = jul, journal = "ACM Transactions on Graphics", volume = "25", number = "3", note = "Proceedings ACM SIGGRAPH 2006", issn = "0730-0301", doi = "10.1145/1141911.1141914", pages = "494--502", keywords = "visibility, visibility sampling, occlusion culling, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WONKA-2006-GVS/", } @inproceedings{WIMMER-2006-IP, title = "Instant Points: Fast Rendering of Unprocessed Point Clouds", author = "Michael Wimmer and Claus Scheiblauer", year = "2006", abstract = "We present an algorithm to display enormous unprocessed point clouds at interactive rates without requiring long preprocessing. The novelty here is that we do not make any assumptions about sampling density or availability of normal vectors for the points. This is very important because such information is available only after lengthy postprocessing of scanned datasets, whereas users want to interact with the dataset immediately. Instant Points is an out-of-core algorithm that makes use of nested octrees and an enhanced version of sequential point trees.", month = jul, isbn = "3-90567-332-0", publisher = "Eurographics Association", organization = "Eurographics", location = "Boston, USA", booktitle = "Proceedings Symposium on Point-Based Graphics 2006", pages = "129--136", keywords = "unprocessed point clouds, point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-IP/", } @inproceedings{MATTAUSCH-2006-AVC, title = "Adaptive Visibility-Driven View Cell Construction", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2006", abstract = "We present a new method for the automatic partitioning of view space into a multi-level view cell hierarchy. We use a cost-based model in order to minimize the average rendering time. Unlike previous methods, our model takes into account the actual visibility in the scene, and the partition is not restricted to planes given by the scene geometry. 
We show that the resulting view cell hierarchy works for different types of scenes and gives lower average rendering time than previously used methods.", month = jun, isbn = "3-90567-335-5", publisher = "Eurographics Association", organization = "Eurographics", location = "Nicosia, Cyprus", editor = "Wolfgang Heidrich and Tomas Akenine-Moller", booktitle = "Rendering Techniques 2006 (Proceedings Eurographics Symposium on Rendering)", pages = "195--206", keywords = "view cells, real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/MATTAUSCH-2006-AVC/", } @incollection{Wimmer-2006, title = "Robust Shadow Mapping with Light Space Perspective Shadow Maps", author = "Michael Wimmer and Daniel Scherzer", year = "2006", abstract = "In this paper, we present a new shadow mapping technique that improves upon the quality of perspective and uniform shadow maps. Our technique uses a perspective transform specified in light space which allows treating all lights as directional lights and does not change the direction of the light sources. This gives all the benefits of the perspective mapping but avoids the problems inherent in perspective shadow mapping like singularities in post-perspective space, missed shadow casters etc. Furthermore, we show that both uniform and perspective shadow maps distribute the perspective aliasing error that occurs in shadow mapping unequally over the available depth range. We therefore propose a transform that equalizes this error and gives equally pleasing results for near and far viewing distances. Our method is simple to implement, requires no scene analysis and is therefore as fast as uniform shadow mapping.", month = mar, booktitle = "ShaderX 4 -- Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "1-58450-425-0", publisher = "Charles River Media", series = "ShaderX", volume = "4", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Wimmer-2006/", } @talk{WIMMER-2006-PSM, title = "Practical Shadow Mapping", author = "Michael Wimmer", year = "2006", event = "Games Convention Developers Conference 2006", location = "Leipzig, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-PSM/", } @book{WIMMER-2006-CES, title = "CESCG 2000-2005 Best Papers Selection", author = "Michael Wimmer and Andrej Ferko and L\'{a}szl\'{o} Szirmay-Kalos and Helwig Hauser", year = "2006", abstract = "The Central European Seminar on Computer Graphics, or CESCG, as it is commonly known by its participants, was established to bring computer graphics students together across boundaries of universities and countries, originally from Central Europe, nowadays from almost all parts of the old European continent. CESCG came into existence ten years ago, which is a long time not only for a conference but also for its inventors, organizers, and participants. 
The 10th anniversary makes us look back and recall how the child called CESCG was born and how it grew into its teenage years.", isbn = "3-85403-204-8", pages = "279", publisher = "Austrian Computer Society", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-CES/", } @talk{WIMMER-2006-SIV, title = "Sampling in Visibility", author = "Michael Wimmer", year = "2006", event = "Ayia Napa Summer Seminar on Recent Results in Rendering and Modeling in Computer Graphics", location = "Ayia Napa, Cyprus", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-SIV/", } @incollection{PURGATHOFER-2006-IHB, title = "Grafische Datenverarbeitung", author = "Werner Purgathofer and Michael Wimmer and Alexander Wilkie", year = "2006", abstract = "Computer graphics (grafische Datenverarbeitung) comprises, in the broadest sense, all methods and techniques in which images and image information are created or processed with a computer.", address = "Munich", booktitle = "Informatik Handbuch (4th Edition)", editor = "Peter Rechenberg and Gustav Pomberger", isbn = "3-446-40185-7", publisher = "Carl Hanser Verlag", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/PURGATHOFER-2006-IHB/", } @inproceedings{jeschke-05-ISTAR, title = "Image-based Representations for Accelerated Rendering of Complex Scenes", author = "Stefan Jeschke and Michael Wimmer and Werner Purgathofer", year = "2005", abstract = "This paper gives an overview of image-based representations commonly used for reducing the geometric complexity of a scene description in order to accelerate the rendering process. Several different types of representations and ways for using them have been presented, which are classified and discussed here. Furthermore, the overview includes techniques for accelerating the rendering of static scenes or scenes with animations and/or dynamic lighting effects. The advantages and drawbacks of the different approaches are illuminated, and unsolved problems and roads for further research are shown.", month = aug, booktitle = "EUROGRAPHICS 2005 State of the Art Reports", editor = "Y. Chrysanthou and M. Magnor", location = "Dublin, Ireland", publisher = "The Eurographics Association and The Image Synthesis Group", organization = "EUROGRAPHICS", pages = "1--20", keywords = "Impostors, Display Algorithms, Three Dimensional Graphics and Realism, Color, Shading, Shadowing and Texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ISTAR/", } @inproceedings{bittner-2005-egsr, title = "Fast Exact From-Region Visibility in Urban Scenes", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2005", abstract = "We present a fast exact from-region visibility algorithm for 2.5D urban scenes. The algorithm uses a hierarchical subdivision of line-space for identifying visibility interactions in a 2D footprint of the scene. Visibility in the remaining vertical dimension is resolved by testing for the existence of lines stabbing sequences of virtual portals. Our results show that exact analytic from-region visibility in urban scenes can be computed at times comparable or even superior to recent conservative methods. 
", month = jun, isbn = "3-905673-23-1", publisher = "Eurographics Association", organization = "Eurographics", location = "Konstanz, Germany", editor = "Kavita Bala and Philip Dutr\'{e}", booktitle = "Rendering Techniques 2005 (Proceedings Eurographics Symposium on Rendering)", pages = "223--230", keywords = "real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bittner-2005-egsr/", } @inproceedings{jeschke-05-AIP, title = "Automatic Impostor Placement for Guaranteed Frame Rates and Low Memory Requirements", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann and Werner Purgathofer", year = "2005", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to reduce the rendering time needed for displaying complex scenes. However, a big problem is the huge amount of memory required for impostors. This paper presents an algorithm that automatically places impostors into a scene so that a desired frame rate and image quality is always met, while at the same time not requiring enormous amounts of impostor memory. The low memory requirements are provided by a new placement method and through the simultaneous use of other acceleration techniques like visibility culling and geometric levels of detail.", month = apr, isbn = "1-59593-013-2", publisher = "ACM Press", organization = "ACM", location = "Washington DC", booktitle = "Proceedings of ACM SIGGRAPH 2005 Symposium on Interactive 3D Graphics and Games", pages = "103--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-AIP/", } @incollection{Wimmer-2005-HOQ, title = "Hardware Occlusion Queries Made Useful", author = "Michael Wimmer and Jir\'{i} Bittner", year = "2005", abstract = "Hardware occlusion queries make it possible for an application to ask the 3D API whether or not any pixels would be drawn if a particular object was rendered. With this feature, applications can check to see whether or not the bounding boxes of complex objects are visible; if the bounds are occluded, the application can skip drawing those objects. In this chapter, we present a simple and powerful algorithm to solve the problem of latency and CPU/GPU stall typically associated with a naive usage of hardware occlusion queries.", month = mar, booktitle = "GPU Gems 2: Programming Techniques for High-Performance Graphics and General-Purpose Computation", editor = "Matt Pharr and Randima Fernando", isbn = "0-32133-559-7", publisher = "Addison-Wesley", keywords = "occlusion culling, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Wimmer-2005-HOQ/", } @article{Bittner-2004-CHC, title = "Coherent Hierarchical Culling: Hardware Occlusion Queries Made Useful", author = "Jir\'{i} Bittner and Michael Wimmer and Harald Piringer and Werner Purgathofer", year = "2004", abstract = "We present a simple but powerful algorithm for optimizing the usage of hardware occlusion queries in arbitrary complex scenes. Our method minimizes the number of issued queries and reduces the delays due to the latency of query results. We reuse the results of the occlusion queries from the last frame in order to initiate and schedule the queries in the next frame. This is done by processing nodes of a spatial hierarchy in front-to-back order, interleaving occlusion queries with the rendering of certain previously visible nodes. The proposed scheduling of the queries makes use of spatial and temporal coherence of visibility. 
Despite its simplicity, the algorithm achieves good culling efficiency for scenes of various characteristics. The implementation of the algorithm is straightforward, and it can be easily integrated in existing real-time rendering packages using various spatial data structures.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", note = "Proceedings EUROGRAPHICS 2004", issn = "0167-7055", pages = "615--624", keywords = "occlusion query, visibility, real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Bittner-2004-CHC/", } @inproceedings{Wimmer-2004-LSPM, title = "Light Space Perspective Shadow Maps", author = "Michael Wimmer and Daniel Scherzer and Werner Purgathofer", year = "2004", abstract = "In this paper, we present a new shadow mapping technique that improves the quality of perspective and uniform shadow maps. Our technique uses a perspective transform specified in light space which allows treating all lights as directional lights and does not change the direction of the light sources. This gives all the benefits of the perspective mapping but avoids the problems inherent in perspective shadow mapping like singularities in post-perspective space, missed shadow casters etc. Furthermore, we show that both uniform and perspective shadow maps distribute the perspective aliasing error that occurs in shadow mapping unequally over the available z-range. We therefore propose a transform that equalizes this error and gives equally pleasing results for near and far viewing distances. Our method is simple to implement, requires no scene analysis and is therefore as fast as uniform shadow mapping.", month = jun, isbn = "3-905673-12-6", publisher = "Eurographics Association", organization = "Eurographics", location = "Norrk\"{o}ping, Sweden", editor = "Alexander Keller and Henrik W. Jensen", booktitle = "Rendering Techniques 2004 (Proceedings Eurographics Symposium on Rendering)", pages = "143--151", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wimmer-2004-LSPM/", } @article{Wonka-2003-Ins, title = "Instant Architecture", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion and William Ribarsky", year = "2003", abstract = "This paper presents a new method for the automatic modeling of architecture. Building designs are derived using split grammars, a new type of parametric set grammar based on the concept of shape. The paper also introduces an attribute matching system and a separate control grammar, which offer the flexibility required to model buildings using a large variety of different styles and design ideas. Through the adaptive nature of the design grammar used, the created building designs can either be generic or adhere closely to a specified goal, depending on the amount of data available.", month = jul, journal = "ACM Transactions on Graphics", volume = "22", number = "3", note = "Proceedings ACM SIGGRAPH 2003", issn = "0730-0301", doi = "10.1145/882262.882324", pages = "669--677", keywords = "architecture, shape grammars, urban environments, modeling, real-time simulation, building design", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wonka-2003-Ins/", } @inproceedings{Wimmer-2003-RTE, title = "Rendering Time Estimation for Real-Time Rendering", author = "Michael Wimmer and Peter Wonka", year = "2003", abstract = "This paper addresses the problem of estimating the rendering time for a real-time simulation.
We study different factors that contribute to the rendering time in order to develop a framework for rendering time estimation. Given a viewpoint (or view cell) and a list of potentially visible objects, we propose several algorithms that can give reasonable upper limits for the rendering time on consumer hardware. This paper also discusses several implementation issues and design choices that are necessary to make the rendering time predictable. Finally, we lay out two extensions to current rendering hardware which would allow implementing a system with constant frame rates.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "118--129", keywords = "graphics hardware, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wimmer-2003-RTE/", } @inproceedings{Artusi-2003-Del, title = "Delivering Interactivity to Complex Tone Mapping Operators", author = "Alessandro Artusi and Jir\'{i} Bittner and Michael Wimmer and Alexander Wilkie", year = "2003", abstract = "The accurate display of high dynamic range images requires the application of complex tone mapping operators. These operators are computationally costly, which prevents their usage in interactive applications. We propose a general framework that delivers interactive performance to an important subclass of tone mapping operators, namely global tone mapping operators. The proposed framework consists of four steps: sampling the input image, applying the tone mapping operator, fitting the point-sampled tone mapping curve, and reconstructing the tone mapping curve for all pixels of the input image. We show how to make use of recent graphics hardware while keeping the advantage of generality by performing tone mapping in software. We demonstrate the capabilities of our method by accelerating several common global tone mapping operators and integrating the operators in a real-time rendering application.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "38--44", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-Del/", } @inproceedings{Jeschke-2002-TDMR, title = "Textured Depth Meshes for Real-Time Rendering of Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "This paper presents a new approach to generate textured depth meshes (TDMs), an impostor-based scene representation that can be used to accelerate the rendering of static polygonal models. The TDMs are precalculated for a fixed viewing region (view cell). The approach relies on a layered rendering of the scene to produce a voxel-based representation. Secondly, a highly complex polygon mesh is constructed that covers all the voxels. Afterwards, this mesh is simplified using a special error metric to ensure that all voxels stay covered. Finally, the remaining polygons are resampled using the voxel representation to obtain their textures. The contribution of our approach is manifold: first, it can handle polygonal models without any knowledge about their structure.
Second, only scene parts that may become visible from within the view cell are represented, thereby cutting down on impostor complexity and storage costs. Third, an error metric guarantees that the impostors are practically indistinguishable from the original model (i.e. no rubber-sheet effects or holes appear as in most previous approaches). Furthermore, current graphics hardware is exploited for the construction and use of the impostors.", month = jun, isbn = "1-58133-534-3", publisher = "Eurographics Association", organization = "Eurographics", location = "Pisa, Italy", editor = "Paul Debevec and Simon Gibson", booktitle = "Rendering Techniques 2002 (Proceedings Eurographics Workshop on Rendering)", pages = "181--190", keywords = "Rendering, Walkthrough, Computer Graphics, Impostors", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-TDMR/", } @inproceedings{Jeschke-2002-LEMA, title = "Layered Environment-Map Impostors for Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann", year = "2002", abstract = "This paper presents a new impostor-based approach to accelerate the rendering of very complex static scenes. The scene is partitioned into viewing regions, and a layered impostor representation is precalculated for each of them. An optimal placement of impostor layers guarantees that our representation is indistinguishable from the original geometry. Furthermore, the algorithm exploits common graphics hardware both during preprocessing and rendering. Moreover, the impostor representation is compressed using several strategies to cut down on storage space.", month = may, isbn = "1-56881-183-7", publisher = "AK Peters Ltd.", location = "Calgary, CA", editor = "Wolfgang St\"{u}rzlinger and Michael McCool", booktitle = "Proceedings of Graphics Interface 2002", pages = "1--8", keywords = "virtual environments, environment maps, impostors, walkthroughs, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-LEMA/", } @techreport{TR-186-2-02-04, title = "An Error Metric for Layered Environment Map Impostors", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to accelerate the rendering of large virtual environments. This paper describes a “layered impostor technique” used for representing distant scene-parts when seen from a bounded viewing region. A special layer placement is derived which bounds the geometric error introduced by parallaxes to a defined value. In combination with a special technique for image generation, a high-quality impostor representation without image artifacts can be obtained.", month = feb, number = "TR-186-2-02-04", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "impostors, real-time rendering, virtual", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/TR-186-2-02-04/", } @inproceedings{Bittner-2001-Vis, title = "Visibility Preprocessing for Urban Scenes using Line Space Subdivision", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2001", abstract = "We present an algorithm for visibility preprocessing of urban environments.
The algorithm uses a subdivision of line space to analytically calculate a conservative potentially visible set for a given region in the scene. We present a detailed evaluation of our method including a comparison to another recently published visibility preprocessing algorithm. To the best of our knowledge the proposed method is the first algorithm that scales to large scenes and efficiently handles large view cells.", month = oct, isbn = "0-7695-1227-5", publisher = "IEEE Computer Society Press", location = "Tokyo, Japan", editor = "Bob Werner", booktitle = "Proceedings of Pacific Graphics 2001 (Ninth Pacific Conference on Computer Graphics and Applications)", pages = "276--284", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Bittner-2001-Vis/", } @article{Wonka-2001-Ins, title = "Instant Visibility", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion", year = "2001", abstract = "We present an online occlusion culling system which computes visibility in parallel to the rendering pipeline. We show how to use point visibility algorithms to quickly calculate a tight potentially visible set (PVS) which is valid for several frames, by shrinking the occluders used in visibility calculations by an adequate amount. These visibility calculations can be performed on a visibility server, possibly a distinct computer communicating with the display host over a local network. The resulting system essentially combines the advantages of online visibility processing and region-based visibility calculations, allowing asynchronous processing of visibility and display operations. We analyze two different types of hardware-based point visibility algorithms and address the problem of bounded calculation time which is the basis for true real-time behavior. Our results show reliable, sustained 60 Hz performance in a walkthrough with an urban environment of nearly 2 million polygons, and a terrain flyover.", month = sep, journal = "Computer Graphics Forum", volume = "20", number = "3", note = "G\"{u}nther Enderle [Best Paper] Award, Best Student Paper Award. A. Chalmers and T.-M. Rhyne (eds.), Proceedings EUROGRAPHICS 2001", issn = "0167-7055", pages = "411--421", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wonka-2001-Ins/", } @inproceedings{Wimmer-2001-CCL, title = "Computerspiele in der Computergraphik-Lehre", author = "Michael Wimmer", year = "2001", month = sep, isbn = "3-85403-157-2", series = "Band 2", publisher = "Universit\"{a}t Wien", editor = "Kurt Bauknecht and Wilfried Brauer and Thomas A. M\"{u}ck", booktitle = "Wirtschaft und Wissenschaft in der Network Economy -- Visionen und Wirklichkeit, Tagungsband der GI/OCG-Jahrestagung", pages = "1199--1204", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-2001-CCL/", } @phdthesis{Wimmer-thesis, title = "Representing and Rendering Distant Objects for Real-Time Visualization", author = "Michael Wimmer", year = "2001", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-thesis/", } @inproceedings{Wimmer-2001-Poi, title = "Point-Based Impostors for Real-Time Visualization", author = "Michael Wimmer and Peter Wonka and Fran\c{c}ois Sillion", year = "2001", abstract = "We present a new data structure for encoding the appearance of a geometric model as seen from a viewing region (view cell). 
This representation can be used in interactive or real-time visualization applications to replace a complex model by an impostor, maintaining high quality rendering while cutting down rendering time. Our approach relies on an object-space sampled representation similar to a point cloud or a layered depth image, but introduces two fundamental additions to previous techniques. First, the sampling rate is controlled to provide sufficient density across all possible viewing conditions from the specified view cell. Second, a correct, antialiased representation of the plenoptic function is computed using Monte Carlo integration. Our system therefore achieves high quality rendering using a simple representation with bounded complexity. We demonstrate the method for an application in urban visualization.", month = jun, isbn = "3-211-83709-4", publisher = "Springer-Verlag", organization = "Eurographics", editor = "Steven J. Gortler and Karol Myszkowski", booktitle = "Rendering Techniques 2001 (Proceedings Eurographics Workshop on Rendering)", pages = "163--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-2001-Poi/", } @inproceedings{wonka-2000-VisP, title = "Visibility Preprocessing with Occluder Fusion for Urban Walkthroughs", author = "Peter Wonka and Michael Wimmer and Dieter Schmalstieg", year = "2000", abstract = "This paper presents an efficient algorithm for occlusion culling of urban environments. It is conservative and accurate in finding all significant occlusion. It discretizes the scene into view cells, for which cell-to-object visibility is precomputed, making on-line overhead negligible. Unlike other precomputation methods for view cells, it is able to conservatively compute all forms of occluder interaction for an arbitrary number of occluders. To speed up preprocessing, standard graphics hardware is exploited and occluder occlusion is considered. A walkthrough application running an 8 million polygon model of the city of Vienna on consumer-level hardware illustrates our results.", month = jun, isbn = "3-211-83535-0", publisher = "Springer-Verlag Wien New York", organization = "Eurographics", location = "held in Brno, Czech Republic, June 26-28, 2000", editor = "Bernard P\'{e}roche and Holly Rushmeier", booktitle = "Rendering Techniques 2000 (Proceedings Eurographics Workshop on Rendering)", pages = "71--82", keywords = "Visibility determination, image-based rendering, occluder occlusion, occluder fusion, urban environments, walkthrough, real-time graphics, shadow algorithms, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2000/wonka-2000-VisP/", } @article{Wimmer-1999-FWIb, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene.
We have measured speedups of up to one order of magnitude.", month = dec, issn = "0097-8493", journal = "Computers and Graphics", number = "6", volume = "23", pages = "831--838", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIb/", } @inproceedings{Wimmer-1999-FWIa, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one order of magnitude.", month = jun, isbn = "3-211-83347-1", publisher = "Springer-Verlag Wien", organization = "Eurographics", editor = "Michael Gervautz and Dieter Schmalstieg and Axel Hildebrand", booktitle = "Virtual Environments '99. Proceedings of the 5th Eurographics Workshop on Virtual Environments", pages = "73--84", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIa/", } @techreport{Wimmer-1998-SMO, title = "Load Balancing for Smooth LODs", author = "Michael Wimmer and Dieter Schmalstieg", year = "1998", abstract = "The paper shows how to derive an analytic formula for the following problem: given a set of objects in a continuous level of detail representation, and given a total number of polygons to display, determine the number of polygons to use for each object so that the best overall appearance is achieved. This improves on the situation of discrete levels of detail, where the problem has been shown to be equivalent to a constrained knapsack-problem.", month = dec, number = "TR-186-2-98-31", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "smooth LODs, level of detail, load balancing, virtual environments", URL = "https://www.cg.tuwien.ac.at/research/publications/1998/Wimmer-1998-SMO/", } @inproceedings{Wimmer-1997-ITT, title = "Interactive Techniques in Three-dimensional Modeling", author = "Michael Wimmer and Robert F. Tobler", year = "1997", month = jun, isbn = "80-223-1176-6", organization = "Comenius University, Bratislava, Slovakia", editor = "Wolfgang Stra{\ss}er", booktitle = "13th Spring Conference on Computer Graphics", pages = "41--48", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/Wimmer-1997-ITT/", } @mastersthesis{Wimmer-1996-ITB, title = "Interaktive Techniken im Bereich des dreidimensionalen Modelings", author = "Michael Wimmer", year = "1996", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/1996/Wimmer-1996-ITB/", }