@article{SCHUETZ-2020-MPC, title = "Fast Out-of-Core Octree Generation for Massive Point Clouds", author = "Markus Sch\"{u}tz and Stefan Ohrhallinger and Michael Wimmer", year = "2020", abstract = "We propose an efficient out-of-core octree generation method for arbitrarily large point clouds. It utilizes a hierarchical counting sort to quickly split the point cloud into small chunks, which are then processed in parallel. Levels of detail are generated by subsampling the full data set bottom up using one of multiple exchangeable sampling strategies. We introduce a fast hierarchical approximate blue-noise strategy and compare it to a uniform random sampling strategy. The throughput, including out-of-core access to disk, generating the octree, and writing the final result to disk, is about an order of magnitude faster than the state of the art, and reaches up to around 6 million points per second for the blue-noise approach and up to around 9 million points per second for the uniform random approach on modern SSDs.", month = nov, journal = "Computer Graphics Forum", volume = "39", number = "7", issn = "1467-8659", doi = "10.1111/cgf.14134", publisher = "John Wiley \& Sons, Inc.", pages = "1--13", keywords = "point clouds, point-based rendering, level of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/SCHUETZ-2020-MPC/", } @techreport{freude_2020_rs, title = "R-Score: A Novel Approach to Compare Monte Carlo Renderings", author = "Christian Freude and Hiroyuki Sakai and Karoly Zsolnai-Feh\'{e}r and Michael Wimmer", year = "2020", abstract = "In this paper, we propose a new approach for the comparison and analysis of Monte Carlo (MC) rendering algorithms. It is based on a novel similarity measure called render score (RS) that is specifically designed for MC rendering, statistically motivated, and incorporates bias and variance. 
Additionally, we propose a comparison scheme that alleviates the need for practically converged reference images (RIs). Our approach can be used to compare and analyze different rendering methods by revealing detailed (per-pixel) differences and subsequently potential conceptual or implementation-related issues, thereby offering a more informative and meaningful alternative to commonly used metrics.", month = aug, number = "TR-193-02-2020-4", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/freude_2020_rs/", } @misc{kerbl-2020-improvencoding, title = "Improved Triangle Encoding for Cached Adaptive Tessellation", author = "Linus Horvath and Bernhard Kerbl and Michael Wimmer", year = "2020", month = jul, location = "online", event = "HPG 2020", conference-date = "Poster presented at HPG 2020 (2020-05-01--2020-06-22)", keywords = "GPU, tessellation, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kerbl-2020-improvencoding/", } @article{zsolnaifeher-2020-pme, title = "Photorealistic Material Editing Through Direct Image Manipulation", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2020", abstract = "Creating photorealistic materials for light transport algorithms requires carefully fine-tuning a set of material properties to achieve a desired artistic effect. This is typically a lengthy process that involves a trained artist with specialized knowledge. In this work, we present a technique that aims to empower novice and intermediate-level users to synthesize high-quality photorealistic materials by only requiring basic image processing knowledge. 
In the proposed workflow, the user starts with an input image and applies a few intuitive transforms (e.g., colorization, image inpainting) within a 2D image editor of their choice, and in the next step, our technique produces a photorealistic result that approximates this target image. Our method combines the advantages of a neural network-augmented optimizer and an encoder neural network to produce high-quality output results within 30 seconds. We also demonstrate that it is resilient against poorly-edited target images and propose a simple extension to predict image sequences with a strict time budget of 1-2 seconds per image. Video: https://www.youtube.com/watch?v=8eNHEaxsj18", month = jun, journal = "Computer Graphics Forum", volume = "39", number = "4", issn = "1467-8659", doi = "10.1111/cgf.14057", pages = "107--120", keywords = "neural rendering, neural networks, photorealistic rendering, photorealistic material editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/zsolnaifeher-2020-pme/", } @article{luksch_2020, title = "Real-Time Approximation of Photometric Polygonal Lights", author = "Christian Luksch and Lukas Prost and Michael Wimmer", year = "2020", abstract = "We present a real-time rendering technique for photometric polygonal lights. Our method uses a numerical integration technique based on a triangulation to calculate noise-free diffuse shading. We include a dynamic point in the triangulation that provides a continuous near-field illumination resembling the shape of the light emitter and its characteristics. We evaluate the accuracy of our approach with a diverse selection of photometric measurement data sets in a comprehensive benchmark framework. Furthermore, we provide an extension for specular reflection on surfaces with arbitrary roughness that facilitates the use of existing real-time shading techniques. 
Our technique is easy to integrate into real-time rendering systems and extends the range of possible applications with photometric area lights.", month = may, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "3", number = "1", issn = "2577-6193", doi = "10.1145/3384537", pages = "4.1--4.18", keywords = "area lights, photometric lights, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luksch_2020/", } @article{schuetz-2020-PPC, title = "Progressive Real-Time Rendering of One Billion Points Without Hierarchical Acceleration Structures", author = "Markus Sch\"{u}tz and Gottfried Mandlburger and Johannes Otepka and Michael Wimmer", year = "2020", abstract = "Research in rendering large point clouds traditionally focused on the generation and use of hierarchical acceleration structures that allow systems to load and render the smallest fraction of the data with the largest impact on the output. The generation of these structures is slow and time consuming, however, and therefore ill-suited for tasks such as quickly looking at scan data stored in widely used unstructured file formats, or to immediately display the results of point-cloud processing tasks. We propose a progressive method that is capable of rendering any point cloud that fits in GPU memory in real time, without the need to generate hierarchical acceleration structures in advance. Our method supports data sets with a large amount of attributes per point, achieves a load performance of up to 100 million points per second, displays already loaded data in real time while remaining data is still being loaded, and is capable of rendering up to one billion points using an on-the-fly generated shuffled vertex buffer as its data structure, instead of slow-to-generate hierarchical structures. 
Shuffling is done during loading in order to allow efficiently filling holes with random subsets, which leads to a higher quality convergence behavior. ", month = may, journal = "Computer Graphics Forum", volume = "39", number = "2", issn = "1467-8659", doi = "10.1111/cgf.13911", booktitle = "EUROGRAPHICS", publisher = "John Wiley \& Sons Ltd.", pages = "51--64", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schuetz-2020-PPC/", } @inproceedings{tatzgern-2020-sst, title = "Stochastic Substitute Trees for Real-Time Global Illumination", author = "Wolfgang Tatzgern and Benedikt Mayr and Bernhard Kerbl and Markus Steinberger", year = "2020", abstract = "With the introduction of hardware-supported ray tracing and deep learning for denoising, computer graphics has made a considerable step toward real-time global illumination. In this work, we present an alternative global illumination method: The stochastic substitute tree (SST), a hierarchical structure inspired by lightcuts with light probability distributions as inner nodes. Our approach distributes virtual point lights (VPLs) in every frame and efficiently constructs the SST over those lights by clustering according to Morton codes. Global illumination is approximated by sampling the SST and considers the BRDF at the hit location as well as the SST nodes' intensities for importance sampling directly from inner nodes of the tree. To remove the introduced Monte Carlo noise, we use a recurrent autoencoder. 
In combination with temporal filtering, we deliver real-time global illumination for complex scenes with challenging light distributions.", month = may, event = "I3D ’20", booktitle = "Symposium on Interactive 3D Graphics and Games", pages = "1--9", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/tatzgern-2020-sst/", } @inproceedings{unterguggenberger-2020-fmvr, title = "Fast Multi-View Rendering for Real-Time Applications", author = "Johannes Unterguggenberger and Bernhard Kerbl and Markus Steinberger and Dieter Schmalstieg and Michael Wimmer", year = "2020", abstract = "Efficient rendering of multiple views can be a critical performance factor for real-time rendering applications. Generating more than one view multiplies the amount of rendered geometry, which can cause a huge performance impact. Minimizing that impact has been a target of previous research and GPU manufacturers, who have started to equip devices with dedicated acceleration units. However, vendor-specific acceleration is not the only option to increase multi-view rendering (MVR) performance. Available graphics API features, shader stages and optimizations can be exploited for improved MVR performance, while generally offering more versatile pipeline configurations, including the preservation of custom tessellation and geometry shaders. In this paper, we present an exhaustive evaluation of MVR pipelines available on modern GPUs. We provide a detailed analysis of previous techniques, hardware-accelerated MVR and propose a novel method, leading to the creation of an MVR catalogue. Our analyses cover three distinct applications to help gain clarity on overall MVR performance characteristics. 
Our interpretation of the observed results provides a guideline for selecting the most appropriate one for various use cases on different GPU architectures.", month = may, isbn = "978-3-03868-107-6", organization = "Eurographics", location = "online", event = "EGPGV 2020", editor = "Frey, Steffen and Huang, Jian and Sadlo, Filip", doi = "10.2312/pgv.20201071", booktitle = "Eurographics Symposium on Parallel Graphics and Visualization", pages = "13--23", keywords = "Real-Time Rendering, Rasterization, Multi-View, OVR_multiview, Geometry Shader, Evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/unterguggenberger-2020-fmvr/", } @inproceedings{kroesl-2020-XREye, title = "XREye: Simulating Visual Impairments in Eye-Tracked XR ", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Steven Feiner and Michael Wimmer", year = "2020", abstract = "Many people suffer from visual impairments, which can be difficult for patients to describe and others to visualize. 
To aid in understanding what people with visual impairments experience, we demonstrate a set of medically informed simulations in eye-tracked XR of several common conditions that affect visual perception: refractive errors (myopia, hyperopia, and presbyopia), cornea disease, and age-related macular degeneration (wet and dry).", month = mar, booktitle = "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", location = "(Atlanta) online", publisher = "IEEE", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/", } @phdthesis{zsolnai-feher-thesis-2019, title = "Photorealistic Material Learning and Synthesis", author = "Karoly Zsolnai-Feh\'{e}r", year = "2019", abstract = "Light transport simulations are the industry-standard way of creating convincing photorealistic imagery and are widely used in creating animation movies, computer animations, medical and architectural visualizations among many other notable applications. These techniques simulate how millions of rays of light interact with a virtual scene, where the realism of the final output depends greatly on the quality of the used materials and the geometry of the objects within this scene. In this thesis, we endeavor to address two key issues pertaining to photorealistic material synthesis: first, creating convincing photorealistic materials requires years of expertise in this field and requires a non-trivial amount of trial and error from the side of the artist. We propose two learning-based methods that enables novice users to easily and quickly synthesize photorealistic materials by learning their preferences and recommending arbitrarily many new material models that are in line with their artistic vision. We also augmented these systems with a neural renderer that performs accurate light-transport simulation for these materials orders of magnitude quicker than the photorealistic rendering engines commonly used for these tasks. 
As a result, novice users are now able to perform mass-scale material synthesis, and even expert users experience a significant improvement in modeling times when many material models are sought. Second, simulating subsurface light transport leads to convincing translucent material visualizations, however, most published techniques either take several hours to compute an image, or make simplifying assumptions regarding the underlying physical laws of volumetric scattering. We propose a set of real-time methods to remedy this issue by decomposing well-known 2D convolution filters into a set of separable 1D convolutions while retaining a high degree of visual accuracy. These methods execute within a few milliseconds and can be inserted into state-of-the-art rendering systems as a simple post-processing step without introducing intrusive changes into the rendering pipeline.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "neural rendering, machine learning, photorealistic rendering, ray tracing, global illumination, material synthesis", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/zsolnai-feher-thesis-2019/", } @phdthesis{klein_2019_PHD, title = "Instant Construction of Atomistic Models for Visualization in Integrative Cell Biology", author = "Tobias Klein", year = "2019", abstract = "Computational models have advanced research of integrative cell biology in various ways. Especially in the biological mesoscale, the scale between atoms and cellular environments, computational models improve the understanding and qualitative analysis. The mesoscale is an important range, since it represents the range of scales that are not fully accessible to a single experimental technique. 
Complex molecular assemblies within this scale have been visualized with x-ray crystallography, though only in isolation. Mesoscale models show how molecules are assembled into more complex subcellular environments that orchestrate the processes of life. The skillful combination of the results of imaging and experimental techniques provides a glimpse of the processes, which are happening here. Only recently, biologists have started to unify the various sources of information. They have begun to computationally assemble and subsequently visualize complex environments, such as viruses or bacteria. Currently, we live in an opportune time for researching integrative structural biology due to several factors. First and foremost, the wealth of data, driven through sources like online databases, makes structural information about biological entities publicly available. In addition to that, the progress of parallel processors builds the foundation to instantly construct and render large mesoscale environments in atomistic detail. Finally, new scientific advances in visualization allow the efficient rendering of complex biological phenomena with millions of structural units. In this cumulative thesis, we propose several novel techniques that facilitate the instant construction of mesoscale structures. The common methodological strategy of these techniques and insight from this thesis is ``compute instead of store''. This approach eliminates the storage and memory management complexity, and enables instant changes of the constructed models. Combined, our techniques are capable of instantly constructing large-scale biological environments using the basic structural building blocks of cells. These building blocks are mainly nucleic acids, lipids, and soluble proteins. For the generation of long linear polymers formed by nucleic acids, we propose a parallel construction technique that makes use of a midpoint displacement algorithm. 
The efficient generation of lipid membranes is realized through a texture synthesis approach that makes use of the Wang tiling concept. For the population of soluble proteins, we present a staged algorithm, whereby each stage is processed in parallel. We have integrated the instant construction approach into a visual environment in order to improve several aspects. First, it allows immediate feedback on the created structures and the results of parameter changes. Additionally, the integration of construction in visualization builds the foundation for visualization systems that strive to construct large-scale environments on-the-fly. Lastly, it advances the qualitative analysis of biological mesoscale environments, where a multitude of synthesized models is required. In order to disseminate the physiology of biological mesoscale models, we propose a novel concept that simplifies the creation of multi-scale procedural animations. ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/klein_2019_PHD/", } @misc{SCHUETZ-2019-PCC, title = "Rendering Point Clouds with Compute Shaders", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "We propose a compute shader based point cloud rasterizer with up to 10 times higher performance than classic point-based rendering with the GL\_POINT primitive. 
In addition to that, our rasterizer offers 5 byte depth-buffer precision with uniform or customizable distribution, and we show that it is possible to implement a high-quality splatting method that blends together overlapping fragments while still maintaining higher frame-rates than the traditional approach.", month = nov, isbn = "978-1-4503-6943-5/19/11", event = "SIGGRAPH Asia", conference-date = "Poster presented at SIGGRAPH Asia (2019-11)", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/SCHUETZ-2019-PCC/", } @article{klein_2019_PMP, title = "Multi-Scale Procedural Animations of Microtubule Dynamics Based on Measured Data", author = "Tobias Klein and Ivan Viola and Eduard Gr\"{o}ller and Peter Mindek", year = "2019", abstract = "Biologists often use computer graphics to visualize structures, which due to physical limitations are not possible to image with a microscope. One example for such structures are microtubules, which are present in every eukaryotic cell. They are part of the cytoskeleton maintaining the shape of the cell and playing a key role in the cell division. In this paper, we propose a scientifically-accurate multi-scale procedural model of microtubule dynamics as a novel application scenario for procedural animation, which can generate visualizations of their overall shape, molecular structure, as well as animations of the dynamic behaviour of their growth and disassembly. The model is spanning from tens of micrometers down to atomic resolution. All the aspects of the model are driven by scientific data. The advantage over a traditional, manual animation approach is that when the underlying data change, for instance due to new evidence, the model can be recreated immediately. 
The procedural animation concept is presented in its generic form, with several novel extensions, facilitating an easy translation to other domains with emergent multi-scale behavior.", month = aug, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", number = "1", doi = "10.1109/TVCG.2019.2934612", pages = "622--632", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/klein_2019_PMP/", } @article{celarek_adam-2019-qelta, title = "Quantifying the Error of Light Transport Algorithms", author = "Adam Celarek and Wenzel Jakob and Michael Wimmer and Jaakko Lehtinen", year = "2019", abstract = "This paper proposes a new methodology for measuring the error of unbiased physically based rendering algorithms. The current state of the art includes mean squared error (MSE) based metrics and visual comparisons of equal-time renderings of competing algorithms. Neither is satisfying as MSE does not describe behavior and can exhibit significant variance, and visual comparisons are inherently subjective. Our contribution is two-fold: First, we propose to compute many short renderings instead of a single long run and use the short renderings to estimate MSE expectation and variance as well as per-pixel standard deviation. An algorithm that achieves good results in most runs, but with occasional outliers is essentially unreliable, which we wish to quantify numerically. We use per-pixel standard deviation to identify problematic lighting effects of rendering algorithms. The second contribution is the error spectrum ensemble (ESE), a tool for measuring the distribution of error over frequencies. 
The ESE serves two purposes: It reveals correlation between pixels and can be used to detect outliers, which offset the amount of error substantially.", month = jul, journal = "Computer Graphics Forum", volume = "38", number = "4", doi = "10.1111/cgf.13775", publisher = "The Eurographics Association and John Wiley & Sons Ltd.", pages = "111--121", keywords = "measuring error, light transport, global illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/celarek_adam-2019-qelta/", } @inproceedings{kroesl-2019-ThesisFF, title = "Simulating Vision Impairments in VR and AR", author = "Katharina Kr\"{o}sl", year = "2019", abstract = "1.3 billion people worldwide are affected by vision impairments, according to the World Health Organization. However, vision impairments are hardly ever taken into account when we design our cities, buildings, emergency signposting, or lighting systems. With this research, we want to develop realistic, medically based simulations of eye diseases in VR and AR, which allow calibrating vision impairments to the same level for different users. This allows us to conduct user studies with participants with normal sight and graphically simulated vision impairments, to determine the effects of these impairments on perception, and to investigate lighting concepts under impaired vision conditions. 
This thesis will, for the first time, provide methods for architects and designers to evaluate their designs for accessibility and to develop lighting systems that can enhance the perception of people with vision impairments.", month = jun, booktitle = "ACM SIGGRAPH THESIS FAST FORWARD 2019", keywords = "vision impairments, cataracts, virtual reality, augmented reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/", } @article{CORNEL-2019-IVF, title = "Interactive Visualization of Flood and Heavy Rain Simulations", author = "Daniel Cornel and Andreas Buttinger-Kreuzhuber and Artem Konev and Zsolt Horvath and Michael Wimmer and Raimund Heidrich and J\"{u}rgen Waser", year = "2019", abstract = "In this paper, we present a real-time technique to visualize large-scale adaptive height fields with C1-continuous surface reconstruction. Grid-based shallow water simulation is an indispensable tool for interactive flood management applications. Height fields defined on adaptive grids are often the only viable option to store and process the massive simulation data. Their visualization requires the reconstruction of a continuous surface from the spatially discrete simulation data. For regular grids, fast linear and cubic interpolation are commonly used for surface reconstruction. For adaptive grids, however, there exists no higher-order interpolation technique fast enough for interactive applications. Our proposed technique bridges the gap between fast linear and expensive higher-order interpolation for adaptive surface reconstruction. During reconstruction, no matter if regular or adaptive, discretization and interpolation artifacts can occur, which domain experts consider misleading and unaesthetic. We take into account boundary conditions to eliminate these artifacts, which include water climbing uphill, diving towards walls, and leaking through thin objects. 
We apply realistic water shading with visual cues for depth perception and add waves and foam synthesized from the simulation data to emphasize flow directions. The versatility and performance of our technique are demonstrated in various real-world scenarios. A survey conducted with domain experts of different backgrounds and concerned citizens proves the usefulness and effectiveness of our technique.", month = jun, journal = "Computer Graphics Forum", volume = "38", number = "3", issn = "1467-8659", doi = "10.1111/cgf.13669", pages = "25--39", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/CORNEL-2019-IVF/", } @misc{kerbl_2019_planet_poster, title = "Real-time Rendering of Procedural Planets at Arbitrary Altitudes", author = "Florian Michelic and Michael Kenzel and Karl Haubenwallner and Bernhard Kerbl and Markus Steinberger", year = "2019", abstract = "Focusing on real-time, high-fidelity rendering, we present a novel approach for combined consideration of four major phenomena that define the visual representation of entire planets: We present a simple and fast solution for a distortion-free generation of 3D planetary terrain, spherical ocean waves and efficient rendering of volumetric clouds along with atmospheric scattering. Our approach to terrain and ocean mesh generation relies on a projected, persistent grid that can instantaneously and smoothly adapt to fast-changing viewpoints. For generating planetary ocean surfaces, we present a wave function that creates seamless, evenly spaced waves across the entire planet without causing unsightly artifacts. We further show how to render volumetric clouds in combination with precomputed atmospheric scattering and account for their contribution to light transport above ground. Our method provides mathematically consistent approximations of cloud-atmosphere interactions and works for any view point and direction, ensuring continuous transitions in appearance as the viewer moves from ground to space. 
Among others, our approach supports cloud shadows, light shafts, ocean reflections, and earth shadows on the clouds. The sum of these effects can be visualized at more than 120 frames per second on current graphics processing units.", month = may, note = "Voted best poster of I3D '19", location = "Montreal, Canada", event = "I3D 2019", conference-date = "Poster presented at I3D 2019 (2019-05-21--2019-05-23)", keywords = "planet, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kerbl_2019_planet_poster/", } @inproceedings{LUKSCH-2019-IGI, title = "Incrementally Baked Global Illumination", author = "Christian Luksch and Michael Wimmer and Michael Schw\"{a}rzler", year = "2019", abstract = "Global Illumination is affected by the slightest change in a 3D scene, requiring a complete reevaluation of the distributed light. In cases where real-time algorithms are not applicable due to high demands on the achievable accuracy, this recomputation from scratch results in artifacts like flickering or noise, disturbing the visual appearance and negatively affecting interactive lighting design workflows. We propose a novel system tackling this problem by providing incremental updates of a baked global illumination solution after scene modifications, and a re-convergence after a few seconds. Using specifically targeted incremental data structures and prioritization strategies in a many-light global illumination algorithm, we compute a differential update from one illumination state to another. 
We further demonstrate the use of a novel error balancing strategy making it possible to prioritize the illumination updates.", month = may, isbn = "978-1-4503-6310-5", series = "I3D ’19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317015", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", pages = "4:1--4:10", keywords = "Global Illumination, Instant Radiosity, Lightmaps", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/LUKSCH-2019-IGI/", } @inproceedings{STEINLECHNER-2019-APS, title = "Adaptive Point-cloud Segmentation for Assisted Interactions", author = "Harald Steinlechner and Bernhard Rainer and Michael Schw\"{a}rzler and Georg Haaser and Attila Szabo and Stefan Maierhofer and Michael Wimmer", year = "2019", abstract = "In this work, we propose an interaction-driven approach streamlined to support and improve a wide range of real-time 2D interaction metaphors for arbitrarily large pointclouds based on detected primitive shapes. Rather than performing shape detection as a costly pre-processing step on the entire point cloud at once, a user-controlled interaction determines the region that is to be segmented next. By keeping the size of the region and the number of points small, the algorithm produces meaningful results and therefore feedback on the local geometry within a fraction of a second. 
We can apply these findings for improved picking and selection metaphors in large point clouds, and propose further novel shape-assisted interactions that utilize this local semantic information to improve the user's workflow.", month = may, isbn = "978-1-4503-6310-5", series = "I3D '19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317023", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games", pages = "14:1--14:9", keywords = "Pointcloud Segmentation, Shape Detection, Interactive Editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/STEINLECHNER-2019-APS/", } @misc{schuetz-2019-LCO, title = "Live Coding of a VR Render Engine in VR", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "Live coding in virtual reality allows users to create and modify their surroundings through code without the need to leave the virtual reality environment. Previous work focuses on modifying the scene. We propose an application that allows developers to modify virtually everything at runtime, including the scene but also the render engine, shader code and input handling, using standard desktop IDEs through a desktop mirror. 
", month = mar, publisher = "IEEE", location = "Osaka", address = "http://ieeevr.org/2019/", event = "IEEE VR 2019", doi = "https://doi.org/10.1109/VR.2019.8797760", Conference date = "Poster presented at IEEE VR 2019 (2019-03)", note = "1150--1151", pages = "1150 – 1151", keywords = "virtual reality, live coding, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-LCO/", } @incollection{BOKSANSKY-2019-RTS, title = "Ray Traced Shadows: Maintaining Real-Time Frame Rates", author = "Jakub Boksansky and Michael Wimmer and Jir\'{i} Bittner", year = "2019", abstract = "Efficient and accurate shadow computation is a long-standing problem in computer graphics. In real-time applications, shadows have traditionally been computed using the rasterization-based pipeline. With recent advances of graphics hardware, it is now possible to use ray tracing in real-time applications, making ray traced shadows a viable alternative to rasterization. While ray traced shadows avoid many problems inherent in rasterized shadows, tracing every shadow ray independently can become a bottleneck if the number of required rays rises, e.g., for high-resolution rendering, for scenes with multiple lights, or for area lights. Therefore, the computation should focus on image regions where shadows actually appear, in particular on the shadow boundaries. We present a practical method for ray traced shadows in real-time applications. Our method uses the standard rasterization pipeline for resolving primary-ray visibility and ray tracing for resolving visibility of light sources. We propose an adaptive sampling algorithm for shadow rays combined with an adaptive shadowfiltering method. These two techniques allow computing high-quality shadows with a limited number of shadow rays per pixel. 
We evaluated our method using a recent real-time ray tracing API (DirectX Raytracing) and compare the results with shadow mapping using cascaded shadow maps.", month = mar, address = "New York", booktitle = "Ray Tracing Gems: High-Quality and Real-Time Rendering with DXR and Other APIs", doi = "10.1007/978-1-4842-4427-2_13", editor = "Eric Haines and Tomas Akenine-M\"{o}ller", isbn = "978-1-4842-4426-5", publisher = "Springer", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/BOKSANSKY-2019-RTS/", } @inproceedings{schuetz-2019-CLOD, title = "Real-Time Continuous Level of Detail Rendering of Point Clouds", author = "Markus Sch\"{u}tz and Katharina Kr\"{o}sl and Michael Wimmer", year = "2019", abstract = "Real-time rendering of large point clouds requires acceleration structures that reduce the number of points drawn on screen. State-of-the art algorithms group and render points in hierarchically organized chunks with varying extent and density, which results in sudden changes of density from one level of detail to another, as well as noticeable popping artifacts when additional chunks are blended in or out. These popping artifacts are especially noticeable at lower levels of detail, and consequently in virtual reality, where high performance requirements impose a reduction in detail. We propose a continuous level-of-detail method that exhibits gradual rather than sudden changes in density. Our method continuously recreates a down-sampled vertex buffer from the full point cloud, based on camera orientation, position, and distance to the camera, in a point-wise rather than chunk-wise fashion and at speeds up to 17 million points per millisecond. As a result, additional details are blended in or out in a less noticeable and significantly less irritating manner as compared to the state of the art. 
The improved acceptance of our method was successfully evaluated in a user study.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798284", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "103--110", keywords = "point clouds, virtual reality, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-CLOD/", } @inproceedings{ZOTTI-2016-VAA, title = "Virtual Archaeoastronomy: Stellarium for Research and Outreach", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer and Wolfgang Neubauer", year = "2019", abstract = "In the last few years, the open-source desktop planetarium program Stellarium has become ever more popular for research and dissemination of results in Cultural Astronomy. In this time we have added significant capabilities for applications in cultural astronomy to the program. The latest addition allows its use in a multi-screen installation running both completely automated and manually controlled setups. 
During the development time, also the accuracy of astronomical simulation has been greatly improved.", month = mar, isbn = "978-3-319-97006-6", publisher = "Springer", location = "Milano, Italy", event = "SIA 2016 (16th Conference of the Italian Society for Archaeoastronomy)", booktitle = "Archaeoastronomy in the Roman World (Proceedings 16th Conference of the Italian Society for Archaeoastronomy)", pages = "187--205", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ZOTTI-2016-VAA/", } @inproceedings{kroesl-2019-ICthroughVR, title = "ICthroughVR: Illuminating Cataracts through Virtual Reality", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Michael Wimmer and Steven Feiner", year = "2019", abstract = "Vision impairments, such as cataracts, affect how many people interact with their environment, yet are rarely considered by architects and lighting designers because of a lack of design tools. To address this, we present a method to simulate vision impairments caused by cataracts in virtual reality (VR), using eye tracking for gaze-dependent effects. We conducted a user study to investigate how lighting affects visual perception for users with cataracts. 
Unlike past approaches, we account for the user's vision and some constraints of VR headsets, allowing for calibration of our simulation to the same level of degraded vision for all participants.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798239", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "655--663", keywords = "vision impairments, cataracts, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/", } @inproceedings{STEINLECHNER-2019-ICT, title = "A Novel Approach for Immediate, Interactive CT Data Visualization and Evaluation using GPU-based Segmentation and Visual Analysis", author = "Harald Steinlechner and Georg Haaser and Bernd Oberdorfer and Daniel Habe and Stefan Maierhofer and Michael Schw\"{a}rzler and Eduard Gr\"{o}ller", year = "2019", abstract = "CT data of industrially produced cast metal parts are often afflicted with artefacts due to complex geometries ill-suited for the scanning process. Simple global threshold-based porosity detection algorithms usually fail to deliver meaningful results. Other adaptive methods can handle image artefacts, but require long preprocessing times. This makes an efficient analysis workflow infeasible. We propose an alternative approach for analyzing and visualizing volume defects in a fully interactive manner, where analyzing volumes becomes more of an interactive exploration instead of time-consuming parameter guessing interrupted by long processing times. Our system is based on a highly efficient GPU implementation of a segmentation algorithm for porosity detection. The runtime is on the order of seconds for a full volume and parametrization is kept simple due to a single threshold parameter. 
A fully interactive user interface comprised of multiple linked views allows to quickly identify defects of interest, while filtering out artefacts even in noisy areas.", month = feb, location = "Padova, Italy", event = "International Conference on Industrial Computed Tomography (ICT) 2019", editor = "Simone Carmignato", booktitle = "International Conference on Industrial Computed Tomography (ICT) 2019", pages = "1--6", keywords = "CT, GPU, Inclusion Detection, Interactive Visualisation, Visual Analysis, Parallel Coordinates, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/STEINLECHNER-2019-ICT/", } @article{Konev-FCV2018, title = "Fast cutaway visualization of sub-terrain tubular networks", author = "Artem Konev and Manuel Matusich and Ivan Viola and Hendrik Schulze and Daniel Cornel and J\"{u}rgen Waser", year = "2018", month = oct, doi = "10.1016/j.cag.2018.07.004", issn = "0097-8493", journal = "Computers \& Graphics", number = "5", volume = "75", pages = "25--35", keywords = "Cutaway visualization, Procedural billboarding, Subsurface networks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Konev-FCV2018/", } @inproceedings{kroesl-2018-DC, title = "[DC] Computational Design of Smart Lighting Systems for Visually Impaired People, using VR and AR Simulations", author = "Katharina Kr\"{o}sl", year = "2018", abstract = "This Doctoral Consortium paper presents my dissertation research in a multidisciplinary setting, spanning over the areas of architecture, specifically lighting design and building information modeling, to virtual reality (VR) and perception. Since vision impairments are hardly taken into account in architecture and lighting design today, this research aims to provide the necessary tools to quantify the effects of vision impairments, so design guidelines regarding these impairments can be developed. 
Another research goal is the determination of the influence of different lighting conditions on the perception of people with vision impairments. This would allow us to develop smart lighting systems that can aid visually impaired people by increasing their visual perception of their environment. This paper also outlines the concept for a tool to automatically generate lighting solutions and compare and test them in VR, as design aid for architects and lighting designers.", month = oct, publisher = "IEEE", location = "Munich", event = "ISMAR 2018", booktitle = "Proceedings of the 2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", keywords = "vision impairments, lighting design, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/", } @misc{schuetz-2018-PPC, title = "Progressive Real-Time Rendering of Unprocessed Point Clouds", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2018", abstract = "Rendering tens of millions of points in real time usually requires either high-end graphics cards, or the use of spatial acceleration structures. We introduce a method to progressively display as many points as the GPU memory can hold in real time by reprojecting what was visible and randomly adding additional points to uniformly converge towards the full result within a few frames. Our method heavily limits the number of points that have to be rendered each frame and it converges quickly and in a visually pleasing way, which makes it suitable even for notebooks with low-end GPUs. The data structure consists of a randomly shuffled array of points that is incrementally generated on-the-fly while points are being loaded. 
Due to this, it can be used to directly view point clouds in common sequential formats such as LAS or LAZ while they are being loaded and without the need to generate spatial acceleration structures in advance, as long as the data fits into GPU memory.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0/18/08", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230816", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", note = "Article 41--", pages = "Article 41 – ", keywords = "point based rendering, point cloud, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schuetz-2018-PPC/", } @article{zsolnai-2018-gms, title = "Gaussian Material Synthesis", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2018", abstract = "We present a learning-based system for rapid mass-scale material synthesis that is useful for novice and expert users alike. The user preferences are learned via Gaussian Process Regression and can be easily sampled for new recommendations. Typically, each recommendation takes 40-60 seconds to render with global illumination, which makes this process impracticable for real-world workflows. Our neural network eliminates this bottleneck by providing high-quality image predictions in real time, after which it is possible to pick the desired materials from a gallery and assign them to a scene in an intuitive manner. Workflow timings against Disney’s “principled” shader reveal that our system scales well with the number of sought materials, thus empowering even novice users to generate hundreds of high-quality material models without any expertise in material modeling. Similarly, expert users experience a significant decrease in the total modeling time when populating a scene with materials. 
Furthermore, our proposed solution also offers controllable recommendations and a novel latent space variant generation step to enable the real-time fine-tuning of materials without requiring any domain expertise.", month = aug, journal = "ACM Transactions on Graphics (SIGGRAPH 2018)", volume = "37", number = "4", issn = "0730-0301", doi = "10.1145/3197517.3201307", pages = "76:1--76:14", keywords = "gaussian material synthesis, neural rendering, neural rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/zsolnai-2018-gms/", } @misc{kroesl-2018-TVS, title = "The Virtual Schoolyard: Attention Training in Virtual Reality for Children with Attentional Disorders", author = "Katharina Kr\"{o}sl and Anna Felnhofer and Johanna X. Kafka and Laura Schuster and Alexandra Rinnerthaler and Michael Wimmer and Oswald D. Kothgassner", year = "2018", abstract = "This work presents a virtual reality simulation for training different attentional abilities in children and adolescents. In an interdisciplinary project between psychology and computer science, we developed four mini-games that are used during therapy sessions to battle different aspects of attentional disorders. First experiments show that the immersive game-like application is well received by children. 
Our tool is also currently part of a treatment program in an ongoing clinical study.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230817", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", note = "Article 27--", pages = "Article 27 – ", keywords = "virtual reality, attentional disorders, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/", } @phdthesis{schwaerzler_2018_phd, title = "Advances in the Multimodal 3D Reconstruction and Modeling of Buildings", author = "Michael Schw\"{a}rzler", year = "2018", abstract = "Driven by the need for faster and more efficient workflows in the digitization of urban environments, the availability of affordable 3D data-acquisition systems for buildings has drastically increased in the last years: Laser scanners and photogrammetric methods both produce millions of 3D points within minutes of acquisition time. They are applied both on street-level as well as from above using drones, and are used to enhance traditional tachymetric measurements in surveying. However, these 3D data points are not the only available information: Extracted meta data from images, simulation results (e.g., from light simulations), 2D floor plans, and semantic tags – especially from the upcoming Building Information Modeling (BIM) systems – are becoming increasingly important. The challenges this multimodality poses during the reconstruction of CAD-ready 3D buildings are manifold: Apart from handling the enormous size of the data that is collected during the acquisition steps, the different data sources must also be registered to each other in order to be applicable in a common context – which can be difficult in case of missing or erroneous information. 
Nevertheless, the potential for improving both the workflow efficiency as well as the quality of the reconstruction results is huge: Missing information can be substituted by data from other sources, information about spatial or semantic relations can be utilized to overcome limitations, and interactive modeling complexity can be reduced (e.g., by limiting interactions to a two-dimensional space). In this thesis, four publications are presented which aim at providing freely combinable “building blocks” for the creation of helpful methods and tools for advancing the field of Multimodal Urban Reconstruction. First, efficient methods for the calculation of shadows cast by area light sources are presented – one with a focus on the most efficient generation of physically accurate penumbras, and the other one with the goal of reusing soft shadow information in consecutive frames to avoid costly recalculations. Then, a novel, optimization-supported reconstruction and modeling tool is presented, which employs sketch-based interactions and snapping techniques to create water-tight 3D building models. An extension to this system is demonstrated consecutively: There, 2D photos act as the only interaction canvas for the simple, sketch-based creation of building geometry and the corresponding textures. 
Together, these methods form a solid foundation for the creation of common, multimodal environments targeted at the reconstruction of 3D building models.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schwaerzler_2018_phd/", } @article{Kathi-2018-VRB, title = "A VR-based user study on the effects of vision impairments on recognition distances of escape-route signs in buildings", author = "Katharina Kr\"{o}sl and Dominik Bauer and Michael Schw\"{a}rzler and Henry Fuchs and Michael Wimmer and Georg Suter", year = "2018", abstract = "In workplaces or publicly accessible buildings, escape routes are signposted according to official norms or international standards that specify distances, angles and areas of interest for the positioning of escape-route signs. In homes for the elderly, in which the residents commonly have degraded mobility and suffer from vision impairments caused by age or eye diseases, the specifications of current norms and standards may be insufficient. Quantifying the effect of symptoms of vision impairments like reduced visual acuity on recognition distances is challenging, as it is cumbersome to find a large number of user study participants who suffer from exactly the same form of vision impairments. Hence, we propose a new methodology for such user studies: By conducting a user study in virtual reality (VR), we are able to use participants with normal or corrected sight and simulate vision impairments graphically. The use of standardized medical eyesight tests in VR allows us to calibrate the visual acuity of all our participants to the same level, taking their respective visual acuity into account. 
Since we primarily focus on homes for the elderly, we accounted for their often limited mobility by implementing a wheelchair simulation for our VR application.", month = apr, journal = "The Visual Computer", volume = "34", number = "6-8", issn = "0178-2789", doi = "10.1007/s00371-018-1517-7", pages = "911--923", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/", } @inproceedings{PB-VRVis-2018-005, title = "An Automated Verification Workflow for Planned Lighting Setups using BIM", author = "Andreas Walch and Katharina Kr\"{o}sl and Christian Luksch and David Pichler and Thomas Pipp and Michael Schw\"{a}rzler", year = "2018", abstract = "The use of Building Information Modeling (BIM) methods is becoming more and more established in the planning stage, during the construction, and for the management of buildings. Tailored BIM software packages allow to handle a vast amount of relevant aspects, but have so far not been covering specialized tasks like the evaluation of light distributions in and around a 3D model of a building. To overcome this limitation, we demonstrate the use of the open-source IFC format for preparing and exchanging BIM data to be used in our interactive light simulation system. By exploiting the availability of 3D data and semantic descriptions, it is possible to automatically place measurement surfaces in the 3D scene, and evaluate the suitability and sustainability of a planned lighting design according to given constraints and industry norms. Interactive visualizations for fast analysis of the simulation results, created using state-of-the-art web technologies, are seamlessly integrated in the 3D work environment, helping the lighting designer to quickly improve the initial lighting solution with a few clicks.", month = apr, isbn = "978-3-9504173-5-7", series = "REAL CORP", event = "REAL CORP 2018", editor = "M. Schrenk and V. V. Popovich and P. Zeile and P. Elisei and C. Beyerand G. 
Navratil", booktitle = "REAL CORP 2018, Proceedings", pages = "55–65", pages = "55--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/PB-VRVis-2018-005/", } @phdthesis{preiner_2017_phd, title = "Dynamic and Probabilistic Point-Cloud Processing", author = "Reinhold Preiner", year = "2017", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/preiner_2017_phd/", } @article{ZOTTI-2017-BM, title = "Beyond 3D Models: Simulation of Temporally Evolving Models in Stellarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "In recent years, the interactive visual exploration and demonstration of three-dimensional virtual models of buildings or natural structures of archaeoastronomical interest under a simulated sky has become available for users of the open-source desktop planetarium program Stellarium [Zotti, 2015, 2016]. Users can load an architectural model in the well-known OBJ format and walk around to explore sight lines or light-and-shadow interaction in present and past times [Frischer et al., 2016]. However, until now, the model itself did not change in time, and loading models for various building phases (e.g., the assumed order of building the various standing stones, timber circles and stone circles of Stonehenge) always required a break in simulation and user interaction to load a model for the next phase. On the other hand, displaying a model under the sky of the wrong time may lead to inappropriate conclusions. Large-area models required considerable time to load, and loading caused a reset of location, so the user interested in changes in a certain viewing axis had to recreate that view again. 
Given that Stellarium is an “astronomical time machine”, nowadays capable of replaying sky vistas thousands of years ago with increasing accuracy [Zotti et al., submitted] and also for models with several million triangular faces, it seemed worth to explore possibilities to also show changes over time in the simulated buildings. The Scenery3D plugin of Stellarium is, however, not a complete game engine, and replicating the infrastructure found in such game engines like Unity3D – for example to interactively move game objects, or load small sub-components like standing stones and place them at arbitrary coordinates – seemed overkill. The solution introduced here is remarkably simple and should be easily adoptable for the casual model-making researcher: the MTL material description for the model, a simple plain-text file that describes colour, reflection behaviour, photo-texture or transparency of the various parts of the object, can be extended for our rendering system. Newly introduced values describe dates where parts of the model can appear and disappear (with transitional transparency to allow for archaeological dating uncertainties). The model parts with these enhanced, time-aware materials appear to fade in during the indicated time, will be fully visible in their “active” time, and will fade out again when Stellarium is set to simulate the sky when the real-world structures most likely have vanished. The only requirement for the model creator is now to separate objects so that they receive unique materials that can then be identified and augmented with these entries in the MTL text file. The advantages of this new feature should be clear: an observer can remain in a certain location in the virtual model and let the land- and skyscape change over decades or centuries, without the need to load new models. 
This allows the simulation of construction and reconstruction phases while still always keeping particularly interesting viewpoints unchanged, and will always show the matching sky for the most appropriate reconstruction phase of the model. ", month = sep, journal = "Mediterranean Archaeology and Archaeometry", volume = "18", number = "4", issn = "1108-9628", doi = "10.5281/zenodo.1477972", booktitle = "25th SEAC Conference", pages = "501--506", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-BM/", } @inproceedings{kroesl-2017-LiteMaker, title = "LiteMaker: Interactive Luminaire Development using Progressive Photon Tracing and Multi-Resolution Upsampling", author = "Katharina Kr\"{o}sl and Christian Luksch and Michael Schw\"{a}rzler and Michael Wimmer", year = "2017", abstract = "Industrial applications like luminaire development (the creation of a luminaire in terms of geometry and material) or lighting design (the efficient and aesthetic placement of luminaires in a virtual scene) rely heavily on high realism and physically correct simulations. Using typical approaches like CAD modeling and offline rendering, this requirement induces long processing times and therefore inflexible workflows. In this paper, we combine a GPU-based progressive photon-tracing algorithm to accurately simulate the light distribution of a luminaire with a novel multi-resolution image-filtering approach that produces visually meaningful intermediate results of the simulation process. By using this method in a 3D modeling environment, luminaire development is turned into an interactive process, allowing for real-time modifications and immediate feedback on the light distribution. 
Since the simulation results converge to a physically plausible solution that can be imported as a representation of a luminaire into a light-planning software, our work contributes to combining the two former decoupled workflows of luminaire development and lighting design, reducing the overall production time and cost for luminaire manufacturers. ", month = sep, isbn = "978-3-03868-049-9", publisher = "The Eurographics Association", location = "Bonn, Germany", event = "VMV 2017", editor = "Matthias Hullin and Reinhard Klein and Thomas Schultz and Angela Yao", doi = "10.2312/vmv.20171253", booktitle = "Vision, Modeling & Visualization", pages = "1--8", keywords = "Computing methodologies, Ray tracing, Image processing, Mesh geometry models", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/kroesl-2017-LiteMaker/", } @article{CORNEL-2017-FRS, title = "Forced Random Sampling: fast generation of importance-guided blue-noise samples", author = "Daniel Cornel and Hiroyuki Sakai and Christian Luksch and Michael Wimmer", year = "2017", abstract = "In computer graphics, stochastic sampling is frequently used to efficiently approximate complex functions and integrals. The error of approximation can be reduced by distributing samples according to an importance function, but cannot be eliminated completely. To avoid visible artifacts, sample distributions are sought to be random, but spatially uniform, which is called blue-noise sampling. The generation of unbiased, importance-guided blue-noise samples is expensive and not feasible for real-time applications. Sampling algorithms for these applications focus on runtime performance at the cost of having weak blue-noise properties. Blue-noise distributions have also been proposed for digital halftoning in the form of precomputed dither matrices. Ordered dithering with such matrices allows to distribute dots with blue-noise properties according to a grayscale image. 
By the nature of ordered dithering, this process can be parallelized easily. We introduce a novel sampling method called forced random sampling that is based on forced random dithering, a variant of ordered dithering with blue noise. By shifting the main computational effort into the generation of a precomputed dither matrix, our sampling method runs efficiently on GPUs and allows real-time importance sampling with blue noise for a finite number of samples. We demonstrate the quality of our method in two different rendering applications.", month = jun, journal = "The Visual Computer", volume = "33", number = "6", issn = "1432-2315", pages = "833--843", keywords = "blue-noise sampling, importance sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/CORNEL-2017-FRS/", } @inproceedings{JAHRMANN-2017-RRTG, title = "Responsive Real-Time Grass Rendering for General 3D Scenes", author = "Klemens Jahrmann and Michael Wimmer", year = "2017", abstract = "Grass plays an important role in most natural environments. Most interactive applications use image-based techniques to approximate fields of grass due to the high geometrical complexity, leading to visual artifacts. In this paper, we propose a grass-rendering technique that is capable of drawing each blade of grass as geometrical object in real time. Accurate culling methods together with an adaptable rendering pipeline ensure that only the blades of grass that are important for the visual appearance of the field of grass are rendered. In addition, we introduce a physical model that is evaluated for each blade of grass. This enables that a blade of grass can react to its environment by calculating the influence of gravity, wind and collisions. A major advantage of our approach is that it can render fields of grass of arbitrary shape and spatial alignment. 
Thus, in contrast to previous work, the blades of grass can be placed on any 3D model, which is not required to be a flat surface or a height map.", month = feb, isbn = "978-1-4503-4886-7", publisher = "ACM", location = "San Francisco, CA", event = "I3D 2017", booktitle = "Proceedings of the 21st ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games", pages = "6:1--6:10", keywords = "real-time rendering, grass rendering, hardware tessellation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/JAHRMANN-2017-RRTG/", } @article{ZOTTI-2017-TSP, title = "The Skyscape Planetarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "Communicating scientific topics in state of the art exhibitions frequently involves the creation of impressive visual installations. In the exhibition “STONEHENGE. –A Hidden Landscape.” in the MAMUZ museum for prehistory in Mistelbach, Lower Austria, LBI ArchPro presents recent research results from the Stonehenge Hidden Landscape Project. A central element of the exhibition which extends over two floors connected with open staircases is an assembly of original-sized replica of several stones of the central trilithon horseshoe which is seen from both floors. In the upper floor, visitors are at eye level with the lintels, and on a huge curved projection screen which extends along the long wall of the hall they can experience the view out over the Sarsen circle into the surrounding landscape. 
This paper describes the planning and creation of this part of the exhibition, and some first impressions after opening.", journal = "Culture and Cosmos", volume = "21", number = "1", issn = "1368-6534", booktitle = "24th SEAC Conference", pages = "269--281", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-TSP/", } @article{mindek-2017-marion, title = "Visualization Multi-Pipeline for Communicating Biology", author = "Peter Mindek and David Kou\v{r}il and Johannes Sorger and David Toloudis and Blair Lyons and Graham Johnson and Eduard Gr\"{o}ller and Ivan Viola", year = "2017", abstract = "We propose a system to facilitate biology communication by developing a pipeline to support the instructional visualization of heterogeneous biological data on heterogeneous user-devices. Discoveries and concepts in biology are typically summarized with illustrations assembled manually from the interpretation and application of heterogenous data. The creation of such illustrations is time consuming, which makes it incompatible with frequent updates to the measured data as new discoveries are made. Illustrations are typically non-interactive, and when an illustration is updated, it still has to reach the user. Our system is designed to overcome these three obstacles. It supports the integration of heterogeneous datasets, reflecting the knowledge that is gained from different data sources in biology. After pre-processing the datasets, the system transforms them into visual representations as inspired by scientific illustrations. As opposed to traditional scientific illustration these representations are generated in real-time - they are interactive. The code generating the visualizations can be embedded in various software environments. To demonstrate this, we implemented both a desktop application and a remote-rendering server in which the pipeline is embedded. 
The remote-rendering server supports multi-threaded rendering and it is able to handle multiple users simultaneously. This scalability to different hardware environments, including multi-GPU setups, makes our system useful for efficient public dissemination of biological discoveries. ", journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "24", number = "1", keywords = "Biological visualization, remote rendering, public dissemination", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-marion/", } @incollection{SCHEIBLAUER-2015-WFC, title = "Workflow for Creating and Rendering Huge Point Models", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2017", booktitle = "Fundamentals of Virtual Archaeology: Theory and Practice", isbn = "9781466594760", note = "(to appear) 15.06.2017", publisher = "A K Peters/CRC Press", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/SCHEIBLAUER-2015-WFC/", } @inproceedings{WIMMER-2016-HARVEST4D, title = "Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds", author = "Tamy Boubekeur and Paolo Cignoni and Elmar Eisemann and Michael Goesele and Reinhard Klein and Stefan Roth and Michael Weinmann and Michael Wimmer", year = "2016", abstract = "The EU FP7 FET-Open project ``Harvest4D: Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds'' deals with the acquisition, processing, and display of dynamic 3D data. Technological progress is offering us a wide-spread availability of sensing devices that deliver different data streams, which can be easily deployed in the real world and produce streams of sampled data with increased density and easier iteration of the sampling process. These data need to be processed and displayed in a new way. 
The Harvest4D project proposes a radical change in acquisition and processing technology: instead of a goal-driven acquisition that determines the devices and sensors, its methods let the sensors and resulting available data determine the acquisition process. A variety of challenging problems need to be solved: huge data amounts, different modalities, varying scales, dynamic, noisy and colorful data. This short contribution presents a selection of the many scientific results produced by Harvest4D. We will focus on those results that could bring a major impact to the Cultural Heritage domain, namely facilitating the acquisition of the sampled data or providing advanced visual analysis capabilities.", month = oct, isbn = "978-3-03868-011-6", publisher = "Eurographics Association", location = "Genova, Italy", event = "GCH 2016", editor = "Chiara Eva Catalano and Livio De Luca", doi = "10.2312/gch.20161378", booktitle = "Proceedings of the 14th Eurographics Workshop on Graphics and Cultural Heritage", pages = "19--22", keywords = "acquisition, 3d scanning, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/WIMMER-2016-HARVEST4D/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances of the acquisition technology over the last years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. 
Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps. In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images. At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization \& Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @article{sorger-2015-litevis, title = "LiteVis: Integrated Visualization for Simulation-Based Decision Support in Lighting Design", author = "Johannes Sorger and Thomas Ortner and Christian Luksch and Michael Schw\"{a}rzler and Eduard Gr\"{o}ller and Harald Piringer", year = "2016", abstract = "State-of-the-art lighting design is based on physically accurate lighting simulations of scenes such as offices. 
The simulation results support lighting designers in the creation of lighting configurations, which must meet contradicting customer objectives regarding quality and price while conforming to industry standards. However, current tools for lighting design impede rapid feedback cycles. On the one side, they decouple analysis and simulation specification. On the other side, they lack capabilities for a detailed comparison of multiple configurations. The primary contribution of this paper is a design study of LiteVis, a system for efficient decision support in lighting design. LiteVis tightly integrates global illumination-based lighting simulation, a spatial representation of the scene, and non-spatial visualizations of parameters and result indicators. This enables an efficient iterative cycle of simulation parametrization and analysis. Specifically, a novel visualization supports decision making by ranking simulated lighting configurations with regard to a weight-based prioritization of objectives that considers both spatial and non-spatial characteristics. In the spatial domain, novel concepts support a detailed comparison of illumination scenarios. We demonstrate LiteVis using a real-world use case and report qualitative feedback of lighting designers. This feedback indicates that LiteVis successfully supports lighting designers to achieve key tasks more efficiently and with greater certainty.", month = jan, journal = "Visualization and Computer Graphics, IEEE Transactions on", volume = "22", number = "1", issn = "1077-2626 ", pages = "290--299", keywords = "Integrating Spatial and Non-Spatial Data", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/sorger-2015-litevis/", } @article{Groeller_2016_P3, title = "State of the Art in Transfer Functions for Direct Volume Rendering", author = "P. Ljung and J. Kr\"{u}ger and Eduard Gr\"{o}ller and Markus Hadwiger and C. 
Hansen and Anders Ynnerman", year = "2016", abstract = "A central topic in scientific visualization is the transfer function (TF) for volume rendering. The TF serves a fundamental role in translating scalar and multivariate data into color and opacity to express and reveal the relevant features present in the data studied. Beyond this core functionality, TFs also serve as a tool for encoding and utilizing domain knowledge and as an expression for visual design of material appearances. TFs also enable interactive volumetric exploration of complex data. The purpose of this state-of-the-art report (STAR) is to provide an overview of research into the various aspects of TFs, which lead to interpretation of the underlying data through the use of meaningful visual representations. The STAR classifies TF research into the following aspects: dimensionality, derived attributes, aggregated attributes, rendering aspects, automation, and user interfaces. The STAR concludes with some interesting research challenges that form the basis of an agenda for the development of next generation TF tools and methodologies.", journal = "Computer Graphics Forum (2016)", volume = "35", number = "3", pages = "669--691", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P3/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. 
The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @article{Jimenez_SSS_2015, title = "Separable Subsurface Scattering", author = "Jorge Jimenez and Karoly Zsolnai-Feh\'{e}r and Adrian Jarabo and Christian Freude and Thomas Auzinger and Xian-Chun Wu and Javier van der Pahlen and Michael Wimmer and Diego Gutierrez", year = "2015", abstract = "In this paper we propose two real-time models for simulating subsurface scattering for a large variety of translucent materials, which need under 0.5 milliseconds per frame to execute. This makes them a practical option for real-time production scenarios. Current state-of-the-art, real-time approaches simulate subsurface light transport by approximating the radially symmetric non-separable diffusion kernel with a sum of separable Gaussians, which requires multiple (up to twelve) 1D convolutions. In this work we relax the requirement of radial symmetry to approximate a 2D diffuse reflectance profile by a single separable kernel. We first show that low-rank approximations based on matrix factorization outperform previous approaches, but they still need several passes to get good results. To solve this, we present two different separable models: the first one yields a high-quality diffusion simulation, while the second one offers an attractive trade-off between physical accuracy and artistic control. Both allow rendering subsurface scattering using only two 1D convolutions, reducing both execution time and memory consumption, while delivering results comparable to techniques with higher cost. 
Using our importance-sampling and jittering strategies, only seven samples per pixel are required. Our methods can be implemented as simple post-processing steps without intrusive changes to existing rendering pipelines. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "6", issn = "1467-8659", pages = "188--197", keywords = "separable, realtime rendering, subsurface scattering, filtering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Jimenez_SSS_2015/", } @article{MATTAUSCH-2015-CHCRT, title = "CHC+RT: Coherent Hierarchical Culling for Ray Tracing", author = "Oliver Mattausch and Jir\'{i} Bittner and Alberto Jaspe and Enrico Gobbetti and Michael Wimmer and Renato Pajarola", year = "2015", abstract = "We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. 
For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 × better than pure shader-based ray tracing.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "537--548", keywords = "occlusion culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MATTAUSCH-2015-CHCRT/", } @techreport{ROEGNER-2015-IBR, title = "Image-based Reprojection Using a Non-local Means Algorithm", author = "Clemens R\"{o}gner and Michael Wimmer and Johannes Hanika and Carsten Dachsbacher", year = "2015", abstract = "We introduce an image-based approach to increase the framerate of image sequences generated with offline rendering algorithms. Our method handles in most cases reflections and refractions better than existing image-based temporal coherence techniques. The proposed technique is also more accurate than some image-based upsampling methods, because it calculates an individual result for each pixel. Our proposed algorithm takes a pair of frames and generates motion vectors for each pixel. This allows for adding a new frame between that pair and thus increasing the framerate. To find the motion vectors, we utilize the non-local means denoising algorithm, which determines the similarity of two pixels by their surrounding and reinterpret that similarity as the likelihood of movement from one pixel to the other. This is similar to what it is done in video encoding to reduce file size, but in our case is done for each pixel individually instead of a block-wise approach, making our technique more accurate. Our method also improves on work in the field of real-time rendering. Such techniques use motion vectors, which are generated through knowledge about the movement of objects within the scene. This can lead to problems when the optical flow in an image sequence is not coherent with the objects movement. Our method avoids those problems. 
Furthermore, previous work has shown that the non-local means algorithm can be optimized for parallel execution, which significantly reduces the time to execute our proposed technique as well. ", month = apr, number = "TR-186-2-15-02", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "optical flow, offline rendering, image reprojection, temporal upsampling, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/ROEGNER-2015-IBR/", } @misc{KREUZER-2015-DPA, title = "Depixelizing Pixel Art in Real-Time", author = "Felix Kreuzer and Johannes Kopf and Michael Wimmer", year = "2015", abstract = "Pixel art was frequently employed in games of the 90s and earlier. On today's large and high-resolution displays, pixel art looks blocky. Recently, an algorithm was introduced to create a smooth, resolution-independent vector representation from pixel art. However, the algorithm is far too slow for interactive use, for example in a game. This poster presents an efficient implementation of the algorithm on the GPU, so that it runs at real-time rates and can be incorporated into current game emulators. 
Extended Abstract: http://dl.acm.org/citation.cfm?id=2721395", month = feb, publisher = "ACM New York, NY, USA", location = "San Francisco, CA", isbn = "978-1-4503-3392-4", event = "19th Symposium on Interactive 3D Graphics and Games", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games", Conference date = "Poster presented at 19th Symposium on Interactive 3D Graphics and Games (2015-02-27--2015-03-01)", note = "130--130", pages = "130--130", keywords = "image processing, depixelizing, pixel art", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/KREUZER-2015-DPA/", } @inproceedings{WEBER-2015-PRA, title = "Parallel Reyes-style Adaptive Subdivision with Bounded Memory Usage", author = "Thomas Weber and Michael Wimmer and John Owens", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. 
This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = feb, isbn = "978-1-4503-3392-4", publisher = "ACM", organization = "ACM", location = "San Francisco, CA", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games (i3D 2015)", pages = "39--45", keywords = "micro-rasterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA/", } @inproceedings{wallner-2015-ModelingRoutinization, title = "Modeling Routinization in Games: An Information Theory Approach", author = "Simon Wallner and Martin Pichlmair and Michael Hecher and Michael Wimmer", year = "2015", abstract = "Routinization is the result of practicing until an action stops being a goal-directed process. This paper formulates a definition of routinization in games based on prior research in the fields of activity theory and practice theory. Routinization is analyzed using the formal model of discrete-time, discrete-space Markov chains and information theory to measure the actual error between the dynamically trained models and the player interaction. Preliminary research supports the hypothesis that Markov chains can be effectively used to model routinization in games. 
A full study design is presented to further explore and verify this hypothesis.", isbn = "978-1-4503-3466-2", series = "CHI PLAY ", publisher = "ACM", location = "London, United Kingdom", booktitle = "Proceedings of the 2015 Annual Symposium on Computer-Human Interaction in Play", pages = "727--732", keywords = "Games, Routinization, Markov Chains, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/wallner-2015-ModelingRoutinization/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. 
Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization \& Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions. 
We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms have been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. 
One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @article{LUKSCH-2014-RTR, title = "Real-Time Rendering of Glossy Materials with Regular Sampling", author = "Christian Luksch and Robert F. Tobler and Thomas M\"{u}hlbacher and Michael Schw\"{a}rzler and Michael Wimmer", year = "2014", abstract = "Rendering view-dependent, glossy surfaces to increase the realism in real-time applications is a computationally complex task, that can only be performed by applying some approximations—especially when immediate changes in the scene in terms of material settings and object placement are a necessity. The use of environment maps is a common approach to this problem, but implicates performance problems due to costly pre-filtering steps or expensive sampling. We, therefore, introduce a regular sampling scheme for environment maps that relies on an efficient MIP-map-based filtering step, and minimizes the number of necessary samples for creating a convincing real-time rendering of glossy BRDF materials.", month = jun, journal = "The Visual Computer", volume = "30", number = "6-8", issn = "0178-2789", pages = "717--727", keywords = "real-time rendering , BRDFs", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/LUKSCH-2014-RTR/", } @inproceedings{charpenay-2014-sgn, title = "Sampling Gabor Noise in the Spatial Domain", author = "Victor Charpenay and Bernhard Steiner and Przemyslaw Musialski", year = "2014", abstract = "Gabor noise is a powerful technique for procedural texture generation. 
Contrary to other types of procedural noise, its sparse convolution aspect makes it easily controllable locally. In this paper, we demonstrate this property by explicitly introducing spatial variations. We do so by linking the sparse convolution process to the parametrization of the underlying surface. Using this approach, it is possible to provide control maps for the parameters in a natural and convenient way. In order to derive intuitive control of the resulting textures, we accomplish a small study of the influence of the parameters of the Gabor kernel with respect to the outcome and we introduce a solution where we bind values such as the frequency or the orientation of the Gabor kernel to a user-provided control map in order to produce novel visual effects.", month = may, isbn = "978-80-223-3601-7", publisher = "ACM Press", location = "Smolenice castle, Slovakia", editor = "Diego Gutierrez", booktitle = "Proceedings of the 30th Spring Conference on Computer Graphics - SCCG ", pages = "79--82", keywords = "texture synthesis, Gabor noise, procedural texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/charpenay-2014-sgn/", } @phdthesis{knecht_2013_RSM, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht", year = "2013", abstract = "Reciprocal shading for mixed reality aims to integrate virtual objects into real environments in a way that they are in the ideal case indistinguishable from real objects. It is therefore an attractive technology for architectural visualizations, product visualizations and for cultural heritage sites, where virtual objects should be seamlessly merged with real ones. Due to the improved performance of recent graphics hardware, real-time global illumination algorithms are feasible for mixed-reality applications, and thus more and more researchers address realistic rendering for mixed reality. 
The goal of this thesis is to provide algorithms which improve the visual plausibility of virtual objects in mixed-reality applications. Our contributions are as follows: First, we present five methods to reconstruct the real surrounding environment. In particular, we present two methods for geometry reconstruction, a method for material estimation at interactive frame rates and two methods to reconstruct the color mapping characteristics of the video see-through camera. Second, we present two methods to improve the visual appearance of virtual objects. The first, called differential instant radiosity, combines differential rendering with a global illumination method called instant radiosity to simulate reciprocal shading effects such as shadowing and indirect illumination between real and virtual objects. The second method focuses on the visual plausible rendering of reflective and refractive objects. The high-frequency lighting effects caused by these objects are also simulated with our method. The third part of this thesis presents two user studies which evaluate the influence of the presented rendering methods on human perception. The first user study measured task performance with respect to the rendering mode, and the second user study was set up as a web survey where participants had to choose which of two presented images, showing mixed-reality scenes, they preferred.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_2013_RSM/", } @inproceedings{birsak-2013-sta, title = "Seamless Texturing of Archaeological Data", author = "Michael Birsak and Przemyslaw Musialski and Murat Arikan and Michael Wimmer", year = "2013", abstract = "In this paper we propose a framework for out-of-core real-time rendering of high-quality textured archaeological data-sets. 
Our input is a triangle mesh and a set of calibrated and registered photographs. Our system performs the actual mapping of the photos to the mesh for high-quality reconstructions, which is a task referred to as the labeling problem. Another problem of such mappings are seams that arise on junctions between triangles that contain information from different photos. These are approached with blending methods, referred to as leveling. We address both problems and introduce a novel labeling approach based on occlusion detection using depth maps that prevents texturing of parts of the model with images that do not contain the expected region. Moreover, we propose an improved approach for seam-leveling that penalizes too large values and helps to keep the resulting colors in a valid range. For high-performance visualization of the 3D models with a huge amount of textures, we make use of virtual texturing, and present an application that generates the needed texture atlas in significantly less time than existing scripts. Finally, we show how the mentioned components are integrated into a visualization application for digitized archaeological site.", month = oct, isbn = "978-1-4799-3168-2", publisher = "IEEE", note = "DOI: 10.1109/DigitalHeritage.2013.6743749", location = "Marseille, France", booktitle = "Digital Heritage International Congress (DigitalHeritage), 2013", pages = "265--272", keywords = "digital cultural heritage, out-of-core real-time rendering, seamless texturing, virtual texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/birsak-2013-sta/", } @inproceedings{Auzinger_2013_NSAA, title = "Non-Sampled Anti-Aliasing", author = "Thomas Auzinger and Przemyslaw Musialski and Reinhold Preiner and Michael Wimmer", year = "2013", abstract = "In this paper we present a parallel method for high-quality edge anti-aliasing. 
In contrast to traditional graphics hardware methods, which rely on massive oversampling to combat aliasing issues in the rasterization process, we evaluate a closed-form solution of the associated prefilter convolution. This enables the use of a wide range of filter functions with arbitrary kernel sizes, as well as general shading methods such as texture mapping or complex illumination models. Due to the use of analytic solutions, our results are exact in the mathematical sense and provide objective ground-truth for other anti-aliasing methods and enable the rigorous comparison of different models and filters. An efficient implementation on general purpose graphics hardware is discussed and several comparisons to existing techniques and of various filter functions are given.", month = sep, isbn = "978-3-905674-51-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Lugano, Switzerland", event = "Vision, Modeling, Visualization (VMV)", editor = "Michael Bronstein and Jean Favre and Kai Hormann", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization (VMV 2013)", pages = "169--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_NSAA/", } @misc{Auzinger_2013_SAR, title = "Sampled and Analytic Rasterization", author = "Thomas Auzinger and Michael Wimmer", year = "2013", abstract = "In this poster we present an overview of exact anti-aliasing (AA) methods in rasterization. In contrast to the common supersampling approaches for visibility AA (e.g. MSAA) or both visibility and shading AA (e.g. SSAA, decoupled sampling), prefiltering provides the mathematically exact solution to the aliasing problem. Instead of averaging a set of supersamples, the input data is convolved with a suitable low-pass filter before sampling is applied. 
Recent work showed that for both visibility signals and simple shading models, a closed-form solution to the convolution integrals can be found. As our main contribution, we present a classification of both sample-based and analytic AA approaches for rasterization and analyse their strengths and weaknesses.", month = sep, series = "VMV ", publisher = "Eurographics Association", location = "Lugano, Switzerland", isbn = "978-3-905674-51-4", event = "VMV 2013", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization", Conference date = "Poster presented at VMV 2013 (2013-09-11--2013-09-13)", note = "223--224", pages = "223--224", keywords = "Anti-Aliasing, Rasterization, Sampling, Supersampling, Prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_SAR/", } @inproceedings{JAHRMANN-2013-IGR, title = "Interactive Grass Rendering Using Real-Time Tessellation", author = "Klemens Jahrmann and Michael Wimmer", year = "2013", abstract = "Grass rendering is needed for many outdoor scenes, but for real-time applications, rendering each blade of grass as geometry has been too expensive so far. This is why grass is most often drawn as a texture mapped onto the ground or grass patches rendered as transparent billboard quads. Recent approaches use geometry for blades that are near the camera and flat geometry for rendering further away. In this paper, we present a technique which is capable of rendering whole grass fields in real time as geometry by exploiting the capabilities of the tessellation shader. Each single blade of grass is rendered as a two-dimensional tessellated quad facing its own random direction. This enables each blade of grass to be influenced by wind and to interact with its environment. 
In order to adapt the grass field to the current scene, special textures are developed which encode on the one hand the density and height of the grass and on the other hand its look and composition.", month = jun, isbn = "978-80-86943-74-9", location = "Plzen, CZ", editor = "Manuel Oliveira and Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "114--122", keywords = "grass rendering, real-time rendering, billboards", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/JAHRMANN-2013-IGR/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. 
A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, closed-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @article{MATTAUSCH-2013-FSBE, title = "Freeform Shadow Boundary Editing", author = "Oliver Mattausch and Takeo Igarashi and Michael Wimmer", year = "2013", abstract = "We present an algorithm for artistically modifying physically based shadows. With our tool, an artist can directly edit the shadow boundaries in the scene in an intuitive fashion similar to freeform curve editing. Our algorithm then makes these shadow edits consistent with respect to varying light directions and scene configurations, by creating a shadow mesh from the new silhouettes. The shadow mesh helps a modified shadow volume algorithm cast shadows that conform to the artistic shadow boundary edits, while providing plausible interaction with dynamic environments, including animation of both characters and light sources. Our algorithm provides significantly more fine-grained local and direct control than previous artistic light editing methods, which makes it simple to adjust the shadows in a scene to reach a particular effect, or to create interesting shadow shapes and shadow animations. 
All cases are handled with a single intuitive interface, be it soft shadows, or (self-)shadows on arbitrary receivers.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "0167-7055", pages = "175--184", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MATTAUSCH-2013-FSBE/", } @inproceedings{SCHWAERZLER-2013-FPCSS, title = "Fast Percentage Closer Soft Shadows using Temporal Coherence", author = "Michael Schw\"{a}rzler and Christian Luksch and Daniel Scherzer and Michael Wimmer", year = "2013", abstract = "We propose a novel way to efficiently calculate soft shadows in real-time applications by overcoming the high computational effort involved with the complex corresponding visibility estimation each frame: We exploit the temporal coherence prevalent in typical scene movement, making the estimation of a new shadow value only necessary whenever regions are newly disoccluded due to camera adjustment, or the shadow situation changes due to object movement. By extending the typical shadow mapping algorithm by an additional light-weight buffer for the tracking of dynamic scene objects, we can robustly and efficiently detect all screen space fragments that need to be updated, including not only the moving objects themselves, but also the soft shadows they cast. 
By applying this strategy to the popular Percentage Closer Soft Shadow algorithm (PCSS), we double rendering performance in scenes with both static and dynamic objects - as prevalent in various 3D game levels - while maintaining the visual quality of the original approach.", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", address = "New York, NY, USA", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "79--86", keywords = "real-time, temporal coherence, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/SCHWAERZLER-2013-FPCSS/", } @article{knecht_martin_2013_ReflRefrObjsMR, title = "Reflective and Refractive Objects for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Christoph Winklhofer and Michael Wimmer", year = "2013", abstract = "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. 
In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.", month = mar, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE VR 2013)", volume = "19", number = "4", issn = "1077-2626", pages = "576--582", keywords = "Mixed Reality, Caustics, Reflections, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_martin_2013_ReflRefrObjsMR/", } @inproceedings{LUKSCH-2013-FLM, title = "Fast Light-Map Computation with Virtual Polygon Lights", author = "Christian Luksch and Robert F. Tobler and Ralf Habel and Michael Schw\"{a}rzler and Michael Wimmer", year = "2013", abstract = "We propose a new method for the fast computation of light maps using a many-light global-illumination solution. A complete scene can be light mapped on the order of seconds to minutes, allowing fast and consistent previews for editing or even generation at loading time. In our method, virtual point lights are clustered into a set of virtual polygon lights, which represent a compact description of the illumination in the scene. The actual light-map generation is performed directly on the GPU. Our approach degrades gracefully, avoiding objectionable artifacts even for very short computation times. 
", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "87--94", keywords = "instant radiosity, global illumination, light-maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/LUKSCH-2013-FLM/", } @incollection{schedl-2013-gP4, title = "Simulating partial occlusion in post-processing depth-of-field methods", author = "David Schedl and Michael Wimmer", year = "2013", abstract = "This chapter describes a method for simulating Depth of Field (DoF). In particular, we investigate the so-called partial occlusion effect: objects near the camera blurred due to DoF are actually semitransparent and therefore result in partially visible background objects. This effect is strongly apparent in miniature- and macro photography and in film making. Games and interactive applications are nowadays becoming more cinematic, including strong DoF effects, and therefore it is important to be able to convincingly approximate the partial-occlusion effect. We show how to do so in this chapter; with the proposed optimizations even in real time.", month = mar, booktitle = "GPU Pro 4: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "9781466567436", note = "to appear", publisher = "A K Peters", keywords = "depth of field, realtime, layers, blurring", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schedl-2013-gP4/", } @phdthesis{Reisner_Irene_2013_R3D, title = "Reconstruction of 3D Models from Images and Point Clouds with Shape Primitives", author = "Irene Reisner-Kollmann", year = "2013", abstract = "3D models are widely used in different applications, including computer games, planning software, applications for training and simulation, and virtual city maps. For many of these applications it is necessary or at least advantageous, if the virtual 3D models are based on real world scenes and objects. 
Manual modeling is reserved for experts as it requires extensive skills. For this reason, it is necessary to provide automatic or semi-automatic, easy-to-use techniques for reconstructing 3D objects. In this thesis we present methods for reconstructing 3D models of man-made scenes. These scenes can often be approximated with a set of geometric primitives, like planes or cylinders. Using geometric primitives leads to light-weight, low-poly 3D models, which are beneficial for efficient storage and post-processing. The applicability of reconstruction algorithms highly depends on the existing input data, the characteristics of the captured objects, and the desired properties of the reconstructed 3D model. For this reason, we present three algorithms that use different input data. It is possible to reconstruct 3D models from just a few photographs or to use a dense point cloud as input. Furthermore, we present techniques to combine information from both, images and point clouds. The image-based reconstruction method is especially designed for environments with homogenous and reflective surfaces where it is difficult to acquire reliable point sets. Therefore we use an interactive application which requires user input. Shape primitives are fit to user-defined segmentations in two or more images. Our point-based algorithms, on the other hand, provide fully automatic reconstructions. Nevertheless, the automatic computations can be enhanced by manual user inputs for generating improved results. The first point-based algorithm is specialized on reconstructing 3D models of buildings and uses unstructured point clouds as input. The point cloud is segmented into planar regions and converted into 3D geometry. The second point-based algorithm additionally supports the reconstruction of interior scenes. While unstructured point clouds are supported as well, this algorithm specifically exploits the redundancy and visibility information provided by a set of range images. 
The data is automatically segmented into geometric primitives. Then the shape boundaries are extracted either automatically or interactively.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_Irene_2013_R3D/", } @inproceedings{EISEMANN-2013-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2013", abstract = "This course provides an overview of efficient, real-time shadow algorithms. It presents the theoretical background but also discusses implementation details for facilitating efficient realizations (hard and soft shadows, volumetric shadows, reconstruction techniques). These elements are of relevance to both experts and practitioners. The course also reviews budget considerations and analyzes performance trade-offs, using examples from various AAA game titles and film previsualization tools. While physical accuracy can sometimes be replaced by plausible shadows, especially for games, film production requires more precision, such as scalable solutions that can deal with highly detailed geometry. The course builds upon earlier SIGGRAPH courses as well as the recent book Real-Time Shadows (A K Peters, 2011) by four of the instructors (due to its success, a second edition is planned for 2014). 
And with two instructors who have worked on AAA game and movie titles, the course presents interesting behind-the-scenes information that illuminates key topics.", booktitle = "ACM SIGGRAPH 2013 Courses", isbn = "978-1-4503-2339-0", location = "Anaheim, CA", publisher = "ACM", pages = "18:1--18:54", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/EISEMANN-2013-ERT/", } @article{SCHERZER-2012-TCM, title = "Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2012", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the speed gain is easily counteracted by increasingly complex and sophisticated shading computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit temporal coherence (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this survey, we investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. 
These methods not only allow incorporating more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a general approach, image-space reprojection, with several implementation algorithms that facilitate reusing shading information across adjacent frames. We also discuss data-reuse quality and performance related to reprojection techniques. Finally, in the second half of this survey, we demonstrate various applications that exploit TC in real-time rendering. ", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "31", pages = "2378--2408", keywords = "remote rendering; sampling, perception-based rendering, occlusion culling, non-photo-realistic rendering, level-of-detail, large data visualization, image-based rendering, global illumination, frame interpolation, anti-aliasing, shadows, streaming, temporal coherence, upsampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHERZER-2012-TCM/", } @inproceedings{SCHWAERZLER-2012-FAS, title = "Fast Accurate Soft Shadows with Adaptive Light Source Sampling", author = "Michael Schw\"{a}rzler and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Physically accurate soft shadows in 3D applications can be simulated by taking multiple samples from all over the area light source and accumulating them. Due to the unpredictability of the size of the penumbra regions, the required sampling density has to be high in order to guarantee smooth shadow transitions in all cases. Hence, several hundreds of shadow maps have to be evaluated in any scene configuration, making the process computationally expensive. 
Thus, we suggest an adaptive light source subdivision approach to select the sampling points adaptively. The main idea is to start with a few samples on the area light, evaluating their differences using hardware occlusion queries, and adding more sampling points if necessary. Our method is capable of selecting and rendering only the samples which contribute to an improved shadow quality, and hence generate shadows of comparable quality and accuracy. Even though additional calculation time is needed for the comparison step, this method saves valuable rendering time and achieves interactive to real-time frame rates in many cases where a brute force sampling method does not. ", month = nov, isbn = "978-3-905673-95-1", publisher = "Eurographics Association", location = "Magdeburg, Germany", booktitle = "Proceedings of the 17th International Workshop on Vision, Modeling, and Visualization (VMV 2012)", pages = "39--46", keywords = "soft shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHWAERZLER-2012-FAS/", } @article{knecht_martin_2012_RSMR, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Michael Wimmer", year = "2012", abstract = "In this paper we present a novel plausible rendering method for mixed reality systems, which is useful for many real-life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates. The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. 
In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. In addition, we avoid artifacts like double shadows or inconsistent color bleeding which appear in previous work. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. The scene gets illuminated by virtual point lights, which use imperfect shadow maps to calculate visibility. A sufficiently fast scene reconstruction is done at run-time with Microsoft's Kinect sensor. Thus a time-consuming manual pre-modeling step of the real scene is not necessary. Our results show that the presented method highly improves the illusion in mixed-reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = nov, issn = "0097-8493", journal = "Computers & Graphics", number = "7", volume = "36", pages = "846--856", keywords = "Differential rendering, Reconstruction, Instant radiosity, Microsoft Kinect, Real-time global illumination, Mixed reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_RSMR/", } @misc{Auzinger_2012_GeigerCam, title = "GeigerCam: Measuring Radioactivity with Webcams", author = "Thomas Auzinger and Ralf Habel and Andreas Musilek and Dieter Hainz and Michael Wimmer", year = "2012", abstract = "Measuring radioactivity is almost exclusively a professional task in the realms of science, industry and defense, but recent events spur the interest in low-cost consumer detection devices. We show that by using image processing techniques, a current, only slightly modified, off-the-shelf HD webcam can be used to measure alpha, beta as well as gamma radiation. In contrast to dedicated measurement devices such as Geiger counters, our framework can classify the type of radiation and can differentiate between various kinds of radioactive materials. 
By optically insulating the camera's imaging sensor, recordings at extreme exposure and gain values are possible, and the partly very faint signals detectable. The camera is set to the longest exposure time possible and to a very high gain to detect even faint signals. During measurements, GPU assisted real-time image processing of the direct video feed is used to treat the remaining noise by tracking the noise spectrum per pixel, incorporating not only spatial but also temporal variations due to temperature changes and spontaneous emissions. A confidence value per pixel based on event probabilities is calculated to identify potentially hit pixels. Finally, we use morphological clustering to group pixels into particle impact events and analyze their energies. Our approach results in a simple device that can be operated on any computer and costs only \$20-30, an order of magnitude cheaper than entry-level nuclear radiation detectors.", month = aug, publisher = "ACM", location = "Los Angeles, CA", address = "New York, NY, USA", isbn = "978-1-4503-1682-8", event = "ACM SIGGRAPH 2012", editor = "Dan Wexler", booktitle = "ACM SIGGRAPH 2012 Posters", Conference date = "Poster presented at ACM SIGGRAPH 2012 (2012-08-05--2012-08-09)", note = "40:1--40:1", pages = "40:1--40:1", keywords = "radioactivity, webcam, measurement", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_GeigerCam/", } @article{knecht_martin_2012_BRDFEstimation, title = "Interactive BRDF Estimation for Mixed-Reality Applications", author = "Martin Knecht and Georg Tanzmeister and Christoph Traxler and Michael Wimmer", year = "2012", abstract = "Recent methods in augmented reality allow simulating mutual light interactions between real and virtual objects. These methods are able to embed virtual objects in a more sophisticated way than previous methods. 
However, their main drawback is that they need a virtual representation of the real scene to be augmented in the form of geometry and material properties. In the past, this representation had to be modeled in advance, which is very time consuming and only allows for static scenes. We propose a method that reconstructs the surrounding environment and estimates its Bidirectional Reflectance Distribution Function (BRDF) properties at runtime without any preprocessing. By using the Microsoft Kinect sensor and an optimized hybrid CPU & GPU-based BRDF estimation method, we are able to achieve interactive frame rates. The proposed method was integrated into a differential instant radiosity rendering system to demonstrate its feasibility.", month = jun, journal = "Journal of WSCG", volume = "20", number = "1", issn = "1213-6972", pages = "47--56", keywords = "Augmented Reality, BRDF Estimation, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_BRDFEstimation/", } @article{MATTAUSCH-2012-TIS, title = "Tessellation-Independent Smooth Shadow Boundaries", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer and Takeo Igarashi", year = "2012", abstract = "We propose an efficient and light-weight solution for rendering smooth shadow boundaries that do not reveal the tessellation of the shadow-casting geometry. Our algorithm reconstructs the smooth contours of the underlying mesh and then extrudes shadow volumes from the smooth silhouettes to render the shadows. For this purpose we propose an improved silhouette reconstruction using the vertex normals of the underlying smooth mesh. Then our method subdivides the silhouette loops until the contours are sufficiently smooth and project to smooth shadow boundaries. This approach decouples the shadow smoothness from the tessellation of the geometry and can be used to maintain equally high shadow quality for multiple LOD levels. 
It causes only a minimal change to the fill rate, which is the well-known bottleneck of shadow volumes, and hence has only small overhead. ", month = jun, journal = "Computer Graphics Forum", volume = "31", number = "4", issn = "1467-8659", pages = "1465--1470", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-TIS/", } @article{schedl-2012-dof, title = "A layered depth-of-field method for solving partial occlusion", author = "David Schedl and Michael Wimmer", year = "2012", abstract = "Depth of field (DoF) represents a distance range around a focal plane, where objects on an image are crisp. DoF is one of the effects which significantly contributes to the photorealism of images and therefore is often simulated in rendered images. Various methods for simulating DoF have been proposed so far, but little tackle the issue of partial occlusion: Blurry objects near the camera are semi-transparent and result in partially visible background objects. This effect is strongly apparent in miniature and macro photography. In this work a DoF method is presented which simulates partial occlusion. The contribution of this work is a layered method where the scene is rendered into layers. Blurring is done efficiently with recursive Gaussian filters. Due to the usage of Gaussian filters big artifact-free blurring radii can be simulated at reasonable costs.", month = jun, journal = "Journal of WSCG", volume = "20", number = "3", issn = "1213-6972", pages = "239--246", keywords = "realtime, rendering, depth-of-field, layers, depth peeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/schedl-2012-dof/", } @article{Habel_2012_PSP, title = "Practical Spectral Photography", author = "Ralf Habel and Michael Kudenov and Michael Wimmer", year = "2012", abstract = "We introduce a low-cost and compact spectral imaging camera design based on unmodified consumer cameras and a custom camera objective. 
The device can be used in a high-resolution configuration that measures the spectrum of a column of an imaged scene with up to 0.8 nm spectral resolution, rivalling commercial non-imaging spectrometers, and a mid-resolution hyperspectral mode that allows the spectral measurement of a whole image, with up to 5 nm spectral resolution and 120x120 spatial resolution. We develop the necessary calibration methods based on halogen/fluorescent lamps and laser pointers to acquire all necessary information about the optical system. We also derive the mathematical methods to interpret and reconstruct spectra directly from the Bayer array images of a standard RGGB camera. This objective design introduces accurate spectral remote sensing to computational photography, with numerous applications in color theory, colorimetry, vision and rendering, making the acquisition of a spectral image as simple as taking a high-dynamic-range image.", month = may, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "449--458", keywords = "Computational Photography, Spectroscopy, Computed Tomography Imaging Spectrometer, Practical", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Habel_2012_PSP/", } @article{Auzinger_2012_AAA, title = "Analytic Anti-Aliasing of Linear Functions on Polytopes", author = "Thomas Auzinger and Michael Guthe and Stefan Jeschke", year = "2012", abstract = "This paper presents an analytic formulation for anti-aliased sampling of 2D polygons and 3D polyhedra. Our framework allows the exact evaluation of the convolution integral with a linear function defined on the polytopes. The filter is a spherically symmetric polynomial of any order, supporting approximations to refined variants such as the Mitchell-Netravali filter family. This enables high-quality rasterization of triangles and tetrahedra with linearly interpolated vertex values to regular and non-regular grids. 
A closed form solution of the convolution is presented and an efficient implementation on the GPU using DirectX and CUDA C is described.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "335--344", keywords = "Polytope, Filter Design, Analytic Anti-Aliasing, Sampling, Integral Formula, Spherically Symmetric Filter, CUDA, Closed Form Solution, 2D 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_AAA/", } @inproceedings{fink-2012-cg1, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2012", abstract = "Shaders are a fundamental pattern of the modern graphics pipeline. This paper presents a syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. We also show how to create attractive course materials by using COLLADA, an open standard for 3D content exchange.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", issn = "1017-4656", event = "Eurographics 2012", editor = "Giovanni Gallo and Beatriz Sousa Santos", booktitle = "Eurographics 2012 -- Education Papers", pages = "73--80", keywords = "Education, Collada, Java, Introductory Computer Graphics, Software Rasterizer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/fink-2012-cg1/", } @techreport{TR-186-2-12-01, title = "Interactive Screen-Space Triangulation for High-Quality Rendering of Point Clouds", author = "Reinhold Preiner and Michael Wimmer", year = "2012", abstract = "This technical report documents work that is a precursor to the Auto Splatting technique. 
We present a rendering method that reconstructs high quality images from unorganized colored point data. While previous real-time image reconstruction approaches for point clouds make use of preprocessed data like point radii or normal estimations, our algorithm only requires position and color data as input and produces a reconstructed color image, normal map and depth map which can instantly be used to apply further deferred lighting passes. Our method performs a world-space neighbor search and a subsequent normal estimation in screen-space, and uses the geometry shader to triangulate the color, normal and depth information of the points. To achieve correct visibility and closed surfaces in the projected image a temporal coherence approach reuses triangulated depth information and provides adaptive neighbor search radii. Our algorithm is especially suitable for insitu high-quality visualization of big datasets like 3D-scans, making otherwise time-consuming preprocessing steps to reconstruct surface normals or point radii dispensable.", month = apr, number = "TR-186-2-12-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "screen triangulation, point rendering, nearest neighbors, screen-space, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/TR-186-2-12-01/", } @incollection{MATTAUSCH-2012-EOV, title = "Efficient Online Visibility for Shadow Maps", author = "Oliver Mattausch and Jir\'{i} Bittner and Ari Silvennoinen and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Standard online occlusion culling is able to vastly improve the rasterization performance of walkthrough applications by identifying large parts of the scene as invisible from the camera and rendering only the visible geometry. 
However, it is of little use for the acceleration of shadow map generation (i.e., rasterizing the scene from the light view [Williams 78]), so that typically a high percentage of the geometry will be visible when rendering shadow maps. For example, in outdoor scenes typical viewpoints are near the ground and therefore have significant occlusion, while light viewpoints are higher up and see most of the geometry. Our algorithm remedies this situation by quickly detecting and culling the geometry that does not contribute to the shadow in the final image.", month = feb, booktitle = "GPU Pro 3: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "978-1439887820", publisher = "CRC Press", keywords = "shadow maps, visibility culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-EOV/", } @inproceedings{EISEMANN-2012-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2012", abstract = "This course is a resource for applying efficient, real-time shadow algorithms. It builds on a solid foundation (previous courses at SIGGRAPH Asia 2009 and Eurographics 2010, including comprehensive course notes) and the 2011 book Real-Time Shadows (AK Peters) written by four of the presenters. The book is a compendium of many topics in the realm of shadow computation.", booktitle = "ACM SIGGRAPH 2012 Courses", isbn = "978-1-4503-1678-1", location = "Los Angeles, CA", publisher = "ACM", pages = "18:1--18:53", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/EISEMANN-2012-ERT/", } @article{bernhard-2011-bmtf, title = "Bi-modal Task Facilitation in a Virtual Traffic Scenario through Spatialized Sound Rendering", author = "Matthias Bernhard and Karl Grosse and Michael Wimmer", year = "2011", abstract = "Audio rendering is generally used to increase the realism of Virtual Environments (VE). 
In addition, audio rendering may also improve the performance in specific tasks carried out in interactive applications such as games or simulators. In this paper we investigate the effect of the quality of sound rendering on task performance in a task which is inherently vision dominated. The task is a virtual traffic gap crossing scenario with two elements: first, to discriminate crossable and uncrossable gaps in oncoming traffic, and second, to find the right timing to start crossing the street without an accident. A study was carried out with 48 participants in an immersive Virtual Environment setup with a large screen and headphones. Participants were grouped into three different conditions. In the first condition, spatialized audio rendering with head-related transfer function (HRTF) filtering was used. The second group was tested with conventional stereo rendering, and the remaining group ran the experiment in a mute condition. Our results give a clear evidence that spatialized audio improves task performance compared to the unimodal mute condition. Since all task-relevant information was in the participants' field-of-view, we conclude that an enhancement of task performance results from a bimodal advantage due to the integration of visual and auditory spatial cues.", month = nov, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", note = "Article No. 
24", number = "4", volume = "8", pages = "1--22", keywords = "bimodal task facilitation, pedestrian safety, virtual environments, audio-visual perception, head related transfer functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-bmtf/", } @inproceedings{KUE11, title = "BRDF approximation and estimation for Augmented Reality", author = "Patrick K\"{u}htreiber and Martin Knecht and Christoph Traxler", year = "2011", abstract = "In Augmented Reality applications it is important to have a good description of the surfaces of real objects if a consistent shading between real and virtual object is required. If such a description of a surface is not available it has to be estimated or approximated. In our paper we will present certain methods that deal with real-time bi-directional reflectance distribution function (BRDF) approximation in augmented reality. Of course an important thing to discuss is whether the applications we present all work in real-time and compute good (and real) looking results. There are different methods on how to achieve this goal. All of the methods we are going to present work via image based lighting and some require a 3D polygonal mesh representation of the object whose BRDF shall be approximated. 
Some methods estimate the BRDF parameters via error values and provide results at each iteration.", month = oct, organization = "``Gheorghe Asachi'' Technical University of Iasi, Faculty of Automatic Control and Computer Engineering", location = "Sinaia, Romania", booktitle = "15th International Conference on System Theory, Control and Computing", pages = "318--324", keywords = "Mixed Reality, BRDF Estimation", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/KUE11/", } @inproceedings{knecht-2011-CBCM, title = "Adaptive Camera-Based Color Mapping For Mixed-Reality Applications", author = "Martin Knecht and Christoph Traxler and Werner Purgathofer and Michael Wimmer", year = "2011", abstract = "We present a novel adaptive color mapping method for virtual objects in mixed-reality environments. In several mixed-reality applications, added virtual objects should be visually indistinguishable from real objects. Recent mixed-reality methods use global-illumination algorithms to approach this goal. However, simulating the light distribution is not enough for visually plausible images. Since the observing camera has its very own transfer function from real-world radiance values to RGB colors, virtual objects look artificial just because their rendered colors do not match with those of the camera. Our approach combines an on-line camera characterization method with a heuristic to map colors of virtual objects to colors as they would be seen by the observing camera. Previous tone-mapping functions were not designed for use in mixed-reality systems and thus did not take the camera-specific behavior into account. In contrast, our method takes the camera into account and thus can also handle changes of its parameters during runtime. 
The results show that virtual objects look visually more plausible than by just applying tone-mapping operators.", month = oct, isbn = "978-1-4577-2183-0", publisher = "IEEE/IET Electronic Library (IEL), IEEE-Wiley eBooks Library, VDE VERLAG Conference Proceedings", note = "E-ISBN: 978-1-4577-2184-7", location = "Basel, Switzerland", booktitle = "Proceedings of the 2011 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2011)", pages = "165--168", keywords = "Color Matching, Differential Rendering, Mixed Reality, Tone Mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht-2011-CBCM/", } @article{jeschke-2011-est, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texture, and we provide intuitive manual controls to edit the parameters of the Gabor noise. 
Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling and hatching applications.", month = apr, journal = "Computer Graphics Forum", volume = "30", number = "2", note = "This paper won the 2nd best paper award at Eurographics 2011.", issn = "0167-7055", pages = "523--532", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-est/", } @inproceedings{knecht_martin-2011-FPSPAR, title = "A Framework For Perceptual Studies In Photorealistic Augmented Reality", author = "Martin Knecht and Andreas D\"{u}nser and Christoph Traxler and Michael Wimmer and Raphael Grasset", year = "2011", abstract = "In photorealistic augmented reality virtual objects are integrated in the real world in a seamless visual manner. To obtain a perfect visual augmentation these objects must be rendered indistinguishable from real objects and should be perceived as such. In this paper we propose a research test bed framework to study the different unresolved perceptual issues in photorealistic augmented reality and its application to different disciplines. 
The framework computes a global illumination approximation in real-time and therefore leverages a new class of experimental research topics.", month = mar, location = "Singapore", editor = "Frank Steinicke and Pete Willemsen", booktitle = "Proceedings of the 3rd IEEE VR 2011 Workshop on Perceptual Illusions in Virtual Environments", pages = "27--32", keywords = "photorealistic augmented reality, real-time global illumination, human perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht_martin-2011-FPSPAR/", } @incollection{Habel_LSN_2011, title = "Level-of-Detail and Streaming Optimized Irradiance Normal Mapping", author = "Ralf Habel and Anders Nilsson and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Habel_LSN_2011/", } @article{scherzer2011d, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2011", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~cite{HLHS03}, while the last survey for general shadow generation dates back to 1990~cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. 
Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the readers with the means to choose the shadow algorithm best suited to their needs. ", month = feb, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "30", pages = "169--186", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011d/", } @incollection{2011scherzerE, title = "Fast Soft Shadows with Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/2011scherzerE/", } @inproceedings{bittner-2011-scc, title = "Shadow Caster Culling for Efficient Shadow Mapping", author = "Jir\'{i} Bittner and Oliver Mattausch and Ari Silvennoinen and Michael Wimmer", year = "2011", abstract = "We propose a novel method for efficient construction of shadow maps by culling shadow casters which do not contribute to visible shadows. The method uses a mask of potential shadow receivers to cull shadow casters using a hierarchical occlusion culling algorithm. We propose several variants of the receiver mask implementations with different culling efficiency and computational costs. 
For scenes with statically focused shadow maps we designed an efficient strategy to incrementally update the shadow map, which comes close to the rendering performance for unshadowed scenes. We show that our method achieves 3x-10x speedup for rendering large city like scenes and 1.5x-2x speedup for rendering an actual game scene.", month = feb, isbn = "978-1-4503-0565-5", publisher = "ACM", organization = "ACM SIGGRAPH", location = "San Francisco", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2011", pages = "81--88", keywords = "occlusion culling, shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bittner-2011-scc/", } @incollection{matt2011, title = "Temporal Screen-Space Ambient Occlusion", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", keywords = "ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/matt2011/", } @inproceedings{scherzer2011c, title = "A Survey on Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2011", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the processing gain is counteracted to a high degree by increasingly complex and sophisticated pixel computations. 
For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit emph{temporal coherence} (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this STAR, we will investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow us to incorporate more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a key data structure, the so-called emph{reprojection cache}, with several supporting algorithms that facilitate reusing shading information from previous frames. Its usefulness is illustrated in the second part of the STAR, where we present various applications. We illustrate how expensive pixel shaders, multi-pass shading effects, stereo rendering, shader antialiasing, shadow casting, and global-illumination effects can profit from pixel reuse. Furthermore, we will see that optimizations for visibility culling and object-space global illumination can also be achieved by exploiting TC. This STAR enables the reader to gain an overview of many techniques in this cutting-edge field and provides many insights into algorithmic choices and implementation issues. 
It delivers working knowledge of how various existing techniques are optimized via data reuse. Another goal of this STAR is to inspire the reader and to raise awareness for temporal coherence as an elegant tool that could be a crucial component to satisfy the recent need for higher resolution and more detailed content. ", booktitle = "EUROGRAPHICS 2011 State of the Art Reports", location = "Llandudno UK", publisher = "Eurographics Association", pages = "101--126", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011c/", } @article{mattausch-2010-tao, title = "High-Quality Screen-Space Ambient Occlusion using Temporal Coherence", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "Ambient occlusion is a cheap but effective approximation of global illumination. Recently, screen-space ambient occlusion (SSAO) methods, which sample the frame buffer as a discretization of the scene geometry, have become very popular for real-time rendering. We present temporal SSAO (TSSAO), a new algorithm which exploits temporal coherence to produce high-quality ambient occlusion in real time. Compared to conventional SSAO, our method reduces both noise as well as blurring artifacts due to strong spatial filtering, faithfully representing fine-grained geometric structures. Our algorithm caches and reuses previously computed SSAO samples, and adaptively applies more samples and spatial filtering only in regions that do not yet have enough information available from previous frames. 
The method works well for both static and dynamic scenes.", month = dec, issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "29", pages = "2492--2503", keywords = "temporal coherence, ambient occlusion, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/mattausch-2010-tao/", } @inproceedings{scherzer2010e, title = "An Overview of Temporal Coherence Methods in Real-Time Rendering ", author = "Daniel Scherzer", year = "2010", abstract = "Most of the power of modern graphics cards is put into the acceleration of shading tasks because here lies the major bottleneck for most sophisticated real-time algorithms. By using temporal coherence, i.e. reusing shading information from a previous frame, this problem can be alleviated. This paper gives an overview of the concepts of temporal coherence in real-time rendering and should give the reader the working practical and theoretical knowledge to exploit temporal coherence in his own algorithms. ", month = oct, organization = "IEEE", location = "Sinaia, Romania", issn = "2068-0465", booktitle = " 14th International Conference on System Theory and Control 2010", pages = "497--502", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010e/", } @inproceedings{Habel_RAV_2010, title = "Real-Time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2010", abstract = "Vegetation in all its different forms is almost always part of a scenery, be it fully natural or urban. Even in completely cultivated areas or indoor scenes, though not very dominant, potted plants or alley trees and patches of grass are usually part of a surrounding. Rendering and animating vegetation is substantially different from rendering and animating geometry with less geometric complexity such as houses, manufactured products or other objects consisting of largely connected surfaces. 
In this paper we will discuss several challenges posed by vegetation in real-time applications such as computer games and virtual reality applications and show efficient solutions to the problems.", month = oct, location = "Sinaia", issn = "2068-0465", booktitle = "14th International Conference on System Theory and Control (Joint conference of SINTES14, SACCS10, SIMSIS14)", pages = "231--236", keywords = "Animation, Real-Time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel_RAV_2010/", } @inproceedings{knecht_martin_2010_DIR, title = "Differential Instant Radiosity for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "In this paper we present a novel plausible realistic rendering method for mixed reality systems, which is useful for many real life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates (20-30fps). The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. We describe a new method to assign virtual point lights to multiple primary light sources, which can be real or virtual. We use imperfect shadow maps for calculating illumination from virtual point lights and have significantly improved their accuracy by taking the surface normal of a shadow caster into account. 
Temporal coherence is exploited to reduce flickering artifacts. Our results show that the presented method highly improves the illusion in mixed reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = oct, note = "Best Paper Award!", location = "Seoul", booktitle = "Proceedings of the 2010 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2010)", pages = "99--107", keywords = "Instant Radiosity, Differential Rendering, Real-time Global Illumination, Mixed Reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/knecht_martin_2010_DIR/", } @article{bagar2010, title = "A Layered Particle-Based Fluid Model for Real-Time Rendering of Water", author = "Florian Bagar and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "We present a physically based real-time water simulation and rendering method that brings volumetric foam to the real-time domain, significantly increasing the realism of dynamic fluids. We do this by combining a particle-based fluid model that is capable of accounting for the formation of foam with a layered rendering approach that is able to account for the volumetric properties of water and foam. Foam formation is simulated through Weber number thresholding. For rendering, we approximate the resulting water and foam volumes by storing their respective boundary surfaces in depth maps. This allows us to calculate the attenuation of light rays that pass through these volumes very efficiently. 
We also introduce an adaptive curvature flow filter that produces consistent fluid surfaces from particles independent of the viewing distance.", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2010)", volume = "29", number = "4", issn = "0167-7055", pages = "1383--1389", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bagar2010/", } @phdthesis{Mattausch-2010-vcr, title = "Visibility Computations for Real-Time Rendering in General 3D Environments", author = "Oliver Mattausch", year = "2010", abstract = "Visibility computations are essential operations in computer graphics, which are required for rendering acceleration in the form of visibility culling, as well as for computing realistic lighting. Visibility culling, which is the main focus of this thesis, aims to provide output sensitivity by sending only visible primitives to the hardware. Regardless of the rapid development of graphics hardware, it is of crucial importance for many applications like game development or architectural design, as the demands on the hardware regarding scene complexity increase accordingly. Solving the visibility problem has been an important research topic for many years, and countless methods have been proposed. Interestingly, there are still open research problems up to this day, and many algorithms are either impractical or only usable for specific scene configurations, preventing their widespread use. Visibility culling algorithms can be separated into algorithms for visibility preprocessing and online occlusion culling. Visibility computations are also required to solve complex lighting interactions in the scene, ranging from soft and hard shadows to ambient occlusion and full fledged global illumination. It is a big challenge to answer hundreds or thousands of visibility queries within a fraction of a second in order to reach real-time frame rates, which is one goal that we want to achieve in this thesis. 
The contribution of this thesis are four novel algorithms that provide solutions for efficient visibility interactions in order to achieve high-quality output-sensitive real-time rendering, and are general in the sense that they work with any kind of 3D scene configuration. First we present two methods dealing with the issue of automatically partitioning view space and object space into useful entities that are optimal for the subsequent visibility computations. Amazingly, this problem area was mostly ignored despite its importance, and view cells are mostly tweaked by hand in practice in order to reach optimal performance – a very time consuming task. The first algorithm specifically deals with the creation of an optimal view space partition into view cells using a cost heuristics and sparse visibility sampling. The second algorithm extends this approach to optimize both view space subdivision and object space subdivision simultaneously. Next we present a hierarchical online culling algorithm that eliminates most limitations of previous approaches, and is rendering engine friendly in the sense that it allows easy integration and efficient material sorting. It reduces the main problem of previous algorithms – the overhead due to many costly state changes and redundant hardware occlusion queries – to a minimum, obtaining up to three times speedup over previous work. 
At last we present an ambient occlusion algorithm which works in screen space, and show that high-quality shading with effectively hundreds of samples per pixel is possible in real time for both static and dynamic scenes by utilizing temporal coherence to reuse samples from previous frames.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "3D rendering, real-time rendering, ambient occlusion, visibility, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Mattausch-2010-vcr/", } @inproceedings{Habel-2010-EIN, title = "Efficient Irradiance Normal Mapping", author = "Ralf Habel and Michael Wimmer", year = "2010", abstract = "Irradiance normal mapping is a method to combine two popular techniques, light mapping and normal mapping, and is used in games such as Half-Life 2 or Halo 3. This combination allows using low-resolution light caching on surfaces with only a few coefficients which are evaluated by normal maps to render spatial high-frequency changes in the lighting. Though there are dedicated bases for this purpose such as the Half-Life 2 basis, higher order basis functions such as quadratic Spherical Harmonics are needed for an accurate representation. However, a full spherical basis is not needed since the irradiance is stored on the surface of a scene. In order to represent the irradiance signals efficiently, we propose a novel polynomial, hemispherically orthonormal basis function set that is specifically designed to carry a directional irradiance signal on the hemisphere and which makes optimal use of the number of coefficients. To compare our results with previous work, we analyze the relations and attributes of previously proposed basis systems and show that 6 coefficients are sufficient to accurately represent an irradiance signal on the hemisphere. 
To create the necessary irradiance signals, we use Spherical Harmonics as an intermediate basis due to their fast filtering capabilities.", month = feb, isbn = "978-1-60558-939-8", publisher = "ACM", location = "Washington D.C.", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2010", pages = "189--195", keywords = "irradiance, real-time rendering, normal mapping, lightmap", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel-2010-EIN/", } @article{preiner_2010_GIPC, title = "Real-Time Global Illumination for Point Cloud Scenes", author = "Reinhold Preiner and Michael Wimmer", year = "2010", abstract = "In this paper we present a real-time global illumination approach for illuminating scenes containing large point clouds. Our approach is based on the distribution of Virtual Point Lights (VPLs) in the scene, which are then used for the indirect illumination of the visible surfaces, using Imperfect Shadow Maps for visibility calculation of the VPLs. We are able to render multiple indirect light bounces, where each light bounce accounts for the transport of both the diffuse and the specular fraction of the reflected light.", journal = "Computer Graphics & Geometry", number = "1", volume = "12", pages = "2--16", keywords = "virtual point lights, imperfect shadow maps, point rendering, point clouds, global illumination, VPL, ISM", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/preiner_2010_GIPC/", } @inproceedings{scherzer_2010a, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2010", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. 
So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~\cite{HLHS03} and the last survey for general shadow generation dates back to 1990~\cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. %Real-time research was always tempted to bring global lighting techniques into the real-time domain. One of the most popular adaptations in this respect are hard shadows. It is therefore not surprising that real-time hard shadow generation has been one of the most active areas in research in recent years. But what is surprising is that the last state-of-the-art report that encompassed this field dates back to 1990~\cite{Woo:1990:SSA}, where only the beginnings of this field were explored. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive from this a comprehensive classification of the existing methods. 
We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the reader with the means to choose the shadow algorithm best suited to her needs.", booktitle = "EUROGRAPHICS 2010 State of the Art Reports", location = "Norrk\"{o}ping, Sweden", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer_2010a/", } @inproceedings{scherzer2010d, title = "Exploiting Temporal Coherence in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch", year = "2010", abstract = "Temporal coherence (TC), the correlation of contents between adjacent rendered frames, exists across a wide range of scenes and motion types in practical real-time rendering. By taking advantage of TC, we can save redundant computation and improve the performance of many rendering tasks significantly with only a marginal decrease in quality. This not only allows us to incorporate more computationally intensive shading effects to existing applications, but also offers exciting opportunities of extending high-end graphics applications to reach lower-spec consumer-level hardware. This course aims to introduce participants to the concepts of TC, and provide them the working practical and theoretical knowledge to exploit TC in a variety of shading tasks. It begins with an introduction of the general notion of TC in rendering, as well as an overview of the recent developments in this field. Then it focuses on a key data structure - the reverse reprojection cache, which is the foundation of many applications. The course proceeds with a number of extensions of the basic algorithm for assisting in multi-pass shading effects, shader antialiasing, casting shadows and global-illumination effects. Finally, several more general coherence topics beyond pixel reuse are introduced, including visibility culling optimization and object-space global-illumination approximations. 
For all the major techniques and applications covered, implementation and practical issues involved in development are addressed in detail. In general, we emphasize ``know how'' and the guidelines related to algorithm choices. After the course, participants are encouraged to find and utilize TC in their own applications and rapidly adapt existing algorithms to meet their requirements.", booktitle = "SIGGRAPH Asia 2010 Courses", location = "Seoul, South Korea", keywords = "shadows, temporal coherence, real-time, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010d/", } @inproceedings{SSMW09, title = "Real-Time Soft Shadows Using Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "A vast amount of soft shadow map algorithms have been presented in recent years. Most use a single sample hard shadow map together with some clever filtering technique to calculate perceptually or even physically plausible soft shadows. On the other hand there is the class of much slower algorithms that calculate physically correct soft shadows by taking and combining many samples of the light. In this paper we present a new soft shadow method that combines the benefits of these approaches. It samples the light source over multiple frames instead of a single frame, creating only a single shadow map each frame. Where temporal coherence is low we use spatial filtering to estimate additional samples to create correct and very fast soft shadows. 
", month = dec, isbn = "978-3642103308", series = "Lecture Notes in Computer Science", publisher = "Springer", location = "Las Vegas, Nevada, USA", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Kuno, Y.; Wang, J.; Pajarola, R.; Lindstrom, P.; Hinkenjann, A.; Encarnacao, M.; Silva, C.; Coming, D.", booktitle = "Advances in Visual Computing: 5th International Symposium on Visual Computing (ISVC 2009)", pages = "13--24", keywords = "real-time rendering, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SSMW09/", } @article{jeschke-09-rendering, title = "Rendering Surface Details with Diffusion Curves", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. This paper extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically reallocates texture space so that object parts that appear large on screen get more texture for increased detail. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. 
Our results show high quality renderings of diffusion curve textures, displacements, and geometry images, all rendered interactively.", month = dec, journal = "ACM Transactions on Graphics (SIGGRAPH Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Geometry images, Displacement mapping, Diffusion curves, Line and Curve rendering ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-rendering/", } @article{BITTNER-2009-AGVS, title = "Adaptive Global Visibility Sampling", author = "Jir\'{i} Bittner and Oliver Mattausch and Peter Wonka and Vlastimil Havran and Michael Wimmer", year = "2009", abstract = "In this paper we propose a global visibility algorithm which computes from-region visibility for all view cells simultaneously in a progressive manner. We cast rays to sample visibility interactions and use the information carried by a ray for all view cells it intersects. The main contribution of the paper is a set of adaptive sampling strategies based on ray mutations that exploit the spatial coherence of visibility. Our method achieves more than an order of magnitude speedup compared to per-view cell sampling. This provides a practical solution to visibility preprocessing and also enables a new type of interactive visibility analysis application, where it is possible to quickly inspect and modify a coarse global visibility solution that is constantly refined. 
", month = aug, journal = "ACM Transactions on Graphics", volume = "28", number = "3", issn = "0730-0301", pages = "94:1--94:10", keywords = "occlusion culling, visibility sampling, visibility, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-AGVS/", } @article{wilkie-2009-cc, title = "A Robust Illumination Estimate for Chromatic Adaptation in Rendered Images", author = "Alexander Wilkie and Andrea Weidlich", year = "2009", abstract = "We propose a method that improves automatic colour correction operations for rendered images. In particular, we propose a robust technique for estimating the visible and pertinent illumination in a given scene. We do this at very low computational cost by mostly re-using information that is already being computed during the image synthesis process. Conventional illuminant estimations either operate only on 2D image data, or, if they do go beyond pure image analysis, only use information on the luminaires found in the scene. The latter is usually done with little or no regard for how the light sources actually affect the part of the scene that is being viewed. Our technique goes beyond that, and also takes object reflectance into account, as well as the incident light that is actually responsible for the colour of the objects that one sees. It is therefore able to cope with difficult cases, such as scenes with mixed illuminants, complex scenes with many light sources of varying colour, or strongly coloured indirect illumination. 
", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1101--1109", keywords = "chromatic adaptation, predicitve rendering, colour constancy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/wilkie-2009-cc/", } @article{cline-09-poisson, title = "Dart Throwing on Surfaces", author = "David Cline and Stefan Jeschke and Anshuman Razdan and Kenric White and Peter Wonka", year = "2009", abstract = "In this paper we present dart throwing algorithms to generate maximal Poisson disk point sets directly on 3D surfaces. We optimize dart throwing by efficiently excluding areas of the domain that are already covered by existing darts. In the case of triangle meshes, our algorithm shows dramatic speed improvement over comparable sampling methods. The simplicity of our basic algorithm naturally extends to the sampling of other surface types, including spheres, NURBS, subdivision surfaces, and implicits. We further extend the method to handle variable density points, and the placement of arbitrary ellipsoids without overlap. Finally, we demonstrate how to adapt our algorithm to work with geodesic instead of Euclidean distance. Applications for our method include fur modeling, the placement of mosaic tiles and polygon remeshing.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1217--1226", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/cline-09-poisson/", } @article{weidlich-2009-dispersion, title = "Anomalous Dispersion in Predictive Rendering", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "In coloured media, the index of refraction does not decrease monotonically with increasing wavelength, but behaves in a quite non-monotonical way. This behaviour is called anomalous dispersion and results from the fact that the absorption of a material influences its index of refraction. 
So far, this interesting fact has not been widely acknowledged by the graphics community. In this paper, we demonstrate how to calculate the correct refractive index for a material based on its absorption spectrum with the Kramers-Kronig relation, and we discuss for which types of objects this effect is relevant in practice. ", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1065--1072", keywords = "Predictive rendering, Spectral Rendering, Dispersion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-dispersion/", } @inproceedings{weidlich_2009_REL, title = "Rendering the Effect of Labradorescence", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "Labradorescence is a complex optical phenomenon that can be found in certain minerals, such as Labradorite or Spectrolite. Because of their unique colour properties these minerals are often used as gemstones and decorative objects. Since the phenomenon is strongly orientation dependent, such minerals need a special cut to make the most of their unique type of colourful sheen, which makes it desirable to be able to predict the final appearance of a given stone prior to the cutting process. Also, the peculiar properties of the effect make a believable replication with an ad-hoc shader difficult even for normal, non-predictive rendering purposes. We provide a reflectance model for labradorescence that is directly derived from the physical characteristics of such materials. Due to its inherent accuracy, it can be used for predictive rendering purposes, but also for generic rendering applications. 
", month = may, isbn = "978-1-56881-470-4", publisher = "ACM", location = "Kelowna, British Columbia, Canada ", booktitle = "Proceedings of Graphics Interface 2009", pages = "79--85", keywords = "Predictive Rendering, Surface, Crystals", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich_2009_REL/", } @incollection{BITTNER-2009-GEFOC, title = "Game-Engine-Friendly Occlusion Culling", author = "Jir\'{i} Bittner and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "This article presents a method which minimizes the overhead associated with occlusion queries. The method reduces the number of required state changes and should integrate easily with most game engines. The key ideas are batching of the queries and interfacing with the game engine using a dedicated render queue. We also present some additional optimizations which reduce the number of queries issued as well as the number of rendered primitives. The algorithm is based on the well-known Coherent Hierarchical Culling algorithm.", month = mar, booktitle = "SHADERX7: Advanced Rendering Techniques", chapter = "8.3", editor = "Wolfang Engel", isbn = "1-58450-598-2", publisher = "Charles River Media", volume = "7", keywords = "real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-GEFOC/", } @article{Habel_09_PGT, title = "Physically Guided Animation of Trees", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2009", abstract = "This paper presents a new method to animate the interaction of a tree with wind both realistically and in real time. The main idea is to combine statistical observations with physical properties in two major parts of tree animation. 
First, the interaction of a single branch with the forces applied to it is approximated by a novel efficient two step nonlinear deformation method, allowing arbitrary continuous deformations and circumventing the need to segment a branch to model its deformation behavior. Second, the interaction of wind with the dynamic system representing a tree is statistically modeled. By precomputing the response function of branches to turbulent wind in frequency space, the motion of a branch can be synthesized efficiently by sampling a 2D motion texture. Using a hierarchical form of vertex displacement, both methods can be combined in a single vertex shader, fully leveraging the power of modern GPUs to realistically animate thousands of branches and ten thousands of leaves at practically no cost.", month = mar, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2009)", volume = "28", number = "2", issn = "0167-7055", pages = "523--532", keywords = "Animation, Physically Guided animation, Vegetation, Trees", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_09_PGT/", } @phdthesis{Habel_2009_PhD, title = "Real-time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display dense and short grass is introduced. 
In contrast to previous methods, the new approach is based on ray tracing to avoid the massive overdraw of billboard or explicit geometry representation techniques, achieving independence of the complexity of the grass without losing the visual characteristics of grass such as parallax and occlusion effects as the viewpoint moves. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering and special attention is given to the translucency of leaves, an integral part of leaf shading. The light transport through a leaf is precomputed and can be easily evaluated at runtime, making it possible to shade a massive amount of leaves while including the effects that occur due to the leaf structure such as varying albedo and thickness variations or self shadowing. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. This model does not require the branches to be segmented by joints as other methods, achieving smooth and accurate bending, and can be executed fully on a GPU. To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. This allows animating a highly detailed tree with thousands of branches and ten thousands of leaves efficiently. 
Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing to change the skylight parameters in real time at no considerable cost and memory footprint.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Animation, Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_2009_PhD/", } @inproceedings{GRELAUD-2009-EPA, title = "Efficient and Practical Audio-Visual Rendering for Games using Crossmodal Perception", author = "David Grelaud and Nicolas Bonneel and Michael Wimmer and Manuel Asselot and George Drettakis", year = "2009", abstract = "Interactive applications such as computer games, are inherently audio visual, requiring high-quality rendering of complex 3D audio soundscapes and graphics environments. A frequent source of audio events is impact sounds, typically generated with physics engines. In this paper, we first present an optimization allowing efficient usage of impact sounds in a unified audio rendering pipeline, also including prerecorded sounds. We also exploit a recent result on audio-visual crossmodal perception to introduce a new level-of-detail selection algorithm, which jointly chooses the quality level of audio and graphics rendering. 
We have integrated these two techniques as a comprehensive crossmodal audio-visual rendering pipeline in a home-grown game engine, thus demonstrating the potential utility of our approach.", month = feb, isbn = "978-1-60558-429-4", publisher = "ACM", location = "Boston, Massachusetts", address = "New York, NY, USA", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2009", pages = "177--182", keywords = "audio-visual rendering, crossmodal perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/GRELAUD-2009-EPA/", } @phdthesis{weidlich-2009-thesis, title = "Pseudochromatic Colourisation of Crystals in Predictive Image Synthesis", author = "Andrea Weidlich", year = "2009", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Predictive rendering, Crystal rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-thesis/", } @phdthesis{scherzer-thesis, title = "Applications of temporal coherence in real-time rendering", author = "Daniel Scherzer", year = "2009", abstract = "Real-time rendering imposes the challenging task of creating a new rendering of an input scene at least 60 times a second. Although computer graphics hardware has made staggering advances in terms of speed and freedom of programmability, there still exist a number of algorithms that are too expensive to be calculated in this time budget, like exact shadows or an exact global illumination solution. One way to circumvent this hard time limit is to capitalize on temporal coherence to formulate algorithms incremental in time. The main thesis of this work is that temporal coherence is a characteristic of real-time graphics that can be used to redesign well-known rendering methods to become faster, while exhibiting better visual fidelity. 
To this end we present our adaptations of algorithms from the fields of exact hard shadows, physically correct soft shadows and fast discrete LOD blending, in which we have successfully incorporated temporal coherence. Additionally, we provide a detailed context of previous work not only in the field of temporal coherence, but also in the respective fields of the presented algorithms.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "shadows, lod, real-time, image-space", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/scherzer-thesis/", } @inproceedings{WIMMER-2009-CSR, title = "Casting Shadows in Real Time", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michael Wimmer", year = "2009", abstract = "Shadows are crucial for enhancing realism and provide important visual cues. In recent years, many important contributions have been made both for hard shadows and soft shadows. Often spurred by the tremendous increase of computational power and capabilities of graphics hardware, much progress has been made concerning visual quality and speed, making high-quality real-time shadows a reachable goal. But with the growing wealth of available choices, it is particularly difficult to pick the right solution and assess shortcomings. Because currently there is no ultimate approach available, algorithms should be selected in accordance to the context in which shadows are produced. The possibilities range across a wide spectrum; from very approximate but really efficient to slower but accurate, adapted only to smaller or only to larger sources, addressing directional lights or positional lights, or involving GPU- or CPU-heavy computations. 
This course tries to serve as a guide to better understand limitations and failure cases, advantages and disadvantages, and suitability of the algorithms for different application scenarios. We will focus on real-time to interactive solutions but also discuss offline approaches if needed for a better understanding.", booktitle = "ACM SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", publisher = "ACM", note = "Lecturer: Daniel Scherzer", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-CSR/", } @article{bhagvat-09-frusta, title = "GPU Rendering of Relief Mapped Conical Frusta", author = "Deepali Bhagvat and Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "This paper proposes to use relief-mapped conical frusta (cones cut by planes) to skin skeletal objects. Based on this representation, current programmable graphics hardware can perform the rendering with only minimal communication between the CPU and GPU. A consistent definition of conical frusta including texture parametrization and a continuous surface normal is provided. Rendering is performed by analytical ray casting of the relief-mapped frusta directly on the GPU. We demonstrate both static and animated objects rendered using our technique and compare to polygonal renderings of similar quality.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "28", volume = "8", pages = "2131--2139", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bhagvat-09-frusta/", } @article{guerrero-2008-sli, title = "Real-time Indirect Illumination and Soft Shadows in Dynamic Scenes Using Spherical Lights", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer", year = "2008", abstract = "We present a method for rendering approximate soft shadows and diffuse indirect illumination in dynamic scenes. The proposed method approximates the original scene geometry with a set of tightly fitting spheres. 
In previous work, such spheres have been used to dynamically evaluate the visibility function to render soft shadows. In this paper, each sphere also acts as a low-frequency secondary light source, thereby providing diffuse one-bounce indirect illumination. The method is completely dynamic and proceeds in two passes: In a first pass, the light intensity distribution on each sphere is updated based on sample points on the corresponding object surface and converted into the spherical harmonics basis. In a second pass, this radiance information and the visibility are accumulated to shade final image pixels. The sphere approximation allows us to compute visibility and diffuse reflections of an object at interactive frame rates of over 20 fps for moderately complex scenes.", month = oct, journal = "Computer Graphics Forum", number = "8", volume = "27", pages = "2154--2168", keywords = "global illumination, precomputed radiance transfer, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/guerrero-2008-sli/", } @techreport{TR-186-2-08-09, title = "Pixel Accurate Shadows with Shadow Mapping", author = "Christian Luksch", year = "2008", abstract = "High quality shadows generated by shadow mapping is still an extensive problem in realtime rendering. This work summarizes some state-of-the-art techniques to achieve pixel accurate shadows and points out the various problems of generating artifact free shadows. 
Further a demo application has been implemented to compare the different techniques and experiment with alternative approaches.", month = sep, number = "TR-186-2-08-09", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "Shadow Mapping, Deferred shading, Pixel Accurate Shadows, Real time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/TR-186-2-08-09/", } @article{SCHERZER-2008-FSR, title = "Frame Sequential Interpolation for Discrete Level-of-Detail Rendering", author = "Daniel Scherzer and Michael Wimmer", year = "2008", abstract = "In this paper we present a method for automatic interpolation between adjacent discrete levels of detail to achieve smooth LOD changes in image space. We achieve this by breaking the problem into two passes: We render the two LOD levels individually and combine them in a separate pass afterwards. The interpolation is formulated in a way that only one level has to be updated per frame and the other can be reused from the previous frame, thereby causing roughly the same render cost as with simple non interpolated discrete LOD rendering, only incurring the slight overhead of the final combination pass. Additionally we describe customized interpolation schemes using visibility textures. The method was designed with the ease of integration into existing engines in mind. It requires neither sorting nor blending of objects, nor does it introduce any constrains in the LOD used. The LODs can be coplanar, alpha masked, animated, impostors, and intersecting, while still interpolating smoothly. 
", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2008)", volume = "27", number = "4", issn = "0167-7055", pages = "1175--1181", keywords = "LOD blending, real-time rendering, levels of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SCHERZER-2008-FSR/", } @article{CADIK-2008-EHD, title = "Evaluation of HDR Tone Mapping Methods Using Essential Perceptual Attributes", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2008", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color vision, physiological aspects, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this contribution, we present an overview about the effects of basic image attributes in HDR tone mapping. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical experiments that we have performed to prove the proposed relationship scheme. Moreover, we also present an evaluation of existing tone mapping methods (operators) with regard to these attributes. Finally, the execution of with-reference and without a real reference perceptual experiments gave us the opportunity to relate the obtained subjective results. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping method, but it also sets the stage for well-founded quality comparisons between tone mapping methods. 
By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible. ", month = jun, issn = "0097-8493", journal = "Computers & Graphics", number = "3", volume = "32", pages = "330--349", keywords = "high dynamic range, tone mapping operators, tone mapping evaluation, image attributes", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/CADIK-2008-EHD/", } @techreport{radax-2008-ir, title = "Instant Radiosity for Real-Time Global Illumination", author = "Ingo Radax", year = "2008", abstract = "Global illumination is necessary to achieve realistic images. Although there are plenty methods that focus on solving this problem, most of them are not fast enough for interactive environments. Instant radiosity is a method that approximates the indirect lighting, as part of global illumination, by creating additional light sources. Thereby it is very fast and does not need lot of preprocessing, so it is perfectly fit to be used within real-time requirements. Further techniques based on instant radiosity have extended the method to provide better image quality or faster rendering. So instant radiosity and its derivations can bring us global illumination in real-time.", month = may, number = "TR-186-2-08-15", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "Three-Dimensional Graphics and Realism, shading, radiosity, real time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/radax-2008-ir/", } @article{mattausch-2008-CHC, title = "CHC++: Coherent Hierarchical Culling Revisited", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2008", abstract = "We present a new algorithm for efficient occlusion culling using hardware occlusion queries. 
The algorithm significantly improves on previous techniques by making better use of temporal and spatial coherence of visibility. This is achieved by using adaptive visibility prediction and query batching. As a result of the new optimizations the number of issued occlusion queries and the number of rendering state changes are significantly reduced. We also propose a simple method for determining tighter bounding volumes for occlusion queries and a method which further reduces the pipeline stalls. The proposed method provides up to an order of magnitude speedup over the previous state of the art. The new technique is simple to implement, does not rely on hardware calibration and integrates well with modern game engines.", month = apr, journal = "Computer Graphics Forum (Proceedings Eurographics 2008)", volume = "27", number = "2", issn = "0167-7055", pages = "221--230", keywords = "temporal coherence, dynamic occlusion culling, occlusion queries", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/mattausch-2008-CHC/", } @inproceedings{Habel_08_SSH, title = "Efficient Spherical Harmonics Lighting with the Preetham Skylight Model", author = "Ralf Habel and Bogdan Mustata and Michael Wimmer", year = "2008", abstract = "We present a fast and compact representation of a skylight model for spherical harmonics lighting, especially for outdoor scenes. This representation allows dynamically changing the sun position and weather conditions on a per frame basis. We chose the most used model in real-time graphics, the Preetham skylight model, because it can deliver both realistic colors and dynamic range and its extension into spherical harmonics can be used to realistically light a scene. We separate the parameters of the Preetham skylight models' spherical harmonics extension and perform a polynomial two-dimensional non-linear least squares fit for the principal parameters to achieve both negligible memory and computation costs. 
Additionally, we execute a domain specific Gibbs phenomena suppression to remove ringing artifacts.", month = apr, publisher = "Eurographics Association", location = "Crete, Greece", issn = "1017-4656", editor = "Katerina Mania and Erik Reinhard", booktitle = "Eurographics 2008 - Short Papers", pages = "119--122", keywords = "Natural Phenomena, Spherical Harmonics, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Habel_08_SSH/", } @techreport{knecht-2007-ao, title = "State of the Art Report on Ambient Occlusion", author = "Martin Knecht", year = "2007", abstract = "Ambient occlusion is a shading method which takes light occluded by geometry into account. Since this technique needs to integrate over a hemisphere it was first only used in offline rendering tools. However, the increasing resources of modern graphics hardware, enable us to render ambient occlusion in realtime. The goal of this report is to describe the most popular techniques with respect to realtime rendering. First we introduce how ambient occlusion is defined and then we will explain and categorize the presented techniques. ", month = nov, number = "TR-186-2-07-13", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "ambient occlusion, global illumination, real time", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/knecht-2007-ao/", } @inproceedings{CHARALAMBOS-2007-HLOD, title = "Optimized HLOD Refinement Driven by Hardware Occlusion Queries", author = "Jean Pierre Charalambos and Jir\'{i} Bittner and Michael Wimmer and Eduardo Romero", year = "2007", abstract = "We present a new method for integrating hierarchical levels of detail (HLOD) with occlusion culling. The algorithm refines the HLOD hierarchy using geometric criteria as well as the occlusion information. 
For the refinement we use a simple model which takes into account the possible distribution of the visible pixels. The traversal of the HLOD hierarchy is optimized by a new algorithm which uses spatial and temporal coherence of visibility. We predict the HLOD refinement condition for the current frame based on the results from the last frame. This allows an efficient update of the front of termination nodes as well as an efficient scheduling of hardware occlusion queries. Compared to previous approaches, the new method improves on speed as well as image quality. The results indicate that the method is very close to the optimal scheduling of occlusion queries for driving the HLOD refinement.", month = nov, isbn = "978-3-540-76855-9", series = "Lecture Notes in Computer Science, volume 4841", publisher = "Springer", location = "Lake Tahoe, Nevada/California", editor = "Bebis, G. and Boyle, R. and Parvin, B. and Koracin, D. and Paragios, N. and Tanveer, S.-M. and Ju, T. and Liu, Z. and Coquillart, S. and Cruz-Neira, C. and M\"{o}ller, T. and Malzbender, T.", booktitle = "Advances in Visual Computing (Third International Symposium on Visual Computing -- ISVC 2007)", pages = "106--117", keywords = "occlusion queries, levels of detail, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/CHARALAMBOS-2007-HLOD/", } @techreport{TR-186-2-07-09, title = "Rendering Imperfections: Dust, Scratches, Aging,...", author = "Michael Schw\"{a}rzler and Michael Wimmer", year = "2007", abstract = "In order to increase the realism of an image or a scene in a computer graphics application, so-called imperfections are often used during rendering. These are techniques which add details like dirt, scratches, dust or aging effects to the models and textures. Realism is improved through imperfections since computer generated models are usually too perfect to be accepted as realistic by human observers. 
By making them, for example, dusty and scratched, people can imagine them being part of their real world much more easily. This article gives an overview of currently used imperfections techniques and algorithms. Topics like textures, scratches, aging, dust, weathering, lichen growth and terrain erosion are covered.", month = sep, number = "TR-186-2-07-09", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "scratches, dust, imperfections, aging", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-09/", } @inproceedings{JESCHKE-2007-ISC, title = "Interactive Smooth and Curved Shell Mapping", author = "Stefan Jeschke and Stephan Mantler and Michael Wimmer", year = "2007", abstract = "Shell mapping is a technique to represent three-dimensional surface details. This is achieved by extruding the triangles of an existing mesh along their normals, and mapping a 3D function (e.g., a 3D texture) into the resulting prisms. Unfortunately, such a mapping is nonlinear. Previous approaches perform a piece-wise linear approximation by subdividing the prisms into tetrahedrons. However, such an approximation often leads to severe artifacts. In this paper we present a correct (i.e., smooth) mapping that does not rely on a decomposition into tetrahedrons. We present an efficient GPU ray casting algorithm which provides correct parallax, self-occlusion, and silhouettes, at the cost of longer rendering times. The new formulation also allows modeling shells with smooth curvatures using Coons patches within the prisms. Tangent continuity between adjacent prisms is guaranteed, while the mapping itself remains local, i.e. every curved prism content is modeled at runtime in the GPU without the need for any precomputation. 
This allows instantly replacing animated triangular meshes with prism-based shells.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", numpages = "10", pages = "351--360", keywords = "Display algorithms, Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/JESCHKE-2007-ISC/", } @inproceedings{Habel_2007_RTT, title = "Physically Based Real-Time Translucency for Leaves", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2007", abstract = "This paper presents a new shading model for real-time rendering of plant leaves that reproduces all important attributes of a leaf and allows for a large number of leaves to be shaded. In particular, we use a physically based model for accurate subsurface scattering on the translucent side of directly lit leaves. For real-time rendering of this model, we formulate it as an image convolution process and express the result in an efficient directional basis that is fast to evaluate. 
We also propose a data acquisition method for leaves that uses off-the-shelf devices.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "253--263", keywords = "Realtime Rendering, Natural Scene Rendering, Physically Based Rendering, Natural Phenomena", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_RTT/", } @inproceedings{Scherzer-2007-PCS, title = "Pixel-Correct Shadow Maps with Temporal Reprojection and Shadow Test Confidence", author = "Daniel Scherzer and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "Shadow mapping suffers from spatial aliasing (visible as blocky shadows) as well as temporal aliasing (visible as flickering). Several methods have already been proposed for reducing such artifacts, but so far none is able to provide satisfying results in real time. This paper extends shadow mapping by reusing information of previously rasterized images, stored efficiently in a so-called history buffer. This buffer is updated in every frame and then used for the shadow calculation. In combination with a special confidence-based method for the history buffer update (based on the current shadow map), temporal and spatial aliasing can be completely removed. The algorithm converges in about 10 to 60 frames and during convergence, shadow borders are sharpened over time. Consequently, in case of real-time frame rates, the temporal shadow adaption is practically imperceptible. The method is simple to implement and is as fast as uniform shadow mapping, incurring only the minor speed hit of the history buffer update. 
It works together with advanced filtering methods like percentage closer filtering and more advanced shadow mapping techniques like perspective or light space perspective shadow maps.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "45--50", keywords = "shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Scherzer-2007-PCS/", } @inproceedings{GIEGL-2007-FVS, title = "Fitted Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Too little shadow map resolution and resulting undersampling artifacts, perspective and projection aliasing, have long been a fundamental problem of shadowing scenes with shadow mapping. We present a new smart, real-time shadow mapping algorithm that virtually increases the resolution of the shadow map beyond the GPU hardware limit where needed. We first sample the scene from the eye-point on the GPU to get the needed shadow map resolution in different parts of the scene. We then process the resulting data on the CPU and finally arrive at a hierarchical grid structure, which we traverse in kd-tree fashion, shadowing the scene with shadow map tiles where needed. Shadow quality can be traded for speed through an intuitive parameter, with a homogeneous quality reduction in the whole scene, down to normal shadow mapping. This allows the algorithm to be used on a wide range of hardware.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. 
Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "159--168", keywords = "real-time shadowing, shadows, shadow maps, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-FVS/", } @habilthesis{WIMMER-2007-RTR, title = "Real-Time Rendering", author = "Michael Wimmer", year = "2007", abstract = "Real-time rendering is concerned with the display of computer-generated images at rates which let a human observer believe that she is looking at a smooth animation. This thesis deals with several contributions to the field of real-time rendering that improve either the performance of rendering algorithms or the quality of the displayed images. Light-Space Perspective Shadow Maps improve the quality of real-time rendering by providing better looking shadow rendering, one of the most popular research topics in real-time rendering. Conversely, Coherent Hierarchical Culling and Guided Visibility Sampling improve the performance of real-time rendering through visibility culling. One is designed for runtime computation and the other for preprocessing. Finally, real-time rendering is extended from traditional polygon rendering to a new type of dataset that has recently gained importance, namely point clouds, especially huge datasets that cannot be loaded into main memory. ", month = may, URL = "https://www.cg.tuwien.ac.at/research/publications/2007/WIMMER-2007-RTR/", } @inproceedings{MATTAUSCH-2007-OSP, title = "Optimized Subdivisions for Preprocessed Visibility", author = "Oliver Mattausch and Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2007", abstract = "This paper describes a new tool for preprocessed visibility. It puts together view space and object space partitioning in order to control the render cost and memory cost of the visibility description generated by a visibility solver. 
The presented method progressively refines view space and object space subdivisions while minimizing the associated render and memory costs. Contrary to previous techniques, both subdivisions are driven by actual visibility information. We show that treating view space and object space together provides a powerful method for controlling the efficiency of the resulting visibility data structures.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "335--342", keywords = "visibility preprocessing, potentially visible sets, view cells", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MATTAUSCH-2007-OSP/", } @misc{MANTLER-2007-DMBBC, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = apr, event = "Symposium on Interactive 3D Graphics and Games", note = "Poster presented at Symposium on Interactive 3D Graphics and Games (2007-04-30--2007-05-02)", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MANTLER-2007-DMBBC/", } @inproceedings{GIEGL-2007-QV1, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Shadowing scenes by shadow mapping has long suffered from the fundamental problem of undersampling artifacts due to too low shadow map resolution, leading to so-called perspective and projection aliasing. In this paper we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. 
The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = apr, isbn = "978-1-59593-628-8", publisher = "ACM Press", location = "Seattle, WA", address = "New York, NY, USA", booktitle = "Proceedings of ACM SIGGRAPH 2007 Symposium on Interactive 3D Graphics and Games", pages = "65--72", keywords = "shadow maps, shadows, real-time shadowing, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-QV1/", } @article{GIEGL-2007-UNP, title = "Unpopping: Solving the Image-Space Blend Problem for Smooth Discrete LOD Transitions", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "This paper presents a new, simple and practical algorithm to avoid artifacts when switching between discrete levels of detail (LOD) by smoothly blending LOD representations in image space. We analyze the alternatives of conventional alpha-blending and so-called late-switching (the switching of LODs ``far enough'' from the eye-point), widely thought to solve the LOD switching discontinuity problem, and conclude that they either do not work in practice, or defeat the concept of LODs. In contrast we show that our algorithm produces visually pleasing blends for static and animated discrete LODs, for discrete LODs with different types of LOD representations (e.g. 
billboards and meshes) and even to some extent totally different objects with similar spatial extent, with a very small runtime overhead.", month = mar, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "26", pages = "46--49", keywords = "popping, LOD switching, levels of detail, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-UNP/", } @article{Habel_2007_IAG, title = "Instant Animated Grass", author = "Ralf Habel and Michael Wimmer and Stefan Jeschke", year = "2007", abstract = "This paper introduces a technique for rendering animated grass in real time. The technique uses front-to-back compositing of implicitly defined grass slices in a fragment shader and therefore significantly reduces the overhead associated with common vegetation rendering systems. We also introduce a texture-based animation scheme that combines global wind movements with local turbulences. Since the technique is confined to a fragment shader, it can be easily integrated into any rendering system and used as a material in existing scenes. ", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", note = "ISBN 978-80-86943-00-8", issn = "1213-6972", pages = "123--128", keywords = "Real-time Rendering, Natural Scene Rendering, Natural Phenomena, GPU Programming", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_IAG/", } @inproceedings{zotti-2007-wscg, title = "A Critical Review of the Preetham Skylight Model", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2007", abstract = "The Preetham skylight model is currently one of the most widely used analytic models of skylight luminance in computer graphics. Despite its widespread use, very little work has been carried out to verify the results generated by the model, both in terms of the luminance patterns it generates, and in terms of numerical reliability and stability. 
We have implemented the model in Mathematica, visualise and discuss those parameter ranges which exhibit problematic behaviour, and compare the computed luminance values with references from literature, especially the 15 standard skylight distributions of the CIE 2003 Standard General Sky. We also performed luminance measurements on real cloudless skies, and compare these measurements to the predictions of the model.", month = jan, isbn = "978-80-86943-02-2", publisher = "University of West Bohemia", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG ", pages = "23--30", keywords = "Verification, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-wscg/", } @techreport{TR-186-2-07-01, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = jan, number = "TR-186-2-07-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-01/", } @article{neumann-2006-gamma, title = "Accurate Display Gamma Functions based on Human Observation", author = "Attila Neumann and Alessandro Artusi and L\'{a}szl\'{o} Neumann and Georg Zotti and Werner Purgathofer", year = "2007", abstract = "This paper describes an accurate method to obtain the Tone Reproduction Curve (TRC) of display devices without using a measurement device. It is an improvement of an existing technique based on human observation, solving its problem of numerical instability and resulting in functions in log--log scale which correspond better to the nature of display devices. We demonstrate the effiency of our technique on different monitor technologies, comparing it with direct measurements using a spectrophotometer.", issn = "0361-2317", journal = "Color Research & Applications", note = "2006 angenommen, 2007 erschienen", number = "4", volume = "32", pages = "310--319", keywords = "Generalized Gamma Function, Colour Reproduction, Display Measurement, Human Visual System, Spatial Vision", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/neumann-2006-gamma/", } @phdthesis{zotti-2007-PhD, title = "Computer Graphics in Historical and Modern Sky Observations", author = "Georg Zotti", year = "2007", abstract = "This work describes work done in three areas of research where sky observations meet computer graphics. 
The whole topic covers several millennia of human history and posed combined challenges from fields including archaeology, astronomy, cultural heritage, digital image processing and computer graphics. The first part presents interdisciplinary work done in the fields of archaeo-astronomy, visualisation and virtual reconstruction. A novel diagram has been developed which provides an intuitive, easy visualisation to investigate archaeological survey maps for evidence of astronomically motivated orientation of buildings. This visualisation was developed and first applied to a certain class of neolithic circular structures in Lower Austria in order to investigate the idea of solar orientation of access doorways. This diagram and its intuitive interpretation allowed the author to set up a new hypothesis about practical astronomical activities in the middle neolithic period in central Europe. How virtual reconstructions of these buildings characteristic for a short time during the neolithic epoch can be combined with the excellent sky simulation of a modern planetarium to communicate these results to a broader audience is described thereafter. The second part of this work describes a certain class of historical scientific instruments for sky observations and its reconstruction with methods of computer graphics. Long after the stone age, in the Middle Ages, the astrolabe was the most celebrated instrument for celestial observations and has been explained in contemporary literature, usually with the help of precomputed tables for a certain size or kind of instrument. Today, historical exhibitions frequently present one of these instruments, but its various applications are hard to explain to the general audience without hands-on demonstration. For this challenge from the cultural heritage domain, an approach using the idea of procedural modelling is presented. 
Here, a computer graphics model is not statically drawn but specified by parametrised plotting functions, which can then be repeatedly executed with different parameters to create the final model. This approach is demonstrated to provide a very flexible solution which can immediately be applied to specific needs just by tweaking a few parameters, instead of having to repetitively draw the whole model manually. From the two-dimensional procedural model, 3D models can be easily created, and even the production of wooden instruments on a Laser engraver/plotter is demonstrated. The third and longest part deals with methods of sky simulation and rendering in the domain of computer graphics. In this discipline, modelling of skylight and atmospheric effects has developed tremendously over the last two decades, which is covered by an extensive survey of literature from the computer graphics and also atmosphere physics domains. The requirements of physically correct or at least plausible rendering include realistic values for sky brightness. Measurements performed with a luminance meter on a clear sky in order to verify the currently most widely used analytic skylight model [Preetham 1999] shows however its limited applicability. There are two classical groups of clear-sky models: numerical simulations of scattering in the atmosphere, and fast analytical models. Recently, another method for more realistic looking skylight models has been developed: digital images taken with a fisheye lens are combined into high dynamic range images which can be used for scene illumination and as sky background. These images can be calibrated by photometric measurements of absolute luminance values. Long-time exposures allow to apply this system to quantitative investigations of sky brightness, sky colours, and also nocturnal light pollution by artificial illumination. 
Results and other applications of the system are described, and the pipeline for creating such images is described in the appendix. This work closes with some notes of future directions of research.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Astrolabe, Archaeo-Astronomy, Skydome Visualisation, Cultural Heritage, Sky Measurements", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-PhD/", } @incollection{GIEGL-2006-QVS, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2006", abstract = "In this article we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = dec, booktitle = "ShaderX 5 -- Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "1-58450-499-4", publisher = "Charles River Media", series = "ShaderX", volume = "5", keywords = "shadows, shadow mapping, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/GIEGL-2006-QVS/", } @inproceedings{zotti-2006-pla, title = "Using Virtual Reconstructions in a Planetarium for Demonstrations in Archaeo-Astronomy", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2006", abstract = "In the last decades, archaeologists in central Europe have found traces of enigmatic neolithic circular building structures buried in the soil. 
Recent studies indicate that the orientation of many of their doorways may have been chosen with an astronomical background in mind. This paper explains the use of virtual reconstructions of these buildings from archaeological data, in combination with a simulation of the sky of that time in a Planetarium, to present the astronomical findings to the public.", month = nov, isbn = "963-9495-89-1", publisher = "Pannonian University Press", location = "Eger", editor = "Cecilia Sik Lanyi ", booktitle = "Third Central European Multimedia and Virtual Reality Conference (Proc. CEMVRC2006)", pages = "43--51", keywords = "Virtual Reality, Public Dissemination, Archaeo-Astronomy", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/zotti-2006-pla/", } @inproceedings{Mantler-06-landscape, title = "Interactive Landscape Visualization Using GPU Ray Casting", author = "Stephan Mantler and Stefan Jeschke", year = "2006", abstract = "This paper demonstrates the simple yet effective usage of height fields for interactive landscape visualizations using a ray casting approach implemented in the pixel shader of modern graphics cards. The rendering performance is output sensitive, i.e., it scales with the number of pixels rather than the complexity of the landscape. Given a height field of a terrain and a topographic map or similar data as input, the vegetation cover is extracted and stored on top of the height field in a preprocess, enhancing the terrain with forest canopies or other mesostructure. In addition, enhanced illumination models like shadowing and ambient occlusion can be calculated at runtime with reasonable computational cost, which greatly enhances the scene realism. 
Finally, including the presented technique into existing rendering systems is relatively simple, mainly consisting of data preparation and pixel shader programming.", month = nov, booktitle = "Proceedings of Graphite 2006", keywords = "real-time rendering, gpu ray casting", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Mantler-06-landscape/", } @inproceedings{wilkie-2006-dfs, title = "A Reflectance Model for Diffuse Fluorescent Surfaces", author = "Alexander Wilkie and Andrea Weidlich and Caroline Larboulette and Werner Purgathofer", year = "2006", abstract = "Fluorescence is an interesting and visually prominent effect, which has not been fully covered by Computer Graphics research so far. While the physical phenomenon of fluorescence has been addressed in isolation, the actual reflection behaviour of real fluorescent surfaces has never been documented, and no analytical BRDF models for such surfaces have been published yet. This paper aims to illustrate the reflection properties typical for diffuse fluorescent surfaces, and provides a BRDF model based on a layered microfacet approach that mimics them.", month = nov, isbn = "1-59593-564-9", location = "Kuala Lumpur, Malaysia", booktitle = "Proceedings of Graphite 2006", pages = "8", pages = "321--328", keywords = "Fluorescence, Analytical BRDF models", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/wilkie-2006-dfs/", } @inproceedings{CADIK-2006-IAQ, title = "Image Attributes and Quality for Evaluation of Tone Mapping Operators", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2006", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. 
There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color science, physiology, neurology, psychology, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this paper, we present an overview of image quality attributes of different tone mapping methods. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical tests that we have performed to prove the proposed relationship scheme. We also present the evaluation of existing tone mapping methods with regard to these attributes. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping operator, but it also sets the stage for well-founded quality comparisons between tone mapping operators. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible at all.", month = oct, publisher = "National Taiwan University Press", location = "Taipe, Taiwan", booktitle = "Proceedings of Pacific Graphics 2006 (14th Pacific Conference on Computer Graphics and Applications)", pages = "35--44", keywords = "tone mapping evaluation, tone mapping, high-dynamic range images", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/CADIK-2006-IAQ/", } @article{WONKA-2006-GVS, title = "Guided Visibility Sampling", author = "Peter Wonka and Michael Wimmer and Kaichi Zhou and Stefan Maierhofer and Gerd Hesina and Alexander Reshetov", year = "2006", abstract = "This paper addresses the problem of computing the triangles visible from a region in space. The proposed aggressive visibility solution is based on stochastic ray shooting and can take any triangular model as input. 
We do not rely on connectivity information, volumetric occluders, or the availability of large occluders, and can therefore process any given input scene. The proposed algorithm is practically memoryless, thereby alleviating the large memory consumption problems prevalent in several previous algorithms. The strategy of our algorithm is to use ray mutations in ray space to cast rays that are likely to sample new triangles. Our algorithm improves the sampling efficiency of previous work by over two orders of magnitude.", month = jul, journal = "ACM Transactions on Graphics", volume = "25", number = "3", note = "Proceedings ACM SIGGRAPH 2006", issn = "0730-0301", doi = "10.1145/1141911.1141914", pages = "494--502", keywords = "visibility, visibility sampling, occlusion culling, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WONKA-2006-GVS/", } @inproceedings{MATTAUSCH-2006-AVC, title = "Adaptive Visibility-Driven View Cell Construction", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2006", abstract = "We present a new method for the automatic partitioning of view space into a multi-level view cell hierarchy. We use a cost-based model in order to minimize the average rendering time. Unlike previous methods, our model takes into account the actual visibility in the scene, and the partition is not restricted to planes given by the scene geometry. 
We show that the resulting view cell hierarchy works for different types of scenes and gives lower average rendering time than previously used methods.", month = jun, isbn = "3-90567-335-5", publisher = "Eurographics Association", organization = "Eurographics", location = "Nicosia, Cyprus", editor = "Wolfgang Heidrich and Tomas Akenine-Moller", booktitle = "Rendering Techniques 2006 (Proceedings Eurographics Symposium on Rendering)", pages = "195--206", keywords = "view cells, real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/MATTAUSCH-2006-AVC/", } @inproceedings{jeschke-05-ISTAR, title = "Image-based Representations for Accelerated Rendering of Complex Scenes", author = "Stefan Jeschke and Michael Wimmer and Werner Purgathofer", year = "2005", abstract = "This paper gives an overview of image-based representations commonly used for reducing the geometric complexity of a scene description in order to accelerate the rendering process. Several different types of representations and ways for using them have been presented, which are classified and discussed here. Furthermore, the overview includes techniques for accelerating the rendering of static scenes or scenes with animations and/or dynamic lighting effects. The advantages and drawbacks of the different approaches are illuminated, and unsolved problems and roads for further research are shown.", month = aug, booktitle = "EUROGRAPHICS 2005 State of the Art Reports", editor = "Y. Chrysanthou and M. 
Magnor", location = "Dublin, Ireland", publisher = "The Eurographics Association and The Image Synthesis Group", organization = "EUROGRAPHICS", pages = "1--20", keywords = "Impostors, Display Algorithms, Three Dimensional Graphics and Realism, Color, Shading, Shadowing and Texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ISTAR/", } @inproceedings{bittner-2005-egsr, title = "Fast Exact From-Region Visibility in Urban Scenes", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2005", abstract = "We present a fast exact from-region visibility algorithm for 2.5D urban scenes. The algorithm uses a hierarchical subdivision of line-space for identifying visibility interactions in a 2D footprint of the scene. Visibility in the remaining vertical dimension is resolved by testing for the existence of lines stabbing sequences of virtual portals. Our results show that exact analytic from-region visibility in urban scenes can be computed at times comparable or even superior to recent conservative methods. ", month = jun, isbn = "3-905673-23-1", publisher = "Eurographics Association", organization = "Eurographics", location = "Konstanz, Germany", editor = "Kavita Bala and Philip Dutr\'{e}", booktitle = "Rendering Techniques 2005 (Proceedings Eurographics Symposium on Rendering)", pages = "223--230", keywords = "real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bittner-2005-egsr/", } @inproceedings{havran-2005-sccg, title = "On Cross-Validation and Resampling of BRDF Data Measurements", author = "Vlastimil Havran and Attila Neumann and Georg Zotti and Werner Purgathofer and Hans-Peter Seidel", year = "2005", abstract = "We discuss the validation of BTF data measurements by means used for BRDF measurements. First, we show how to apply the Helmholtz reciprocity and isotropy for a single data set. 
Second, we discuss a cross-validation for BRDF measurement data obtained from two different measurement setups, where the measurements are not calibrated or the level of accuracy is not known. We show the practical problems encountered and the solutions we have used to validate physical setup for four material samples. We describe a novel coordinate system suitable for resampling the BRDF data from one data set to another data set. Further, we show how the perceptually uniform color space CIELab is used for cross-comparison of BRDF data measurements, which were not calibrated.", month = may, location = "Budmerice, Slovakia", booktitle = "Proceedings SCCG 2005", pages = "161--168", keywords = "reflectance function, BRDF data acquisition, BRDF data validation, predictive rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-sccg/", } @inproceedings{jeschke-05-AIP, title = "Automatic Impostor Placement for Guaranteed Frame Rates and Low Memory Requirements", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann and Werner Purgathofer", year = "2005", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to reduce the rendering time needed for displaying complex scenes. However, a big problem is the huge amount of memory required for impostors. This paper presents an algorithm that automatically places impostors into a scene so that a desired frame rate and image quality is always met, while at the same time not requiring enormous amounts of impostor memory. 
The low memory requirements are provided by a new placement method and through the simultaneous use of other acceleration techniques like visibility culling and geometric levels of detail.", month = apr, isbn = "1-59593-013-2", publisher = "ACM Press", organization = "ACM", location = "Washington DC", booktitle = "Proceedings of ACM SIGGRAPH 2005 Symposium on Interactive 3D Graphics and Games", pages = "103--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-AIP/", } @incollection{Wimmer-2005-HOQ, title = "Hardware Occlusion Queries Made Useful", author = "Michael Wimmer and Jir\'{i} Bittner", year = "2005", abstract = "Hardware occlusion queries make it possible for an application to ask the 3D API whether or not any pixels would be drawn if a particular object was rendered. With this feature, applications can check to see whether or not the bounding boxes of complex objects are visible; if the bounds are occluded, the application can skip drawing those objects. In this chapter, we present a simple and powerful algorithm to solve the problem of latency and CPU/GPU stall typically associated with a naive usage of hardware occlusion queries.", month = mar, booktitle = "GPU Gems 2: Programming Techniques for High-Performance Graphics and General-Purpose Computation", editor = "Matt Pharr and Randima Fernando", isbn = "0-32133-559-7", publisher = "Addison-Wesley", keywords = "occlusion culling, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Wimmer-2005-HOQ/", } @phdthesis{jeschke-05-ARI, title = "Accelerating the Rendering Process Using Impostors", author = "Stefan Jeschke", year = "2005", abstract = "The interactive rendering of three-dimensional geometric models is a research area of big interest in computer graphics. The generation of a fluent animation for complex models, consisting of multiple million primitives, with more than 60 frames per second is a special challenge. 
Possible applications include ship-, driving- and flight simulators, virtual reality and computer games. Although the performance of common computer graphics hardware has dramatically increased in recent years, the demand for more realism and complexity in common scenes is growing even faster. This dissertation is about one approach for accelerating the rendering of such complex scenes. We take advantage of the fact that the appearance of distant scene parts hardly changes for several successive output images. Those scene parts are replaced by precomputed image-based representations, so-called impostors. Impostors are very fast to render while maintaining the appearance of the scene part as long as the viewer moves within a bounded viewing region, a so-called view cell. However, unsolved problems of impostors are the support of a satisfying visual quality with reasonable computational effort for the impostor generation, as well as very high memory requirements for impostors for common scenes. Until today, these problems are the main reason why impostors are hardly used for rendering acceleration. This thesis presents two new impostor techniques that are based on partitioning the scene part to be represented into image layers with different distances to the observer. A new error metric allows a guarantee for a minimum visual quality of an impostor even for large view cells. Furthermore, invisible scene parts are efficiently excluded from the representation without requiring any knowledge about the scene structure, which provides a more compact representation. One of the techniques combines every image layer separately with geometric information. This allows a fast generation of memory-efficient impostors for distant scene parts. In the other technique, the geometry is independent from the depth layers, which allows a compact representation for near scene parts. The second part of this work is about the efficient usage of impostors for a given scene. 
The goal is to guarantee a minimum frame rate for every view within the scene while at the same time minimizing the memory requirements for all impostors. The presented algorithm automatically selects impostors and view cells so that for every view, only the most suitable scene parts are represented as impostors. Previous approaches generated numerous similar impostors for neighboring view cells, thus wasting memory. The new algorithm overcomes this problem. i The simultaneous use of additional acceleration techniques further reduces the required impostor memory and allows making best use of all available techniques at the same time. The approach is general in the sense that it can handle arbitrary scenes and a broad range of impostor techniques, and the acceleration provided by the impostors can be adapted to the bottlenecks of different rendering systems. In summary, the provided techniques and algorithms dramatically reduce the required impostor memory and simultaneously guarantee a minimum output image quality. This makes impostors useful for numerous scenes and applications where they could hardly be used before.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "image-based rendering, impostors, rendering acceleration", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ARI/", } @inproceedings{zotti-2005-lum, title = "Approximating Real-World Luminaires with OpenGL Lights", author = "Georg Zotti and Attila Neumann and Werner Purgathofer", year = "2005", abstract = "Dynamic illumination in real-time applications using OpenGL is still usually done with the classical light forms of point lights, directional lights and spot lights. For applications simulating real-world scenes, e.g. 
architectural planning, finding parameter sets for these simple lights to match real-world luminaires is required for realistic work. This paper describes a simple approach to process a luminaire data file in IESNA IES-LM63-95 format to create an approximation using at most 2 OpenGL lights to represent one luminaire.", month = feb, isbn = "80-903100-9-5", publisher = "UNION press", organization = "University of West Bohemia", note = "only on conference CD-ROM", location = "Plzen", address = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2005 Short Paper Proceedings", pages = "49--52", keywords = "interactive illumination planning, OpenGL, Real-world luminaires", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/zotti-2005-lum/", } @inproceedings{havran-2005-egsr, title = "Ray Maps for Global Illumination", author = "Vlastimil Havran and Jir\'{i} Bittner and Robert Herzog and Hans-Peter Seidel", year = "2005", abstract = "We describe a novel data structure for representing light transport called ray map. The ray map extends the concept of photon maps: it stores not only photon impacts but the whole photon paths. We demonstrate the utility of ray maps for global illumination by eliminating boundary bias and reducing topological bias of density estimation in global illumination. Thanks to the elimination of boundary bias we could use ray maps for fast direct visualization with the image quality being close to that obtained by the expensive nal gathering step. We describe in detail our implementation of the ray map using a lazily constructed kD-tree. 
We also present several optimizations bringing the ray map query performance close to the performance of the photon map.", booktitle = "Eurographics Symposium on Rendering", pages = "43--54", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-egsr/", } @article{Bittner-2004-CHC, title = "Coherent Hierarchical Culling: Hardware Occlusion Queries Made Useful", author = "Jir\'{i} Bittner and Michael Wimmer and Harald Piringer and Werner Purgathofer", year = "2004", abstract = "We present a simple but powerful algorithm for optimizing the usage of hardware occlusion queries in arbitrary complex scenes. Our method minimizes the number of issued queries and reduces the delays due to the latency of query results. We reuse the results of the occlusion queries from the last frame in order to initiate and schedule the queries in the next frame. This is done by processing nodes of a spatial hierarchy in front-to-back order, interleaving occlusion queries with the rendering of certain previously visible nodes. The proposed scheduling of the queries makes use of spatial and temporal coherence of visibility. Despite its simplicity, the algorithm achieves good culling efficiency for scenes of various characteristics. The implementation of the algorithm is straightforward, and it can be easily integrated in existing real-time rendering packages using various spatial data structures.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", note = "Proceedings EUROGRAPHICS 2004", issn = "0167-7055", pages = "615--624", keywords = "occlusion query, visibility, real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Bittner-2004-CHC/", } @inproceedings{Wilkie-2004-AMS, title = "An Analytical Model for Skylight Polarisation", author = "Alexander Wilkie and Robert F. 
Tobler and Christiane Ulbricht and Georg Zotti and Werner Purgathofer", year = "2004", abstract = "Under certain circumstances the polarisation state of the illumination can have a significant influence on the appearance of scenes; outdoor scenes with specular surfaces -- such as water bodies or windows -- under clear, blue skies are good examples of such environments. In cases like that it can be essential to use a polarising renderer if a true prediction of nature is intended, but so far no polarising skylight models have been presented. This paper presents a plausible analytical model for the polarisation of the light emitted from a clear sky. Our approach is based on a suitable combination of several components with well-known characteristics, and yields acceptable results in considerably less time than an exhaustive simulation of the underlying atmospheric scattering phenomena would require.", month = jun, isbn = "3-905673-12-6", publisher = "?", editor = "Alexander Keller and Henrik Wann Jensen ", booktitle = "Proceedings of the Eurographics Symposium on Rendering", pages = "387--399", keywords = "skylight rendering, polarisation", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wilkie-2004-AMS/", } @inproceedings{Wimmer-2004-LSPM, title = "Light Space Perspective Shadow Maps", author = "Michael Wimmer and Daniel Scherzer and Werner Purgathofer", year = "2004", abstract = "In this paper, we present a new shadow mapping technique that improves the quality of perspective and uniform shadow maps. Our technique uses a perspective transform specified in light space which allows treating all lights as directional lights and does not change the direction of the light sources. This gives all the benefits of the perspective mapping but avoids the problems inherent in perspective shadow mapping like singularities in post-perspective space, missed shadow casters etc. 
Furthermore, we show that both uniform and perspective shadow maps distribute the perspective aliasing error that occurs in shadow mapping unequally over the available z-range. We therefore propose a transform that equalizes this error and gives equally pleasing results for near and far viewing distances. Our method is simple to implement, requires no scene analysis and is therefore as fast as uniform shadow mapping.", month = jun, isbn = "3-905673-12-6", publisher = "Eurographics Association", organization = "Eurographics", location = "Norrk\"{o}ping, Sweden", editor = "Alexander Keller and Henrik W. Jensen", booktitle = "Rendering Techniques 2004 (Proceedings Eurographics Symposium on Rendering)", pages = "143--151", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wimmer-2004-LSPM/", } @article{Bittner-2003-Vis, title = "Visibility in Computer Graphics", author = "Jir\'{i} Bittner and Peter Wonka", year = "2003", abstract = "Visibility computation is crucial for computer graphics from its very beginning. The first visibility algorithms in computer graphics aimed to determine visible surfaces in a synthesized image of a 3D scene. Nowadays there are many different visibility algorithms for various visibility problems. We propose a new taxonomy of visibility problems that is based on a classification according to the problem domain. We provide a broad overview of visibility problems and algorithms in computer graphics grouped by the proposed taxonomy. The paper surveys visible surface algorithms, visibility culling algorithms, visibility algorithms for shadow computation, global illumination, point-based and image-based rendering, and global visibility computations. 
Finally, we discuss common concepts of visibility algorithm design and several criteria for the classification of visibility algorithms.", month = sep, issn = "0265-8135", journal = "Environment and Planning B: Planning and Design", number = "5", volume = "30", pages = "729--756", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Bittner-2003-Vis/", } @inproceedings{Wimmer-2003-RTE, title = "Rendering Time Estimation for Real-Time Rendering", author = "Michael Wimmer and Peter Wonka", year = "2003", abstract = "This paper addresses the problem of estimating the rendering time for a real-time simulation. We study different factors that contribute to the rendering time in order to develop a framework for rendering time estimation. Given a viewpoint (or view cell) and a list of potentially visible objects, we propose several algorithms that can give reasonable upper limits for the rendering time on consumer hardware. This paper also discusses several implementation issues and design choices that are necessary to make the rendering time predictable. Finally, we lay out two extensions to current rendering hardware which would allow implementing a system with constant frame rates.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "118--129", keywords = "graphics hardware, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wimmer-2003-RTE/", } @inproceedings{Artusi-2003-Del, title = "Delivering Interactivity to Complex Tone Mapping Operators", author = "Alessandro Artusi and Jir\'{i} Bittner and Michael Wimmer and Alexander Wilkie", year = "2003", abstract = "The accurate display of high dynamic range images requires the application of complex tone mapping operators. 
These operators are computationally costly, which prevents their usage in interactive applications. We propose a general framework that delivers interactive performance to an important subclass of tone mapping operators, namely global tone mapping operators. The proposed framework consists of four steps: sampling the input image, applying the tone mapping operator, tting the point-sampled tone mapping curve, and reconstructing the tone mapping curve for all pixels of the input image. We show how to make use of recent graphics hardware while keeping the advantage of generality by performing tone mapping in software. We demonstrate the capabilities of our method by accelerating several common global tone mapping operators and integrating the operators in a real-time rendering application.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "38--44", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-Del/", } @inproceedings{Jeschke-2002-TDMR, title = "Textured Depth Meshes for Real-Time Rendering of Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "This paper presents a new approach to generate textured depth meshes (TDMs), an impostor-based scene representation that can be used to accelerate the rendering of static polygonal models. The TDMs are precalculated for a fixed viewing region (view cell). The approach relies on a layered rendering of the scene to produce a voxel-based representation. Secondary, a highly complex polygon mesh is constructed that covers all the voxels. Afterwards, this mesh is simplified using a special error metric to ensure that all voxels stay covered. 
Finally, the remaining polygons are resampled using the voxel representation to obtain their textures. The contribution of our approach is manifold: first, it can handle polygonal models without any knowledge about their structure. Second, only scene parts that may become visible from within the view cell are represented, thereby cutting down on impostor complexity and storage costs. Third, an error metric guarantees that the impostors are practically indistinguishable compared to the original model (i.e. no rubber-sheet effects or holes appear as in most previous approaches). Furthermore, current graphics hardware is exploited for the construction and use of the impostors.", month = jun, isbn = "1-58133-534-3", publisher = "Eurographics Association", organization = "Eurographics", location = "Pisa, Italy", editor = "Paul Debevec and Simon Gibson", booktitle = "Rendering Techniques 2002 (Proceedings Eurographics Workshop on Rendering)", pages = "181--190", keywords = "Rendering, Walkthrough, Computer Graphics, Impostors", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-TDMR/", } @inproceedings{Jeschke-2002-LEMA, title = "Layered Environment-Map Impostors for Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann", year = "2002", abstract = "This paper presents a new impostor-based approach to accelerate the rendering of very complex static scenes. The scene is partitioned into viewing regions, and a layered impostor representation is precalculated for each of them. An optimal placement of impostor layers guarantees that our representation is indistinguishable from the original geometry. Furthermore the algorithm exploits common graphics hardware both during preprocessing and rendering. 
Moreover the impostor representation is compressed using several strategies to cut down on storage space.", month = may, isbn = "1-56881-183-7", publisher = "AK Peters Ltd.", location = "Calgary, CA", editor = "Wolfgang St\"{u}rzlinger and Michael McCool", booktitle = "Proceedings of Graphics Interface 2002", pages = "1--8", keywords = "virtual environments, environment maps, impostors, walkthroughs, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-LEMA/", } @techreport{TR-186-2-02-04, title = "An Error Metric for Layered Environment Map Impostors", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to accelerate the rendering of large virtual environments. This paper describes a “layered impostor technique” used for representing distant scene-parts when seen from a bounded viewing region. A special layer placement is derived which bounds the geometric error introduced by parallaxes to a defined value. In combination with a special technique for image generation, a high-quality impostor representation without image artifacts can be obtained.", month = feb, number = "TR-186-2-02-04", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "impostors, real-time rendering, virtual", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/TR-186-2-02-04/", } @inproceedings{Devlin-2002-STA, title = "STAR Report on Tone Reproduction and Physically Based Spectral Rendering", author = "K. Devlin and A. 
Chalmers and Alexander Wilkie and Werner Purgathofer", year = "2002", booktitle = "Eurographics 2002", publisher = "Eurographics Association", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Devlin-2002-STA/", } @inproceedings{Bittner-2001-Vis, title = "Visibility Preprocessing for Urban Scenes using Line Space Subdivision", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2001", abstract = "We present an algorithm for visibility preprocessing of urban environments. The algorithm uses a subdivision of line space to analytically calculate a conservative potentially visible set for a given region in the scene. We present a detailed evaluation of our method including a comparison to another recently published visibility preprocessing algorithm. To the best of our knowledge the proposed method is the first algorithm that scales to large scenes and efficiently handles large view cells.", month = oct, isbn = "0-7695-1227-5", publisher = "IEEE Computer Society Press", location = "Tokyo, Japan", editor = "Bob Werner", booktitle = "Proceedings of Pacific Graphics 2001 (Ninth Pacific Conference on Computer Graphics and Applications)", pages = "276--284", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Bittner-2001-Vis/", } @article{Wonka-2001-Ins, title = "Instant Visibility", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion", year = "2001", abstract = "We present an online occlusion culling system which computes visibility in parallel to the rendering pipeline. We show how to use point visibility algorithms to quickly calculate a tight potentially visible set (PVS) which is valid for several frames, by shrinking the occluders used in visibility calculations by an adequate amount. These visibility calculations can be performed on a visibility server, possibly a distinct computer communicating with the display host over a local network. 
The resulting system essentially combines the advantages of online visibility processing and region-based visibility calculations, allowing asynchronous processing of visibility and display operations. We analyze two different types of hardware-based point visibility algorithms and address the problem of bounded calculation time which is the basis for true real-time behavior. Our results show reliable, sustained 60 Hz performance in a walkthrough with an urban environment of nearly 2 million polygons, and a terrain flyover.", month = sep, journal = "Computer Graphics Forum", volume = "20", number = "3", note = "G\"{u}nther Enderle [Best Paper] Award, Best Student Paper Award. A. Chalmers and T.-M. Rhyne (eds.), Proceedings EUROGRAPHICS 2001", issn = "0167-7055", pages = "411--421", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wonka-2001-Ins/", } @inproceedings{Wimmer-2001-Poi, title = "Point-Based Impostors for Real-Time Visualization", author = "Michael Wimmer and Peter Wonka and Fran\c{c}ois Sillion", year = "2001", abstract = "We present a new data structure for encoding the appearance of a geometric model as seen from a viewing region (view cell). This representation can be used in interactive or real-time visualization applications to replace a complex model by an impostor, maintaining high quality rendering while cutting down rendering time. Our approach relies on an object-space sampled representation similar to a point cloud or a layered depth image, but introduces two fundamental additions to previous techniques. First, the sampling rate is controlled to provide sufficient density across all possible viewing conditions from the specified view cell. Second, a correct, antialiased representation of the plenoptic function is computed using Monte Carlo integration. Our system therefore achieves high quality rendering using a simple representation with bounded complexity. 
We demonstrate the method for an application in urban visualization.", month = jun, isbn = "3-211-83709-4", publisher = "Springer-Verlag", organization = "Eurographics", editor = "Steven J. Gortler and Karol Myszkowski", booktitle = "Rendering Techniques 2001 (Proceedings Eurographics Workshop on Rendering)", pages = "163--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-2001-Poi/", } @article{Brusi-2001-Opt, title = "Optimal Ray Shooting in Monte Carlo Radiosity", author = "A. Brusi and Mateu Sbert and Philippe Bekaert and Werner Purgathofer", year = "2001", journal = "Computers&Graphics", volume = "26", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Brusi-2001-Opt/", } @article{Wilkie-2001-Ori, title = "Orientation Lightmaps for Photon Radiosity in Complex Environments", author = "Alexander Wilkie and Robert F. Tobler and Werner Purgathofer", year = "2001", abstract = "We present a method that makes the use of photon tracing methods feasible for complex scenes when a totally accurate solution is not essential. This is accomplished by using orientation lightmaps, which average the illumination of complex objects depending on the surface normal. Through this averaging, they considerably reduce the variance of the stochastic solution. In order to use these specialised lightmaps, which consume comparatively small amounts of memory, no changes have to be made to the basic photon-tracing algorithm. Also, they can be freely mixed with normal lightmaps. This gives the user good control over the amount of inaccuracy he introduces by their application. The area computations necessary for their insertion are performed using a stochastic sampling method that performs well for highly complex objects.", journal = "The Visual Computer", note = "In The Visual Computer, Vol. 17, No. 5, pp. 
318-327, Springer, Heidelberg, 2001", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wilkie-2001-Ori/", } @inproceedings{wonka-2000-VisP, title = "Visibility Preprocessing with Occluder Fusion for Urban Walkthroughs", author = "Peter Wonka and Michael Wimmer and Dieter Schmalstieg", year = "2000", abstract = "This paper presents an efficient algorithm for occlusion culling of urban environments. It is conservative and accurate in finding all significant occlusion. It discretizes the scene into view cells, for which cell-to-object visibility is precomputed, making on-line overhead negligible. Unlike other precomputation methods for view cells, it is able to conservatively compute all forms of occluder interaction for an arbitrary number of occluders. To speed up preprocessing, standard graphics hardware is exploited and occluder occlusion is considered. A walkthrough application running an 8 million polygon model of the city of Vienna on consumer-level hardware illustrates our results.", month = jun, isbn = "3-211-83535-0", publisher = "Springer-Verlag Wien New York", organization = "Eurographics", location = "held in Brno, Czech Republic, June 26-28, 2000", editor = "Bernard P\'{e}roche and Holly Rushmeier", booktitle = "Rendering Techniques 2000 (Proceedings Eurographics Workshop on Rendering)", pages = "71--82", keywords = "Visibility determination, image-based rendering., occluder occlusion, occluder fusion, urban environments, walkthrough, real-time graphics, shadow algorithms, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2000/wonka-2000-VisP/", } @article{Wimmer-1999-FWIb, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. 
In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one oder of magnitude.", month = dec, issn = "0097-8493", journal = "Computers and Graphics", number = "6", volume = "23", pages = "831--838", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIb/", } @inproceedings{Wimmer-1999-FWIa, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one oder of magnitude.", month = jun, isbn = "3-211-83347-1", publisher = "Springer-Verlag Wien", organization = "Eurographics", editor = "Michael Gervautz and Dieter Schmalstieg and Axel Hildebrand", booktitle = "Virtual Environments '99. Proceedings of the 5th Eurographics Workshop on Virtual Environments", pages = "73--84", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIa/", } @inproceedings{tobler97_hsdasr, title = "A Hierarchical Subdivision Algorithm for Stochastic Radiosity Methods", author = "Robert F. 
Tobler and Alexander Wilkie and Martin Feda and Werner Purgathofer", year = "1997", abstract = "The algorithm proposed in this paper uses a stochastic approach to incrementally calculate the illumination function over a surface. By tracking the illumination function at different levels of meshing resolution, it is possible to get a measure for the quality of the current representation, and to adoptively subdivide in places with inadequate accuracy. With this technique a hierarchical mesh that is based on the stochastic evaluation of global illumination is generated.", month = jun, publisher = "Springer Wien", organization = "Eurographics", address = "St. Etienne, France", editor = "Julie Dorsey and Philipp Slusallek", booktitle = "Eurographics Rendering Workshop 1997", pages = "193--204", keywords = "radiosity, monte carlo methods", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/tobler97_hsdasr/", } @inproceedings{traxler-1997-TRA, title = "Efficient Ray Tracing of Complex Natural Scenes", author = "Christoph Traxler and Michael Gervautz", year = "1997", abstract = "In this paper we present a method for the consistent modelling and efficient ray tracing of complex natural scenes. Both plants and terrains are modelled and represented in the same way to allow mutual influences of their appearance and interdependencies of their geometry. Plants are generated together with a fractal terrain, so that they directly grow on it. This allows an accurate calculation of reflections and the cast of shadows. The scenes are modeled with a special kind of PL-Systems and are represented by cyclic object-instancing graphs. This is a very compact representation for ray tracing, which avoids restrictions to the complexity of the scenes. To significantly increase the efficiency of ray tracing with this representation an adaptation of conventional optimization techniques to cyclic graphs is necessary. 
In this paper we introduce methods for the calculation of a bounding box hierarchy and the use of a regular 3d-grid for cyclic graphs.", publisher = "World Scientific Publishers", location = "Denver, Colorado", editor = "M. M. Novak and T. G. Dewey", booktitle = "Proceedings of Fractal 97", keywords = "Cyclic Object Instancing Graphs, PL-systems, Natural Phenomena , Ray Tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/traxler-1997-TRA/", }