@article{hladky-2022-QS, title = "QuadStream: A Quad-Based Scene Streaming Architecture for Novel Viewpoint Reconstruction", author = "Jozef Hladky and Michael Stengel and Nicholas Vining and Bernhard Kerbl and Hans-Peter Seidel and Markus Steinberger", year = "2022", abstract = "Cloud rendering is attractive when targeting thin client devices such as phones or VR/AR headsets, or any situation where a high-end GPU is not available due to thermal or power constraints. However, it introduces the challenge of streaming rendered data over a network in a manner that is robust to latency and potential dropouts. Current approaches range from streaming transmitted video and correcting it on the client---which fails in the presence of disocclusion events---to solutions where the server sends geometry and all rendering is performed on the client. To balance the competing goals of disocclusion robustness and minimal client workload, we introduce QuadStream, a new streaming technique that reduces motion-to-photon latency by allowing clients to render novel views on the fly and is robust against disocclusions. Our key idea is to transmit an approximate geometric scene representation to the client which is independent of the source geometry and can render both the current view frame and nearby adjacent views. Motivated by traditional macroblock approaches to video codec design, we decompose the scene seen from positions in a view cell into a series of view-aligned quads from multiple views, or QuadProxies. By operating on a rasterized G-Buffer, our approach is independent of the representation used for the scene itself. 
Our technical contributions are an efficient parallel quad generation, merging, and packing strategy for proxy views that cover potential client movement in a scene; a packing and encoding strategy allowing masked quads with depth information to be transmitted as a frame coherent stream; and an efficient rendering approach that takes advantage of modern hardware capabilities to turn our QuadStream representation into complete novel views on thin clients. According to our experiments, our approach achieves superior quality compared both to streaming methods that rely on simple video data and to geometry-based streaming.", month = dec, journal = "ACM Transactions on Graphics", volume = "41", number = "6", issn = "1557-7368", publisher = "Association for Computing Machinery", keywords = "streaming, real-time rendering, virtual reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/hladky-2022-QS/", } @article{gilmutdinov-2022-aomlbwug, title = "Assessment of Material Layers in Building Walls Using GeoRadar", author = "Ildar Gilmutdinov and Ingrid Schl\"{o}gel and Alois Hinterleitner and Peter Wonka and Michael Wimmer", year = "2022", abstract = "Assessing the structure of a building with non-invasive methods is an important problem. One of the possible approaches is to use GeoRadar to examine wall structures by analyzing the data obtained from the scans. However, so far, the obtained data have to be assessed manually, relying on the experience of the user in interpreting GPR radargrams. We propose a data-driven approach to evaluate the material composition of a wall from its GPR radargrams. In order to generate training data, we use gprMax to model the scanning process. Using simulation data, we use a convolutional neural network to predict the thicknesses and dielectric properties of walls per layer. 
We evaluate the generalization abilities of the trained model on the data collected from real buildings.", month = oct, doi = "10.3390/rs14195038", issn = "2072-4292", journal = "Remote Sensing", number = "19", volume = "14", event = "Radar Techniques for Structures Characterization and Monitoring", publisher = "MDPI", pages = "5038", keywords = "deep learning, ground-penetrating radar, non-destructive-evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/gilmutdinov-2022-aomlbwug/", } @bachelorsthesis{Rait2022, title = "Fast Radial Search for Progressive Photon Mapping", author = "Alexius Rait", year = "2022", abstract = "Global illumination is critical to realistic rendering and as such many different algorithms were developed to solve it, one of which – photon mapping – was designed to efficiently render caustics and indirect lighting. It achieves this by saving photons in a data structure during the first step and then using the result by collecting photons near specific search origins. This step is very time intensive due to the sheer amount of searches performed each iteration. In this paper, we will compare the performance of two spatial data structures and parallelized search algorithms written in CUDA for the GPU by execution time and memory usage for the photon-gathering use case. The algorithms were implemented in an open-source progressive photon mapping project [11] and are using parts of S. Reinwald’s fast-KNN as a basis [9]. 
", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/Rait2022/", } @bachelorsthesis{Komon2022, title = "Distributed Surface Reconstruction", author = "Patrick Komon", year = "2022", abstract = "As the accessibility of high-quality 3D scans increases, processing the scanned data becomes more challenging. 3D scanners obtain very large, unstructured sets of points, so called point clouds. To be able to use the data in a meaningful way it is necessary to reconstruct the surface of the scanned object from the point cloud, resulting in a 3D model. This is called the problem of 3D surface reconstruction. Processing very large point clouds (in a reasonable time) is necessary in order to keep up with ever increasing scanning technology. In this thesis, we construct, implement and evaluate a distributed surface reconstruction algorithm called DistributedBallFilter. It is a distributed-memory parallel version of the recently developed BallFilter algorithm [Ohr22]. Firstly, the input point cloud is subdivided into chunks called tiles using a 3D grid. To ensure the correctness of the results, tiles are slightly overlapping on their borders. After splitting the input, each tile can be processed independently from each other. The tiles are assigned and distributed to a number of p processes. The assignment of tiles to processes is calculated using longest-processing-time-first list scheduling. Then all processes reconstruct the 3D surface of all their assigned tiles in parallel. After all tiles are processed, the result is merged back together into a single 3D model, containing the reconstructed surface of the entire input point cloud. 
The asymptotic run time complexity is O(n log n) in the worst case (same as BallFilter) and O(n + n log n p ) in the best case, depending on the distribution of points within the input data. Furthermore, we implemented the algorithm in C++. The input splitting is run on a GPU using CUDA and discussed thoroughly in its dedicated paper [Bru22]. For each tile a single file is output, which is communicated to each process via a distributed file system. The MPI standard is used for sending all local results to a single process, which is also responsible for merging and outputting the final 3D model. Finally we executed the algorithm on the VSC3+ cluster, a high-performance cluster based in Vienna. It was run against several test data sets. We visualize the results and analysed the behavior of the running time when scaling the number of processes as well as the input size. In our tests, DistributedBallFilter managed to be up to around five times faster than BallFilter depending on the number of nodes used and the input size. The largest observed speedup was by a factor of 5.89 compared to BallFilter.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/Komon2022/", } @bachelorsthesis{Keilman2022, title = "Immersive Redesign", author = "Manuel Keilman", year = "2022", abstract = "In this thesis, my colleague Ahmed El Agrod and I implemented software that allows point clouds to be edited. By moving, deleting, saving, and inserting selected objects, the point cloud should be able to be modified. This bachelor thesis mainly describes how newly added objects are automatically placed on the ground being recognized by an algorithm. 
Furthermore, it is described how an image inpainting algorithm was implemented to fill incomplete flat regions of point clouds with new points and associated matching colors. The ground detection was performed using the RANSAC algorithm, which computes a plane representing the ground for the scene. For the image inpainting algorithm, three-dimensional point cloud points had to be mapped to a 2D image, then use an image inpainting algorithm to fill in the missing pixels, and finally, map the 2D pixels of the inpainted image back to 3D points in the scene. An evaluation was also conducted to test both the automatic ground detection and the image inpainting algorithm regarding runtime and visual quality.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/Keilman2022/", } @bachelorsthesis{kristmann-2022-occ, title = "Occluder Frequency Analysis for Evaluating the Level of Visibility of Partly Occluded Objects", author = "Elias Kristmann", year = "2022", abstract = "To increase rendering efficiency of large and complex scenes, occlusion culling algorithms detect objects which are completely hidden by others and therefore do not need to be rendered. However, these methods often follow an all-or-nothing principle, either culling the geometry entirely or drawing it at full detail. This approach disregards an important subcategory of the visibility problem: detecting objects that are hardly visible because they are partly occluded and which can therefore be rendered at a lower level of detail without generating noticeable artifacts. In this thesis we assess the level of visibility of such objects by computing a hierarchical occlusion map and analysing its structure based on the frequencies of the occluders. 
This analysis results in a parameter that controls the level of detail (LOD) in which the geometry is rendered. The algorithm performs well even in scenes with sparse occlusion, surpassing the standard hierarchical occlusion map algorithm, with still a lot of potential for even further improvements.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "rendering, occlusion culling, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/kristmann-2022-occ/", } @article{SCHUETZ-2022-PCC, title = "Software Rasterization of 2 Billion Points in Real Time", author = "Markus Sch\"{u}tz and Bernhard Kerbl and Michael Wimmer", year = "2022", abstract = "We propose a software rasterization pipeline for point clouds that is capable of brute-force rendering up to two billion points in real time (60fps). Improvements over the state of the art are achieved by batching points in a way that a number of batch-level optimizations can be computed before rasterizing the points within the same rendering pass. These optimizations include frustum culling, level-of-detail rendering, and choosing the appropriate coordinate precision for a given batch of points directly within a compute workgroup. Adaptive coordinate precision, in conjunction with visibility buffers, reduces the number of loaded bytes for the majority of points down to 4, thus making our approach several times faster than the bandwidth-limited state of the art. Furthermore, support for LOD rendering makes our software-rasterization approach suitable for rendering arbitrarily large point clouds, and to meet the increased performance demands of virtual reality rendering. 
", month = jul, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "5", number = "3", issn = "2577-6193", doi = "10.1145/3543863", pages = "1--17", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/SCHUETZ-2022-PCC/", } @article{cardoso-2022-rtpercept, title = "Training and Predicting Visual Error for Real-Time Applications", author = "Joao Afonso Cardoso and Bernhard Kerbl and Lei Yang and Yury Uralsky and Michael Wimmer", year = "2022", abstract = "Visual error metrics play a fundamental role in the quantification of perceived image similarity. Most recently, use cases for them in real-time applications have emerged, such as content-adaptive shading and shading reuse to increase performance and improve efficiency. A wide range of different metrics has been established, with the most sophisticated being capable of capturing the perceptual characteristics of the human visual system. However, their complexity, computational expense, and reliance on reference images to compare against prevent their generalized use in real-time, restricting such applications to using only the simplest available metrics. In this work, we explore the abilities of convolutional neural networks to predict a variety of visual metrics without requiring either reference or rendered images. Specifically, we train and deploy a neural network to estimate the visual error resulting from reusing shading or using reduced shading rates. The resulting models account for 70%--90% of the variance while achieving up to an order of magnitude faster computation times. Our solution combines image-space information that is readily available in most state-of-the-art deferred shading pipelines with reprojection from previous frames to enable an adequate estimate of visual errors, even in previously unseen regions. 
We describe a suitable convolutional network architecture and considerations for data preparation for training. We demonstrate the capability of our network to predict complex error metrics at interactive rates in a real-time application that implements content-adaptive shading in a deferred pipeline. Depending on the portion of unseen image regions, our approach can achieve up to 2x performance compared to state-of-the-art methods.", month = may, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "5", number = "1", issn = "2577-6193", doi = "10.1145/3522625", publisher = "Association for Computing Machinery", pages = "1--17", keywords = "perceptual error, variable rate shading, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/cardoso-2022-rtpercept/", } @mastersthesis{geyer-2018-apbf, title = "Adaptive Sampling in position based fluids", author = "Lukas Geyer", year = "2022", abstract = "Position-Based Fluids (PBF) are a Lagrangian fluid-simulation method and are an implementation of Smoothed Particle Hydrodynamics integrated into the Position-Based Dynamics (PBD) framework. In PBD, constraints applied to object positions are used to enforce a variety of physical laws. In the case of PBF, the fluid is represented by particles and constraints are added that prevent fluid compression. The original PBF method defines all particles to be of equal mass and rest density. In this thesis, we propose a method for generalizing PBF to allow particles to represent varying amounts of fluid. This enables the fluid to be simulated with regionally varying levels of detail with the intent to reduce memory consumption and to increase performance. For each fluid region, we compute the targeted level of detail based on its distance to the fluid boundary, and use merging and splitting strategies to adapt the particles accordingly. 
We discuss the relation of the particle density to the kernel width used in PBF and provide several approaches for adapting the kernel width to fit the local level of detail. The advantages and disadvantages of each approach are evaluated and a streamlined implementation-variant is proposed which has advantageous properties for larger bodies of fluid. This streamlined solution bases the kernel width entirely on the boundary distance. Its approach is mathematically analyzed in regard to the expected number of particles and neighbor pairs for varying fluid body sizes. The mathematical analysis as well as measurements done in our test implementation show that while our method might increase the neighbor pair count for shallow fluids, it greatly reduces the number of particles and neighbor pairs if the fluid is sufficiently deep, giving the opportunity to significantly lower the computational effort in these cases.", month = may, pages = "81", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Position-Based Fluids, Particle-Based Fluid Simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/geyer-2018-apbf/", } @bachelorsthesis{HANN_2022_IPT, title = "Incremental Updates of Path-Traced Scenes during Editing", author = "Pascal Hann", year = "2022", abstract = "In this work I present a novel adaptive sampling algorithm for 3D editing software, developed by me and my colleagues. The algorithm is based on the idea of using knowledge about how a given user interaction affects a scene visually. We split the image into regions and order them, according to that knowledge, from most noticeably affected to least. The rendering budget can then be focused on the more affected regions earlier and on the lesser ones later in an incremental rendering process. 
Although this concept could probably work with other rendering methods, we designed it to be able to use path-tracing as the viewport renderer in 3D editing software without the typical grain-like noise and waiting times for sufficiently smooth rendered images this technology usually comes with. The goal of this work is to offer users of 3D editing software an as uninterrupted workflow as possible while still being able to see their work in high quality.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "incremental rendering, path tracing, GPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/HANN_2022_IPT/", } @inproceedings{kerbl-2022-trienc, title = "An Improved Triangle Encoding Scheme for Cached Tessellation", author = "Bernhard Kerbl and Linus Horvath and Daniel Cornel and Michael Wimmer", year = "2022", abstract = "With the recent advances in real-time rendering that were achieved by embracing software rasterization, the interest in alternative solutions for other fixed-function pipeline stages rises. In this paper, we revisit a recently presented software approach for cached tessellation, which compactly encodes and stores triangles in GPU memory. While the proposed technique is both efficient and versatile, we show that the original encoding is suboptimal and provide an alternative scheme that acts as a drop-in replacement. 
As shown in our evaluation, the proposed modifications can yield performance gains of 40\% and more.", month = apr, isbn = "978-3-03868-169-4", location = "Reims", issn = "1017-4656", event = "Eurographics 2022", editor = "Pelechano, Nuria and Vanderhaeghe, David", doi = "10.2312/egs.20221031", booktitle = "Eurographics 2022 - Short Papers", pages = "1--4", keywords = "gpu, real-time, tessellation", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/kerbl-2022-trienc/", } @inproceedings{unterguggenberger-2022-vulkan, title = "The Road to Vulkan: Teaching Modern Low-Level APIs in Introductory Graphics Courses", author = "Johannes Unterguggenberger and Bernhard Kerbl and Michael Wimmer", year = "2022", abstract = "For over two decades, the OpenGL API provided users with the means for implementing versatile, feature-rich, and portable real-time graphics applications. Consequently, it has been widely adopted by practitioners and educators alike and is deeply ingrained in many curricula that teach real-time graphics for higher education. Over the years, the architecture of graphics processing units (GPUs) incrementally diverged from OpenGL's conceptual design. The more recently introduced Vulkan API provides a more modern, fine-grained approach for interfacing with the GPU. Various properties of this API and overall trends suggest that Vulkan could soon replace OpenGL in many areas. Hence, it stands to reason that educators who have their students' best interests at heart should provide them with corresponding lecture material. However, Vulkan is notoriously verbose and rather challenging for first-time users, thus transitioning to this new API bears a considerable risk of failing to achieve expected teaching goals. In this paper, we document our experiences after teaching Vulkan in an introductory graphics course side-by-side with conventional OpenGL. 
A final survey enables us to draw conclusions about perceived workload, difficulty, and students' acceptance of either approach and identify suitable conditions and recommendations for teaching Vulkan to undergraduate students.", month = apr, isbn = "978-3-03868-170-0", publisher = "The Eurographics Association", location = "Reims", issn = "1017-4656", event = "Eurographics 2022", doi = "10.2312/eged.20221043", booktitle = "Eurographics 2022 - Education Papers", pages = "31--39", keywords = "vulkan, gpu, opengl", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/unterguggenberger-2022-vulkan/", } @mastersthesis{FRAISS-2022-CGMM, title = "Construction and Visualization of Gaussian Mixture Models from Point Clouds for 3D Object Representation", author = "Simon Maximilian Fraiss", year = "2022", abstract = "Point clouds are a common representation of three-dimensional shapes in computer graphics and 3D-data processing. However, in some applications, other representations are more useful. Gaussian Mixture Models (GMMs) can be used as such an alternative representation. A GMM is a convex sum of normal distributions, which aims to describe a point cloud’s density. In this thesis, we investigate both visualization and construction of GMMs. For visualization, we have implemented a tool that enables both isoellipsoid and density visualization of GMMs. We describe the mathematical backgrounds, the algorithms, and our implementation of this tool. Regarding GMM construction, we investigate several algorithms used in previous papers for constructing GMMs for 3D-data processing tasks. We present our implementations of the expectation-maximization (EM) algorithm and top-down HEM. Additionally, we have adapted the implementation of geometrically regularized bottom-up HEM to produce a fixed number of Gaussians. We evaluate these three algorithms in terms of the quality of their generated GMMs. 
In many cases, the statistical likelihood, which is maximized by the EM algorithm, is not a reliable indicator for a GMM’s quality. Therefore, we instead rely on the reconstruction error of a reconstructed point cloud based on the Chamfer distance. Additionally, we provide metrics for measuring the reconstructed point cloud’s uniformity and the GMM’s variation of Gaussians. We demonstrate that EM provides the best results in terms of these metrics. Top-down HEM is a fast alternative, and can produce even better results when using fewer input points. The results of geometrically regularized bottom-up HEM are inferior for lower numbers of Gaussians but it can create good GMMs consisting of high numbers of Gaussians very efficiently.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/FRAISS-2022-CGMM/", } @bachelorsthesis{horvath_imp, title = "Improved Triangle Encoding for Cached Adaptive Tessellation", author = "Linus Horvath", year = "2022", abstract = "Changing the vertex count of a given geometry through hardware tessellation on the GPU is limited by today’s standards. The capped edge splits (64 splits per edge) as well as increasingly worse performance with deeper levels certainly leaves room for improvement. So in the meantime, software-based solutions using GPU shaders provide much more flexibility as well as features. One possible solution, which we will be focusing on, was presented by Jad Khoury [KDR18] in 2018 which implements a tessellation cache on the GPU. This enables the tessellation step to not only reuse the data of the previous frame but also makes it adaptive by only having to calculate the changes since the last frame. 
The adaptive cache improves tessellation performance at the cost of memory on the GPU but their particular implementation still slows down on deeper tessellation levels because of the recursive nature of their algorithm. Our work replaces their recursive algorithm with a constant-time solution by exploiting the grid structure of their tessellated geometry.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "tessellation, gpu", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/horvath_imp/", } @studentproject{pernsteiner-2022-msh, title = "Improvements and Additional Features for a Mesh Shader-Based Research Project", author = "Jakob Pernsteiner", year = "2022", abstract = "New mesh shader graphics pipelines allow fine grained processing of input geometry which is split into small chunks of geometry---so-called ``meshlets''. To match geometry processing efficiency of classical shader stages (from vertex to geometry shaders), an optimized mesh shader implementation is required using the two new tightly-coupled shader stages: task shaders, and mesh shaders. Fine-grained view frustum and backface culling on a per-meshlet basis further improve performance.", month = feb, note = "``Gawain'' 3D model (c) Unity Technologies, provided through their ``The Heretic: Digital Human'' package.", keywords = "Meshlets, Mesh Shader, Task Shader", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/pernsteiner-2022-msh/", } @mastersthesis{STREICHER-2022-DSLS, title = "Digital Surveying of Large Scale Multi-Layered Terrain", author = "Kevin Streicher", year = "2022", abstract = "Digital terrain surveying is the exploration of terrain reconstructions and quantitative analysis of their properties. 
Out-of-core techniques, such as terrain streaming, are required to perform surveying on large-scale terrains at interactive frame-rates. The polyline based surveying tool from PRo3D, one of the state-of-the-art solutions for planetary geology, was implemented in our tool Visionary. In PRo3D the polylines are subsampled using fixed-rate subsampling (FRSS) at equidistant points. Our method uses variable-rate subsampling (VRSS) and shared-edge detection (SED) as an improvement that finds exact results when neighbouring primitives are hit. Furthermore, an uncertainty metric On-Data Ratio (ODR) was presented to raise awareness about the uncertainty of these measurements. Visionary was developed in the Unity game engine to evaluate if it is a suitable framework for such a specialized tool. We evaluated our implementation against PRo3D.", pages = "112", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "out-of-core, digital terrain surveying, large-scale terrain streaming, uncertainty", URL = "https://www.cg.tuwien.ac.at/research/publications/2022/STREICHER-2022-DSLS/", } @inproceedings{murturi_PGG, title = "On Provisioning Procedural Geometry Workloads on Edge Architectures", author = "Ilir Murturi and Chao Jia and Bernhard Kerbl and Michael Wimmer and Schahram Dustdar and Christos Tsigkanos", year = "2021", abstract = "Contemporary applications such as those within Augmented or Virtual Reality (AR/VR) pose challenges for software architectures supporting them, which have to adhere to stringent latency, data transmission, and performance requirements. 
This manifests in processing 3D models, whose 3D contents are increasingly generated procedurally rather than explicitly, resulting in computational workloads (i.e., perceived as Procedural Geometry Workloads) with particular characteristics and resource requirements. Traditionally, executing such workloads takes place in resource-rich environments such as the cloud. However, the massive amount of data transfer, heterogeneous devices, and networks involved affect latency, which in turn causes low-quality visualization in user-facing applications (e.g., AR/VR). To overcome such challenges, processing elements available close to end users can be leveraged to generate 3D models instead, and as such the edge emerges as a central architectural entity. This paper describes such procedural geometry workloads, their particular characteristics, and challenges to execute them on heterogeneous devices. Furthermore, we propose an architecture capable of provisioning procedural geometry workloads in edge scenarios.", month = oct, isbn = "978-989-758-536-4", publisher = "SciTePress", organization = "INSTICC", event = "17th International Conference on Web Information Systems and Technologies - WEBIST", editor = "Dom\'{i}nguez Mayo, Francisco and Marchiori, Massimo and Filipe, Joaquim", doi = "10.5220/0010687800003058", booktitle = "Proceedings of the 17th International Conference on Web Information Systems and Technologies - WEBIST", pages = "354--359", keywords = "distributed systems, procedural geometry, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/murturi_PGG/", } @inproceedings{roth_vdi, title = "View-Dependent Impostors for Architectural Shape Grammars", author = "Chao Jia and Moritz Roth and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "Procedural generation has become a key component in satisfying a growing demand for ever-larger, highly detailed geometry in realistic, open-world games and simulations. 
In this paper, we present our work towards a new level-of-detail mechanism for procedural geometry shape grammars. Our approach automatically identifies and adds suitable surrogate rules to a shape grammar's derivation tree. Opportunities for surrogates are detected in a dedicated pre-processing stage. Where suitable, textured impostors are then used for rendering based on the current viewpoint at runtime. Our proposed methods generate simplified geometry with superior visual quality to the state-of-the-art and roughly the same rendering performance.", month = oct, isbn = "978-3-03868-162-5", publisher = "Eurographics Association", organization = "The Eurographics Association", location = "online", event = "Pacific Graphics 2021", editor = "Lee, Sung-Hee and Zollmann, Stefanie and Okabe, Makoto and W\"{u}nsche, Burkhard", doi = "10.2312/pg.20211390", booktitle = "Pacific Graphics Short Papers, Posters, and Work-in-Progress Papers", pages = "63--64", keywords = "procedural geometry, real-time, GPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/roth_vdi/", } @inproceedings{stappen_SteFAS, title = "Temporally Stable Content-Adaptive and Spatio-Temporal Shading Rate Assignment for Real-Time Applications", author = "Stefan Stappen and Johannes Unterguggenberger and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "We propose two novel methods to improve the efficiency and quality of real-time rendering applications: Texel differential-based content-adaptive shading (TDCAS) and spatio-temporally filtered adaptive shading (STeFAS). Utilizing Variable Rate Shading (VRS)-a hardware feature introduced with NVIDIA's Turing micro-architecture-and properties derived during rendering or Temporal Anti-Aliasing (TAA), our techniques adapt the resolution to improve the performance and quality of real-time applications. VRS enables different shading resolution for different regions of the screen during a single render pass. 
In contrast to other techniques, TDCAS and STeFAS have very little overhead for computing the shading rate. STeFAS enables up to 4x higher rendering resolutions for similar frame rates, or a performance increase of 4× at the same resolution.", month = oct, isbn = "978-3-03868-162-5", publisher = "Eurographics Association", organization = "The Eurographics Association", location = "online", event = "Pacific Graphics 2021", editor = "Lee, Sung-Hee and Zollmann, Stefanie and Okabe, Makoto and W\"{u}nsche, Burkhard", doi = "10.2312/pg.20211391", booktitle = "Pacific Graphics Short Papers, Posters, and Work-in-Progress Papers", numpages = "2", pages = "65--66", keywords = "variable rate shading, temporal antialiasing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/stappen_SteFAS/", } @article{unterguggenberger-2021-msh, title = "Conservative Meshlet Bounds for Robust Culling of Skinned Meshes", author = "Johannes Unterguggenberger and Bernhard Kerbl and Jakob Pernsteiner and Michael Wimmer", year = "2021", abstract = "Following recent advances in GPU hardware development and newly introduced rendering pipeline extensions, the segmentation of input geometry into small geometry clusters-so-called meshlets-has emerged as an important practice for efficient rendering of complex 3D models. Meshlets can be processed efficiently using mesh shaders on modern graphics processing units, in order to achieve streamlined geometry processing in just two tightly coupled shader stages that allow for dynamic workload manipulation in-between. The additional granularity layer between entire models and individual triangles enables new opportunities for fine-grained visibility culling methods. However, in contrast to static models, view frustum and backface culling on a per-meshlet basis for skinned, animated models are difficult to achieve while respecting the conservative spatio-temporal bounds that are required for robust rendering results. 
In this paper, we describe a solution for computing and exploiting relevant conservative bounds for culling meshlets of models that are animated using linear blend skinning. By enabling visibility culling for animated meshlets, our approach can help to improve rendering performance and alleviate bottlenecks in the notoriously performance- and memory-intensive skeletal animation pipelines of modern real-time graphics applications.", month = oct, journal = "Computer Graphics Forum", volume = "40", number = "7", issn = "1467-8659", doi = "10.1111/cgf.14401", booktitle = "Computer Graphics Forum", numpages = "13", publisher = "Eurographics Association", pages = "57--69", keywords = "real-time rendering, meshlet, mesh shader, task shader, view frustum culling, backface culling, Vulkan, vertex skinning, animation, conservative bounds, bounding boxes, Rodrigues' rotation formula, spatio-temporal bounds", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/unterguggenberger-2021-msh/", } @talk{kerbl_2021_hdg, title = "Providing Highly Detailed Geometry for Cloud and Edge Real-Time Rendering", author = "Bernhard Kerbl", year = "2021", abstract = "Mesh shading was recently introduced as a topical GPU feature in the NVIDIA Turing and AMD RDNA2 GPU architectures, offering an alternative pathway for executing the transformation, generation and augmentation of geometry for hardware rasterization. Future trends in game development will rely on mesh shading and “meshlets”, using highly detailed meshes with deep level of detail hierarchies. Particularly powerful applications of meshlets include arbitrary culling and subdivision methods. Furthermore, advanced pre-computation include visibility and lighting information that can be stored on a per-meshlet basis, thus promoting the compression of attributes through quantization and the acceleration of computations via hierarchical processing. 
Although meshlets can be comprised from arbitrary assemblages of primitives, their benefits are highest when meshlet formation is done in a way that already takes the usecase into account. Individual formation procedures can be defined in order to achieve specific goals. As an example, we may generate meshlets that are optimized for global illumination techniques, by minimizing their curvature and variance in material coefficients. Incoming light can then be ray-traced and cached per meshlet, along with view-dependent variance encoded in a discretized data structure. More uniform meshlets thus require less data transferred for accurately approximating their global illumination, reducing the consumption of critical memory bandwidth. We may also partition entire scenes into meshlets that foster fast visibility culling for large groups of primitives, without transforming even a single vertex. In fact, meshlet formation policies can leverage arbitrary attributes, such as the distribution of UV coordinates, ambient occlusion or mesh topology in order to optimize them according to desired runtime criteria. Cloud gaming offers a unique opportunity for leveraging this technology at a larger scale: dedicated data storages and servers can maintain multiple copies of complex triangle meshes, each partitioned by a particular meshlet formation policy. A live monitor can react to a specific bottleneck by dynamically switching meshlets to best accommodate the current GPU resource requirements. 
In this talk, we will present the various possibilities for real-time rendering to benefit from mesh shading by means of optimized meshlet formation procedures.", month = jul, event = "InnovWave 2021", location = "online", keywords = "cloud, real-time, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/kerbl_2021_hdg/", } @article{SCHUETZ-2021-PCC, title = "Rendering Point Clouds with Compute Shaders and Vertex Order Optimization", author = "Markus Sch\"{u}tz and Bernhard Kerbl and Michael Wimmer", year = "2021", abstract = "While commodity GPUs provide a continuously growing range of features and sophisticated methods for accelerating compute jobs, many state-of-the-art solutions for point cloud rendering still rely on the provided point primitives (GL_POINTS, POINTLIST, ...) of graphics APIs for image synthesis. In this paper, we present several compute-based point cloud rendering approaches that outperform the hardware pipeline by up to an order of magnitude and achieve significantly better frame times than previous compute-based methods. Beyond basic closest-point rendering, we also introduce a fast, high-quality variant to reduce aliasing. We present and evaluate several variants of our proposed methods with different flavors of optimization, in order to ensure their applicability and achieve optimal performance on a range of platforms and architectures with varying support for novel GPU hardware features. During our experiments, the observed peak performance was reached rendering 796 million points (12.7GB) at rates of 62 to 64 frames per second (50 billion points per second, 802GB/s) on an RTX 3090 without the use of level-of-detail structures. We further introduce an optimized vertex order for point clouds to boost the efficiency of GL_POINTS by a factor of 5x in cases where hardware rendering is compulsory. 
We compare different orderings and show that Morton sorted buffers are faster for some viewpoints, while shuffled vertex buffers are faster in others. In contrast, combining both approaches by first sorting according to Morton-code and shuffling the resulting sequence in batches of 128 points leads to a vertex buffer layout with high rendering performance and low sensitivity to viewpoint changes. ", month = jul, journal = "Computer Graphics Forum", volume = "40", number = "4", issn = "1467-8659", doi = "10.1111/cgf.14345", numpages = "12", publisher = "Eurographics Association", pages = "115--126", keywords = "point-based rendering, compute shader, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/SCHUETZ-2021-PCC/", } @bachelorsthesis{roth_2021_vdst, title = "View-Dependent Surrogate Terminals for Procedural Geometry Generation", author = "Moritz Roth", year = "2021", abstract = "Procedural geometry generation plays an ever-increasing role in the movie- and video gaming industry. Shape grammars have established themselves as the preferred solution for procedural architecture generation. Research in past decades drastically improved the speed of geometry derivation through shape grammars, making it possible to generate 3D buildings on-demand and in real-time. However, the constantly rising demand for high-quality visualizations requires new measures to reduce complexity in 3D models generated by shape grammars without sacrificing visual quality. This thesis explores the feasibility and benefits of inserting view-dependent surrogate terminals into a shape grammar. Surrogate terminals end grammar derivation early and approximate finer details with pre-rendered images. We find a possible solution for implementing view-dependent surrogate terminals and describe a scheme to automatically insert them into a shape grammar. 
Results show that contrary to previous approaches, our method avoids the generation of visibly incomplete geometry. However, even though the modified shape grammars evaluate faster than the original in large scenes, previous methods provide a more significant performance gain. We conclude that view-dependent surrogate terminals provide promising results, but further optimization is necessary to match the performance of prior techniques.", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "procedural geometry, real-time, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/roth_2021_vdst/", } @studentproject{pichler_2022_fro, title = "Frontend for a photogrammetry webservice", author = "Marie-Sophie Pichler", year = "2021", abstract = "Subject of this student project is the frontend for a photogrammetry webservice. It consists of an upload page, an editor and the typical bookkeeping of a website.", month = jun, keywords = "Frontend, Editor, Photogrammetry", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/pichler_2022_fro/", } @mastersthesis{Mazza_2021_05, title = "Homomorphic-Encrypted Volume Rendering", author = "Sebastian Mazza", year = "2021", abstract = "Computationally demanding tasks are typically calculated in dedicated data centers, and real-time visualizations also follow this trend. Some rendering tasks, however, require the highest level of confidentiality so that no other party, besides the owner, can read or see the sensitive data. Here we present a direct volume rendering approach that performs volume rendering directly on encrypted volume data by using the homomorphic Paillier encryption algorithm. This approach ensures that the volume data and rendered image are uninterpretable to the rendering server. 
Our volume rendering pipeline introduces novel approaches for encrypted-data compositing, interpolation, and opacity modulation, as well as simple transfer function design, where each of these routines maintains the highest level of privacy. We present performance and memory overhead analysis that is associated with our privacy-preserving scheme. Our approach is open and secure by design, as opposed to secure through obscurity. Owners of the data only have to keep their secure key confidential to guarantee the privacy of their volume data and the rendered images. Our work is, to our knowledge, the first privacy-preserving remote volume-rendering approach that does not require that any server involved be trustworthy; even in cases when the server is compromised, no sensitive data will be leaked to a foreign party. Furthermore, we developed a big-integer (multiple-precision, or multiple word integer) library for Vulkan graphics pipeline. It facilitates the rendering of securely encrypted data on the GPU. It supports the calculation of common mathematical operations like addition, subtraction, multiplication, division. Moreover, it supports specialized operations for asymmetric cryptography like modular exponentiation with Montgomery reduction. 
We also introduce a testing framework for Vulkan that allows the automated testing of big-integer computations on the GPU.", month = may, pages = "109", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Volume Rendering, Transfer Function, Homomorphic-Encryption, Paillier, Big-Integer, GPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/Mazza_2021_05/", } @article{KOCH-2021-GVS, title = "Guided Visibility Sampling++", author = "Thomas Bernhard Koch and Michael Wimmer", year = "2021", abstract = "Visibility computation is a common problem in the field of computer graphics. Examples include occlusion culling, where parts of the scene are culled away, or global illumination simulations, which are based on the mutual visibility of pairs of points to calculate lighting. In this paper, an aggressive from-region visibility technique called Guided Visibility Sampling++ (GVS++) is presented. The proposed technique improves the Guided Visibility Sampling algorithm through improved sampling strategies, thus achieving low error rates on various scenes, and being over four orders of magnitude faster than the original CPU-based Guided Visibility Sampling implementation. We present sampling strategies that adaptively compute sample locations and use ray casting to determine a set of triangles visible from a flat or volumetric rectangular region in space. This set is called a potentially visible set (PVS). Based on initial random sampling, subsequent exploration phases progressively grow an intermediate solution. A termination criterion is used to terminate the PVS search. A modern implementation using the Vulkan graphics API and RTX ray tracing is discussed. 
Furthermore, we show optimizations that allow for an implementation that is over 20 times faster than a naive implementation.", month = apr, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "4", number = "1", issn = "2577-6193", doi = "10.1145/3451266", numpages = "16", pages = "4:1--4:16", keywords = "visibility culling, real-time rendering, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/KOCH-2021-GVS/", } @phdthesis{SCHUETZ-2021-DISS, title = "Interactive Exploration of Point Clouds", author = "Markus Sch\"{u}tz", year = "2021", abstract = "Laser scanning, photogrammetry and other 3D scanning approaches generate data sets comprising millions to trillions of points. Modern GPUs can easily render a few million and up to tens of millions of points in real time, but data sets with hundreds of millions of points and more require acceleration structures to be rendered in real time. In this thesis, we present three contributions to the state of the art with the goal of improving the performance as well as the quality of real-time rendered point clouds. Two of our contributions address the performance of LOD structure generation. State-of-the-art approaches achieve a throughput of up to around 1 million points per second, which requires users to wait minutes even for smaller data sets with a few hundred million points. Our proposed solutions are: A bottom-up LOD generation approach that creates LOD structures up to an order of magnitude faster than previous work, and a progressive rendering approach that is capable of rendering any point cloud that fits in GPU memory in real time, without the need to generate LOD structures at all. 
The former achieves a throughput of up to 10 million points per second, and the latter is capable of loading point clouds at rates of up to 37 million points per second from an industry-standard point-cloud format (LAS), and up to 100 million points per second if the file format matches the vertex buffer format. Since it does not need LOD structures, the progressive rendering approach can render already loaded points right away while additional points are still being loaded. Our third contribution improves the quality of LOD-based point-cloud rendering by introducing a continuous level-of-detail approach that produces gradual transitions in point density, rather than the characteristic and noticeable blocks from discrete LOD structures. It is mainly targeted towards VR applications, where discrete levels of detail are especially noticeable and disturbing, in a large part due to the popping of chunks of points during motion. ", month = apr, pages = "107", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "point cloud rendering, lidar", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/SCHUETZ-2021-DISS/", } @inproceedings{panfili-2021-myop, title = "Myopia in Head-Worn Virtual Reality", author = "Lara Panfili and Michael Wimmer and Katharina Kr\"{o}sl", year = "2021", abstract = "In this work, we investigate the influence of myopia on the perceived visual acuity (VA) in head-worn virtual reality (VR). Factors such as display resolution or vision capabilities of users influence the VA in VR. We simulated eyesight tests in VR and on a desktop screen and conducted a user study comparing VA measurements of participants with normal sight and participants with myopia. 
Surprisingly, our results suggest that people with severe myopia can see better in VR than in the real world, while the VA of people with normal or corrected sight or mild myopia is reduced in VR.", month = mar, booktitle = "2021 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", doi = "10.1109/VRW52623.2021.00197", isbn = "978-1-6654-1166-0", location = "online", publisher = "IEEE Computer Society Press", numpages = "2", pages = "629--630", keywords = "visual impairments", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/panfili-2021-myop/", } @mastersthesis{Kellner-2021-DA, title = "Klassifikation Urbaner Punktwolken Mittels 3D CNNs In Kombination mit Rekonstruktion von Gehsteigen", author = "Lisa-Maria Kellner", year = "2021", abstract = "LiDAR devices are able to capture the physical world very accurately. Therefore, they are often used for 3D reconstruction. Unfortunately, such data can become extremely large very quickly and usually only a small part of the point cloud is of interest. Thus, the point cloud is filtered beforehand in order to apply algorithms only on those points that are relevant for it. A semantic information about the points can be used for such a filtering. Semantic segmentation of point clouds is a popular field of research and here there has been a trend towards deep learning in recent years too. However, contrary to images, point clouds are unstructured. Hence, point clouds are often rasterized, but this has to be done, such that the underlying structure is represented well. In this thesis, a 3D Convolutional Neural Network is developed and trained for a semantic segmentation of LiDAR point clouds. Thereby, a point cloud is represented with an octree data structure, which makes it easy to rasterize only relevant parts. Since, just dense parts of the point cloud, in which important information about the structure is located, are subdivided further. 
This allows to simply take nodes of a certain level of the octree and rasterize them as data samples. There are many application areas for 3D reconstructions based on point clouds. In an urban scenario, these can be for example whole city models or buildings. However, in this thesis, the reconstruction of sidewalks is explored. Since, for flood simulations in cities, an increase in height of a few centimeters can make a great difference and information about the curb geometry helps to make them more accurate. In the sidewalk reconstruction process, the point cloud is filtered first, based on a semantic segmentation of a 3D CNN, and then point cloud features are calculated to detect curb points. With these curb points, the geometry of the curb, sidewalk and street are computed. Taken all together, this thesis develops a proof-of-concept prototype for semantic point cloud segmentation using 3D CNNs and based on that, a curb detection and reconstruction algorithm.", month = mar, note = "1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/Kellner-2021-DA/", } @mastersthesis{KOCH-2021-GVSDA, title = "Visibility precomputation with RTX ray tracing", author = "Thomas Bernhard Koch", year = "2021", abstract = "Visibility computation is a common problem in the field of computer graphics. Examples include occlusion culling, where parts of the scene are culled away, or global illumination simulations, which are based on the mutual visibility of pairs of points to calculate lighting. In this thesis, an aggressive from-region visibility technique called Guided Visibility Sampling++ (GVS++) is presented. 
The proposed technique improves the Guided Visibility Sampling algorithm through improved sampling strategies, thus achieving low error rates on various scenes, and being over four orders of magnitude faster than the original CPU-based Guided Visibility Sampling implementation. We present intelligent sampling strategies that use ray casting to determine a set of triangles visible from a flat or volumetric rectangular region in space. This set is called a potentially visible set (PVS). Based on initial random sampling, subsequent exploration phases progressively grow an intermediate solution. A termination criterion is used to terminate the PVS search. A modern implementation using the Vulkan graphics API and RTX ray tracing is discussed. Furthermore, optimizations are shown that allow for an implementation that is over 20 times faster than a naive implementation.", month = mar, pages = "89", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "visibility culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/KOCH-2021-GVSDA/", } @bachelorsthesis{klein-2020-rtgi, title = "A Framework For Real-Time Global Illumination Algorithms", author = "Markus Klein", year = "2021", abstract = "If someone were in need of a real-time global illumination algorithm regarding their specific requirements, they would have no issue finding many possible options nowadays. There are many algorithms that are unmatched in realism, interactivity or performance. However, it might be challenging to compare different approaches side by side. In this thesis, a framework is proposed that is capable of building a foundation for the comparison of real-time global illumination algorithms. This framework depends on a unified handling of various algorithms while aiming to be nonrestrictive towards them. 
All modules of the application are designed to be as mutable, generic, extendable, and reusable as possible to avoid the reimplementation of similar concepts. A consistent concept is integrated into the framework to provide a great amount of configurability, even at runtime.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/klein-2020-rtgi/", } @inproceedings{cardoso-2021-cost, title = "Cost Volume Refinement for Depth Prediction", author = "Joao Afonso Cardoso and Nuno Goncalves and Michael Wimmer", year = "2021", abstract = "Light-field cameras are becoming more popular in the consumer market. Their data redundancy allows, in theory, to accurately refocus images after acquisition and to predict the depth of each point visible from the camera. Combined, these two features allow for the generation of full-focus images, which is impossible in traditional cameras. Multiple methods for depth prediction from light fields (or stereo) have been proposed over the years. A large subset of these methods relies on cost-volume estimates – 3D objects where each layer represents a heuristic of whether each point in the image is at a certain distance from the camera. Generally, this volume is used to regress a depth map, which is then refined for better results. In this paper, we argue that refining the cost volumes is superior to refining the depth maps in order to further increase the accuracy of depth predictions. 
We propose a set of cost-volume refinement algorithms and show their effectiveness.", month = jan, isbn = "978-1-7281-8809-6", publisher = "IEEE", location = "Milan, Italy", event = "25th International Conference on Pattern Recognition (ICPR)", doi = "10.1109/ICPR48806.2021.9412730", booktitle = "Proceedings of the 25th International Conference on Pattern Recognition", pages = "354--361", keywords = "depth reconstruction, light fields, cost volumes", URL = "https://www.cg.tuwien.ac.at/research/publications/2021/cardoso-2021-cost/", } @article{luidolt-2020-lightperceptionVR, title = "Gaze-Dependent Simulation of Light Perception in Virtual Reality", author = "Laura R. Luidolt and Michael Wimmer and Katharina Kr\"{o}sl", year = "2020", abstract = "The perception of light is inherently different inside a virtual reality (VR) or augmented reality (AR) simulation when compared to the real world. Conventional head-worn displays (HWDs) are not able to display the same high dynamic range of brightness and color as the human eye can perceive in the real world. To mimic the perception of real-world scenes in virtual scenes, it is crucial to reproduce the effects of incident light on the human visual system. In order to advance virtual simulations towards perceptual realism, we present an eye-tracked VR/AR simulation comprising effects for gaze-dependent temporal eye adaption, perceptual glare, visual acuity reduction, and scotopic color vision. Our simulation is based on medical expert knowledge and medical studies of the healthy human eye. We conducted the first user study comparing the perception of light in a real-world low-light scene to a VR simulation. 
Our results show that the proposed combination of simulated visual effects is well received by users and also indicate that an individual adaptation is necessary, because perception of light is highly subjective.", month = dec, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", number = "12", issn = "1077-2626", doi = "10.1109/TVCG.2020.3023604", pages = "3557--3567", keywords = "perception, virtual reality, user studies", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luidolt-2020-lightperceptionVR/", } @phdthesis{KROESL-2020-SVI, title = "Simulating Vision Impairments in Virtual and Augmented Reality", author = "Katharina Kr\"{o}sl", year = "2020", abstract = "There are at least 2.2 billion people affected by vision impairments worldwide, and the number of people suffering from common eye diseases like cataracts, diabetic retinopathy, glaucoma or macular degeneration, which show a higher prevalence with age, is expected to rise in the years to come, due to factors like aging of the population. Medical publications, ophthalmologists and patients can give some insight into the effects of vision impairments, but for people with normal eyesight (even medical personnel) it is often hard to grasp how certain eye diseases can affect perception. We need to understand and quantify the effects of vision impairments on perception, to design cities, buildings, or lighting systems that are accessible for people with vision impairments. Conducting studies on vision impairments in the real world is challenging, because it requires a large number of participants with exactly the same type of impairment. Such a sample group is often hard or even impossible to find, since not every symptom can be assessed precisely and the same eye disease can be experienced very differently between affected people. 
In this thesis, we address these issues by presenting a system and a methodology to simulate vision impairments, such as refractive errors, cataracts, cornea disease, and age-related macular degeneration in virtual reality (VR) and augmented reality (AR), which allows us to conduct user studies in VR or AR with people with healthy eyesight and graphically simulated vision impairments. We present a calibration technique that allows us to calibrate individual simulated symptoms to the same level of severity for every user, taking hardware constraints as well as vision capabilities of users into account. We measured the influence of simulated reduced visual acuity on maximum recognition distances of signage in a VR study and showed that current international standards and norms do not sufficiently consider people with vision impairments. In a second study, featuring our medically based cataract simulations in VR, we found that different lighting systems can positively or negatively affect the perception of people with cataracts. We improved and extended our cataract simulation to video–see-through AR and evaluated and adjusted each simulated symptom together with cataract patients in a pilot study, showing the flexibility and potential of our approach. In future work we plan to include further vision impairments and open source our software, so it can be used for architects and lighting designers to test their designs for accessibility, for training of medical personnel, and to increase empathy for people with vision impairments. This way, we hope to contribute to making this world more inclusive for everyone. 
", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/KROESL-2020-SVI/", } @talk{WIMMER-2020-ASG, title = "Applications of Smart Graphics", author = "Michael Wimmer", year = "2020", abstract = "For a long period of time, the focus of computer graphics was mostly the quality and speed of image generation. Meanwhile, commercial rendering engines leave little to be desired, but computer graphics research has expanded to solve application problems through so-called “smart graphics”. In this talk, I will present some of our latest advances in “smart” computer graphics in simulation, rendering and content generation. I will show how we can now simulate visual impairments in virtual reality, which could be used to create empathy for people affected by these impairments. I will describe how we have advanced point-based rendering techniques to allow incorporating real environments into rendering applications with basically no preprocessing. On the other hand, virtual environments could be created efficiently by collaborative crowed-sourced procedural modeling. Finally, efficient simulations of floods and heavy rainfall may help experts and increase public awareness of natural disasters and the effects of climate change.", month = nov, event = "Smart Tools and Applications in Graphics (STAG) 2020", location = "online", keywords = "computer graphics, rendering, simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/WIMMER-2020-ASG/", } @article{SCHUETZ-2020-MPC, title = "Fast Out-of-Core Octree Generation for Massive Point Clouds", author = "Markus Sch\"{u}tz and Stefan Ohrhallinger and Michael Wimmer", year = "2020", abstract = "We propose an efficient out-of-core octree generation method for arbitrarily large point clouds. 
It utilizes a hierarchical counting sort to quickly split the point cloud into small chunks, which are then processed in parallel. Levels of detail are generated by subsampling the full data set bottom up using one of multiple exchangeable sampling strategies. We introduce a fast hierarchical approximate blue-noise strategy and compare it to a uniform random sampling strategy. The throughput, including out-of-core access to disk, generating the octree, and writing the final result to disk, is about an order of magnitude faster than the state of the art, and reaches up to around 6 million points per second for the blue-noise approach and up to around 9 million points per second for the uniform random approach on modern SSDs.", month = nov, journal = "Computer Graphics Forum", volume = "39", number = "7", issn = "1467-8659", doi = "10.1111/cgf.14134", publisher = "John Wiley \& Sons, Inc.", pages = "1--13", keywords = "point clouds, point-based rendering, level of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/SCHUETZ-2020-MPC/", } @mastersthesis{lipp-2019-rtxq3, title = "Real-Time Ray Tracing in Quake III", author = "Lukas Lipp", year = "2020", abstract = "This work discusses the extension of the popular Quake III game engine using real-time raytracing. It investigates how ray tracing can be implemented using the most recent graphics card generation by NVIDIA, which offers dedicated hardware support and acceleration via a new API. In addition, strategies will be discussed about how offline ray-tracing algorithms can be transformed to an online real-time context. In order to implement ray tracing, Quake III needs to be extended with a Vulkan backend. Next, distributed ray tracing is implemented and is used to render the whole game world except for the user interface (UI) elements. The UI will be handled by the rasterizer. 
The performance and efficiency of ray tracing in a game engine using the RTX hardware features is analyzed and discussed. The focus lies on how quality and performance relate to each other, and how far ray tracing can be pushed with still acceptable frame rate of around 30/60 frames per second. Furthermore, implementation strategies that improve the quality, performance or both will be discussed.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Rendering, Ray Tracing, RTX, Quake III", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/lipp-2019-rtxq3/", } @bachelorsthesis{pernsteinre_jakob_2020_eechc, title = "Ensuring the Effectiveness of CHC++ in Vulkan", author = "Jakob Pernsteiner", year = "2020", abstract = "Real-time occlusion culling is a valuable tool to increase the performance of real-time rendering applications by detecting and removing invisible geometry from the rendering pipeline. Through new rendering Application Programming Interface (API) like Vulkan and modern hardware, these culling algorithms can become even more powerful. This thesis tries to ensure and evaluate the performance of Coherent Hierarchical Culling Revisited (CHC++) in this new environment by performing various optimisations to the algorithm. The changes include the batching of consecutive draw-calls and occlusion queries into single GPU-queue submits to reduce the overhead on the CPU and GPU. Additionally, the support for alpha blended transparent objects was added to the algorithm, which allows for correct culling and rendering of these objects. The algorithm performs great in environments with high occlusion and does not degrade in performance in the worst case scenario. 
But the high performance increase of the original implementation could not be replicated, which is attributed to the difference in rendering APIs and hardware improvements.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "culling, real-time, GPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/pernsteinre_jakob_2020_eechc/", } @misc{brugger-2020-tdp, title = "Test Scene Design for Physically Based Rendering", author = "Elias Brugger and Christian Freude and Michael Wimmer", year = "2020", abstract = "Physically based rendering is a discipline in computer graphics which aims at reproducing certain light and material appearances that occur in the real world. Complex scenes can be difficult to compute for rendering algorithms. This paper introduces a new comprehensive test database of scenes that treat different light setups in conjunction with diverse materials and discusses its design principles. A lot of research is focused on the development of new algorithms that can deal with difficult light conditions and materials efficiently. This database delivers a comprehensive foundation for evaluating existing and newly developed rendering techniques. A final evaluation compares different results of different rendering algorithms for all scenes.", month = aug, URL = "https://www.cg.tuwien.ac.at/research/publications/2020/brugger-2020-tdp/", } @techreport{freude_2020_rs, title = "R-Score: A Novel Approach to Compare Monte Carlo Renderings", author = "Christian Freude and Hiroyuki Sakai and Karoly Zsolnai-Feh\'{e}r and Michael Wimmer", year = "2020", abstract = "In this paper, we propose a new approach for the comparison and analysis of Monte Carlo (MC) rendering algorithms. 
It is based on a novel similarity measure called render score (RS) that is specifically designed for MC rendering, statistically motivated, and incorporates bias and variance. Additionally, we propose a comparison scheme that alleviates the need for practically converged reference images (RIs). Our approach can be used to compare and analyze different rendering methods by revealing detailed (per-pixel) differences and subsequently potential conceptual or implementation-related issues, thereby offering a more informative and meaningful alternative to commonly used metrics.", month = aug, number = "TR-193-02-2020-4", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", note = "human contact: technical-report@cg.tuwien.ac.at", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/freude_2020_rs/", } @article{OTEPKA-2020-PPC, title = "Efficient Loading and Visualization of Massive Feature-Rich Point Clouds Without Hierarchical Acceleration Structures", author = "Johannes Otepka and Gottfried Mandlburger and Markus Sch\"{u}tz and Norbert Pfeifer and Michael Wimmer", year = "2020", abstract = "Nowadays, point clouds are the standard product when capturing reality independent of scale and measurement technique. Especially, Dense Image Matching (DIM) and Laser Scanning (LS) are state of the art capturing methods for a great variety of applications producing detailed point clouds up to billions of points. In-depth analysis of such huge point clouds typically requires sophisticated spatial indexing structures to support potentially long-lasting automated non-interactive processing tasks like feature extraction, semantic labelling, surface generation, and the like. 
Nevertheless, a visual inspection of the point data is often necessary to obtain an impression of the scene, roughly check for completeness, quality, and outlier rates of the captured data in advance. Also intermediate processing results, containing additional per-point computed attributes, may require visual analyses to draw conclusions or to parameterize further processing. Over the last decades a variety of commercial, free, and open source viewers have been developed that can visualise huge point clouds and colorize them based on available attributes. However, they have either a poor loading and navigation performance, visualize only a subset of the points, or require the creation of spatial indexing structures in advance. In this paper, we evaluate a progressive method that is capable of rendering any point cloud that fits in GPU memory in real time without the need of time consuming hierarchical acceleration structure generation. In combination with our multi-threaded LAS and LAZ loaders, we achieve load performance of up to 20 million points per second, display points already while loading, support flexible switching between different attributes, and rendering up to one billion points with visually appealing navigation behaviour. 
Furthermore, loading times of different data sets for different open source and commercial software packages are analysed.", month = aug, journal = "ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences", volume = "XLIII-B2-2020", issn = "1682-1750", doi = "10.5194/isprs-archives-XLIII-B2-2020-293-2020", pages = "293--300", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/OTEPKA-2020-PPC/", } @misc{kerbl-2020-improvencoding, title = "Improved Triangle Encoding for Cached Adaptive Tessellation", author = "Linus Horvath and Bernhard Kerbl and Michael Wimmer", year = "2020", month = jul, location = "online", event = "HPG 2020", note = "Poster presented at HPG 2020 (2020-05-01--2020-06-22)", keywords = "GPU, tessellation, real-time", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kerbl-2020-improvencoding/", } @mastersthesis{reznicek-2020-fpgaray, title = "FPGARay: Accelerating Physically Based Rendering Using FPGAs", author = "Alexander Reznicek", year = "2020", abstract = "The synthesis of an image from a scene stored on a computer is called rendering, which is able to deliver photo-realistic results, e.g., by using specific variants of the class of ray tracing algorithms. However, these variants (e.g., path tracing) possess a stochastic characteristic which results in a high computational expense. This is explained by the nature of stochastic algorithms, which use a high number of samples to compute a result—in case of ray tracing, these samples manifest in a high number of rays needed for a complete rendering. One possibility to accelerate ray tracing—no matter if using a stochastic or simpler variants—is the use of customized hardware. FPGRay is such an approach, which combines the use of customized hardware with the software of an off-the-shelf PC to a hybrid solution. 
This allows increasing the efficiency by specialized hardware and delivers a sustainability in case of changing algorithms at the same time. The results point towards a possible efficiency gain. Unfortunately, in the scope of this thesis this was not realizable and the specific implementation showed a lower efficiency compared to the software implementation. Nevertheless, the possibility to achieve a higher efficiency with this approach by indicating FPGRay’s potential could be shown.", month = jun, pages = "121", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Rendering, FPGA, hardware acceleration, ray tracing, path tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/reznicek-2020-fpgaray/", } @article{zsolnaifeher-2020-pme, title = "Photorealistic Material Editing Through Direct Image Manipulation", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2020", abstract = "Creating photorealistic materials for light transport algorithms requires carefully fine-tuning a set of material properties to achieve a desired artistic effect. This is typically a lengthy process that involves a trained artist with specialized knowledge. In this work, we present a technique that aims to empower novice and intermediate-level users to synthesize high-quality photorealistic materials by only requiring basic image processing knowledge. In the proposed workflow, the user starts with an input image and applies a few intuitive transforms (e.g., colorization, image inpainting) within a 2D image editor of their choice, and in the next step, our technique produces a photorealistic result that approximates this target image. 
Our method combines the advantages of a neural network-augmented optimizer and an encoder neural network to produce high-quality output results within 30 seconds. We also demonstrate that it is resilient against poorly-edited target images and propose a simple extension to predict image sequences with a strict time budget of 1-2 seconds per image. Video: https://www.youtube.com/watch?v=8eNHEaxsj18", month = jun, journal = "Computer Graphics Forum", volume = "39", number = "4", issn = "1467-8659", doi = "10.1111/cgf.14057", pages = "107--120", keywords = "neural rendering, neural networks, photorealistic rendering, photorealistic material editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/zsolnaifeher-2020-pme/", } @article{luksch_2020, title = "Real-Time Approximation of Photometric Polygonal Lights", author = "Christian Luksch and Lukas Prost and Michael Wimmer", year = "2020", abstract = "We present a real-time rendering technique for photometric polygonal lights. Our method uses a numerical integration technique based on a triangulation to calculate noise-free diffuse shading. We include a dynamic point in the triangulation that provides a continuous near-field illumination resembling the shape of the light emitter and its characteristics. We evaluate the accuracy of our approach with a diverse selection of photometric measurement data sets in a comprehensive benchmark framework. Furthermore, we provide an extension for specular reflection on surfaces with arbitrary roughness that facilitates the use of existing real-time shading techniques. 
Our technique is easy to integrate into real-time rendering systems and extends the range of possible applications with photometric area lights.", month = may, journal = "Proceedings of the ACM on Computer Graphics and Interactive Techniques", volume = "3", number = "1", issn = "2577-6193", doi = "10.1145/3384537", pages = "4.1--4.18", keywords = "area lights, photometric lights, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/luksch_2020/", } @article{schuetz-2020-PPC, title = "Progressive Real-Time Rendering of One Billion Points Without Hierarchical Acceleration Structures", author = "Markus Sch\"{u}tz and Gottfried Mandlburger and Johannes Otepka and Michael Wimmer", year = "2020", abstract = "Research in rendering large point clouds traditionally focused on the generation and use of hierarchical acceleration structures that allow systems to load and render the smallest fraction of the data with the largest impact on the output. The generation of these structures is slow and time consuming, however, and therefore ill-suited for tasks such as quickly looking at scan data stored in widely used unstructured file formats, or to immediately display the results of point-cloud processing tasks. We propose a progressive method that is capable of rendering any point cloud that fits in GPU memory in real time, without the need to generate hierarchical acceleration structures in advance. Our method supports data sets with a large amount of attributes per point, achieves a load performance of up to 100 million points per second, displays already loaded data in real time while remaining data is still being loaded, and is capable of rendering up to one billion points using an on-the-fly generated shuffled vertex buffer as its data structure, instead of slow-to-generate hierarchical structures. 
Shuffling is done during loading in order to allow efficiently filling holes with random subsets, which leads to a higher quality convergence behavior. ", month = may, journal = "Computer Graphics Forum", volume = "39", number = "2", issn = "1467-8659", doi = "10.1111/cgf.13911", booktitle = "EUROGRAPHICS", pages = "14", publisher = "John Wiley & Sons Ltd.", pages = "51--64", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/schuetz-2020-PPC/", } @inproceedings{tatzgern-2020-sst, title = "Stochastic Substitute Trees for Real-Time Global Illumination", author = "Wolfgang Tatzgern and Benedikt Mayr and Bernhard Kerbl and Markus Steinberger", year = "2020", abstract = "With the introduction of hardware-supported ray tracing and deep learning for denoising, computer graphics has made a considerable step toward real-time global illumination. In this work, we present an alternative global illumination method: The stochastic substitute tree (SST), a hierarchical structure inspired by lightcuts with light probability distributions as inner nodes. Our approach distributes virtual point lights (VPLs) in every frame and efficiently constructs the SST over those lights by clustering according to Morton codes. Global illumination is approximated by sampling the SST and considers the BRDF at the hit location as well as the SST nodes’ intensities for importance sampling directly from inner nodes of the tree. To remove the introduced Monte Carlo noise, we use a recurrent autoencoder. 
In combination with temporal filtering, we deliver real-time global illumination for complex scenes with challenging light distributions.", month = may, event = "I3D ’20", booktitle = "Symposium on Interactive 3D Graphics and Games", pages = "1--9", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/tatzgern-2020-sst/", } @inproceedings{unterguggenberger-2020-fmvr, title = "Fast Multi-View Rendering for Real-Time Applications", author = "Johannes Unterguggenberger and Bernhard Kerbl and Markus Steinberger and Dieter Schmalstieg and Michael Wimmer", year = "2020", abstract = "Efficient rendering of multiple views can be a critical performance factor for real-time rendering applications. Generating more than one view multiplies the amount of rendered geometry, which can cause a huge performance impact. Minimizing that impact has been a target of previous research and GPU manufacturers, who have started to equip devices with dedicated acceleration units. However, vendor-specific acceleration is not the only option to increase multi-view rendering (MVR) performance. Available graphics API features, shader stages and optimizations can be exploited for improved MVR performance, while generally offering more versatile pipeline configurations, including the preservation of custom tessellation and geometry shaders. In this paper, we present an exhaustive evaluation of MVR pipelines available on modern GPUs. We provide a detailed analysis of previous techniques, hardware-accelerated MVR and propose a novel method, leading to the creation of an MVR catalogue. Our analyses cover three distinct applications to help gain clarity on overall MVR performance characteristics. 
Our interpretation of the observed results provides a guideline for selecting the most appropriate one for various use cases on different GPU architectures.", month = may, isbn = "978-3-03868-107-6", organization = "Eurographics", location = "online", event = "EGPGV 2020", editor = "Frey, Steffen and Huang, Jian and Sadlo, Filip", doi = "10.2312/pgv.20201071", booktitle = "Eurographics Symposium on Parallel Graphics and Visualization", pages = "13--23", keywords = "Real-Time Rendering, Rasterization, Multi-View, OVR_multiview, Geometry Shader, Evaluation", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/unterguggenberger-2020-fmvr/", } @bachelorsthesis{brugger-2020-tsdpbr, title = "Test Scene Design for Physically Based Rendering", author = "Elias Brugger", year = "2020", abstract = "Physically based rendering is a discipline in computer graphics which aims at reproducing certain light and material appearances that occur in the real world. Complex scenes can be difficult to compute for rendering algorithms. The goal of this thesis is to create a comprehensive test database of scenes that treat different light setups in conjunction with diverse materials. A lot of research is focused on the development of new algorithms that can deal with difficult light conditions and materials efficiently. This database should deliver a comprehensive foundation for evaluating existing and newly developed rendering techniques. A final evaluation will compare different results of different rendering algorithms for all scenes. 
", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "Physically Based Rendering, Database, Verification, Tests", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/brugger-2020-tsdpbr/", } @inproceedings{kroesl-2020-XREye, title = "XREye: Simulating Visual Impairments in Eye-Tracked XR ", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Steven Feiner and Michael Wimmer", year = "2020", abstract = "Many people suffer from visual impairments, which can be difficult for patients to describe and others to visualize. To aid in understanding what people with visual impairments experience, we demonstrate a set of medically informed simulations in eye-tracked XR of several common conditions that affect visual perception: refractive errors (myopia, hyperopia, and presbyopia), cornea disease, and age-related macular degeneration (wet and dry).", month = mar, booktitle = "2020 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)", location = "(Atlanta) online", publisher = "IEEE", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/kroesl-2020-XREye/", } @bachelorsthesis{wiesinger_2020_odpr, title = "An Open Database for Physically Based Rendering", author = "Andreas Wiesinger", year = "2020", abstract = "The propagation of light and its interaction with matter can be simulated using mathematical models, most commonly Bidirectional Reflectance Distribution Functions (BRDFs). However, the creation of physically accurate BRDFs and their verification can be challenging. In order to be able to test and verify physically-based rendering algorithms, various methods have been researched. However, they are rarely used by the community. 
One key to the verification of rendering algorithms is to provide test-methods and test-data. Another key is to motivate the community to actually use them and run more tests. This thesis focuses on the latter. For this purpose, the author designed a web-application called “Open Database for Physically-based Rendering (ODPR)”, where test-scenes of different types and from different studies will be merged into one publicly available place. A prototype for ODPR was implemented. The web-application uses community-driven design-patterns similar to StackExchange-sites, and allows scientists to register and upload test-scenes. The idea is, that ODPR will be built up and maintained with the help of the community, by providing free downloads of test-scenes and additional privileges to registered users. ", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/wiesinger_2020_odpr/", } @mastersthesis{houska-2020-IPGM, title = "Improved Persistent Grid Mapping", author = "Peter Houska", year = "2020", abstract = "We propose a novel heightmap-based terrain rendering algorithm that enhances the Persistent Grid Mapping (PGM) method. As in the underlying method, we cache a regular triangulated grid in video memory and use the GPU to project the mesh onto the ground plane each frame anew. Each vertex in the grid is then displaced according to the sampled heightmap value along the ground plane’s normal vector. The perspective mapping of the grid results in a view-dependent, continuous level-of-detail approximation of the terrain dataset. PGM is a simple and elegant terrain rendering algorithm, however, as the camera hovers over the terrain, projected vertex positions slide over the terrain. 
This leads to the underlying static terrain surface changing shape slightly from frame to frame. We address these swimming artifacts by introducing four improvements: tailoring the projected grid, which pushes most otherwise culled vertices back into the view frustum, redistributing grid vertices according to an importance function for more faithful mipmap selection when sampling the heightmap, local terrain edge search for vertices within a certain proximity to the camera, and exploiting temporal coherence between frames. While our algorithm cannot guarantee a maximum screen-space error, it nevertheless reduces PGM’s inherent temporal aliasing artifacts considerably.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/houska-2020-IPGM/", } @mastersthesis{Luidolt-2020-DA, title = "Perception of Light in Virtual Reality", author = "Laura R. Luidolt", year = "2020", abstract = "The perception of light and light incidence in the human eye is substantially different in real-world scenarios and virtual reality (VR) simulations. Standard low dynamic range displays, as used in common VR headsets, are not able to replicate the same light intensities we see in reality. Therefore, light phenomenons, such as temporal eye adaptation, perceptual glare, visual acuity reduction and scotopic color vision need to be simulated to generate realistic images. Even though, a physically based simulation of these effects could increase the perceived reality of VR applications, this topic has not been thoroughly researched yet. 
We propose a post-processing workflow for VR and augmented reality (AR), using eye tracking, that is based on medical studies of the healthy human eye and is able to run in real time, to simulate light effects as close to reality as possible. We improve an existing temporal eye adaptation algorithm to be view-dependent. We adapt a medically based glare simulation to run in VR and AR. Additionally, we add eye tracking to adjust the glare intensity according to the viewing direction and the glare appearance depending on the user’s pupil size. We propose a new function fit for the reduction of visual acuity in VR head mounted displays. Finally, we include scotopic color vision for more realistic rendering of low-light scenes. We conducted a primarily qualitative pilot study, comparing a real-world low-light scene to our VR simulation through individual, perceptual evaluation. Most participants mentioned, that the simulation of temporal eye adaptation, visual acuity reduction and scotopic color vision was similar or the same as their own perception in the real world. However, further work is necessary to improve the appearance and movement of our proposed glare kernel. 
We conclude, that our work has laid a ground base for further research regarding the simulation and individual adaptation to the perception of light in VR.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "perception, temporal eye adaptation, glare, virtual reality, scotopic vision, visual acuity reduction, augmented reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/Luidolt-2020-DA/", } @bachelorsthesis{rumpelnik_martin_2020_PRM, title = "Planetary Rendering with Mesh Shaders", author = "Martin Rumpelnik", year = "2020", abstract = "Planetary rendering solutions often suffer from artifacts or low performance when rendering very big terrains with high details. In this thesis, we present a method that targets real-time applications and therefore aims to achieve high performance. The method can be applied with an arbitrary amount of detail, which enables stable performance under runtime or hardware restriction. In contrast to existing methods, like quadtrees and clipmaps, our method avoids artifacts, such as popping or swimming, as much as possible. The method submits coarse, rectangular regions of cells around the viewer to NVidia’s new geometry pipeline that was introduced with their Turing Architecture. Due to the capabilities of the new pipeline, we can make efficient level-of-detail decisions on the graphics processing unit (GPU) and produce work to create circular regions from the rectangular ones. 
These circular regions provide uniform terrain resolution for the viewer in all directions, while maintaining low rendering times.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "rendering, real-time, GPU, terrain", URL = "https://www.cg.tuwien.ac.at/research/publications/2020/rumpelnik_martin_2020_PRM/", } @studentproject{leopold-2019-vox, title = "Adding Voxelization using the Hardware Rasterizer to a Cross-Platform C++/OpenGL Engine", author = "Nikole Leopold", year = "2019", abstract = "Two tasks were performed: 1) Ported Engine186 to Linux 2) Implemented a voxelization algorithm using Engine186", month = dec, keywords = "OpenGL, Voxelization, Linux, Hardware Acceleration", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/leopold-2019-vox/", } @mastersthesis{stappen-2019-vrs, title = "Improving Real-Time Rendering Quality and Efficiency using Variable Rate Shading on Modern Hardware", author = "Stefan Stappen", year = "2019", abstract = "With the NVIDIA Turing graphics card micro-architecture released in 2018, not only performance in terms of operations per second is increased but also new hardware features are introduced, like Variable Rate Shading (VRS). VRS allows focussing the processing power by dividing the framebuffer into tiles and dynamically controlling the resolution of each tile. To be precise, the screen is partitioned into tiles of 16x16 pixels and for each tile, it can be specified how often the fragment shader shall be executed. It is both possible, to have fewer fragment shader invocations than there are fragments, or more fragment shader invocations than there are fragments. This allows individually defining lower sampling rates or supersampling for regions of the screen. 
Regions of less interest or with less visual details can be assigned less computational power in terms of shader executions while regions that should provide high fidelity can be supersampled. The challenges here are to find and distinguish these regions in a dynamic scene, like it is the case for games, and how this technique integrates with commonly used techniques in the industry, like deferred shading. NVIDIA already proposed some strategies on how these regions can be distinguished and how the shading rate can be selected. Among these strategies are Content-Adaptive Shading and Motion-Adaptive Shading. Content-Adaptive Shading varies the shading rate according to the current content of a frame and does not take temporal coherence into account. Motion-Adaptive Shading adapts the shading rate according to the changes in the scene. Stable regions, like for example the horizon and the car in a driving simulation, will be rendered with higher quality. In contrast, moving regions like the street will be rendered more coarsely because the viewer cannot focus on these regions anyway. Another approach for selecting the shading rate is to adapt the resolution to the viewer’s focus. This can be done in combination with an eye-tracking device and is called foveated rendering. We invented a novel approach that utilizes data from temporal anti-aliasing techniques to detect under- and oversampled regions and select the appropriate shading rate for these regions. We developed five algorithms, edge-based and texel-differential based Content-Adaptive Shading, Motion-Adaptive Shading integrating the motion over multiple frames, single-pass foveated rendering and TAA-Adaptive Shading. The applicability of each algorithm to modern renderer architectures with forward and deferred shading and anti-aliasing post-processing has been evaluated. 
The major advantage of our VRS techniques is that some of them enable up to 4x higher rendering resolution with the same performance or up to 4x better performance at the same resolution.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", keywords = "Variable Rate Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/stappen-2019-vrs/", } @phdthesis{zsolnai-feher-thesis-2019, title = "Photorealistic Material Learning and Synthesis", author = "Karoly Zsolnai-Feh\'{e}r", year = "2019", abstract = "Light transport simulations are the industry-standard way of creating convincing photorealistic imagery and are widely used in creating animation movies, computer animations, medical and architectural visualizations among many other notable applications. These techniques simulate how millions of rays of light interact with a virtual scene, where the realism of the final output depends greatly on the quality of the used materials and the geometry of the objects within this scene. In this thesis, we endeavor to address two key issues pertaining to photorealistic material synthesis: first, creating convincing photorealistic materials requires years of expertise in this field and requires a non-trivial amount of trial and error from the side of the artist. We propose two learning-based methods that enables novice users to easily and quickly synthesize photorealistic materials by learning their preferences and recommending arbitrarily many new material models that are in line with their artistic vision. We also augmented these systems with a neural renderer that performs accurate light-transport simulation for these materials orders of magnitude quicker than the photorealistic rendering engines commonly used for these tasks. 
As a result, novice users are now able to perform mass-scale material synthesis, and even expert users experience a significant improvement in modeling times when many material models are sought. Second, simulating subsurface light transport leads to convincing translucent material visualizations, however, most published techniques either take several hours to compute an image, or make simplifying assumptions regarding the underlying physical laws of volumetric scattering. We propose a set of real-time methods to remedy this issue by decomposing well-known 2D convolution filters into a set of separable 1D convolutions while retaining a high degree of visual accuracy. These methods execute within a few milliseconds and can be inserted into state-of-the-art rendering systems as a simple post-processing step without introducing intrusive changes into the rendering pipeline.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "neural rendering, machine learning, photorealistic rendering, ray tracing, global illumination, material synthesis", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/zsolnai-feher-thesis-2019/", } @phdthesis{klein_2019_PHD, title = "Instant Construction of Atomistic Models for Visualization in Integrative Cell Biology", author = "Tobias Klein", year = "2019", abstract = "Computational models have advanced research of integrative cell biology in various ways. Especially in the biological mesoscale, the scale between atoms and cellular environments, computational models improve the understanding and qualitative analysis. The mesoscale is an important range, since it represents the range of scales that are not fully accessible to a single experimental technique. 
Complex molecular assemblies within this scale have been visualized with x-ray crystallography, though only in isolation. Mesoscale models show how molecules are assembled into more complex subcellular environments that orchestrate the processes of life. The skillful combination of the results of imaging and experimental techniques provides a glimpse of the processes, which are happening here. Only recently, biologists have started to unify the various sources of information. They have begun to computationally assemble and subsequently visualize complex environments, such as viruses or bacteria. Currently, we live in an opportune time for researching integrative structural biology due to several factors. First and foremost, the wealth of data, driven through sources like online databases, makes structural information about biological entities publicly available. In addition to that, the progress of parallel processors builds the foundation to instantly construct and render large mesoscale environments in atomistic detail. Finally, new scientific advances in visualization allow the efficient rendering of complex biological phenomena with millions of structural units. In this cumulative thesis, we propose several novel techniques that facilitate the instant construction of mesoscale structures. The common methodological strategy of these techniques and insight from this thesis is ``compute instead of store''. This approach eliminates the storage and memory management complexity, and enables instant changes of the constructed models. Combined, our techniques are capable of instantly constructing large-scale biological environments using the basic structural building blocks of cells. These building blocks are mainly nucleic acids, lipids, and soluble proteins. For the generation of long linear polymers formed by nucleic acids, we propose a parallel construction technique that makes use of a midpoint displacement algorithm. 
The efficient generation of lipid membranes is realized through a texture synthesis approach that makes use of the Wang tiling concept. For the population of soluble proteins, we present a staged algorithm, whereby each stage is processed in parallel. We have integrated the instant construction approach into a visual environment in order to improve several aspects. First, it allows immediate feedback on the created structures and the results of parameter changes. Additionally, the integration of construction in visualization builds the foundation for visualization systems that strive to construct large-scale environments on-the-fly. Lastly, it advances the qualitative analysis of biological mesoscale environments, where a multitude of synthesized models is required. In order to disseminate the physiology of biological mesoscale models, we propose a novel concept that simplifies the creation of multi-scale procedural animations. ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/klein_2019_PHD/", } @bachelorsthesis{panfili-2019-VAVR, title = "Effects of VR-Displays on Visual Acuity", author = "Lara Panfili", year = "2019", abstract = "The perceived visual acuity (VA) of people in virtual reality (VR), using a head-mounted display (HMD), is not equal to their VA in the real world. The reason for this difference is the reduction of visual acuity in the virtual environment that is caused by various factors, such as the low resolution of the VR display. Based on those circumstances, the capacity of an individual to distinguish small details diminishes visibly. 
Previous studies regarding eyesight in VR have already verified how the best visual resolution in virtual environments is always lower than the natural vision and therefore this aspect could be seen as a mild vision impairment for the users of an HMD. The goal of this project is to investigate how much the VA is reduced in VR and respectively whether the decrease of VA in VR is perceived similar by everyone or if visual impairments like Myopia, influence the visual perception. Based on a previous project, two different tests were implemented with the game engine Unreal Engine 4, a VR version for which an HTC VIVE headset was used, along with a desktop version. These tests were used to investigate the VA of the participant in a user study and the results have been compared to each other in order to find the extent to which visual impairments have an impact on VA. ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "virtual reality, visual acuity", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/panfili-2019-VAVR/", } @misc{SCHUETZ-2019-PCC, title = "Rendering Point Clouds with Compute Shaders", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "We propose a compute shader based point cloud rasterizer with up to 10 times higher performance than classic point-based rendering with the GL_POINT primitive. 
In addition to that, our rasterizer offers 5 byte depth-buffer precision with uniform or customizable distribution, and we show that it is possible to implement a high-quality splatting method that blends together overlapping fragments while still maintaining higher frame-rates than the traditional approach.", month = nov, isbn = "978-1-4503-6943-5/19/11", event = "SIGGRAPH Asia", conference-date = "Poster presented at SIGGRAPH Asia (2019-11)", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/SCHUETZ-2019-PCC/", } @bachelorsthesis{ESCHNER-2019-GDT, title = "Generating Synthetic Training Data for Video Surveillance Applications", author = "Johannes Eschner", year = "2019", abstract = "As the demand for ever-more capable computer vision systems has been increasing in recent years, there is a growing need for labeled ground-truth data for such systems. These ground-truth datasets are used for the training and evaluation of computer vision algorithms and are usually created by manually annotating images or image sequences with semantic labels. Synthetic video generation provides an alternative approach to the problem of generating labels. Here, the label data and the image sequences can be created simultaneously by utilizing a 3D render engine. Many of the existing frameworks for generating such synthetic datasets focus on the context of autonomous driving, where vast amounts of labeled input data are needed. In this thesis an implementation of a synthetic data generation framework for evaluating tracking algorithms in the context of video surveillance is presented. This framework uses a commercially available game engine as a renderer to generate synthetic video clips that depict different scenarios that can occur in a video surveillance setting. These scenarios include a multitude of interactions of different characters in a reconstructed environment. 
A collection of such synthetic clips is then compared to real videos by using it as an input for two different tracking algorithms. While producing synthetic ground-truth data in real time using a game engine is less work intensive than manual annotation, the results of the evaluation show that both tracking algorithms perform better on real data. This suggests that the synthetic data coming from the framework is limited in its suitability for evaluating tracking algorithms.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "neural networks", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ESCHNER-2019-GDT/", } @bachelorsthesis{Rumpler-2019-PPC, title = "Progressive Rendering of Massive Point Clouds in WebGL 2.0 Compute", author = "Wolfgang Rumpler", year = "2019", abstract = "Rendering large point clouds is a computationally expensive task, and various optimizations are required to achieve the desired performance for realtime applications. It is typical to store the point data hierarchically to enable fast retrieval and visibility testing in point clouds that consist of billions of points. However, rendering the selected nodes is still a demanding task for the graphics units on modern devices. Especially on mobile devices rendering millions of points every frame is often not possible with sufficient frame rates. Techniques that progressively render the points of a point cloud were proposed to reduce the load on the GPU. The results of the previous frames are recycled, and details are accumulated over multiple frames. Combining hierarchical structures with progressive rendering, therefore, houses an exciting opportunity for increasing the performance for massive point clouds. 
This work investigates a novel approach to render massive point clouds progressively in the browser by transforming the hierarchical structure locally into an unstructured pool of points. The pool is then rendered progressively with compute shaders and continuously updated with new nodes from the octree.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "point cloud, progressive rendering, webgl", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Rumpler-2019-PPC/", } @talk{WIMMER-2019-CGSG, title = "Computer Graphics for Serious Games", author = "Michael Wimmer", year = "2019", abstract = "10 years ago, the focus of computer graphics was mostly the quality and speed of image generation, and serious games set in realistic environments profited from these advances. Meanwhile, commercial rendering engines leave little to be desired, but computer graphics research has opened other doors which might be relevant for application in serious games. In this talk, I will present some of our latest advances in computer graphics in simulation, rendering and content generation. I will show how we can now simulate visual impairments in virtual reality, which could be used in games to create empathy for people affected by these impairments. I will describe how we have advanced point-based rendering techniques to allow incorporating real environments into rendering applications with basically no preprocessing. On the other hand, virtual environments for serious games could be created efficiently by collaborative crowd-sourced procedural modeling. 
Finally, efficient simulations of floods and heavy rainfall may not only help experts, but might be the basis of serious games to increase public awareness of natural disasters and the effects of climate change.", month = sep, event = "11th International Conference on Virtual Worlds and Games for Serious Applications", location = "Vienna, Austria", keywords = "serious games", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/WIMMER-2019-CGSG/", } @bachelorsthesis{Gruber2019, title = "Extended Image Classification", author = "Horst Gruber", year = "2019", abstract = "In this thesis, we developed an image classification model with improving classification performance over a training phase. The model is using a pre-trained convolutional neuronal network (CNN) for feature extraction and a k-means algorithm for clustering. Performance optimization is realized by optimized weight factors for the extracted feature values. The optimization of the weight factors is calculated iteratively during a training phase. The measure of the weight factor adoption in a training step is related to the ground-truth dependent clustering contribution of the newly added image feature. We see as an advantage of our approach that the optimization requires no internal changes of the applied feature extraction and clustering algorithms, hence pre-trained models or closed-source implementations can be used. As a further advantage, we see the step-wise transparency of the performance development during the training phase for each newly added image as opposed to batch-based training for CNNs. This enables dynamic control of the training phase by the user. Another advantage is the small number of parameters to be optimized, which results in reduced processing time. A further advantage is the classification performance of our model that outperforms the reference model without feature weight optimization. 
In the course of our work, we developed a Python application that implements our model and provides a user-friendly interface. It allows easy set-up of test cases and provides graphics and tables for a comprehensive evaluation on process steps level. We consider this application as a starting point for future work.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Gruber2019/", } @article{klein_2019_PMP, title = "Multi-Scale Procedural Animations of Microtubule Dynamics Based on Measured Data", author = "Tobias Klein and Ivan Viola and Eduard Gr\"{o}ller and Peter Mindek", year = "2019", abstract = "Biologists often use computer graphics to visualize structures, which due to physical limitations are not possible to image with a microscope. One example for such structures are microtubules, which are present in every eukaryotic cell. They are part of the cytoskeleton maintaining the shape of the cell and playing a key role in the cell division. In this paper, we propose a scientifically-accurate multi-scale procedural model of microtubule dynamics as a novel application scenario for procedural animation, which can generate visualizations of their overall shape, molecular structure, as well as animations of the dynamic behaviour of their growth and disassembly. The model is spanning from tens of micrometers down to atomic resolution. All the aspects of the model are driven by scientific data. The advantage over a traditional, manual animation approach is that when the underlying data change, for instance due to new evidence, the model can be recreated immediately. 
The procedural animation concept is presented in its generic form, withseveral novel extensions, facilitating an easy translation to other domains with emergent multi-scale behavior.", month = aug, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "26", number = "1", doi = "10.1109/TVCG.2019.2934612", pages = "622--632", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/klein_2019_PMP/", } @studentproject{koch-2019-PR, title = "Simulation of Diabetic Macular Edema in Virtual Reality", author = "Thomas Bernhard Koch", year = "2019", abstract = "Simulation of diabetic macular edema (DME) is implemented in a virtual reality simulation using Unreal Engine 4. Common symptoms of DME are blurry vision, loss of contrast, floaters and distorted vision. We use different computer graphics techniques to create effects which resemble such symptoms. An eye tracker from Pupil Labs is used in order to make effects gaze dependent. The implementation of these effects is discussed and adjustable parameters of the effects are explained.", month = aug, note = "1", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/koch-2019-PR/", } @article{celarek_adam-2019-qelta, title = "Quantifying the Error of Light Transport Algorithms", author = "Adam Celarek and Wenzel Jakob and Michael Wimmer and Jaakko Lehtinen", year = "2019", abstract = "This paper proposes a new methodology for measuring the error of unbiased physically based rendering algorithms. The current state of the art includes mean squared error (MSE) based metrics and visual comparisons of equal-time renderings of competing algorithms. Neither is satisfying as MSE does not describe behavior and can exhibit significant variance, and visual comparisons are inherently subjective. 
Our contribution is two-fold: First, we propose to compute many short renderings instead of a single long run and use the short renderings to estimate MSE expectation and variance as well as per-pixel standard deviation. An algorithm that achieves good results in most runs, but with occasional outliers is essentially unreliable, which we wish to quantify numerically. We use per-pixel standard deviation to identify problematic lighting effects of rendering algorithms. The second contribution is the error spectrum ensemble (ESE), a tool for measuring the distribution of error over frequencies. The ESE serves two purposes: It reveals correlation between pixels and can be used to detect outliers, which offset the amount of error substantially.", month = jul, journal = "Computer Graphics Forum", volume = "38", number = "4", doi = "10.1111/cgf.13775", publisher = "The Eurographics Association and John Wiley & Sons Ltd.", pages = "111--121", keywords = "measuring error, light transport, global illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/celarek_adam-2019-qelta/", } @inproceedings{kroesl-2019-ThesisFF, title = "Simulating Vision Impairments in VR and AR", author = "Katharina Kr\"{o}sl", year = "2019", abstract = "1.3 billion people worldwide are affected by vision impairments, according to the World Health Organization. However, vision impairments are hardly ever taken into account when we design our cities, buildings, emergency signposting, or lighting systems. With this research, we want to develop realistic, medically based simulations of eye diseases in VR and AR, which allow calibrating vision impairments to the same level for different users. This allows us to conduct user studies with participants with normal sight and graphically simulated vision impairments, to determine the effects of these impairments on perception, and to investigate lighting concepts under impaired vision conditions. 
This thesis will, for the first time, provide methods for architects and designers to evaluate their designs for accessibility and to develop lighting systems that can enhance the perception of people with vision impairments.", month = jun, booktitle = "ACM SIGGRAPH THESIS FAST FORWARD 2019", keywords = "vision impairments, cataracts, virtual reality, augmented reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ThesisFF/", } @bachelorsthesis{Thann_2019_01, title = "Evaluation of Coherent Hierarchical Culling Revisited with Varied Parameters", author = "J\"{u}rgen Thann", year = "2019", abstract = "Point clouds received through various types of 3D-scanning techniques increase in size constantly. To compensate for this fact, the performance of rendering point clouds has to be improved accordingly. In addition to view frustum culling, occlusion culling can be used to reduce the number of points that has to be loaded. The occlusion culling algorithm I evaluate is called ``CHC++: Coherent Hierarchical Culling Revisited'' [MBW08]. For this thesis, I implemented a parameterizable point cloud renderer that allowed me to test varied values for multiple parameters. While not all parameters are defined in the evaluated paper, all influence the algorithm's performance. My evaluation shows that the algorithm increases performance clearly for non-synthetic scenes. However, with the scenes in this evaluation, Tighter Bounding Volumes introduced a slight decrease in performance instead of improving it. Furthermore, using an SSD instead of an HDD did not yield the expected impact on the loading speed. Finally, I provide a general use case specific decision guideline for choosing values for the parameters. 
My implementation produced for this thesis is available under the GNU Lesser General Public License, Version 3.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/Thann_2019_01/", } @article{CORNEL-2019-IVF, title = "Interactive Visualization of Flood and Heavy Rain Simulations", author = "Daniel Cornel and Andreas Buttinger-Kreuzhuber and Artem Konev and Zsolt Horvath and Michael Wimmer and Raimund Heidrich and J\"{u}rgen Waser", year = "2019", abstract = "In this paper, we present a real-time technique to visualize large-scale adaptive height fields with C1-continuous surface reconstruction. Grid-based shallow water simulation is an indispensable tool for interactive flood management applications. Height fields defined on adaptive grids are often the only viable option to store and process the massive simulation data. Their visualization requires the reconstruction of a continuous surface from the spatially discrete simulation data. For regular grids, fast linear and cubic interpolation are commonly used for surface reconstruction. For adaptive grids, however, there exists no higher-order interpolation technique fast enough for interactive applications. Our proposed technique bridges the gap between fast linear and expensive higher-order interpolation for adaptive surface reconstruction. During reconstruction, no matter if regular or adaptive, discretization and interpolation artifacts can occur, which domain experts consider misleading and unaesthetic. We take into account boundary conditions to eliminate these artifacts, which include water climbing uphill, diving towards walls, and leaking through thin objects. 
We apply realistic water shadingwith visual cues for depth perception and add waves and foam synthesized from the simulation data to emphasize flow directions.The versatility and performance of our technique are demonstrated in various real-world scenarios. A survey conducted withdomain experts of different backgrounds and concerned citizens proves the usefulness and effectiveness of our technique.", month = jun, journal = "Computer Graphics Forum", volume = "38", number = "3", issn = "1467-8659", doi = "10.1111/cgf.13669", pages = "25--39", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/CORNEL-2019-IVF/", } @misc{kerbl_2019_planet_poster, title = "Real-time Rendering of Procedural Planets at Arbitrary Altitudes", author = "Florian Michelic and Michael Kenzel and Karl Haubenwallner and Bernhard Kerbl and Markus Steinberger", year = "2019", abstract = "Focusing on real-time, high-fidelity rendering, we present a novel approach for combined consideration of four major phenomena that define the visual representation of entire planets: We present a simple and fast solution for a distortion-free generation of 3D planetary terrain, spherical ocean waves and efficient rendering of volumetric clouds along with atmospheric scattering. Our approach to terrain and ocean mesh generation relies on a projected, persistent grid that can instantaneously and smoothly adapt to fast-changing viewpoints. For generating planetary ocean surfaces, we present a wave function that creates seamless, evenly spaced waves across the entire planet without causing unsightly artifacts. We further show how to render volumetric clouds in combination with precomputed atmospheric scattering and account for their contribution to light transport above ground. Our method provides mathematically consistent approximations of cloud-atmosphere interactions and works for any view point and direction, ensuring continuous transitions in appearance as the viewer moves from ground to space. 
Among others, our approach supports cloud shadows, light shafts, ocean reflections, and earth shadows on the clouds. The sum of these effects can be visualized at more than 120 frames per second on current graphics processing units.", month = may, note = "Voted best poster of I3D '19", location = "Montreal, Canada", event = "I3D 2019", conference-date = "Poster presented at I3D 2019 (2019-05-21--2019-05-23)", keywords = "planet, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kerbl_2019_planet_poster/", } @inproceedings{LUKSCH-2019-IGI, title = "Incrementally Baked Global Illumination", author = "Christian Luksch and Michael Wimmer and Michael Schw\"{a}rzler", year = "2019", abstract = "Global Illumination is affected by the slightest change in a 3D scene, requiring a complete reevaluation of the distributed light. In cases where real-time algorithms are not applicable due to high demands on the achievable accuracy, this recomputation from scratch results in artifacts like flickering or noise, disturbing the visual appearance and negatively affecting interactive lighting design workflows. We propose a novel system tackling this problem by providing incremental updates of a baked global illumination solution after scene modifications, and a re-convergence after a few seconds. Using specifically targeted incremental data structures and prioritization strategies in a many-light global illumination algorithm, we compute a differential update from one illumination state to another. 
We further demonstrate the use of a novel error balancing strategy making it possible to prioritize the illumination updates.", month = may, isbn = "978-1-4503-6310-5", series = "I3D ’19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317015", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games (I3D 2019)", pages = "4:1--4:10", keywords = "Global Illumination, Instant Radiosity, Lightmaps", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/LUKSCH-2019-IGI/", } @inproceedings{STEINLECHNER-2019-APS, title = "Adaptive Point-cloud Segmentation for Assisted Interactions", author = "Harald Steinlechner and Bernhard Rainer and Michael Schw\"{a}rzler and Georg Haaser and Attila Szabo and Stefan Maierhofer and Michael Wimmer", year = "2019", abstract = "In this work, we propose an interaction-driven approach streamlined to support and improve a wide range of real-time 2D interaction metaphors for arbitrarily large pointclouds based on detected primitive shapes. Rather than performing shape detection as a costly pre-processing step on the entire point cloud at once, a user-controlled interaction determines the region that is to be segmented next. By keeping the size of the region and the number of points small, the algorithm produces meaningful results and therefore feedback on the local geometry within a fraction of a second. 
We can apply these finding for improved picking and selection metaphors in large point clouds, and propose further novel shape-assisted interactions that utilize this local semantic information to improve the user’s workflow.", month = may, isbn = "978-1-4503-6310-5", series = "I3D ’19", publisher = "ACM", location = "Montreal, Quebec, Canada", event = "33rd Symposium on Interactive 3D Graphics and Games", editor = "Blenkhorn, Ari Rapkin", doi = "10.1145/3306131.3317023", booktitle = "Proceedings of the 33rd Symposium on Interactive 3D Graphics and Games", pages = "14:1--14:9", keywords = "Pointcloud Segmentation, Shape Detection, Interactive Editing", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/STEINLECHNER-2019-APS/", } @bachelorsthesis{heinz-2019-psi, title = "High-Quality Rendering of Interactive Particle Systems for Real-Time Applications", author = "Alexander Heinz", year = "2019", abstract = "Particle systems are widely used in real-time applications. This thesis presents and compares several state-of-the-art methods for rendering and simulating particle systems. In computer graphics, particle systems are used to represent fluids like water, gas-like substances like fire and smoke or effects like explosions and fireworks. In the case of water, each particle can be interpreted as one or more waterdrops. To simulate a firework, the particles can be seen as sparks, and for smoke or fire big particles can be used and blended over each other. In the context of this thesis all described techniques can be associated with either rendering or simulation. For rendering a single particle using the graphics processing unit (GPU), several methods exist. Probably the easiest would be to draw a texture onto a quad that is always looking towards the camera. This is called Billboarding and can be implemented very easily and efficiently. 
One drawback is that the hard edges of the drawn quad can become visible, when parts of the particle overlap with other objects in the scene. In this case the volumetric appearance of the particle can easily be destroyed. In order to avoid this, the transparency can be adapted near the region of overlap in such a way that the hard edges are smoothed out. This method is called Soft Particles and is used, because it increases the picture quality of the particle. The simulation, as the main topic of this work, can be interpreted as the approximation of a physical behavior, that can be achieved by manipulation of the data. Therefore it is about the changing, creation and deletion of particles. A raindrop-particle, as an example, is created in the sky in a cloud, then fall towards the ground, and at the ground the particle gets deleted. It is obvious that a system may contain a high number of particles in order to obtain the phenomenon to be approximated. In real-time applications, all particles should ideally be simulated at least 60 times per second, but this is not absolutely necessary in the type of simulation, that underlies this work. Therefore, an efficient method for simulation is very important. In this work three different basic methods for simulation are presented and compared. The two most important differences are the memory management and the way of computation of the simulation. While in one technique the particle data is stored in the main memory and the computation is done by the CPU, these two processes are outsourced to the GPU, when using one of the other methods. At the end of each simulation step the particles are checked against collision with other scene objects. In case of a collision the particle is reflected respectively. In order to represent transparent substances like smoke properly the particles have to be sorted and - starting with the farthest away - rendered. 
The easiest way would be to sort the particles according to their distance to the camera. When simulation is done on the CPU this can be easily achieved using classical sort algorithms, but when storing the particle data on the GPU other algorithms, like the Bitonic Merge Sort, have to be used. ", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien ", keywords = "Particle System, Real-time Rendering, Particle Interaction", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/heinz-2019-psi/", } @misc{schuetz-2019-LCO, title = "Live Coding of a VR Render Engine in VR", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2019", abstract = "Live coding in virtual reality allows users to create and modify their surroundings through code without the need to leave the virtual reality environment. Previous work focuses on modifying the scene. We propose an application that allows developers to modify virtually everything at runtime, including the scene but also the render engine, shader code and input handling, using standard desktop IDEs through a desktop mirror. ", month = mar, publisher = "IEEE", location = "Osaka", address = "http://ieeevr.org/2019/", event = "IEEE VR 2019", doi = "10.1109/VR.2019.8797760", note = "Poster presented at IEEE VR 2019 (2019-03)", pages = "1150--1151", keywords = "virtual reality, live coding, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-LCO/", } @incollection{BOKSANSKY-2019-RTS, title = "Ray Traced Shadows: Maintaining Real-Time Frame Rates", author = "Jakub Boksansky and Michael Wimmer and Jir\'{i} Bittner", year = "2019", abstract = "Efficient and accurate shadow computation is a long-standing problem in computer graphics. 
In real-time applications, shadows have traditionally been computed using the rasterization-based pipeline. With recent advances of graphics hardware, it is now possible to use ray tracing in real-time applications, making ray traced shadows a viable alternative to rasterization. While ray traced shadows avoid many problems inherent in rasterized shadows, tracing every shadow ray independently can become a bottleneck if the number of required rays rises, e.g., for high-resolution rendering, for scenes with multiple lights, or for area lights. Therefore, the computation should focus on image regions where shadows actually appear, in particular on the shadow boundaries. We present a practical method for ray traced shadows in real-time applications. Our method uses the standard rasterization pipeline for resolving primary-ray visibility and ray tracing for resolving visibility of light sources. We propose an adaptive sampling algorithm for shadow rays combined with an adaptive shadowfiltering method. These two techniques allow computing high-quality shadows with a limited number of shadow rays per pixel. 
We evaluated our method using a recent real-time ray tracing API (DirectX Raytracing) and compare the results with shadow mapping using cascaded shadow maps.", month = mar, address = "New York", booktitle = "Ray Tracing Gems: High-Quality and Real-Time Rendering with DXR and Other APIs", doi = "10.1007/978-1-4842-4427-2_13", editor = "Eric Haines and Tomas Akenine-M\"{o}ller", isbn = "978-1-4842-4426-5", publisher = "Springer", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/BOKSANSKY-2019-RTS/", } @inproceedings{kroesl-2019-ICthroughVR, title = "ICthroughVR: Illuminating Cataracts through Virtual Reality", author = "Katharina Kr\"{o}sl and Carmine Elvezio and Matthias H\"{u}rbe and Sonja Karst and Michael Wimmer and Steven Feiner", year = "2019", abstract = "Vision impairments, such as cataracts, affect how many people interact with their environment, yet are rarely considered by architects and lighting designers because of a lack of design tools. To address this, we present a method to simulate vision impairments caused by cataracts in virtual reality (VR), using eye tracking for gaze-dependent effects. We conducted a user study to investigate how lighting affects visual perception for users with cataracts. 
Unlike past approaches, we account for the user's vision and some constraints of VR headsets, allowing for calibration of our simulation to the same level of degraded vision for all participants.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798239", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "655--663", keywords = "vision impairments, cataracts, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/kroesl-2019-ICthroughVR/", } @inproceedings{schuetz-2019-CLOD, title = "Real-Time Continuous Level of Detail Rendering of Point Clouds", author = "Markus Sch\"{u}tz and Katharina Kr\"{o}sl and Michael Wimmer", year = "2019", abstract = "Real-time rendering of large point clouds requires acceleration structures that reduce the number of points drawn on screen. State-of-the art algorithms group and render points in hierarchically organized chunks with varying extent and density, which results in sudden changes of density from one level of detail to another, as well as noticeable popping artifacts when additional chunks are blended in or out. These popping artifacts are especially noticeable at lower levels of detail, and consequently in virtual reality, where high performance requirements impose a reduction in detail. We propose a continuous level-of-detail method that exhibits gradual rather than sudden changes in density. Our method continuously recreates a down-sampled vertex buffer from the full point cloud, based on camera orientation, position, and distance to the camera, in a point-wise rather than chunk-wise fashion and at speeds up to 17 million points per millisecond. As a result, additional details are blended in or out in a less noticeable and significantly less irritating manner as compared to the state of the art. 
The improved acceptance of our method was successfully evaluated in a user study.", month = mar, publisher = "IEEE", location = "Osaka, Japan", event = "IEEE VR 2019, the 26th IEEE Conference on Virtual Reality and 3D User Interfaces", doi = "10.1109/VR.2019.8798284", booktitle = "2019 IEEE Conference on Virtual Reality and 3D User Interfaces", pages = "103--110", keywords = "point clouds, virtual reality, VR", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/schuetz-2019-CLOD/", } @inproceedings{ZOTTI-2016-VAA, title = "Virtual Archaeoastronomy: Stellarium for Research and Outreach", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer and Wolfgang Neubauer", year = "2019", abstract = "In the last few years, the open-source desktop planetarium program Stellarium has become ever more popular for research and dissemination of results in Cultural Astronomy. In this time we have added significant capabilities for applications in cultural astronomy to the program. The latest addition allows its use in a multi-screen installation running both completely automated and manually controlled setups. During the development time, also the accuracy of astronomical simulation has been greatly improved.", month = mar, isbn = "978-3-319-97006-6", publisher = "Springer", location = "Milano, Italy", event = "SIA 2016 (16th Conference of the Italian Society for Archaeoastronomy)", booktitle = "Archaeoastronomy in the Roman World (Proceedings 16th Conference of the Italian Society for Archaeoastronomy)", pages = "187--205", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/ZOTTI-2016-VAA/", } @mastersthesis{PROST-2019-RTPAL, title = "Real-Time Photometric Area Light Approximation for Interactive Lighting Design", author = "Lukas Prost", year = "2019", abstract = "Photometric light sources are modeled after real-world luminaires and are used in lighting design to accurately simulate lighting. 
While an accurate evaluation of their illumination is possible with offline global-illumination algorithms, currently used realtime approximations, which are required for an interactive lighting design work flow, are prone to errors when the light source is close to illuminated objects. This is due to the non-zero dimensionality of photometric lights, which are often area or volume lights. In this thesis, we present a new technique to approximate photometric area lights in real time. This new technique is based on combining two sampling strategies that are currently used in game engines to approximate the illumination from diffuse area lights. Our technique samples the photometric area light with this combined sampling strategy and then computes the illumination with a cubature technique based the Delaunay triangulation. To do this in real time, we implemented our method on the GPU and developed a compact triangle data structure that enables an efficient generation of a Delaunay triangulation. 
The result of this thesis is a new technique for photometric area lights that creates visually plausible approximations in real time, even if the light source is close to illuminated objects.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Research Unit of Computer Graphics, Institute of Visual Computing and Human-Centered Technology, Faculty of Informatics, TU Wien", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/PROST-2019-RTPAL/", } @inproceedings{STEINLECHNER-2019-ICT, title = "A Novel Approach for Immediate, Interactive CT Data Visualization and Evaluation using GPU-based Segmentation and Visual Analysis", author = "Harald Steinlechner and Georg Haaser and Bernd Oberdorfer and Daniel Habe and Stefan Maierhofer and Michael Schw\"{a}rzler and Eduard Gr\"{o}ller", year = "2019", abstract = "CT data of industrially produced cast metal parts are often afflicted with artefacts due to complex geometries ill-suited for the scanning process. Simple global threshold-based porosity detection algorithms usually fail to deliver meaningful results. Other adaptive methods can handle image artefacts, but require long preprocessing times. This makes an efficient analysis workflow infeasible. We propose an alternative approach for analyzing and visualizing volume defects in a fully interactive manner, where analyzing volumes becomes more of an interactive exploration instead of time-consuming parameter guessing interrupted by long processing times. Our system is based on a highly efficient GPU implementation of a segmentation algorithm for porosity detection. The runtime is on the order of seconds for a full volume and parametrization is kept simple due to a single threshold parameter. 
A fully interactive user interface comprised of multiple linked views allows to quickly identify defects of interest, while filtering out artefacts even in noisy areas.", month = feb, location = "Padova, Italy", event = "International Conference on Industrial Computed Tomography (ICT) 2019", editor = "Simone Carmignato", booktitle = "International Conference on Industrial Computed Tomography (ICT) 2019", pages = "1--6", keywords = "CT, GPU, Inclusion Detection, Interactive Visualisation, VisualAnalysis, Parallel Coordinates, Volume Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2019/STEINLECHNER-2019-ICT/", } @bachelorsthesis{themmer-2018-fbx, title = "Definition of a Workflow to Import FBX Models in Unity3D at Run-Time While Retaining Material Properties for Various Shader Types", author = "Nicolas Themmer", year = "2018", abstract = "This bachelor thesis can be divided into two sequential parts. The first part examines physically-based shader analyses their functionality. In this context, shader were studied in general, while discussing core elements like the rendering equation and the graphics pipeline. Physically-based shader were subsequently brought to close attention. In this step, the study deals with various real life phenomenons, that occur in reality and were successfully implemented in computer graphics. For this purpose, the BRDF (Bidirectional Reflection Distribution Function) was explained theoretically and analysed mathematically. The second part of this thesis covers the import of FBX files into the game engine Unity3D. The goal of this chapter is to modify the import process to the extend that material information won’t get lost. When importing FBX files into Unity3D, textures have to be assigned to Unity materials by default. Therefore, the Autodesk API is used to examine FBX files and gain necessary information regarding textures and materials. 
The thesis covers the use-case of uploading FBX files to a server, analysing these files, generating Unity files in a native format (AssetBundles) with correct material information and storing them into a database. A client, that synchronises itself with the database during run-time and loading these models into a visual scene was also implemented. In this context, the process of modeling 3D objects, including materials and textures is covered as well. The workflow was implemented as a network application in order to outsource the conversion process and therefore substantially decrease the consumption of computational power. The workflow defines necessary steps, to automate the assignment of textures to Unity materials. A solution for the use-case of including FBX files into unity applications during run-time is hereby presented.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/themmer-2018-fbx/", } @bachelorsthesis{Tucek_2018, title = "INTRA SPACE Agent An Agent-Based Architecture for an Artistic Real-Time Installation", author = "Tom Tucek", year = "2018", abstract = "This work describes the processes involved in developing and embedding an agent system into an artistic real-time installation. The agent system would be responsible for controlling virtual figures on a screen, which interact with users of the installation. It was necessary to develop agents which displayed behavior pre-defined in stories designed by the project team, as well as to ensure that such agents acted in a way that was both well received by visitors, while also stimulating interaction in a way that allowed the project team to conduct research on the interactions between humans and nonhumans. The agent system was implemented using Jason, a Java-based interpreter of the agentprogramming language AgentSpeak. 
Over the course of the project, various agent scenarios were developed, with differing ways of implementation. An iterative process was used for development and regular meetings with project members were instated, to discuss progress and ideas, while utilizing visualizations to aid communication. Behavior of developed agents was plagued by various problems, from being too reliant on reactions towards user behavior, to not interacting enough with active users. Various approaches to such problems were tried out, discussed, and documented. During the final installation, agents with indeterministic and emergent behavior were employed. Furthermore, agents were focused on both pursuing their own goals as well as constantly paying attention to visitor behavior. This allowed users to realize agents as a social presence and interact with them in a way that was both novel and natural. ", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Jason; Agent System; Human-Agent Interaction; Real-Time; Art ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Tucek_2018/", } @bachelorsthesis{GRIESHOFER-2018-GOS, title = "Game Optimization and Steam Publishing for Swarmlake", author = "Dominique Grieshofer", year = "2018", abstract = "Video games are complex pieces of software which require a certain amount of prototyping and iteration to create the intended experience. They are also real-time applications and need to be performant to run at the desired speed. Most software architecture is about creating more flexible code and therefore making fewer assumptions which allow for faster prototyping and iteration time. However, optimizing is all about making assumptions and knowing limitations to be able to improve efficiency. 
Since optimal optimization is usually more natural to guarantee after making a well-designed game than vice versa, keeping the code flexible until the end is a valid compromise. Knowing game optimization patterns beforehand can be useful to make sure only the least amount of code needs to be rewritten at the end of a game’s development cycle. Successfully selling a product such as a video game also requires marketing and distribution. One of the most influential platform to distribute computer games on PC is Steam. Knowing more about the target platform a game releases on can make it more likely to make the optimal decisions in that process.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "computer games", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/GRIESHOFER-2018-GOS/", } @bachelorsthesis{Fischer-2018-sssvr, title = "Subsurface Scattering in VR", author = "Lukas Fischer", year = "2018", abstract = "Subsurface Scattering is a physical phenomenon that appears in many materials but is most notable for human skin. Current research makes it possible to calculate the local scattering of light inside a translucent medium around the point of entry with a convolution of a separable filter in screen-space. This thesis tries to evaluate this technique by Jimenez et al. for stereoscopic rendering and how it can be implemented for the currently popular game engine Unity. Unity offers support for VR applications and allows the implementation of post-processing effects and other techniques that rely on shaders. The implemented Subsurface Scattering method is combined with an approach for translucency and a physically based specular model. In the developed application the effects can be observed with and without VR and important parameters can be changed by the user. 
The performance and visual quality are reviewed with respect to the viability of the effects in Unity, stereoscopic rendering and frame rate. The latter is especially important in VR applications to deliver a comfortable interactive experience. Implementation https://drive.google.com/file/d/19cWkXh19uDCIa6Mcu3qy1UeIxYlOmjJA/view?usp=sharing ", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "subsurface scattering, virtual reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Fischer-2018-sssvr/", } @article{Konev-FCV2018, title = "Fast cutaway visualization of sub-terrain tubular networks", author = "Artem Konev and Manuel Matusich and Ivan Viola and Hendrik Schulze and Daniel Cornel and J\"{u}rgen Waser", year = "2018", month = oct, doi = "10.1016/j.cag.2018.07.004", issn = "0097-8493", journal = "Computers \& Graphics", number = "5", volume = "75", pages = "25--35", keywords = "Cutaway visualization, Procedural billboarding, Subsurface networks", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Konev-FCV2018/", } @inproceedings{kroesl-2018-DC, title = "[DC] Computational Design of Smart Lighting Systems for Visually Impaired People, using VR and AR Simulations", author = "Katharina Kr\"{o}sl", year = "2018", abstract = "This Doctoral Consortium paper presents my dissertation research in a multidisciplinary setting, spanning over the areas of architecture, specifically lighting design and building information modeling, to virtual reality (VR) and perception. Since vision impairments are hardly taken into account in architecture and lighting design today, this research aims to provide the necessary tools to quantify the effects of vision impairments, so design guidelines regarding these impairments can be developed. 
Another research goal is the determination of the influence of different lighting conditions on the perception of people with vision impairments. This would allow us to develop smart lighting systems that can aid visually impaired people by increasing their visual perception of their environment. This paper also outlines the concept for a tool to automatically generate lighting solutions and compare and test them in VR, as design aid for architects and lighting designers.", month = oct, publisher = "IEEE", location = "Munich", event = "ISMAR 2018", booktitle = "Proceedings of the 2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)", keywords = "vision impairments, lighting design, virtual reality, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-DC/", } @mastersthesis{GAMPER-2018-OSG, title = "Ocean Surface Generation and Rendering", author = "Thomas Gamper", year = "2018", abstract = "The synthesis of a believable depiction of the ocean surface is a permanent topic of interest in computer graphics. It represents even more of a challenge for applications which require the real-time display of a water body as large as the ocean. That is because the ocean is a highly dynamic system which combines waves at all scales, ranging from millimetres to kilometres. Moreover, the ocean may be observed from several distances, ranging from close-ups to views which reach the horizon. Thus, we present a framework to generate and render the open ocean in real time, for arbitrary viewing distances and including waves at all scales. We focus our efforts on the geometry of the animated ocean surface, for which we leverage a set of wave spectrum models from oceanographic research. We discuss the intricacies of said models, as well as their fitness for real-time rendering. 
Moreover, we delineate in detail how to integrate distinct wave spectrum models into a coherent framework, from which one is able obtain believable, consistent and coherent results.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "ocean rendering, wave synthesis", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/GAMPER-2018-OSG/", } @bachelorsthesis{OPPTIZ-2018-IVRL, title = "Implementing Virtual Ray Lights for Rendering Scenes with Participating Media", author = "Michael Oppitz", year = "2018", abstract = "This thesis documents the full implementation of the method Virtual Ray Lights for Rendering Scenes with Participating Media. As a basic understanding of the foundations of rendering and related approaches is necessary to understand this complex method, these foundations are discussed first. There, the rendering equation and the physical behaviour of light is described. Additionally, rendering approaches like Recursive Ray Tracing and Photon Mapping that do not consider participating media, as well as methods like Volumetric Photon Mapping, Virtual Point Lights and Photon Beams, which are able to render participating media, are evaluated. For the discussion on Virtual Ray Lights, the evaluation takes place in three parts. First, the method is discussed in general with a mathematical analysis. Afterwards, implementation details are evaluated where pseudocode examples are provided. Lastly, the rendered results of the implementation are evaluated thoroughly. These results are also compared to provided images from various research papers. The goal of this thesis is to provide an implementation of Virtual Ray Lights, as well as providing the tools to implement this method in other projects. 
We provide the well-documented source code for this project, with the scene settings to recreate the examples.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "global illumination, virtual point lights", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/OPPTIZ-2018-IVRL/", } @misc{kroesl-2018-TVS, title = "The Virtual Schoolyard: Attention Training in Virtual Reality for Children with Attentional Disorders", author = "Katharina Kr\"{o}sl and Anna Felnhofer and Johanna X. Kafka and Laura Schuster and Alexandra Rinnerthaler and Michael Wimmer and Oswald D. Kothgassner", year = "2018", abstract = "This work presents a virtual reality simulation for training different attentional abilities in children and adolescents. In an interdisciplinary project between psychology and computer science, we developed four mini-games that are used during therapy sessions to battle different aspects of attentional disorders. First experiments show that the immersive game-like application is well received by children. Our tool is also currently part of a treatment program in an ongoing clinical study.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230817", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", note = "Article 27--", pages = "Article 27 – ", keywords = "virtual reality, attentional disorders, user study", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/kroesl-2018-TVS/", } @misc{schuetz-2018-PPC, title = "Progressive Real-Time Rendering of Unprocessed Point Clouds", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2018", abstract = "Rendering tens of millions of points in real time usually requires either high-end graphics cards, or the use of spatial acceleration structures. 
We introduce a method to progressively display as many points as the GPU memory can hold in real time by reprojecting what was visible and randomly adding additional points to uniformly converge towards the full result within a few frames. Our method heavily limits the number of points that have to be rendered each frame and it converges quickly and in a visually pleasing way, which makes it suitable even for notebooks with low-end GPUs. The data structure consists of a randomly shuffled array of points that is incrementally generated on-the-fly while points are being loaded. Due to this, it can be used to directly view point clouds in common sequential formats such as LAS or LAZ while they are being loaded and without the need to generate spatial acceleration structures in advance, as long as the data fits into GPU memory.", month = aug, publisher = "ACM", location = "Vancouver, Canada", isbn = "978-1-4503-5817-0/18/08", event = "ACM SIGGRAPH 2018", doi = "10.1145/3230744.3230816", Conference date = "Poster presented at ACM SIGGRAPH 2018 (2018-08-12--2018-08-16)", note = "Article 41--", pages = "Article 41 – ", keywords = "point based rendering, point cloud, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schuetz-2018-PPC/", } @article{zsolnai-2018-gms, title = "Gaussian Material Synthesis", author = "Karoly Zsolnai-Feh\'{e}r and Peter Wonka and Michael Wimmer", year = "2018", abstract = "We present a learning-based system for rapid mass-scale material synthesis that is useful for novice and expert users alike. The user preferences are learned via Gaussian Process Regression and can be easily sampled for new recommendations. Typically, each recommendation takes 40-60 seconds to render with global illumination, which makes this process impracticable for real-world workflows. 
Our neural network eliminates this bottleneck by providing high-quality image predictions in real time, after which it is possible to pick the desired materials from a gallery and assign them to a scene in an intuitive manner. Workflow timings against Disney's ``principled'' shader reveal that our system scales well with the number of sought materials, thus empowering even novice users to generate hundreds of high-quality material models without any expertise in material modeling. Similarly, expert users experience a significant decrease in the total modeling time when populating a scene with materials. Furthermore, our proposed solution also offers controllable recommendations and a novel latent space variant generation step to enable the real-time fine-tuning of materials without requiring any domain expertise.", month = aug, journal = "ACM Transactions on Graphics (SIGGRAPH 2018)", volume = "37", number = "4", issn = "0730-0301", doi = "10.1145/3197517.3201307", pages = "76:1--76:14", keywords = "gaussian material synthesis, neural rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/zsolnai-2018-gms/", } @phdthesis{schwaerzler_2018_phd, title = "Advances in the Multimodal 3D Reconstruction and Modeling of Buildings", author = "Michael Schw\"{a}rzler", year = "2018", abstract = "Driven by the need for faster and more efficient workflows in the digitization of urban environments, the availability of affordable 3D data-acquisition systems for buildings has drastically increased in the last years: Laser scanners and photogrammetric methods both produce millions of 3D points within minutes of acquisition time. They are applied both on street-level as well as from above using drones, and are used to enhance traditional tachymetric measurements in surveying. 
However, these 3D data points are not the only available information: Extracted meta data from images, simulation results (e.g., from light simulations), 2D floor plans, and semantic tags – especially from the upcoming Building Information Modeling (BIM) systems – are becoming increasingly important. The challenges this multimodality poses during the reconstruction of CAD-ready 3D buildings are manifold: Apart from handling the enormous size of the data that is collected during the acquisition steps, the different data sources must also be registered to each other in order to be applicable in a common context – which can be difficult in case of missing or erroneous information. Nevertheless, the potential for improving both the workflow efficiency as well as the quality of the reconstruction results is huge: Missing information can be substituted by data from other sources, information about spatial or semantic relations can be utilized to overcome limitations, and interactive modeling complexity can be reduced (e.g., by limiting interactions to a two-dimensional space). In this thesis, four publications are presented which aim at providing freely combinable “building blocks” for the creation of helpful methods and tools for advancing the field of Multimodal Urban Reconstruction. First, efficient methods for the calculation of shadows cast by area light sources are presented – one with a focus on the most efficient generation of physically accurate penumbras, and the other one with the goal of reusing soft shadow information in consecutive frames to avoid costly recalculations. Then, a novel, optimization-supported reconstruction and modeling tool is presented, which employs sketch-based interactions and snapping techniques to create water-tight 3D building models. An extension to this system is demonstrated consecutively: There, 2D photos act as the only interaction canvas for the simple, sketch-based creation of building geometry and the corresponding textures. 
Together, these methods form a solid foundation for the creation of common, multimodal environments targeted at the reconstruction of 3D building models.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/schwaerzler_2018_phd/", } @studentproject{GANTNER-2018-ARO, title = "AR Overlays for Leica VIS with Vulkan", author = "Patrick Gantner", year = "2018", abstract = "Using the existing Visual Inertial System (VIS) for pose estimation, BIM (Building Information Management) information is displayed using Augmented Reality technology. Specific usecases, for example, stakeout or CAD previews are implemented to demonstrate the advantages of such overlays. The focus of the implementation is on creating a correct representation of the physical camera in the virtual scene and render objects on top of the incoming video stream of the device. For a good AR experience the objects should be distorted according to the camera’s distortion model and be occluded correctly with the environment.", month = jun, URL = "https://www.cg.tuwien.ac.at/research/publications/2018/GANTNER-2018-ARO/", } @article{Kathi-2018-VRB, title = "A VR-based user study on the effects of vision impairments on recognition distances of escape-route signs in buildings", author = "Katharina Kr\"{o}sl and Dominik Bauer and Michael Schw\"{a}rzler and Henry Fuchs and Michael Wimmer and Georg Suter", year = "2018", abstract = "In workplaces or publicly accessible buildings, escape routes are signposted according to official norms or international standards that specify distances, angles and areas of interest for the positioning of escape-route signs. 
In homes for the elderly, in which the residents commonly have degraded mobility and suffer from vision impairments caused by age or eye diseases, the specifications of current norms and standards may be insufficient. Quantifying the effect of symptoms of vision impairments like reduced visual acuity on recognition distances is challenging, as it is cumbersome to find a large number of user study participants who suffer from exactly the same form of vision impairments. Hence, we propose a new methodology for such user studies: By conducting a user study in virtual reality (VR), we are able to use participants with normal or corrected sight and simulate vision impairments graphically. The use of standardized medical eyesight tests in VR allows us to calibrate the visual acuity of all our participants to the same level, taking their respective visual acuity into account. Since we primarily focus on homes for the elderly, we accounted for their often limited mobility by implementing a wheelchair simulation for our VR application.", month = apr, journal = "The Visual Computer", volume = "34", number = "6-8", issn = "0178-2789", doi = "10.1007/s00371-018-1517-7", pages = "911--923", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Kathi-2018-VRB/", } @inproceedings{PB-VRVis-2018-005, title = "An Automated Verification Workflow for Planned Lighting Setups using BIM", author = "Andreas Walch and Katharina Kr\"{o}sl and Christian Luksch and David Pichler and Thomas Pipp and Michael Schw\"{a}rzler", year = "2018", abstract = "The use of Building Information Modeling (BIM) methods is becoming more and more established in the planning stage, during the construction, and for the management of buildings. Tailored BIM software packages allow to handle a vast amount of relevant aspects, but have so far not been covering specialized tasks like the evaluation of light distributions in and around a 3D model of a building. 
To overcome this limitation, we demonstrate the use of the open-source IFC format for preparing and exchanging BIM data to be used in our interactive light simulation system. By exploiting the availability of 3D data and semantic descriptions, it is possible to automatically place measurement surfaces in the 3D scene, and evaluate the suitability and sustainability of a planned lighting design according to given constraints and industry norms. Interactive visualizations for fast analysis of the simulation results, created using state-of-the-art web technologies, are seamlessly integrated in the 3D work environment, helping the lighting designer to quickly improve the initial lighting solution with a few clicks.", month = apr, isbn = "978-3-9504173-5-7", series = "REAL CORP", event = "REAL CORP 2018", editor = "M. Schrenk and V. V. Popovich and P. Zeile and P. Elisei and C. Beyer and G. Navratil", booktitle = "REAL CORP 2018, Proceedings", pages = "55--65", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/PB-VRVis-2018-005/", } @bachelorsthesis{glanz-2017-pbrcomparison, title = "A Comparison of Physically Based Rendering Systems", author = "Robert Glanz", year = "2018", abstract = "In this thesis, a quantitative evaluation is performed to find the most relevant physically based rendering systems in research. As a consequence of this evaluation, the rendering systems Mitsuba, PBRT-v3 and LuxRender are compared to each other and their potential for interoperability is assessed. 
The goal is to find common materials and light models and analyze the effects of changing the parameters of those models.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "physically based rendering, Monte Carlo rendering, pbrt, Mitsuba, LuxRender", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/glanz-2017-pbrcomparison/", } @mastersthesis{Szabo-2018-DA, title = "A Composable and Reusable Photogrammetric Reconstruction Library", author = "Attila Szabo", year = "2018", month = mar, note = "1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Szabo-2018-DA/", } @mastersthesis{Bauer_Dominik_2018_VR, title = "Evaluation of the Recognition Distances of Safety Signs in VR Considering Vision Impairments", author = "Dominik Bauer", year = "2018", abstract = "To facilitate the safe evacuation of buildings, escape-route safety signs need to be placed along the whole escape route such that they are legible for building occupants. While standards and legal requirements provide suggestions on how to select and place safety signs to achieve this, they do not provide sufficient considerations concerning people suffering from vision impairments. A main cause of vision impairment are age-related eye diseases, with the most common symptom being the loss of visual acuity. We investigate the influence of visual acuity on the ability to recognize safety signs using a novel methodology, evaluating existing standards concerning vision impairments: We calibrate the visual acuity of the test subjects to the same level via a standardized medical test in VR. This is achieved by using test subjects with normal or corrected vision and simulating the impairment in VR. 
Furthermore, we present a tool for lighting designers which enables them to check their designs considering maximum recognition distances to investigate problematic areas along an escape route. Using our novel user-study methodology, we determined the recognition distances for safety signs, observed under two different levels of visual acuity and varying observation angles. In addition, we determined the impact of the HTC Vive’s HMD on the visual acuity achievable in VR. We conclude that the existing standards fail to correctly estimate the maximum recognition distances of safety signs for observers suffering from reduced visual acuity.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "virtual reality, vision impairment simulation, emergency lighting", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/Bauer_Dominik_2018_VR/", } @mastersthesis{ROEGNER-2014-TUI, title = "Temporal Upsampling for Image Sequences Using a Non-Local Means Algorithm", author = "Clemens R\"{o}gner", year = "2017", abstract = "Computer-generated video sequences with a frame-rate higher than the usual 24 images per second, such as 48 or 60 frames per second, have become more popular in the respective industries, due to more visual fidelity. This, however, results in more computational costs for the same length of the video sequence. One solution to this problem is the so-called frame-rate upsampling, which makes use of temporal and spatial coherence to approximate new frames and therefore saves computational time. Several methods have been published in this field, for the purposes of real-time rendering as well as for offline rendering algorithms. In this thesis, two new algorithms for frame-rate upsampling are introduced. Those are targeted at high-quality computer-generated images that feature various global-illumination effects. 
The two new algorithms make use of a video denoising method - the non-local means algorithm - to find the appropriate pixel colors for the frame, that has to be upsampled. To find the corresponding pixels in another frame, the methods of this thesis either use existing color information or require additional data, which can be extracted from any global-illumination algorithm with minimal further computations. The proposed methods are aimed at handling reflections and refractions in the scene better than previous work.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "global illumination, frame-rate upsampling, temporal coherence", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ROEGNER-2014-TUI/", } @mastersthesis{CELAREK-2017-QCL, title = "Quantifying the Convergence of Light-Transport Algorithms", author = "Adam Celarek", year = "2017", abstract = "This work aims at improving methods for measuring the error of unbiased, physically based light-transport algorithms. State-of-the-art papers show algorithmic improvements via error measures like Mean Square Error (MSE) or visual comparison of equal-time renderings. These methods are unreliable since outliers can cause MSE variance and visual comparison is inherently subjective. We introduce a simple proxy algorithm: pure algorithms produce one image corresponding to the computation budget N. The proxy, on the other hand, averages N independent images with a computation budget of 1. The proxy algorithm fulfils the preconditions for the Central Limit Theorem (CLT), and hence, we know that its convergence rate is (1/N). Since this same convergence rate applies for all methods executed using the proxy algorithm, comparisons using variance- or standard-deviation-per-pixel images are possible. 
These per-pixel error images can be routinely computed and allow comparing the render quality of different lighting effects. Additionally, the average of pixel variances is more robust against outliers compared to the traditional MSE or comparable metrics computed for the pure algorithm. We further propose the Error Spectrum Ensemble (ESE) as a new tool for evaluating light-transport algorithms. It summarizes expected error and outliers over spatial frequencies. ESE is generated using the data from the proxy algorithm: N error images are computed using a reference, transformed into Fourier power spectra and compressed using radial averages. The descriptor is a summary of those radial averages. In the results, we show that standard-deviation images, short equal-time renderings, ESE and expected MSE are valuable tools for assessing light-transport algorithms.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "error metric, global illumination", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/CELAREK-2017-QCL/", } @mastersthesis{Rainer_2017, title = "Interactive Shape Detection in Out-of-Core Point Clouds for Assisted User Interactions", author = "Bernhard Rainer", year = "2017", abstract = "This thesis presents a semi-automated method for shape detection in out-of-core point clouds. Rather than performing shape detection on the entire point cloud at once, a user-controlled interaction determines the region that is to be segmented next. By keeping the size of the region and the number of points small, the algorithm produces meaningful results within a fraction of a second. Thus, the user is presented immediately with feedback on the local geometry. 
As modern point clouds can contain billions of points and the memory capacity of consumer PCs is usually insufficient to hold all points in memory at the same time, a level-of-detail data structure is used to store the point cloud on the hard disc, and data is loaded into memory only on use. This data structure partitions the point cloud into small regions, each containing around 5000 points, that are used for rendering and shape detection. Interacting with point clouds is a particularly demanding task. A precise selection of a region of interest, using the two-dimensional lasso interaction, often needs multiple view changes and subsequent improvements. This thesis proposes improvements to the lasso interaction, by performing selection only on the set of points that are approximated by a detected shape. Thus, the selection of undesired points in the fore- and background is reduced. Point picking is improved as well by the use of a detected shape, such that only points that are approximated by this shape are pick-able. The result of this thesis is an application that allows the user to view point clouds with millions of points. It also provides a novel interaction technique for quick local shape detection as well as shape-assisted interactions that utilize this local semantic information to improve the user’s workflow.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Rainer_2017/", } @mastersthesis{KREUZER-2017-PBF, title = "Using Perception-Based Filtering to Hide Shadow Artifacts", author = "Felix Kreuzer", year = "2017", abstract = "Shadows are an indispensable aid for understanding spatial relations of objects in natural scenes, which is why they are very important for real-time rendering applications. 
Combining filtering techniques with shadow mapping is a common tool to simulate visually-pleasing shadows in interactive applications. A positive effect of such approaches is that the filtering blurs aliasing artifacts caused by sampling the discretized geometric data stored in the shadow map, thereby improving the visual quality of the shadow. The goal of this thesis is to exploit common filtering algorithms, in order to find a function of blur radius and shadow-map sampling frequency, which allows for optimized computational performance while mostly preserving the visual quality of the shadow. In the course of this work, we investigate how shadow artifacts arise and how to hide them. We set up and execute a user study to find the optimal relation between the shadow-map sampling frequency and the filter radius. From the results of the user study, we derive a formula and develop an algorithm that can be incorporated into existing shadow-mapping algorithms. We evaluate our results by applying the algorithm to a custom-made rendering framework and observe an increase in processing speeds.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/KREUZER-2017-PBF/", } @studentproject{OPPITZ-2017-3DM, title = "3D Mass Customization: Real-Time High-Quality Lighting Effects for WebGL", author = "Michael Oppitz", year = "2017", abstract = "During the internship at ShapeDiver GmbH the visual quality of an existing WebGL platform had to be improved. This platform is used as an online 3D configurator, in which customers can modify the properties of parametric models. This includes the modification of properties like shape, size and materials. 
The difficulty for this project was to contemplate the fact that the product can be customized by the user in real-time.", month = oct, URL = "https://www.cg.tuwien.ac.at/research/publications/2017/OPPITZ-2017-3DM/", } @phdthesis{preiner_2017_phd, title = "Dynamic and Probabilistic Point-Cloud Processing", author = "Reinhold Preiner", year = "2017", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/preiner_2017_phd/", } @mastersthesis{weinzierl_heigl-2017-wh14, title = "Adaptively Clustered Reflective Shadow Maps", author = "Christoph Weinzierl", year = "2017", abstract = "The content of this thesis is to extend an existing real-time global illumination technique that uses Virtual Area Lights (VALs) for indirect illumination in combination with point-based shadow maps, so called Imperfect Shadow Maps (ISMs) to generate corresponding soft indirect shadows. A clustering algorithm inspired by k-Means creates VAL-clusters in light space, which are updated on a frame-by-frame basis. In this talk we will present concepts and techniques to eliminate some of the constraints and limitations of the existing solution as well as incorporating new ideas to make the algorithm more robust for larger, more complex and dynamic scenes. 
", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "VAL, ISM, Virtual Area Lights, Global Illumination, Imperfect Shadow Maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/weinzierl_heigl-2017-wh14/", } @bachelorsthesis{pointner_michael-2017-baa, title = "Multi-Focal Image Generation using Automatic Depth-Based Focus Selection", author = "Michael Pointner", year = "2017", abstract = "One of the most important objectives in photography is the sharpness of the whole image, which is not so easy to achieve because of the physical properties of a camera lens. Due to Google’s new Tango API, which enables the perception of depth information on smartphones, we have evaluated the usability of this new technology for the generation of all-sharp images through multi-focus with the Lenovo Phab 2 Pro as the first smartphone to support this technology.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "all-in-focus, focus selection, image stitching", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/pointner_michael-2017-baa/", } @bachelorsthesis{dodik-2017-pcbpt, title = "Implementing Probabilistic Connections for Bidirectional Path Tracing in the Mitsuba Renderer", author = "Nikola Dodik", year = "2017", abstract = "Light transport simulation algorithms are remarkably adept at recreating a large variety of light phenomena which occur in nature. As such they have seen widespread adoption across the industry, which made it paramount to create efficient and robust algorithms. One recent algorithm which tries to deal with this problem is known as Probabilistic Connections for Bidirectional Path Tracing (PCBPT). It builds upon the classical Bidirectional Path Tracing (BDPT) algorithm. 
In Bidirectional Path Tracing, a ray is traced from the sensor as well as from the emitter. The two rays are then connected to calculate the light contribution to image pixels. PCBPT extends this idea to support connecting multiple emitter paths to one sensor subpath, and introduces importance sampling as a way of choosing the most suitable emitter paths. Unfortunately, there was no implementation of PCBPT publically available, which is why we implemented it into the open-source Mitsuba renderer. We evaluate the algorithm against standard BDPT on a variety of different scenes. Our comparisons provide insight into what type of scenes PCBPT can help improve and where the additional computational cost presents too much of an overhead.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "physically based rendering, Monte Carlo rendering, bidirectional path tracing, probabilistic connections for bidirectional path tracing, Mitsuba, importance sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/dodik-2017-pcbpt/", } @bachelorsthesis{FRAISS-2017-PCU, title = "Rendering Large Point Clouds in Unity", author = "Simon Maximilian Fraiss", year = "2017", abstract = "In this thesis, a point-cloud renderer was developed in the game engine Unity. The focus lies especially on very big point clouds with several millions or billions of points, which cannot be loaded completely into memory. Special data structures and algorithms are needed to load and render continuously only the parts of the point-cloud that are relevant for the current camera position. The result is an efficient rendering system with variable settings and various rendering methods. The project is available on GitHub. 
", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "point clouds, unity", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/FRAISS-2017-PCU/", } @inproceedings{kroesl-2017-LiteMaker, title = "LiteMaker: Interactive Luminaire Development using Progressive Photon Tracing and Multi-Resolution Upsampling", author = "Katharina Kr\"{o}sl and Christian Luksch and Michael Schw\"{a}rzler and Michael Wimmer", year = "2017", abstract = "Industrial applications like luminaire development (the creation of a luminaire in terms of geometry and material) or lighting design (the efficient and aesthetic placement of luminaires in a virtual scene) rely heavily on high realism and physically correct simulations. Using typical approaches like CAD modeling and offline rendering, this requirement induces long processing times and therefore inflexible workflows. In this paper, we combine a GPU-based progressive photon-tracing algorithm to accurately simulate the light distribution of a luminaire with a novel multi-resolution image-filtering approach that produces visually meaningful intermediate results of the simulation process. By using this method in a 3D modeling environment, luminaire development is turned into an interactive process, allowing for real-time modifications and immediate feedback on the light distribution. Since the simulation results converge to a physically plausible solution that can be imported as a representation of a luminaire into a light-planning software, our work contributes to combining the two former decoupled workflows of luminaire development and lighting design, reducing the overall production time and cost for luminaire manufacturers. 
", month = sep, isbn = "978-3-03868-049-9", publisher = "The Eurographics Association", location = "Bonn, Germany", event = "VMV 2017", editor = "Matthias Hullin and Reinhard Klein and Thomas Schultz and Angela Yao", doi = "10.2312/vmv.20171253", booktitle = "Vision, Modeling & Visualization", pages = "1--8", keywords = "Computing methodologies, Ray tracing, Image processing, Mesh geometry models", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/kroesl-2017-LiteMaker/", } @article{ZOTTI-2017-BM, title = "Beyond 3D Models: Simulation of Temporally Evolving Models in Stellarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "In recent years, the interactive visual exploration and demonstration of three-dimensional virtual models of buildings or natural structures of archaeoastronomical interest under a simulated sky has become available for users of the open-source desktop planetarium program Stellarium [Zotti, 2015, 2016]. Users can load an architectural model in the well-known OBJ format and walk around to explore sight lines or light-and-shadow interaction in present and past times [Frischer et al., 2016]. However, until now, the model itself did not change in time, and loading models for various building phases (e.g., the assumed order of building the various standing stones, timber circles and stone circles of Stonehenge) always required a break in simulation and user interaction to load a model for the next phase. On the other hand, displaying a model under the sky of the wrong time may lead to inappropriate conclusions. Large-area models required considerable time to load, and loading caused a reset of location, so the user interested in changes in a certain viewing axis had to recreate that view again. 
Given that Stellarium is an “astronomical time machine”, nowadays capable of replaying sky vistas thousands of years ago with increasing accuracy [Zotti et al., submitted] and also for models with several million triangular faces, it seemed worth to explore possibilities to also show changes over time in the simulated buildings. The Scenery3D plugin of Stellarium is, however, not a complete game engine, and replicating the infrastructure found in such game engines like Unity3D – for example to interactively move game objects, or load small sub-components like standing stones and place them at arbitrary coordinates – seemed overkill. The solution introduced here is remarkably simple and should be easily adoptable for the casual model-making researcher: the MTL material description for the model, a simple plain-text file that describes colour, reflection behaviour, photo-texture or transparency of the various parts of the object, can be extended for our rendering system. Newly introduced values describe dates where parts of the model can appear and disappear (with transitional transparency to allow for archaeological dating uncertainties). The model parts with these enhanced, time-aware materials appear to fade in during the indicated time, will be fully visible in their “active” time, and will fade out again when Stellarium is set to simulate the sky when the real-world structures most likely have vanished. The only requirement for the model creator is now to separate objects so that they receive unique materials that can then be identified and augmented with these entries in the MTL text file. The advantages of this new feature should be clear: an observer can remain in a certain location in the virtual model and let the land- and skyscape change over decades or centuries, without the need to load new models. 
This allows the simulation of construction and reconstruction phases while still always keeping particularly interesting viewpoints unchanged, and will always show the matching sky for the most appropriate reconstruction phase of the model. ", month = sep, journal = "Mediterranean Archaeology and Archaeometry", volume = "18", number = "4", issn = "1108-9628", doi = "10.5281/zenodo.1477972", booktitle = "25th SEAC Conference", pages = "501--506", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-BM/", } @bachelorsthesis{ESBERGER-2016-AHO, title = "A History of Austrian Computer Games", author = "Sebastian Esberger", year = "2017", abstract = "Videospiele sind nicht nur eine der modernsten Unterhaltungs- und Kunstmedien, sondern auch eine der am schnelllebigsten. Trotz der Tatsache, dass erst seit 45 Jahren Videospiele existieren, sind bereits viele Informationen dar\"{u}ber verloren gegangen. In \"{O}sterreich begann der Videospiele-Boom erst um 1990 herum, trotzdem sind auch hier schon viele Informationen verloren gegangen. Diese Arbeit soll auf den folgenden Seiten zeigen, wie das Projekt ”A History of Austrian Computer Games” durchgef\"{u}hrt wurde, um Informationen \"{u}ber die \"{o}sterreichische Spiellandschaft zu erhalten. Unter anderem wurde dazu ein Webauftritt erstellt, welcher die Spielegeschichte in einer Datenbank festh\"{a}lt und \"{o}ffentlich verf\"{u}gbar macht. 
Interviews von Gr\"{u}nder und Entwickler der damaligen Szene zeigen aber auch die pers\"{o}nliche Sicht auf die Ereignisse der Vergangenheit.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ESBERGER-2016-AHO/", } @mastersthesis{ERLER-2017-HVR, title = "Haptic Feedback in Room-Scale VR", author = "Philipp Erler", year = "2017", abstract = "Virtual reality (VR) is now becoming a mainstream medium. Current systems like the HTC Vive offer accurate tracking of the HMD and controllers, which allows for highly immersive interactions with the virtual environment. The interactions can be further enhanced by adding feedback. As an example, a controller can vibrate when it is close to a grabbable ball. As such interactions are not exhaustingly researched, we conducted a user study. Specifically, we examine: - grabbing and throwing with controllers in a simple basketball game. - the influence of haptic and optical feedback on performance, presence, task load, and usability. - the advantages of VR over desktop for point-cloud editing. Several new techniques emerged from the point-cloud editor for VR. The bi-manual pinch gesture, which extends the handlebar metaphor, is a novel viewing method used to translate, rotate, and scale the point-cloud. Our new rendering technique uses the geometry shader to draw sparse point clouds quickly. The selection volumes at the controllers are our new technique to efficiently select points in point clouds. The resulting selection is visualized in real time. The results of the user study show that: - grabbing with a controller button is intuitive but throwing is not. Releasing a button is a bad metaphor for releasing a grabbed virtual object in order to throw it. - any feedback is better than none. 
Adding haptic, optical, or both feedback types to the grabbing improves the user performance and presence. However, only sub-scores like accuracy and predictability are significantly improved. Usability and task load are mostly unaffected by feedback. - the point-cloud editing is significantly better in VR with the bi-manual pinch gesture and selection volumes than on the desktop with the orbiting camera and lasso selections. ", month = jul, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "virtual reality, room-scale VR, throwing, grabbing, physics, basketball, haptic feedback, optical feedback, controllers, point cloud, point-cloud editing, presence, performance, usability, task load", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/", } @bachelorsthesis{Kovacs_2017, title = "Shadow Volumes in Unreal Engine 4", author = "B\'{a}lint Istvan Kov\'{a}cs", year = "2017", abstract = "The presented bachelor thesis project explores the possibilities of implementing custom lighting techniques in a state-of-the-art game engine. Specifically, Unreal Engine 4 is analyzed for the feasibility of implementing shadow volumes in a shader-centric plugin. The thesis discusses the theoretical and practical background of Unreal Engine and of shadow volumes, and provides detailed information on every implementation step. 
It shows the challenges of customization and the results achieved.", month = jun, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/Kovacs_2017/", } @article{CORNEL-2017-FRS, title = "Forced Random Sampling: fast generation of importance-guided blue-noise samples", author = "Daniel Cornel and Hiroyuki Sakai and Christian Luksch and Michael Wimmer", year = "2017", abstract = "In computer graphics, stochastic sampling is frequently used to efficiently approximate complex functions and integrals. The error of approximation can be reduced by distributing samples according to an importance function, but cannot be eliminated completely. To avoid visible artifacts, sample distributions are sought to be random, but spatially uniform, which is called blue-noise sampling. The generation of unbiased, importance-guided blue-noise samples is expensive and not feasible for real-time applications. Sampling algorithms for these applications focus on runtime performance at the cost of having weak blue-noise properties. Blue-noise distributions have also been proposed for digital halftoning in the form of precomputed dither matrices. Ordered dithering with such matrices allows to distribute dots with blue-noise properties according to a grayscale image. By the nature of ordered dithering, this process can be parallelized easily. We introduce a novel sampling method called forced random sampling that is based on forced random dithering, a variant of ordered dithering with blue noise. By shifting the main computational effort into the generation of a precomputed dither matrix, our sampling method runs efficiently on GPUs and allows real-time importance sampling with blue noise for a finite number of samples. 
We demonstrate the quality of our method in two different rendering applications.", month = jun, journal = "The Visual Computer", volume = "33", number = "6", issn = "1432-2315", pages = "833--843", keywords = "blue-noise sampling, importance sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/CORNEL-2017-FRS/", } @inproceedings{JAHRMANN-2017-RRTG, title = "Responsive Real-Time Grass Rendering for General 3D Scenes", author = "Klemens Jahrmann and Michael Wimmer", year = "2017", abstract = "Grass plays an important role in most natural environments. Most interactive applications use image-based techniques to approximate fields of grass due to the high geometrical complexity, leading to visual artifacts. In this paper, we propose a grass-rendering technique that is capable of drawing each blade of grass as geometrical object in real time. Accurate culling methods together with an adaptable rendering pipeline ensure that only the blades of grass that are important for the visual appearance of the field of grass are rendered. In addition, we introduce a physical model that is evaluated for each blade of grass. This enables that a blade of grass can react to its environment by calculating the influence of gravity, wind and collisions. A major advantage of our approach is that it can render fields of grass of arbitrary shape and spatial alignment. 
Thus, in contrast to previous work, the blades of grass can be placed on any 3D model, which is not required to be a flat surface or a height map.", month = feb, isbn = "978-1-4503-4886-7", publisher = "ACM", location = "San Francisco, CA", event = "I3D 2017", booktitle = "Proceedings of the 21st ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games", pages = "6:1--6:10", keywords = "real-time rendering, grass rendering, hardware tessellation", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/JAHRMANN-2017-RRTG/", } @studentproject{gersthofer_2017_COP, title = "Clipping and Orthogonal Projection in a Point Cloud Viewer", author = "Lukas Gersthofer", year = "2017", abstract = "Potree is an interactive web-based point cloud renderer. During an internship I was responsible for extending the application by a set of new features. That includes a new tool for clipping, an orthographic camera mode and some more minor features which are explained in this report. The following report contains information about the changes made to the Potree application.", keywords = "point cloud, webgl, potree", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/gersthofer_2017_COP/", } @article{mindek-2017-marion, title = "Visualization Multi-Pipeline for Communicating Biology", author = "Peter Mindek and David Kou\v{r}il and Johannes Sorger and David Toloudis and Blair Lyons and Graham Johnson and Eduard Gr\"{o}ller and Ivan Viola", year = "2017", abstract = "We propose a system to facilitate biology communication by developing a pipeline to support the instructional visualization of heterogeneous biological data on heterogeneous user-devices. Discoveries and concepts in biology are typically summarized with illustrations assembled manually from the interpretation and application of heterogenous data. The creation of such illustrations is time consuming, which makes it incompatible with frequent updates to the measured data as new discoveries are made. 
Illustrations are typically non-interactive, and when an illustration is updated, it still has to reach the user. Our system is designed to overcome these three obstacles. It supports the integration of heterogeneous datasets, reflecting the knowledge that is gained from different data sources in biology. After pre-processing the datasets, the system transforms them into visual representations as inspired by scientific illustrations. As opposed to traditional scientific illustration these representations are generated in real-time - they are interactive. The code generating the visualizations can be embedded in various software environments. To demonstrate this, we implemented both a desktop application and a remote-rendering server in which the pipeline is embedded. The remote-rendering server supports multi-threaded rendering and it is able to handle multiple users simultaneously. This scalability to different hardware environments, including multi-GPU setups, makes our system useful for efficient public dissemination of biological discoveries. 
", journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "24", number = "1", keywords = "Biological visualization, remote rendering, public dissemination", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/mindek-2017-marion/", } @incollection{SCHEIBLAUER-2015-WFC, title = "Workflow for Creating and Rendering Huge Point Models", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2017", booktitle = "Fundamentals of Virtual Archaeology: Theory and Practice", isbn = "9781466594760", note = "(to appear) 15.06.2017", publisher = "A K Peters/CRC Press", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/SCHEIBLAUER-2015-WFC/", } @studentproject{SIPPL-2017-POS, title = "Projecting Openstreetmap Tiles onto 3D-Surfaces", author = "Tobias Sippl", year = "2017", abstract = "The algorithm/software should project map-data from a tile-based map-service onto 3d Geometry or point clouds. Different levels of detail should be available and tiles should be downloaded on the fly, while the program is running.", keywords = "Open Street Map, point cloud rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/SIPPL-2017-POS/", } @article{ZOTTI-2017-TSP, title = "The Skyscape Planetarium", author = "Georg Zotti and Florian Schaukowitsch and Michael Wimmer", year = "2017", abstract = "Communicating scientific topics in state of the art exhibitions frequently involves the creation of impressive visual installations. In the exhibition “STONEHENGE. –A Hidden Landscape.” in the MAMUZ museum for prehistory in Mistelbach, Lower Austria, LBI ArchPro presents recent research results from the Stonehenge Hidden Landscape Project. A central element of the exhibition which extends over two floors connected with open staircases is an assembly of original-sized replica of several stones of the central trilithon horseshoe which is seen from both floors. 
In the upper floor, visitors are at eye level with the lintels, and on a huge curved projection screen which extends along the long wall of the hall they can experience the view out over the Sarsen circle into the surrounding landscape. This paper describes the planning and creation of this part of the exhibition, and some first impressions after opening.", journal = "Culture and Cosmos", volume = "21", number = "1", issn = "1368-6534", booktitle = "24th SEAC Conference", pages = "269--281", keywords = "stellarium", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ZOTTI-2017-TSP/", } @xmascard{mindek-xmas-card-2016, title = "X-Mas Card 2016", author = "Ludovic Autin and Peter Mindek", year = "2016", abstract = "As far as we can tell, the Universe is made of atoms. Or pixels. In any case, this Christmas card celebrates both. This year's Christmas tree is decorated with a chain of DNA molecules modeled as a spline populated by nucleotides. Several macromolecules of Fibrinogen, Hemoglobin, and Low-Density Lipoprotein are used as decorations as well. All the proteins, as well as the DNA, are modeled down to atomic resolution. The scene is rendered in real-time using cellVIEW - a molecular visualization framework developed at TU Wien and Scripps Research Institute. *** Soweit wir wissen besteht das Universum aus Atomen. Oder Pixel. Wie auch immer, diese Weihnachtskarte feiert beides. Der Weihnachtsbaum ist mit einer DNA-Molek\"{u}lkette geschm\"{u}ckt, modelliert als ein Spline der mit Nukleotiden besetzt ist. Auch mehrere Fibrinogen-, H\"{a}moglobin- und Lipoprotein-Makromolek\"{u}le wurden als Dekorationen verwendet. Alle Proteine, als auch die DNA, sind bis auf Atomaufl\"{o}sung modelliert. Die Szene wurde mit cellVIEW in Echtzeit gerendert. 
cellVIEW ist eine Visualisierungssoftware f\"{u}r Molek\"{u}le, die an der TU Wien und dem Scripps Research Institute entwickelt wurde.", month = dec, keywords = "Molecular Visualization, DNA, Proteins", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/mindek-xmas-card-2016/", } @mastersthesis{JAHRMANN-2016-IGR, title = "Interactive Grass Rendering in Real Time Using Modern OpenGL Features", author = "Klemens Jahrmann", year = "2016", abstract = "Grass species are an important part of vegetation all over the world and can be found in all climatic zones. Therefore, grass can be found in almost all outside scenarios. Until today, there are only few sophisticated algorithms for rendering grass in real time due to the high amount of geometrical complexity. As a result, most algorithms visualize grass as a collection of billboards or use other image-based methods, which have problems dealing with animation or physical interaction. Another disadvantage of image-based methods is that they often have artifacts when viewed from specific angles, because they are just two-dimensional images embedded in three-dimensional space. In this thesis we will introduce a fully geometric approach of grass rendering working at interactive framerates. 
The algorithm is very generic and is able to be adjusted and extended in various ways in order to be applicable to most scenarios of rendering grass or grass-like vegetation.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/JAHRMANN-2016-IGR/", } @bachelorsthesis{Przemyslaw_Gora_2016_UVU, title = "Unreal vs Unity: Ein Vergleich zwischen zwei modernen Spiele-Engines", author = "Przemyslaw Gora and Lukas Leibetseder", year = "2016", abstract = "This bachelor’s thesis focuses on the comparison of two game engines, the Unreal Engine 4 and Unity 5 Engine. We will take a closer look at the different aspects that we find important, describe and compare them. Starting with the content-pipeline, which includes the usage of externally created content, we will focus on three big categories: Audio, Images and 3D-Assets. During this process it will be shown that Unity 5 supports much more formats to import than the Unreal Engine 4. This is especially noticeable with Audio and 3D-Assets. For the latter there is a feature in Unity 5 that allows you to directly import formats of various modelling tools like Maya, although it is fair to mention that in a few cases one will be reverting to the standard way of importing FBX files. While Unreal Engine 4 doesn’t have a huge support for external formats it offers more options to use the assets within the engine. In the following chapter we will take a look at the features each engine has to offer. Both, Unreal and Unity, have a big arsenal of tools to simplify various aspects of the development process. Yet again the Unreal Engines offers a greater set of options. Afterwards we will create a simple small project in Unreal Engine 4 and Unity 5 to demonstrate the usability and tools both engines have to offer. 
As we will see, the level design and placing of some objects in the editor is very similar. The interesting part starts with the creation of a controllable player character. The behaviour of such is realized differently on both sides. In Unity 5 one uses C\#-scripts whereas Unreal Engine 4 offers visual scripting. We will compare those two systems and point out their pros and cons. In the further course we will take a look at the list of effects from the lecture UE Computergraphik (186.831) and check if they are available in either of both engines. In the last chapter, we'll take a look at the legal aspects and limitations when using Unreal and Unity. It's interesting to see how far it is possible to use those engines in university lectures.", month = oct, note = "1", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Unreal, Unity 3D, Game Engine", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Przemyslaw_Gora_2016_UVU/", } @inproceedings{WIMMER-2016-HARVEST4D, title = "Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds", author = "Tamy Boubekeur and Paolo Cignoni and Elmar Eisemann and Michael Goesele and Reinhard Klein and Stefan Roth and Michael Weinmann and Michael Wimmer", year = "2016", abstract = "The EU FP7 FET-Open project ``Harvest4D: Harvesting Dynamic 3D Worlds from Commodity Sensor Clouds'' deals with the acquisition, processing, and display of dynamic 3D data. Technological progress is offering us a wide-spread availability of sensing devices that deliver different data streams, which can be easily deployed in the real world and produce streams of sampled data with increased density and easier iteration of the sampling process. These data need to be processed and displayed in a new way. 
The Harvest4D project proposes a radical change in acquisition and processing technology: instead of a goal-driven acquisition that determines the devices and sensors, its methods let the sensors and resulting available data determine the acquisition process. A variety of challenging problems need to be solved: huge data amounts, different modalities, varying scales, dynamic, noisy and colorful data. This short contribution presents a selection of the many scientific results produced by Harvest4D. We will focus on those results that could bring a major impact to the Cultural Heritage domain, namely facilitating the acquisition of the sampled data or providing advanced visual analysis capabilities.", month = oct, isbn = "978-3-03868-011-6", publisher = "Eurographics Association", location = "Genova, Italy", event = "GCH 2016", editor = "Chiara Eva Catalano and Livio De Luca", doi = "10.2312/gch.20161378", booktitle = "Proceedings of the 14th Eurographics Workshop on Graphics and Cultural Heritage", pages = "19--22", keywords = "acquisition, 3d scanning, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/WIMMER-2016-HARVEST4D/", } @mastersthesis{SCHUETZ-2016-POT, title = "Potree: Rendering Large Point Clouds in Web Browsers", author = "Markus Sch\"{u}tz", year = "2016", abstract = "This thesis introduces Potree, a web-based renderer for large point clouds. It allows users to view data sets with billions of points, from sources such as LIDAR or photogrammetry, in real time in standard web browsers. One of the main advantages of point cloud visualization in web browser is that it allows users to share their data sets with clients or the public without the need to install third-party applications and transfer huge amounts of data in advance. 
The focus on large point clouds, and a variety of measuring tools, also allows users to use Potree to look at, analyze and validate raw point cloud data, without the need for a time-intensive and potentially costly meshing step. The streaming and rendering of billions of points in web browsers, without the need to load large amounts of data in advance, is achieved with a hierarchical structure that stores subsamples of the original data at different resolutions. A low resolution is stored in the root node and with each level, the resolution gradually increases. The structure allows Potree to cull regions of the point cloud that are outside the view frustum, and to render distant regions at a lower level of detail. The result is an open source point cloud viewer, which was able to render point cloud data sets of up to 597 billion points, roughly 1.6 terabytes after compression, in real time in a web browser.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "point cloud rendering, WebGL, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/SCHUETZ-2016-POT/", } @bachelorsthesis{prost-2016-molecule, title = "Molecule-Rendering in Unity3D", author = "Lukas Prost", year = "2016", abstract = "Due to their omnipresence and ease of use, smart phones are getting more and more utilized as educational instruments for different subjects, for example, visualizing molecules in a chemistry class. In domain-specific mobile visualization applications, the choice of the ideal visualization technique of molecules can vary based on the background and age of the target group, and mostly depends on the choice of a graphical designer. Designers, however, rarely have sufficient programming skills and require an engineer even for the slightest adjustment in the required visual appearance. 
In this thesis we present a configuration system for rendering effects implemented in Unity3D, that allows to define the visual appearance of a molecule in a JSON file without the need of programming knowledge. We discuss the technical realization of different rendering effects on a mobile platform, and demonstrate our system and its versatility on a commercial chemistry visualization app, creating different visual styles for molecule renderings that are appealing to students as well as scientists and advertisement. ", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "molecule visualization, Unity, rendering effects, mobile devices", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/prost-2016-molecule/", } @bachelorsthesis{Tucek_Tom-2016-aai, title = "Agent-based architecture for artistic real-time installation", author = "Tom Tucek", year = "2016", abstract = "The aim of this thesis is to transfer artistically predetermined scenarios and behaviours for several digital figures acting in the context of an artistic art installation into an agent based system and develop the corresponding agent behaviours. 
For this purpose the agent-oriented programming language called AgentSpeak is used.", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Tucek_Tom-2016-aai/", } @bachelorsthesis{Wang-2016-BAC, title = "Game Design Patterns for CPU Performance Gain in Games ", author = "Xi Wang", year = "2016", month = aug, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Wang-2016-BAC/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances of the acquisition technology over the last years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. 
In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps. In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images. At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @article{sorger-2015-litevis, title = "LiteVis: Integrated Visualization for Simulation-Based Decision Support in Lighting Design", author = "Johannes Sorger and Thomas Ortner and Christian Luksch and Michael Schw\"{a}rzler and Eduard Gr\"{o}ller and Harald Piringer", year = "2016", abstract = "State-of-the-art lighting design is based on physically accurate lighting simulations of scenes such as offices. The simulation results support lighting designers in the creation of lighting configurations, which must meet contradicting customer objectives regarding quality and price while conforming to industry standards. However, current tools for lighting design impede rapid feedback cycles. On the one side, they decouple analysis and simulation specification. On the other side, they lack capabilities for a detailed comparison of multiple configurations. The primary contribution of this paper is a design study of LiteVis, a system for efficient decision support in lighting design. 
LiteVis tightly integrates global illumination-based lighting simulation, a spatial representation of the scene, and non-spatial visualizations of parameters and result indicators. This enables an efficient iterative cycle of simulation parametrization and analysis. Specifically, a novel visualization supports decision making by ranking simulated lighting configurations with regard to a weight-based prioritization of objectives that considers both spatial and non-spatial characteristics. In the spatial domain, novel concepts support a detailed comparison of illumination scenarios. We demonstrate LiteVis using a real-world use case and report qualitative feedback of lighting designers. This feedback indicates that LiteVis successfully supports lighting designers to achieve key tasks more efficiently and with greater certainty.", month = jan, journal = "Visualization and Computer Graphics, IEEE Transactions on", volume = "22", number = "1", issn = "1077-2626 ", pages = "290--299", keywords = "Integrating Spatial and Non-Spatial Data", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/sorger-2015-litevis/", } @article{Groeller_2016_P3, title = "State of the Art in Transfer Functions for Direct Volume Rendering", author = "P. Ljung and J. Kr\"{u}ger and Eduard Gr\"{o}ller and Markus Hadwiger and C. Hansen and Anders Ynnerman", year = "2016", abstract = "A central topic in scientific visualization is the transfer function (TF) for volume rendering. The TF serves a fundamental role in translating scalar and multivariate data into color and opacity to express and reveal the relevant features present in the data studied. Beyond this core functionality, TFs also serve as a tool for encoding and utilizing domain knowledge and as an expression for visual design of material appearances. TFs also enable interactive volumetric exploration of complex data. 
The purpose of this state-of-the-art report (STAR) is to provide an overview of research into the various aspects of TFs, which lead to interpretation of the underlying data through the use of meaningful visual representations. The STAR classifies TF research into the following aspects: dimensionality, derived attributes, aggregated attributes, rendering aspects, automation, and user interfaces. The STAR concludes with some interesting research challenges that form the basis of an agenda for the development of next generation TF tools and methodologies.", journal = "Computer Graphics Forum", volume = "35", number = "3", pages = "669--691", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Groeller_2016_P3/", } @talk{intel2016, title = "Real-time Subsurface Scattering, Light Transport and Two Minute Papers", author = "Karoly Zsolnai-Feh\'{e}r", year = "2016", abstract = "K\'{a}roly Zsolnai-Feh\'{e}r is a PhD student at the Technical University of Vienna. He is going to talk about Separable Subsurface Scattering, his recent collaboration with Activision Blizzard and the University of Zaragoza to render subsurface scattering in real time on the GPU for computer games. Next, he'll transition to global illumination and explain a simple extension to Metropolis Light Transport to improve the convergence speed on a variety of scenes. The third part will be about Two Minute Papers, a YouTube web series that he started recently to communicate the most beautiful research results to a general audience. 
", event = "Intel Graphics Architecture Forum", location = "Intel Advanced Rendering Technology group", keywords = "global illumination, light transport, subsurface scattering, two minute papers", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/intel2016/", } @bachelorsthesis{kendlbacher-2016, title = "Introduction Of OpenStreetMap For The Automatic Generation Of Destination Maps", author = "Felix Kendlbacher", year = "2016", abstract = "A destination map allows all travelers, within the given region of interest, to reach the same destination, no matter where exactly they start their journey at. For this purpose the important roads for traversing the road network are chosen, while the non-important roads are removed for clarity. These selected roads are then simplified to reduce unnecessary complexity, while maintaining the structure of the road network. The chosen data is then tweaked to increase the visibility of the small roads. During this process the layout is iteratively changed and evaluated according to certain aspects, and if a newly proposed layout performs better than the old one, that new one forms the basis for all future changes. In this work a method for automatically creating destination maps is implemented based on the algorithm proposed in the paper by Kopf et al. [KAB+10], with efforts made to improve the original work.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Destination Maps, OpenStreetMap", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/kendlbacher-2016/", } @mastersthesis{MEINDL-2015-OSR, title = "Omnidirectional Stereo Rendering of Virtual Environments", author = "Lukas Meindl", year = "2015", abstract = "In this thesis we discuss the use of omnidirectional stereo (omnistereo) rendering of virtual environments. 
We present an artefact-free technique to render omnistereo images for the CAVE in real time using the modern rendering pipeline and GPU-based tessellation. Depth perception in stereoscopic images is enabled through the horizontal disparities seen by the left and right eye. Conventional stereoscopic rendering, using off-axis or toe-in projections, provides correct depth cues in the entire field of view (FOV) for a single view-direction. Omnistereo panorama images, created from captures of the real world, provide stereo depth cues in all view directions. This concept has been adopted for rendering, as several techniques generating omnistereo images based on virtual environments have been presented. This is especially relevant in the context of surround-screen displays, as stereo depth can be provided for all view directions in a $360^{\circ}$ panorama simultaneously for upright positioned viewers. Omnistereo rendering also lifts the need for view-direction tracking, since the projection is independent of the view direction, unlike stereoscopic projections. However, omnistereo images only provide correct depth cues in the center of the FOV. Stereo disparity distortion errors occur in the periphery of the view and worsen with distance from the center of the view. Nevertheless, due to a number of properties of the human visual system, these errors are not necessarily noticeable. We improved the existing object-warp based omnistereo rendering technique for CAVE display systems by preceding it with screen-space adaptive tessellation methods. Our improved technique creates images without perceivable artefacts and runs on the GPU at real-time frame rates. The artefacts produced by the original technique without tessellation are described by us. Tessellation is used to remedy edge curvature and texture interpolation artefacts occurring at large polygons, due to the non-linearity of the omnistereo perspective. The original approach is based on off-axis projections. 
We showed that on-axis projections can be used as basis as well, leading to identical images. In addition, we created a technique to efficiently render omnistereo skyboxes for the CAVE using a pre-tessellated full-screen mesh. We implemented the techniques as part of an application for a three-walled CAVE in the VRVis research center and compared them.", month = nov, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "virtual reality, stereo rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MEINDL-2015-OSR/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. 
The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @article{Jimenez_SSS_2015, title = "Separable Subsurface Scattering", author = "Jorge Jimenez and Karoly Zsolnai-Feh\'{e}r and Adrian Jarabo and Christian Freude and Thomas Auzinger and Xian-Chun Wu and Javier van der Pahlen and Michael Wimmer and Diego Gutierrez", year = "2015", abstract = "In this paper we propose two real-time models for simulating subsurface scattering for a large variety of translucent materials, which need under 0.5 milliseconds per frame to execute. This makes them a practical option for real-time production scenarios. Current state-of-the-art, real-time approaches simulate subsurface light transport by approximating the radially symmetric non-separable diffusion kernel with a sum of separable Gaussians, which requires multiple (up to twelve) 1D convolutions. In this work we relax the requirement of radial symmetry to approximate a 2D diffuse reflectance profile by a single separable kernel. We first show that low-rank approximations based on matrix factorization outperform previous approaches, but they still need several passes to get good results. To solve this, we present two different separable models: the first one yields a high-quality diffusion simulation, while the second one offers an attractive trade-off between physical accuracy and artistic control. Both allow rendering subsurface scattering using only two 1D convolutions, reducing both execution time and memory consumption, while delivering results comparable to techniques with higher cost. 
Using our importance-sampling and jittering strategies, only seven samples per pixel are required. Our methods can be implemented as simple post-processing steps without intrusive changes to existing rendering pipelines. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "6", issn = "1467-8659", pages = "188--197", keywords = "separable, realtime rendering, subsurface scattering, filtering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Jimenez_SSS_2015/", } @article{MATTAUSCH-2015-CHCRT, title = "CHC+RT: Coherent Hierarchical Culling for Ray Tracing", author = "Oliver Mattausch and Jir\'{i} Bittner and Alberto Jaspe and Enrico Gobbetti and Michael Wimmer and Renato Pajarola", year = "2015", abstract = "We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. 
For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 × better than pure shader-based ray tracing.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "537--548", keywords = "occlusion culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MATTAUSCH-2015-CHCRT/", } @techreport{ROEGNER-2015-IBR, title = "Image-based Reprojection Using a Non-local Means Algorithm", author = "Clemens R\"{o}gner and Michael Wimmer and Johannes Hanika and Carsten Dachsbacher", year = "2015", abstract = "We introduce an image-based approach to increase the framerate of image sequences generated with offline rendering algorithms. Our method handles in most cases reflections and refractions better than existing image-based temporal coherence techniques. The proposed technique is also more accurate than some image-based upsampling methods, because it calculates an individual result for each pixel. Our proposed algorithm takes a pair of frames and generates motion vectors for each pixel. This allows for adding a new frame between that pair and thus increasing the framerate. To find the motion vectors, we utilize the non-local means denoising algorithm, which determines the similarity of two pixels by their surrounding and reinterpret that similarity as the likelihood of movement from one pixel to the other. This is similar to what it is done in video encoding to reduce file size, but in our case is done for each pixel individually instead of a block-wise approach, making our technique more accurate. Our method also improves on work in the field of real-time rendering. Such techniques use motion vectors, which are generated through knowledge about the movement of objects within the scene. This can lead to problems when the optical flow in an image sequence is not coherent with the objects movement. Our method avoids those problems. 
Furthermore, previous work has shown that the non-local means algorithm can be optimized for parallel execution, which significantly reduces the time to execute our proposed technique as well. ", month = apr, number = "TR-186-2-15-02", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "optical flow, offline rendering, image reprojection, temporal upsampling, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/ROEGNER-2015-IBR/", } @misc{KREUZER-2015-DPA, title = "Depixelizing Pixel Art in Real-Time", author = "Felix Kreuzer and Johannes Kopf and Michael Wimmer", year = "2015", abstract = "Pixel art was frequently employed in games of the 90s and earlier. On today's large and high-resolution displays, pixel art looks blocky. Recently, an algorithm was introduced to create a smooth, resolution-independent vector representation from pixel art. However, the algorithm is far too slow for interactive use, for example in a game. This poster presents an efficient implementation of the algorithm on the GPU, so that it runs at real-time rates and can be incorporated into current game emulators. 
Extended Abstract: http://dl.acm.org/citation.cfm?id=2721395", month = feb, publisher = "ACM New York, NY, USA", location = "San Francisco, CA", isbn = "978-1-4503-3392-4", event = "19th Symposium on Interactive 3D Graphics and Games", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games", Conference date = "Poster presented at 19th Symposium on Interactive 3D Graphics and Games (2015-02-27--2015-03-01)", note = "130--130", pages = "130 – 130", keywords = "image processing, depixelizing, pixel art", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/KREUZER-2015-DPA/", } @inproceedings{WEBER-2015-PRA, title = "Parallel Reyes-style Adaptive Subdivision with Bounded Memory Usage", author = "Thomas Weber and Michael Wimmer and John Owens", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. 
This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = feb, isbn = "978-1-4503-3392-4", publisher = "ACM", organization = "ACM", location = "San Francisco, CA", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games (i3D 2015)", pages = "39--45", keywords = "micro-rasterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA/", } @mastersthesis{WEBER-2015-PRA1, title = "Micropolygon Rendering on the GPU", author = "Thomas Weber", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm commonly used in production rendering to run on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. 
This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA1/", } @mastersthesis{Adorjan-2015, title = "The OpenSFM Database", author = "Matthias Adorjan", year = "2015", abstract = "Besides using high-cost laser scanning equipment to capture large point clouds for topographical or architectural purposes, nowadays other, more affordable approaches exist. Structure-from-motion (SfM) in combination with multi-view stereo (MVS) is such a low-cost photogrammetric method used to generate large point datasets. It refers to the process of estimating three-dimensional structures out of two-dimensional image sequences. These sequences can even be captured with conventional consumer-grade digital cameras. In our work we aim to establish a free and fully accessible structure-from-motion system, based on the idea of collaborative projects like OpenStreetMap. Our client-server system, called OpenSfM, consists of a web front-end which lets the user explore, upload and edit SfM-datasets and a back-end that answers client requests and processes the uploaded data and stores it in a database. The front-end is a virtual tourism client which allows the exploration of georeferenced point clouds together with their corresponding SfM-data like camera parameters and photos. The information is rendered in the context of an interactive virtual globe. An upload functionality makes it possible to integrate new SfM-datasets into the system and improve or extend existing datasets by adding images that fill missing areas of the affected point cloud. Furthermore, an edit mode allows the correction of georeferencing or reconstruction errors. 
On the other side the back-end evaluates the uploaded information and generates georeferenced point datasets using a state-of-the-art SfM engine and the GPS data stored in the uploaded images. The generated point clouds are preprocessed, such that they can be used by the front-end’s point cloud renderer. After that, they are stored together with the uploaded images and SfM parameters in the underlying database. On the whole, our system allows the gathering of SfM-datasets that represent different sights or landmarks, but also just locally famous buildings, placed all over the world. Those datasets can be explored in an interactive way by every user who accesses the virtual tourism client using a web browser.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Adorjan-2015/", } @talk{Auzinger-2015-IST, title = "Prefiltered Anti-Aliasing on Parallel Hardware", author = "Thomas Auzinger", year = "2015", event = "Seminar Talk", location = "IST Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Auzinger-2015-IST/", } @studentproject{PERNDORFER-2015-ECS, title = "Exploring Cells via Serious Gaming", author = "Rafael Perndorfer and Thomas Stipsits", year = "2015", abstract = "Es wurde ein Spiel entwickelt, dass z.B. Sch\"{u}lern grundlegende Mechanismen der Zellbiologie spielerisch n\"{a}herbringt.", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/PERNDORFER-2015-ECS/", } @inproceedings{wallner-2015-ModelingRoutinization, title = "Modeling Routinization in Games: An Information Theory Approach", author = "Simon Wallner and Martin Pichlmair and Michael Hecher and Michael Wimmer", year = "2015", abstract = "Routinization is the result of practicing until an action stops being a goal-directed process. 
This paper formulates a definition of routinization in games based on prior research in the fields of activity theory and practice theory. Routinization is analyzed using the formal model of discrete-time, discrete-space Markov chains and information theory to measure the actual error between the dynamically trained models and the player interaction. Preliminary research supports the hypothesis that Markov chains can be effectively used to model routinization in games. A full study design is presented to further explore and verify this hypothesis.", isbn = "978-1-4503-3466-2", series = "CHI PLAY ", publisher = "ACM", location = "London, United Kingdom", booktitle = "Proceedings of the 2015 Annual Symposium on Computer-Human Interaction in Play", pages = "727--732", keywords = "Games, Routinization, Markov Chains, Information Theory", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/wallner-2015-ModelingRoutinization/", } @talk{zsolnai-ist-invited-2014, title = "Light Transport with a Touch of Fluids", author = "Karoly Zsolnai-Feh\'{e}r", year = "2014", month = oct, event = "IST Austria", keywords = "photorealistic rendering, subsurface scattering, fluid simulation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/zsolnai-ist-invited-2014/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. 
Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments ", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. 
To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of different approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions. We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms have been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. 
One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @article{LUKSCH-2014-RTR, title = "Real-Time Rendering of Glossy Materials with Regular Sampling", author = "Christian Luksch and Robert F. Tobler and Thomas M\"{u}hlbacher and Michael Schw\"{a}rzler and Michael Wimmer", year = "2014", abstract = "Rendering view-dependent, glossy surfaces to increase the realism in real-time applications is a computationally complex task, that can only be performed by applying some approximations—especially when immediate changes in the scene in terms of material settings and object placement are a necessity. The use of environment maps is a common approach to this problem, but implicates performance problems due to costly pre-filtering steps or expensive sampling. We, therefore, introduce a regular sampling scheme for environment maps that relies on an efficient MIP-map-based filtering step, and minimizes the number of necessary samples for creating a convincing real-time rendering of glossy BRDF materials.", month = jun, journal = "The Visual Computer", volume = "30", number = "6-8", issn = "0178-2789", pages = "717--727", keywords = "real-time rendering , BRDFs", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/LUKSCH-2014-RTR/", } @inproceedings{charpenay-2014-sgn, title = "Sampling Gabor Noise in the Spatial Domain", author = "Victor Charpenay and Bernhard Steiner and Przemyslaw Musialski", year = "2014", abstract = "Gabor noise is a powerful technique for procedural texture generation. 
Contrary to other types of procedural noise, its sparse convolution aspect makes it easily controllable locally. In this paper, we demonstrate this property by explicitly introducing spatial variations. We do so by linking the sparse convolution process to the parametrization of the underlying surface. Using this approach, it is possible to provide control maps for the parameters in a natural and convenient way. In order to derive intuitive control of the resulting textures, we accomplish a small study of the influence of the parameters of the Gabor kernel with respect to the outcome and we introduce a solution where we bind values such as the frequency or the orientation of the Gabor kernel to a user-provided control map in order to produce novel visual effects.", month = may, isbn = "978-80-223-3601-7", publisher = "ACM Press", location = "Smolenice castle, Slovakia", editor = "Diego Gutierrez", booktitle = "Proceedings of the 30th Spring Conference on Computer Graphics - SCCG ", pages = "79--82", keywords = "texture synthesis, Gabor noise, procedural texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/charpenay-2014-sgn/", } @talk{Auzinger_2014_DCGI, title = "Analytic Rasterization", author = "Thomas Auzinger", year = "2014", event = "Invited Talk at Czech Technical University in Prague", location = "Czech Technical University in Prague, Department of Computer Graphics and Interaction, Prague", keywords = "antialiasing, analytic, prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Auzinger_2014_DCGI/", } @talk{Auzinger_2014_UJA, title = "GPGPU in Graphics and Visualization", author = "Thomas Auzinger", year = "2014", event = "Invited Talk at Universidad de Ja\'{e}n in Spain", location = "Universidad de Ja\'{e}n, Spain", keywords = "GPGPU, medical, visualization, antialiasing", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Auzinger_2014_UJA/", } @bachelorsthesis{silvana_2014, title = "Automated Lighting 
Design For Photorealistic Rendering", author = "Silvana Podaras", year = "2014", abstract = "We present a novel technique to minimize the number of light sources in a virtual 3D scene without introducing any perceptible changes to it. The theoretical part of the thesis gives an overview on previous research in the field of automated lighting design, followed by an introduction to the theory of rendering and genetic algorithms. The implementation is done as an extension called ``Light Source Cleaner'' to LuxRender, a physically based, open-source renderer. The algorithm adjusts the intensities of the light sources in a way that certain light sources can be canceled out, thus enabling to render a similar image with significantly fewer light sources, introducing a remarkable reduction to the execution time of scenes where many light sources are used.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "global illumination, photorealistic rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/silvana_2014/", } @phdthesis{knecht_2013_RSM, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht", year = "2013", abstract = "Reciprocal shading for mixed reality aims to integrate virtual objects into real environments in a way that they are in the ideal case indistinguishable from real objects. It is therefore an attractive technology for architectural visualizations, product visualizations and for cultural heritage sites, where virtual objects should be seamlessly merged with real ones. Due to the improved performance of recent graphics hardware, real-time global illumination algorithms are feasible for mixed-reality applications, and thus more and more researchers address realistic rendering for mixed reality. 
The goal of this thesis is to provide algorithms which improve the visual plausibility of virtual objects in mixed-reality applications. Our contributions are as follows: First, we present five methods to reconstruct the real surrounding environment. In particular, we present two methods for geometry reconstruction, a method for material estimation at interactive frame rates and two methods to reconstruct the color mapping characteristics of the video see-through camera. Second, we present two methods to improve the visual appearance of virtual objects. The first, called differential instant radiosity, combines differential rendering with a global illumination method called instant radiosity to simulate reciprocal shading effects such as shadowing and indirect illumination between real and virtual objects. The second method focuses on the visual plausible rendering of reflective and refractive objects. The high-frequency lighting effects caused by these objects are also simulated with our method. The third part of this thesis presents two user studies which evaluate the influence of the presented rendering methods on human perception. The first user study measured task performance with respect to the rendering mode, and the second user study was set up as a web survey where participants had to choose which of two presented images, showing mixed-reality scenes, they preferred.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_2013_RSM/", } @inproceedings{birsak-2013-sta, title = "Seamless Texturing of Archaeological Data", author = "Michael Birsak and Przemyslaw Musialski and Murat Arikan and Michael Wimmer", year = "2013", abstract = "In this paper we propose a framework for out-of-core real-time rendering of high-quality textured archaeological data-sets. 
Our input is a triangle mesh and a set of calibrated and registered photographs. Our system performs the actual mapping of the photos to the mesh for high-quality reconstructions, which is a task referred to as the labeling problem. Another problem of such mappings are seams that arise on junctions between triangles that contain information from different photos. These are approached with blending methods, referred to as leveling. We address both problems and introduce a novel labeling approach based on occlusion detection using depth maps that prevents texturing of parts of the model with images that do not contain the expected region. Moreover, we propose an improved approach for seam-leveling that penalizes too large values and helps to keep the resulting colors in a valid range. For high-performance visualization of the 3D models with a huge amount of textures, we make use of virtual texturing, and present an application that generates the needed texture atlas in significantly less time than existing scripts. Finally, we show how the mentioned components are integrated into a visualization application for digitized archaeological sites.", month = oct, isbn = "978-1-4799-3168-2 ", publisher = "IEEE", note = "DOI: 10.1109/DigitalHeritage.2013.6743749", location = "Marseille, France", booktitle = "Digital Heritage International Congress (DigitalHeritage), 2013", pages = "265--272 ", keywords = "digital cultural heritage, out-of-core real-time rendering, seamless texturing, virtual texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/birsak-2013-sta/", } @inproceedings{Auzinger_2013_NSAA, title = "Non-Sampled Anti-Aliasing", author = "Thomas Auzinger and Przemyslaw Musialski and Reinhold Preiner and Michael Wimmer", year = "2013", abstract = "In this paper we present a parallel method for high-quality edge anti-aliasing. 
In contrast to traditional graphics hardware methods, which rely on massive oversampling to combat aliasing issues in the rasterization process, we evaluate a closed-form solution of the associated prefilter convolution. This enables the use of a wide range of filter functions with arbitrary kernel sizes, as well as general shading methods such as texture mapping or complex illumination models. Due to the use of analytic solutions, our results are exact in the mathematical sense and provide objective ground-truth for other anti-aliasing methods and enable the rigorous comparison of different models and filters. An efficient implementation on general purpose graphics hardware is discussed and several comparisons to existing techniques and of various filter functions are given.", month = sep, isbn = "978-3-905674-51-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Lugano, Switzerland", event = "Vision, Modeling, Visualization (VMV)", editor = "Michael Bronstein and Jean Favre and Kai Hormann", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization (VMV 2013)", pages = "169--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_NSAA/", } @misc{Auzinger_2013_SAR, title = "Sampled and Analytic Rasterization", author = "Thomas Auzinger and Michael Wimmer", year = "2013", abstract = "In this poster we present an overview of exact anti-aliasing (AA) methods in rasterization. In contrast to the common supersampling approaches for visibility AA (e.g. MSAA) or both visibility and shading AA (e.g. SSAA, decoupled sampling), prefiltering provides the mathematically exact solution to the aliasing problem. Instead of averaging a set of supersamples, the input data is convolved with a suitable low-pass filter before sampling is applied. 
Recent work showed that for both visibility signals and simple shading models, a closed-form solution to the convolution integrals can be found. As our main contribution, we present a classification of both sample-based and analytic AA approaches for rasterization and analyse their strengths and weaknesses.", month = sep, series = "VMV ", publisher = "Eurographics Association", location = "Lugano, Switzerland", isbn = "978-3-905674-51-4", event = "VMV 2013", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization", Conference date = "Poster presented at VMV 2013 (2013-09-11--2013-09-13)", note = "223--224", pages = "223 – 224", keywords = "Anti-Aliasing, Rasterization, Sampling, Supersampling, Prefiltering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_SAR/", } @book{Sturn-2013-ST, title = "Sturning Technology - Real-Time Rendering of Non Photorealistic 3d Worlds", author = "Tobias Sturn", year = "2013", abstract = "We introduce a new technology, “Sturning Technology” for artistic, non photorealistic, emotional real time rendering of 3d scenes and blending between the different emotional renderings to show the current emotional state of the viewer of the scene. The European art history with Impressionism, Expressionism and Romanticism is taken as reference for creating these emotional renderings because the painters of these areas wanted to evoke nothing more but pure emotions in just one single “frame”. This technology can be used for all kinds of interactive applications but mainly for games in which the player naturally undergoes many different sensations. 
We believe that emotional renderings can help a lot to create a much deeper emotional gaming experience where the graphics are directly linked to the emotional state of the player.", month = jul, isbn = "978-3-639-47141-0", pages = "104", publisher = "AV Akademikerverlag", keywords = "Computer Graphics, Emotional Rendering, Non Photorealistic Rendering, Real-Time Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Sturn-2013-ST/", } @inproceedings{JAHRMANN-2013-IGR, title = "Interactive Grass Rendering Using Real-Time Tessellation", author = "Klemens Jahrmann and Michael Wimmer", year = "2013", abstract = "Grass rendering is needed for many outdoor scenes, but for real-time applications, rendering each blade of grass as geometry has been too expensive so far. This is why grass is most often drawn as a texture mapped onto the ground or grass patches rendered as transparent billboard quads. Recent approaches use geometry for blades that are near the camera and flat geometry for rendering further away. In this paper, we present a technique which is capable of rendering whole grass fields in real time as geometry by exploiting the capabilities of the tessellation shader. Each single blade of grass is rendered as a two-dimensional tessellated quad facing its own random direction. This enables each blade of grass to be influenced by wind and to interact with its environment. 
In order to adapt the grass field to the current scene, special textures are developed which encode on the one hand the density and height of the grass and on the other hand its look and composition.", month = jun, isbn = "978-80-86943-74-9", location = "Plzen, CZ", editor = "Manuel Oliveira and Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "114--122", keywords = "grass rendering, real-time rendering, billboards", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/JAHRMANN-2013-IGR/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. 
A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, close-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @article{MATTAUSCH-2013-FSBE, title = "Freeform Shadow Boundary Editing", author = "Oliver Mattausch and Takeo Igarashi and Michael Wimmer", year = "2013", abstract = "We present an algorithm for artistically modifying physically based shadows. With our tool, an artist can directly edit the shadow boundaries in the scene in an intuitive fashion similar to freeform curve editing. Our algorithm then makes these shadow edits consistent with respect to varying light directions and scene configurations, by creating a shadow mesh from the new silhouettes. The shadow mesh helps a modified shadow volume algorithm cast shadows that conform to the artistic shadow boundary edits, while providing plausible interaction with dynamic environments, including animation of both characters and light sources. Our algorithm provides significantly more fine-grained local and direct control than previous artistic light editing methods, which makes it simple to adjust the shadows in a scene to reach a particular effect, or to create interesting shadow shapes and shadow animations. 
All cases are handled with a single intuitive interface, be it soft shadows, or (self-)shadows on arbitrary receivers.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "0167-7055", pages = "175--184", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MATTAUSCH-2013-FSBE/", } @article{knecht_martin_2013_ReflRefrObjsMR, title = "Reflective and Refractive Objects for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Christoph Winklhofer and Michael Wimmer", year = "2013", abstract = "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. 
The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.", month = mar, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE VR 2013)", volume = "19", number = "4", issn = "1077-2626", pages = "576--582", keywords = "Mixed Reality, Caustics, Reflections, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_martin_2013_ReflRefrObjsMR/", } @inproceedings{LUKSCH-2013-FLM, title = "Fast Light-Map Computation with Virtual Polygon Lights", author = "Christian Luksch and Robert F. Tobler and Ralf Habel and Michael Schw\"{a}rzler and Michael Wimmer", year = "2013", abstract = "We propose a new method for the fast computation of light maps using a many-light global-illumination solution. A complete scene can be light mapped on the order of seconds to minutes, allowing fast and consistent previews for editing or even generation at loading time. In our method, virtual point lights are clustered into a set of virtual polygon lights, which represent a compact description of the illumination in the scene. The actual light-map generation is performed directly on the GPU. Our approach degrades gracefully, avoiding objectionable artifacts even for very short computation times. ", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "87--94", keywords = "instant radiosity, global illumination, light-maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/LUKSCH-2013-FLM/", } @incollection{schedl-2013-gP4, title = "Simulating partial occlusion in post-processing depth-of-field methods", author = "David Schedl and Michael Wimmer", year = "2013", abstract = "This chapter describes a method for simulating Depth of Field (DoF). 
In particular, we investigate the so-called partial occlusion effect: objects near the camera blurred due to DoF are actually semitransparent and therefore result in partially visible background objects. This effect is strongly apparent in miniature- and macro photography and in film making. Games and interactive applications are nowadays becoming more cinematic, including strong DoF effects, and therefore it is important to be able to convincingly approximate the partial-occlusion effect. We show how to do so in this chapter; with the proposed optimizations even in real time.", month = mar, booktitle = "GPU Pro 4: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "9781466567436", note = "to appear", publisher = "A K Peters", keywords = "depth of field, realtime, layers, blurring", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/schedl-2013-gP4/", } @inproceedings{SCHWAERZLER-2013-FPCSS, title = "Fast Percentage Closer Soft Shadows using Temporal Coherence", author = "Michael Schw\"{a}rzler and Christian Luksch and Daniel Scherzer and Michael Wimmer", year = "2013", abstract = "We propose a novel way to efficiently calculate soft shadows in real-time applications by overcoming the high computational effort involved with the complex corresponding visibility estimation each frame: We exploit the temporal coherence prevalent in typical scene movement, making the estimation of a new shadow value only necessary whenever regions are newly disoccluded due to camera adjustment, or the shadow situation changes due to object movement. By extending the typical shadow mapping algorithm by an additional light-weight buffer for the tracking of dynamic scene objects, we can robustly and efficiently detect all screen space fragments that need to be updated, including not only the moving objects themselves, but also the soft shadows they cast. 
By applying this strategy to the popular Percentage Closer Soft Shadow algorithm (PCSS), we double rendering performance in scenes with both static and dynamic objects - as prevalent in various 3D game levels - while maintaining the visual quality of the original approach.", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", address = "New York, NY, USA", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "79--86", keywords = "real-time, temporal coherence, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/SCHWAERZLER-2013-FPCSS/", } @phdthesis{Reisner_Irene_2013_R3D, title = "Reconstruction of 3D Models from Images and Point Clouds with Shape Primitives", author = "Irene Reisner-Kollmann", year = "2013", abstract = "3D models are widely used in different applications, including computer games, planning software, applications for training and simulation, and virtual city maps. For many of these applications it is necessary or at least advantageous, if the virtual 3D models are based on real world scenes and objects. Manual modeling is reserved for experts as it requires extensive skills. For this reason, it is necessary to provide automatic or semi-automatic, easy-to-use techniques for reconstructing 3D objects. In this thesis we present methods for reconstructing 3D models of man-made scenes. These scenes can often be approximated with a set of geometric primitives, like planes or cylinders. Using geometric primitives leads to light-weight, low-poly 3D models, which are beneficial for efficient storage and post-processing. The applicability of reconstruction algorithms highly depends on the existing input data, the characteristics of the captured objects, and the desired properties of the reconstructed 3D model. For this reason, we present three algorithms that use different input data. 
It is possible to reconstruct 3D models from just a few photographs or to use a dense point cloud as input. Furthermore, we present techniques to combine information from both, images and point clouds. The image-based reconstruction method is especially designed for environments with homogenous and reflective surfaces where it is difficult to acquire reliable point sets. Therefore we use an interactive application which requires user input. Shape primitives are fit to user-defined segmentations in two or more images. Our point-based algorithms, on the other hand, provide fully automatic reconstructions. Nevertheless, the automatic computations can be enhanced by manual user inputs for generating improved results. The first point-based algorithm is specialized on reconstructing 3D models of buildings and uses unstructured point clouds as input. The point cloud is segmented into planar regions and converted into 3D geometry. The second point-based algorithm additionally supports the reconstruction of interior scenes. While unstructured point clouds are supported as well, this algorithm specifically exploits the redundancy and visibility information provided by a set of range images. The data is automatically segmented into geometric primitives. Then the shape boundaries are extracted either automatically or interactively.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_Irene_2013_R3D/", } @inproceedings{EISEMANN-2013-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2013", abstract = "This course provides an overview of efficient, real-time shadow algorithms. 
It presents the theoretical background but also discusses implementation details for facilitating efficient realizations (hard and soft shadows, volumetric shadows, reconstruction techniques). These elements are of relevance to both experts and practitioners. The course also reviews budget considerations and analyzes performance trade-offs, using examples from various AAA game titles and film previsualization tools. While physical accuracy can sometimes be replaced by plausible shadows, especially for games, film production requires more precision, such as scalable solutions that can deal with highly detailed geometry. The course builds upon earlier SIGGRAPH courses as well as the recent book Real-Time Shadows (A K Peters, 2011) by four of the instructors (due to its success, a second edition is planned for 2014). And with two instructors who have worked on AAA game and movie titles, the course presents interesting behind-the-scenes information that illuminates key topics.", booktitle = "ACM SIGGRAPH 2013 Courses", isbn = "978-1-4503-2339-0", location = "Anaheim, CA", publisher = "ACM", pages = "18:1--18:54", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/EISEMANN-2013-ERT/", } @xmascard{x-mas-2013, title = "X-Mas Card 2013", author = "Michael Birsak and Przemyslaw Musialski", year = "2013", abstract = "With our Season’s Greetings we are sending you an automatically generated brochure that provides routing information for several of Vienna’s Christ- mas markets. In the middle you can see an overview map that provides approximate locations as well as routing information. Around the map you can see so-called ‘detail lenses’ that provide more exact maps and routing as well as names and photographs. 
All elements are arranged using a binary integer program (BIP), whose goal is the positioning of the detail lenses as close as possible to their correspond- ing markers in the overview map.", keywords = "x-mas card", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/x-mas-2013/", } @article{SCHERZER-2012-TCM, title = "Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2012", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the speed gain is easily counteracted by increasingly complex and sophisticated shading computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit temporal coherence (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this survey, we investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow incorporating more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. 
To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a general approach, image-space reprojection, with several implementation algorithms that facilitate reusing shading information across adjacent frames. We also discuss data-reuse quality and performance related to reprojection techniques. Finally, in the second half of this survey, we demonstrate various applications that exploit TC in real-time rendering. ", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "31", pages = "2378--2408", keywords = "remote rendering; sampling, perception-based rendering, occlusion culling, non-photo-realistic rendering, level-of-detail, large data visualization, image-based rendering, global illumination, frame interpolation, anti-aliasing, shadows, streaming, temporal coherance, upsampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHERZER-2012-TCM/", } @article{knecht_martin_2012_RSMR, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Michael Wimmer", year = "2012", abstract = "In this paper we present a novel plausible rendering method for mixed reality systems, which is useful for many real-life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates. The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. 
In addition, we avoid artifacts like double shadows or inconsistent color bleeding which appear in previous work. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. The scene gets illuminated by virtual point lights, which use imperfect shadow maps to calculate visibility. A sufficiently fast scene reconstruction is done at run-time with Microsoft's Kinect sensor. Thus a time-consuming manual pre-modeling step of the real scene is not necessary. Our results show that the presented method highly improves the illusion in mixed-reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = nov, issn = "0097-8493", journal = "Computers & Graphics", number = "7", volume = "36", pages = "846--856", keywords = "Differential rendering, Reconstruction, Instant radiosity, Microsoft Kinect, Real-time global illumination, Mixed reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_RSMR/", } @inproceedings{SCHWAERZLER-2012-FAS, title = "Fast Accurate Soft Shadows with Adaptive Light Source Sampling", author = "Michael Schw\"{a}rzler and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Physically accurate soft shadows in 3D applications can be simulated by taking multiple samples from all over the area light source and accumulating them. Due to the unpredictability of the size of the penumbra regions, the required sampling density has to be high in order to guarantee smooth shadow transitions in all cases. Hence, several hundreds of shadow maps have to be evaluated in any scene configuration, making the process computationally expensive. Thus, we suggest an adaptive light source subdivision approach to select the sampling points adaptively. 
The main idea is to start with a few samples on the area light, evaluating their differences using hardware occlusion queries, and adding more sampling points if necessary. Our method is capable of selecting and rendering only the samples which contribute to an improved shadow quality, and hence generate shadows of comparable quality and accuracy. Even though additional calculation time is needed for the comparison step, this method saves valuable rendering time and achieves interactive to real-time frame rates in many cases where a brute force sampling method does not. ", month = nov, isbn = "978-3-905673-95-1", publisher = "Eurographics Association", location = "Magdeburg, Germany", booktitle = "Proceedings of the 17th International Workshop on Vision, Modeling, and Visualization (VMV 2012)", pages = "39--46", keywords = "soft shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHWAERZLER-2012-FAS/", } @mastersthesis{hecher-2012-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher", year = "2012", abstract = "While a huge body of soft shadow algorithms has been proposed, there has been no methodical study for comparing different real-time shadowing algorithms with respect to their plausibility and visual appearance. Therefore, a study was designed to identify and evaluate scene properties with respect to their relevance to shadow quality perception. Since there are so many factors that might influence perception of soft shadows (e.g., complexity of objects, movement, and textures), the study was designed and executed in a way on which future work can build on. The evaluation concept not only captures the predominant case of an untrained user experiencing shadows without comparing them to a reference solution, but also the cases of trained and experienced users. We achieve this by reusing the knowledge users gain during the study. 
Moreover, we thought that the common approach of a two-option forced-choice-study can be frustrating for participants when both choices are so similar that people think they are the same. To tackle this problem a neutral option was provided. For time-consuming studies, where frustrated participants tend to arbitrary choices, this is a useful concept. Speaking with participants after the study and evaluating the results, supports our choice for a third option. The results are helpful to guide the design of future shadow algorithms and allow researchers to evaluate algorithms more effectively. They also allow developers to make better performance versus quality decisions for their applications. One important result of this study is that we can scientifically verify that, without comparison to a reference solution, the human perception is relatively indifferent to a correct soft shadow. Hence, a simple but robust soft shadow algorithm is the better choice in real-world situations. Another finding is that approximating contact hardening in soft shadows is sufficient for the average user and not significantly worse for experts.", month = oct, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/hecher-2012-MH/", } @misc{Auzinger_2012_GeigerCam, title = "GeigerCam: Measuring Radioactivity with Webcams", author = "Thomas Auzinger and Ralf Habel and Andreas Musilek and Dieter Hainz and Michael Wimmer", year = "2012", abstract = "Measuring radioactivity is almost exclusively a professional task in the realms of science, industry and defense, but recent events spur the interest in low-cost consumer detection devices. 
We show that by using image processing techniques, a current, only slightly modified, off-the-shelf HD webcam can be used to measure alpha, beta as well as gamma radiation. In contrast to dedicated measurement devices such as Geiger counters, our framework can classify the type of radiation and can differentiate between various kinds of radioactive materials. By optically insulating the camera's imaging sensor, recordings at extreme exposure and gain values are possible, and the partly very faint signals detectable. The camera is set to the longest exposure time possible and to a very high gain to detect even faint signals. During measurements, GPU assisted real-time image processing of the direct video feed is used to treat the remaining noise by tracking the noise spectrum per pixel, incorporating not only spatial but also temporal variations due to temperature changes and spontaneous emissions. A confidence value per pixel based on event probabilities is calculated to identify potentially hit pixels. Finally, we use morphological clustering to group pixels into particle impact events and analyze their energies. 
Our approach results in a simple device that can be operated on any computer and costs only \$20--30, an order of magnitude cheaper than entry-level nuclear radiation detectors.", month = aug, publisher = "ACM", location = "Los Angeles, CA", address = "New York, NY, USA", isbn = "978-1-4503-1682-8", event = "ACM SIGGRAPH 2012", editor = "Dan Wexler", booktitle = "ACM SIGGRAPH 2012 Posters", conferencedate = "Poster presented at ACM SIGGRAPH 2012 (2012-08-05--2012-08-09)", note = "40:1--40:1", pages = "40:1--40:1", keywords = "radioactivity, webcam, measurement", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_GeigerCam/", } @article{knecht_martin_2012_BRDFEstimation, title = "Interactive BRDF Estimation for Mixed-Reality Applications", author = "Martin Knecht and Georg Tanzmeister and Christoph Traxler and Michael Wimmer", year = "2012", abstract = "Recent methods in augmented reality allow simulating mutual light interactions between real and virtual objects. These methods are able to embed virtual objects in a more sophisticated way than previous methods. However, their main drawback is that they need a virtual representation of the real scene to be augmented in the form of geometry and material properties. In the past, this representation had to be modeled in advance, which is very time consuming and only allows for static scenes. We propose a method that reconstructs the surrounding environment and estimates its Bidirectional Reflectance Distribution Function (BRDF) properties at runtime without any preprocessing. By using the Microsoft Kinect sensor and an optimized hybrid CPU \& GPU-based BRDF estimation method, we are able to achieve interactive frame rates. 
The proposed method was integrated into a differential instant radiosity rendering system to demonstrate its feasibility.", month = jun, journal = "Journal of WSCG", volume = "20", number = "1", issn = "1213-6972", pages = "47--56", keywords = "Augmented Reality, BRDF Estimation, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_BRDFEstimation/", } @article{MATTAUSCH-2012-TIS, title = "Tessellation-Independent Smooth Shadow Boundaries", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer and Takeo Igarashi", year = "2012", abstract = "We propose an efficient and light-weight solution for rendering smooth shadow boundaries that do not reveal the tessellation of the shadow-casting geometry. Our algorithm reconstructs the smooth contours of the underlying mesh and then extrudes shadow volumes from the smooth silhouettes to render the shadows. For this purpose we propose an improved silhouette reconstruction using the vertex normals of the underlying smooth mesh. Then our method subdivides the silhouette loops until the contours are sufficiently smooth and project to smooth shadow boundaries. This approach decouples the shadow smoothness from the tessellation of the geometry and can be used to maintain equally high shadow quality for multiple LOD levels. It causes only a minimal change to the fill rate, which is the well-known bottleneck of shadow volumes, and hence has only small overhead. 
", month = jun, journal = "Computer Graphics Forum", volume = "4", number = "31", issn = "1467-8659", pages = "1465--1470", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-TIS/", } @article{schedl-2012-dof, title = "A layered depth-of-field method for solving partial occlusion", author = "David Schedl and Michael Wimmer", year = "2012", abstract = "Depth of field (DoF) represents a distance range around a focal plane, where objects on an image are crisp. DoF is one of the effects which significantly contributes to the photorealism of images and therefore is often simulated in rendered images. Various methods for simulating DoF have been proposed so far, but little tackle the issue of partial occlusion: Blurry objects near the camera are semi-transparent and result in partially visible background objects. This effect is strongly apparent in miniature and macro photography. In this work a DoF method is presented which simulates partial occlusion. The contribution of this work is a layered method where the scene is rendered into layers. Blurring is done efficiently with recursive Gaussian filters. Due to the usage of Gaussian filters big artifact-free blurring radii can be simulated at reasonable costs.", month = jun, journal = "Journal of WSCG", volume = "20", number = "3", issn = "1213-6972", pages = "239--246", keywords = "realtime, rendering, depth-of-field, layers, depth peeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/schedl-2012-dof/", } @article{Auzinger_2012_AAA, title = "Analytic Anti-Aliasing of Linear Functions on Polytopes", author = "Thomas Auzinger and Michael Guthe and Stefan Jeschke", year = "2012", abstract = "This paper presents an analytic formulation for anti-aliased sampling of 2D polygons and 3D polyhedra. Our framework allows the exact evaluation of the convolution integral with a linear function defined on the polytopes. 
The filter is a spherically symmetric polynomial of any order, supporting approximations to refined variants such as the Mitchell-Netravali filter family. This enables high-quality rasterization of triangles and tetrahedra with linearly interpolated vertex values to regular and non-regular grids. A closed form solution of the convolution is presented and an efficient implementation on the GPU using DirectX and CUDA C is described.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "335--344", keywords = "Polytope, Filter Design, Analytic Anti-Aliasing, Sampling, Integral Formula, Spherically Symmetric Filter, CUDA, Closed Form Solution, 2D 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_AAA/", } @inproceedings{fink-2012-cg1, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2012", abstract = "Shaders are a fundamental pattern of the modern graphics pipeline. This paper presents a syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. 
We also show how to create attractive course materials by using COLLADA, an open standard for 3D content exchange.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", issn = "1017-4656", event = "Eurographics 2012", editor = "Giovanni Gallo and Beatriz Sousa Santos", booktitle = "Eurographics 2012 -- Education Papers", pages = "73--80", keywords = "Education, Collada, Java, Introductory Computer Graphics, Software Rasterizer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/fink-2012-cg1/", } @article{Habel_2012_PSP, title = "Practical Spectral Photography", author = "Ralf Habel and Michael Kudenov and Michael Wimmer", year = "2012", abstract = "We introduce a low-cost and compact spectral imaging camera design based on unmodified consumer cameras and a custom camera objective. The device can be used in a high-resolution configuration that measures the spectrum of a column of an imaged scene with up to 0.8 nm spectral resolution, rivalling commercial non-imaging spectrometers, and a mid-resolution hyperspectral mode that allows the spectral measurement of a whole image, with up to 5 nm spectral resolution and 120x120 spatial resolution. We develop the necessary calibration methods based on halogen/fluorescent lamps and laser pointers to acquire all necessary information about the optical system. We also derive the mathematical methods to interpret and reconstruct spectra directly from the Bayer array images of a standard RGGB camera. 
This objective design introduces accurate spectral remote sensing to computational photography, with numerous applications in color theory, colorimetry, vision and rendering, making the acquisition of a spectral image as simple as taking a high-dynamic-range image.", month = may, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "449--458", keywords = "Computational Photography, Spectroscopy, Computed Tomography Imaging Spectrometer, Practical", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Habel_2012_PSP/", } @techreport{TR-186-2-12-01, title = "Interactive Screen-Space Triangulation for High-Quality Rendering of Point Clouds", author = "Reinhold Preiner and Michael Wimmer", year = "2012", abstract = "This technical report documents work that is a precursor to the Auto Splatting technique. We present a rendering method that reconstructs high quality images from unorganized colored point data. While previous real-time image reconstruction approaches for point clouds make use of preprocessed data like point radii or normal estimations, our algorithm only requires position and color data as input and produces a reconstructed color image, normal map and depth map which can instantly be used to apply further deferred lighting passes. Our method performs a world-space neighbor search and a subsequent normal estimation in screen-space, and uses the geometry shader to triangulate the color, normal and depth information of the points. To achieve correct visibility and closed surfaces in the projected image a temporal coherence approach reuses triangulated depth information and provides adaptive neighbor search radii. 
Our algorithm is especially suitable for in-situ high-quality visualization of big datasets like 3D-scans, making otherwise time-consuming preprocessing steps to reconstruct surface normals or point radii dispensable.", month = apr, number = "TR-186-2-12-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "screen triangulation, point rendering, nearest neighbors, screen-space, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/TR-186-2-12-01/", } @incollection{MATTAUSCH-2012-EOV, title = "Efficient Online Visibility for Shadow Maps", author = "Oliver Mattausch and Jir\'{\i} Bittner and Ari Silvennoinen and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Standard online occlusion culling is able to vastly improve the rasterization performance of walkthrough applications by identifying large parts of the scene as invisible from the camera and rendering only the visible geometry. However, it is of little use for the acceleration of shadow map generation (i.e., rasterizing the scene from the light view [Williams 78]), so that typically a high percentage of the geometry will be visible when rendering shadow maps. For example, in outdoor scenes typical viewpoints are near the ground and therefore have significant occlusion, while light viewpoints are higher up and see most of the geometry. 
Our algorithm remedies this situation by quickly detecting and culling the geometry that does not contribute to the shadow in the final image.", month = feb, booktitle = "GPU Pro 3: Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "978-1439887820", publisher = "CRC Press", keywords = "shadow maps, visibility culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-EOV/", } @inproceedings{EISEMANN-2012-ERT, title = "Efficient Real-Time Shadows", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michal Valient and Michael Wimmer", year = "2012", abstract = "This course is a resource for applying efficient, real-time shadow algorithms. It builds on a solid foundation (previous courses at SIGGRAPH Asia 2009 and Eurographics 2010, including comprehensive course notes) and the 2011 book Real-Time Shadows (AK Peters) written by four of the presenters. The book is a compendium of many topics in the realm of shadow computation.", booktitle = "ACM SIGGRAPH 2012 Courses", isbn = "978-1-4503-1678-1", location = "Los Angeles, CA", publisher = "ACM", pages = "18:1--18:53", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/EISEMANN-2012-ERT/", } @article{bernhard-2011-bmtf, title = "Bi-modal Task Facilitation in a Virtual Traffic Scenario through Spatialized Sound Rendering", author = "Matthias Bernhard and Karl Grosse and Michael Wimmer", year = "2011", abstract = "Audio rendering is generally used to increase the realism of Virtual Environments (VE). In addition, audio rendering may also improve the performance in specific tasks carried out in interactive applications such as games or simulators. In this paper we investigate the effect of the quality of sound rendering on task performance in a task which is inherently vision dominated. 
The task is a virtual traffic gap crossing scenario with two elements: first, to discriminate crossable and uncrossable gaps in oncoming traffic, and second, to find the right timing to start crossing the street without an accident. A study was carried out with 48 participants in an immersive Virtual Environment setup with a large screen and headphones. Participants were grouped into three different conditions. In the first condition, spatialized audio rendering with head-related transfer function (HRTF) filtering was used. The second group was tested with conventional stereo rendering, and the remaining group ran the experiment in a mute condition. Our results give a clear evidence that spatialized audio improves task performance compared to the unimodal mute condition. Since all task-relevant information was in the participants' field-of-view, we conclude that an enhancement of task performance results from a bimodal advantage due to the integration of visual and auditory spatial cues.", month = nov, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", note = "Article No. 24", number = "4", volume = "8", pages = "1--22", keywords = "bimodal task faciliation, pedestrian safety, virtual environments, audio-visual perception, head related transfer functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-bmtf/", } @inproceedings{knecht-2011-CBCM, title = "Adaptive Camera-Based Color Mapping For Mixed-Reality Applications", author = "Martin Knecht and Christoph Traxler and Werner Purgathofer and Michael Wimmer", year = "2011", abstract = "We present a novel adaptive color mapping method for virtual objects in mixed-reality environments. In several mixed-reality applications, added virtual objects should be visually indistinguishable from real objects. Recent mixed-reality methods use global-illumination algorithms to approach this goal. 
However, simulating the light distribution is not enough for visually plausible images. Since the observing camera has its very own transfer function from real-world radiance values to RGB colors, virtual objects look artificial just because their rendered colors do not match with those of the camera. Our approach combines an on-line camera characterization method with a heuristic to map colors of virtual objects to colors as they would be seen by the observing camera. Previous tone-mapping functions were not designed for use in mixed-reality systems and thus did not take the camera-specific behavior into account. In contrast, our method takes the camera into account and thus can also handle changes of its parameters during runtime. The results show that virtual objects look visually more plausible than by just applying tone-mapping operators.", month = oct, isbn = "978-1-4577-2183-0", publisher = "IEEE/IET Electronic Library (IEL), IEEE-Wiley eBooks Library, VDE VERLAG Conference Proceedings", note = "E-ISBN: 978-1-4577-2184-7", location = "Basel, Switzerland", booktitle = "Proceedings of the 2011 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2011)", pages = "165--168", keywords = "Color Matching, Differential Rendering, Mixed Reality, Tone Mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht-2011-CBCM/", } @inproceedings{KUE11, title = "BRDF approximation and estimation for Augmented Reality", author = "Patrick K\"{u}htreiber and Martin Knecht and Christoph Traxler", year = "2011", abstract = "In Augmented Reality applications it is important to have a good description of the surfaces of real objects if a consistent shading between real and virtual object is required. If such a description of a surface is not available it has to be estimated or approximated. In our paper we will present certain methods that deal with real-time bi-directional reflectance distribution function (BRDF) approximation in augmented reality. 
Of course an important thing to discuss is whether the applications we present all work in real-time and compute good (and real) looking results. There are different methods on how to achieve this goal. All of the methods we are going to present work via image based lighting and some require a 3D polygonal mesh representation of the object whose BRDF shall be approximated. Some methods estimate the BRDF parameters via error values and provide results at each iteration.", month = oct, organization = {``Gheorghe Asachi'' Technical University of Iasi, Faculty of Automatic Control and Computer Engineering}, location = "Sinaia, Romania", booktitle = "15th International Conference on System Theory, Control and Computing", pages = "318--324", keywords = "Mixed Reality, BRDF Estimation", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/KUE11/", } @WorkshopTalk{preiner11IR, title = "Screen-Space Triangulation for Interactive Point Rendering", author = "Reinhold Preiner", year = "2011", abstract = "We present a novel rendering technique that reconstructs high quality images from unorganized colored point data. While previous point rendering approaches make mostly use of preprocessed point normals and radii, our algorithm only requires position and color data as input and produces a reconstructed color image, normal map and depth map which can instantly be used to apply further deferred lighting passes. Our method performs a world-space neighbor search and a subsequent normal estimation in screen-space, and uses the geometry shader to triangulate the color, normal and depth information of the points. To achieve correct visibility and closed surfaces in the projected image a temporal coherence approach reuses triangulated depth information and provides adaptive neighbor search radii. 
Our algorithm is especially suitable for in-situ high-quality visualization of big datasets like 3D-scans, making otherwise time-consuming preprocessing steps to reconstruct surface normals and point radii dispensable. ", month = jun, event = "Austrian-Russian Joint Seminar", location = "Vienna", keywords = "point visualization, screen space reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/preiner11IR/", } @article{jeschke-2011-est, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texure, and we provide intuitive manual controls to edit the parameters of the Gabor noise. 
Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling and hatching applications.", month = apr, journal = "Computer Graphics Forum", volume = "30", number = "2", note = "This paper won the 2nd best paper award at Eurographics 2011.", issn = "0167-7055", pages = "523--532", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-est/", } @inproceedings{knecht_martin-2011-FPSPAR, title = "A Framework For Perceptual Studies In Photorealistic Augmented Reality", author = "Martin Knecht and Andreas D\"{u}nser and Christoph Traxler and Michael Wimmer and Raphael Grasset", year = "2011", abstract = "In photorealistic augmented reality virtual objects are integrated in the real world in a seamless visual manner. To obtain a perfect visual augmentation these objects must be rendered indistinguishable from real objects and should be perceived as such. In this paper we propose a research test bed framework to study the different unresolved perceptual issues in photorealistic augmented reality and its application to different disciplines. 
The framework computes a global illumination approximation in real-time and therefore leverages a new class of experimental research topics.", month = mar, location = "Singapore", editor = "Frank Steinicke, Pete Willemsen", booktitle = "Proceedings of the 3rd IEEE VR 2011 Workshop on Perceptual Illusions in Virtual Environments", pages = "27--32", keywords = "photorealistic augmented reality, real-time global illumination, human perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht_martin-2011-FPSPAR/", } @incollection{2011scherzerE, title = "Fast Soft Shadows with Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/2011scherzerE/", } @inproceedings{bittner-2011-scc, title = "Shadow Caster Culling for Efficient Shadow Mapping", author = "Jir\'{i} Bittner and Oliver Mattausch and Ari Silvennoinen and Michael Wimmer", year = "2011", abstract = "We propose a novel method for efficient construction of shadow maps by culling shadow casters which do not contribute to visible shadows. The method uses a mask of potential shadow receivers to cull shadow casters using a hierarchical occlusion culling algorithm. We propose several variants of the receiver mask implementations with different culling efficiency and computational costs. For scenes with statically focused shadow maps we designed an efficient strategy to incrementally update the shadow map, which comes close to the rendering performance for unshadowed scenes. 
We show that our method achieves 3x-10x speedup for rendering large city like scenes and 1.5x-2x speedup for rendering an actual game scene.", month = feb, isbn = "978-1-4503-0565-5", publisher = "ACM", organization = "ACM SIGGRAPH", location = "San Francisco", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2011", pages = "81--88", keywords = "occlusion culling, shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bittner-2011-scc/", } @incollection{Habel_LSN_2011, title = "Level-of-Detail and Streaming Optimized Irradiance Normal Mapping", author = "Ralf Habel and Anders Nilsson and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/Habel_LSN_2011/", } @incollection{matt2011, title = "Temporal Screen-Space Ambient Occlusion", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2011", month = feb, booktitle = "GPU Pro 2", editor = "Wolfgang Engel", isbn = "978-1568817187", publisher = "A.K. Peters", keywords = "ambient occlusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/matt2011/", } @article{scherzer2011d, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2011", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. 
The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~\cite{HLHS03}, while the last survey for general shadow generation dates back to 1990~\cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the readers with the means to choose the shadow algorithm best suited to their needs. ", month = feb, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "30", pages = "169--186", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011d/", } @book{EISEMANN-2011-RTS, title = "Real-Time Shadows", author = "Elmar Eisemann and Michael Schwarz and Ulf Assarsson and Michael Wimmer", year = "2011", abstract = "Important elements of games, movies, and other computer-generated content, shadows are crucial for enhancing realism and providing important visual cues. In recent years, there have been notable improvements in visual quality and speed, making high-quality realistic real-time shadows a reachable goal. Real-Time Shadows is a comprehensive guide to the theory and practice of real-time shadow techniques. 
It covers a large variety of different effects, including hard, soft, volumetric, and semi-transparent shadows. The book explains the basics as well as many advanced aspects related to the domain of shadow computation. It presents interactive solutions and practical details on shadow computation. The authors compare various algorithms for creating real-time shadows and illustrate how they are used in different situations. They explore the limitations and failure cases, advantages and disadvantages, and suitability of the algorithms in several applications. Source code, videos, tutorials, and more are available on the book's website.", isbn = "978-1568814384", pages = "398", publisher = "A.K. Peters", keywords = "computer games, real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/EISEMANN-2011-RTS/", } @inproceedings{scherzer2011c, title = "A Survey on Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2011", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the processing gain is counteracted to a high degree by increasingly complex and sophisticated pixel computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit \emph{temporal coherence} (TC). 
The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this STAR, we will investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow us to incorporate more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a key data structure, the so-called \emph{reprojection cache}, with several supporting algorithms that facilitate reusing shading information from previous frames. Its usefulness is illustrated in the second part of the STAR, where we present various applications. We illustrate how expensive pixel shaders, multi-pass shading effects, stereo rendering, shader antialiasing, shadow casting, and global-illumination effects can profit from pixel reuse. Furthermore, we will see that optimizations for visibility culling and object-space global illumination can also be achieved by exploiting TC. This STAR enables the reader to gain an overview of many techniques in this cutting-edge field and provides many insights into algorithmic choices and implementation issues. It delivers working knowledge of how various existing techniques are optimized via data reuse. Another goal of this STAR is to inspire the reader and to raise awareness for temporal coherence as an elegant tool that could be a crucial component to satisfy the recent need for higher resolution and more detailed content. 
", booktitle = "EUROGRAPHICS 2011 State of the Art Reports", location = "Llandudno UK", publisher = "Eurographics Association", pages = "101--126", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011c/", } @article{mattausch-2010-tao, title = "High-Quality Screen-Space Ambient Occlusion using Temporal Coherence", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "Ambient occlusion is a cheap but effective approximation of global illumination. Recently, screen-space ambient occlusion (SSAO) methods, which sample the frame buffer as a discretization of the scene geometry, have become very popular for real-time rendering. We present temporal SSAO (TSSAO), a new algorithm which exploits temporal coherence to produce high-quality ambient occlusion in real time. Compared to conventional SSAO, our method reduces both noise as well as blurring artifacts due to strong spatial filtering, faithfully representing fine-grained geometric structures. Our algorithm caches and reuses previously computed SSAO samples, and adaptively applies more samples and spatial filtering only in regions that do not yet have enough information available from previous frames. The method works well for both static and dynamic scenes.", month = dec, issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "29", pages = "2492--2503", keywords = "temporal coherence, ambient occlusion, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/mattausch-2010-tao/", } @inproceedings{Habel_RAV_2010, title = "Real-Time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2010", abstract = "Vegetation in all its different forms is almost always part of a scenery, be it fully natural or urban. Even in completely cultivated areas or indoor scenes, though not very dominant, potted plants or alley trees and patches of grass are usually part of a surrounding. 
Rendering and animating vegetation is substantially different from rendering and animating geometry with less geometric complexity such as houses, manufactured products or other objects consisting of largely connected surfaces. In this paper we will discuss several challenges posed by vegetation in real-time applications such as computer games and virtual reality applications and show efficient solutions to the problems.", month = oct, location = "Sinaia", issn = "2068-0465", booktitle = "14th International Conference on System Theory and Control (Joint conference of SINTES14, SACCS10, SIMSIS14)", pages = "231--236", keywords = "Animation, Real-Time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel_RAV_2010/", } @inproceedings{knecht_martin_2010_DIR, title = "Differential Instant Radiosity for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "In this paper we present a novel plausible realistic rendering method for mixed reality systems, which is useful for many real life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates (20-30fps). The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. We describe a new method to assign virtual point lights to multiple primary light sources, which can be real or virtual. 
We use imperfect shadow maps for calculating illumination from virtual point lights and have significantly improved their accuracy by taking the surface normal of a shadow caster into account. Temporal coherence is exploited to reduce flickering artifacts. Our results show that the presented method highly improves the illusion in mixed reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = oct, note = "Best Paper Award!", location = "Seoul", booktitle = "Proceedings of the 2010 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2010)", pages = "99--107", keywords = "Instant Radiosity, Differential Rendering, Real-time Global Illumination, Mixed Reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/knecht_martin_2010_DIR/", } @inproceedings{scherzer2010e, title = "An Overview of Temporal Coherence Methods in Real-Time Rendering ", author = "Daniel Scherzer", year = "2010", abstract = "Most of the power of modern graphics cards is put into the acceleration of shading tasks because here lies the major bottleneck for most sophisticated real-time algorithms. By using temporal coherence, i.e. reusing shading information from a previous frame, this problem can be alleviated. This paper gives an overview of the concepts of temporal coherence in real-time rendering and should give the reader the working practical and theoretical knowledge to exploit temporal coherence in his own algorithms. 
", month = oct, organization = "IEEE", location = "Sinaia, Romania", issn = "2068-0465", booktitle = " 14th International Conference on System Theory and Control 2010", pages = "497--502", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010e/", } @article{bagar2010, title = "A Layered Particle-Based Fluid Model for Real-Time Rendering of Water", author = "Florian Bagar and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "We present a physically based real-time water simulation and rendering method that brings volumetric foam to the real-time domain, significantly increasing the realism of dynamic fluids. We do this by combining a particle-based fluid model that is capable of accounting for the formation of foam with a layered rendering approach that is able to account for the volumetric properties of water and foam. Foam formation is simulated through Weber number thresholding. For rendering, we approximate the resulting water and foam volumes by storing their respective boundary surfaces in depth maps. This allows us to calculate the attenuation of light rays that pass through these volumes very efficiently. We also introduce an adaptive curvature flow filter that produces consistent fluid surfaces from particles independent of the viewing distance.", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2010)", volume = "29", number = "4", issn = "0167-7055", pages = "1383--1389", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bagar2010/", } @phdthesis{Mattausch-2010-vcr, title = "Visibility Computations for Real-Time Rendering in General 3D Environments", author = "Oliver Mattausch", year = "2010", abstract = "Visibility computations are essential operations in computer graphics, which are required for rendering acceleration in the form of visibility culling, as well as for computing realistic lighting. 
Visibility culling, which is the main focus of this thesis, aims to provide output sensitivity by sending only visible primitives to the hardware. Regardless of the rapid development of graphics hardware, it is of crucial importance for many applications like game development or architectural design, as the demands on the hardware regarding scene complexity increase accordingly. Solving the visibility problem has been an important research topic for many years, and countless methods have been proposed. Interestingly, there are still open research problems up to this day, and many algorithms are either impractical or only usable for specific scene configurations, preventing their widespread use. Visibility culling algorithms can be separated into algorithms for visibility preprocessing and online occlusion culling. Visibility computations are also required to solve complex lighting interactions in the scene, ranging from soft and hard shadows to ambient occlusion and full fledged global illumination. It is a big challenge to answer hundreds or thousands of visibility queries within a fraction of a second in order to reach real-time frame rates, which is one goal that we want to achieve in this thesis. The contribution of this thesis are four novel algorithms that provide solutions for efficient visibility interactions in order to achieve high-quality output-sensitive real-time rendering, and are general in the sense that they work with any kind of 3D scene configuration. First we present two methods dealing with the issue of automatically partitioning view space and object space into useful entities that are optimal for the subsequent visibility computations. Amazingly, this problem area was mostly ignored despite its importance, and view cells are mostly tweaked by hand in practice in order to reach optimal performance – a very time consuming task. 
The first algorithm specifically deals with the creation of an optimal view space partition into view cells using a cost heuristics and sparse visibility sampling. The second algorithm extends this approach to optimize both view space subdivision and object space subdivision simultaneously. Next we present a hierarchical online culling algorithm that eliminates most limitations of previous approaches, and is rendering engine friendly in the sense that it allows easy integration and efficient material sorting. It reduces the main problem of previous algorithms – the overhead due to many costly state changes and redundant hardware occlusion queries – to a minimum, obtaining up to three times speedup over previous work. At last we present an ambient occlusion algorithm which works in screen space, and show that high-quality shading with effectively hundreds of samples per pixel is possible in real time for both static and dynamic scenes by utilizing temporal coherence to reuse samples from previous frames.", month = apr, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "3D rendering, real-time rendering, ambient occlusion, visibility, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Mattausch-2010-vcr/", } @inproceedings{Habel-2010-EIN, title = "Efficient Irradiance Normal Mapping", author = "Ralf Habel and Michael Wimmer", year = "2010", abstract = "Irradiance normal mapping is a method to combine two popular techniques, light mapping and normal mapping, and is used in games such as Half-Life 2 or Halo 3. This combination allows using low-resolution light caching on surfaces with only a few coefficients which are evaluated by normal maps to render spatial high-frequency changes in the lighting. 
Though there are dedicated bases for this purpose such as the Half-Life 2 basis, higher order basis functions such as quadratic Spherical Harmonics are needed for an accurate representation. However, a full spherical basis is not needed since the irradiance is stored on the surface of a scene. In order to represent the irradiance signals efficiently, we propose a novel polynomial, hemispherically orthonormal basis function set that is specifically designed to carry a directional irradiance signal on the hemisphere and which makes optimal use of the number of coefficients. To compare our results with previous work, we analyze the relations and attributes of previously proposed basis systems and show that 6 coefficients are sufficient to accurately represent an irradiance signal on the hemisphere. To create the necessary irradiance signals, we use Spherical Harmonics as an intermediate basis due to their fast filtering capabilities.", month = feb, isbn = "978-1-60558-939-8", publisher = "ACM", location = "Washington D.C.", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2010", pages = "189--195", keywords = "irradiance, real-time rendering, normal mapping, lightmap", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel-2010-EIN/", } @talk{jeschke-2010-diff, title = "Rendering Diffusion Curves in 2 and 3 Dimensions", author = "Stefan Jeschke", year = "2010", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. The first part of the talk presents a new Laplacian surface solver for a stable rendering of DCIs. 
It consists of a robust rasterization technique to transform the algebraic curves to the discrete image domain, and a variable stencil size diffusion solver that solves the minimal surface problem. The solver is proven to converge to the right solution, it is at least as fast as commonly used multigrid solvers, but much simpler to implement, works for arbitrary image resolutions, as well as 8 bit data. The second part of the talk extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically allocates more texture memory for details close to the observer. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. Our results show high quality renderings at interactive frame rates.", event = "Visit of MPII in Saarbruecken", location = "MPII Saarbruecken", keywords = "Diffusion curves", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/jeschke-2010-diff/", } @book{mattausch-2010-var, title = "Visibility Algorithms for Real-Time Rendering", author = "Oliver Mattausch", year = "2010", isbn = "978-3-8381-1887-1", pages = "212", publisher = "Suedwestdeutscher Verlag fuer Hochschulschriften", keywords = "real-time rendering, occlusion culling , ambient occlusion, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/mattausch-2010-var/", } @article{preiner_2010_GIPC, title = "Real-Time Global Illumination for Point Cloud Scenes", author = "Reinhold Preiner and Michael Wimmer", year = "2010", abstract = "In this paper we present a real-time global illumination approach for illuminating scenes containing large point clouds. 
Our approach is based on the distribution of Virtual Point Lights (VPLs) in the scene, which are then used for the indirect illumination of the visible surfaces, using Imperfect Shadow Maps for visibility calculation of the VPLs. We are able to render multiple indirect light bounces, where each light bounce accounts for the transport of both the diffuse and the specular fraction of the reflected light.", journal = "Computer Graphics \& Geometry", number = "1", volume = "12", pages = "2--16", keywords = "virtual point lights, imperfect shadow maps, point rendering, point clouds, global illumination, VPL, ISM", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/preiner_2010_GIPC/", } @inproceedings{scherzer_2010a, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2010", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~\cite{HLHS03} and the last survey for general shadow generation dates back to 1990~\cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. 
%Real-time research was always tempted to bring global lighting techniques into the real-time domain. One of the most popular adaptations in this respect are hard shadows. It is therefore not surprising that real-time hard shadow generation has been one of the most active areas in research in recent years. But what is surprising is that the last state-of-the-art report that encompassed this field dates back to 1990~\cite{Woo:1990:SSA}, where only the beginnings of this field were explored. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive from this a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the reader with the means to choose the shadow algorithm best suited to her needs.", booktitle = "EUROGRAPHICS 2010 State of the Art Reports", location = "Norrk\"{o}ping, Sweden", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer_2010a/", } @book{scherzer2010b, title = "Temporal Coherence in Real-Time Rendering", author = "Daniel Scherzer", year = "2010", abstract = "Real-time rendering imposes the challenging task of creating a new rendering of an input scene at least 60 times per second. Although computer graphics hardware has made staggering advances in terms of speed and freedom of programmability in recent years, there still exists a number of algorithms that are just too expensive to be calculated in this time budget, like for instance exact shadows or an exact global illumination solution. One way to circumvent this hard time limit is to capitalize on temporal coherence to formulate algorithms incremental in time. To this end, three algorithms which successfully incorporate temporal coherence are analysed in detail. 
To highlight the benefits which are provided by these new practical algorithms, this book also includes the respective previous work. This includes not only the field of temporal coherence, but also the fields of real-time hard and soft shadows and discrete LOD blending. This book targets computer scientists and students with prior knowledge in real-time rendering.", isbn = "978-3-639-09196-0", pages = "122", publisher = "Verlag Dr. M\"{u}ller", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010b/", } @inproceedings{scherzer2010d, title = "Exploiting Temporal Coherence in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch", year = "2010", abstract = "Temporal coherence (TC), the correlation of contents between adjacent rendered frames, exists across a wide range of scenes and motion types in practical real-time rendering. By taking advantage of TC, we can save redundant computation and improve the performance of many rendering tasks significantly with only a marginal decrease in quality. This not only allows us to incorporate more computationally intensive shading effects to existing applications, but also offers exciting opportunities of extending high-end graphics applications to reach lower-spec consumer-level hardware. This course aims to introduce participants to the concepts of TC, and provide them the working practical and theoretical knowledge to exploit TC in a variety of shading tasks. It begins with an introduction of the general notion of TC in rendering, as well as an overview of the recent developments in this field. Then it focuses on a key data structure - the reverse reprojection cache, which is the foundation of many applications. The course proceeds with a number of extensions of the basic algorithm for assisting in multi-pass shading effects, shader antialiasing, casting shadows and global-illumination effects. 
Finally, several more general coherence topics beyond pixel reuse are introduced, including visibility culling optimization and object-space global-illumination approximations. For all the major techniques and applications covered, implementation and practical issues involved in development are addressed in detail. In general, we emphasize {``know how''} and the guidelines related to algorithm choices. After the course, participants are encouraged to find and utilize TC in their own applications and rapidly adapt existing algorithms to meet their requirements.", booktitle = "SIGGRAPH Asia 2010 Courses", location = "Seoul, S\"{u}dkorea", keywords = "shadows, temporal coherence, real-time, rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010d/", } @article{jeschke-09-rendering, title = "Rendering Surface Details with Diffusion Curves", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. This paper extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically reallocates texture space so that object parts that appear large on screen get more texture for increased detail. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. 
Our results show high quality renderings of diffusion curve textures, displacements, and geometry images, all rendered interactively.", month = dec, journal = "Transactions on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Geometry images, Displacement mapping, Diffusion curves, Line and Curve rendering ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-rendering/", } @inproceedings{SSMW09, title = "Real-Time Soft Shadows Using Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "A vast amount of soft shadow map algorithms have been presented in recent years. Most use a single sample hard shadow map together with some clever filtering technique to calculate perceptually or even physically plausible soft shadows. On the other hand there is the class of much slower algorithms that calculate physically correct soft shadows by taking and combining many samples of the light. In this paper we present a new soft shadow method that combines the benefits of these approaches. It samples the light source over multiple frames instead of a single frame, creating only a single shadow map each frame. Where temporal coherence is low we use spatial filtering to estimate additional samples to create correct and very fast soft shadows. 
", month = dec, isbn = "978-3642103308", series = "Lecture Notes in Computer Science", publisher = "Springer", location = "Las Vegas, Nevada, USA", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Kuno, Y.; Wang, J.; Pajarola, R.; Lindstrom, P.; Hinkenjann, A.; Encarnacao, M.; Silva, C.; Coming, D.", booktitle = "Advances in Visual Computing: 5th International Symposium on Visual Computing (ISVC 2009)", pages = "13--24", keywords = "real-time rendering, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SSMW09/", } @misc{Habel-09-RAT, title = "Real-Time Rendering and Animation of Trees", author = "Ralf Habel and Alexander Kusternig", year = "2009", abstract = "This demonstration combines novel methods for physically accurate yet efficient rendering and animation of trees under dynamic lighting conditions. A new leaf shading method is used that models the high-frequency structures such as veins and bulges to reproduce all important lighting attributes on a physical basis. Those structures are also used to calculate the translucency of leaves, which is modeled with physically based subsurface scattering, incorporating self-shadowing, thickness variations and varying albedo. This allows consistent reflective and translucent shading without constraining lighting or animation for close-up views. The individual deformation and animation of leaves and branches is defined by their physical properties such as shape and elasticity. A structural mechanics model is solved and combined with a length correction to achieve a physically plausible bending. To model the tree-wind interaction, a spectral approach is applied that allows for a physically guided animation as well as a high level of control. The applied methods have been published in "Physically Based Real-time Translucency for Leaves (EGSR 2007)" and "Physically Guided Animation of Trees (Eurographics 2009)". 
", month = aug, keywords = "Real-Time Rendering, Trees, Animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel-09-RAT/", } @article{BITTNER-2009-AGVS, title = "Adaptive Global Visibility Sampling", author = "Jir\'{i} Bittner and Oliver Mattausch and Peter Wonka and Vlastimil Havran and Michael Wimmer", year = "2009", abstract = "In this paper we propose a global visibility algorithm which computes from-region visibility for all view cells simultaneously in a progressive manner. We cast rays to sample visibility interactions and use the information carried by a ray for all view cells it intersects. The main contribution of the paper is a set of adaptive sampling strategies based on ray mutations that exploit the spatial coherence of visibility. Our method achieves more than an order of magnitude speedup compared to per-view cell sampling. This provides a practical solution to visibility preprocessing and also enables a new type of interactive visibility analysis application, where it is possible to quickly inspect and modify a coarse global visibility solution that is constantly refined. ", month = aug, journal = "ACM Transactions on Graphics", volume = "28", number = "3", issn = "0730-0301", pages = "94:1--94:10", keywords = "occlusion culling, visibility sampling, visibility, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-AGVS/", } @article{cline-09-poisson, title = "Dart Throwing on Surfaces", author = "David Cline and Stefan Jeschke and Anshuman Razdan and Kenric White and Peter Wonka", year = "2009", abstract = "In this paper we present dart throwing algorithms to generate maximal Poisson disk point sets directly on 3D surfaces. We optimize dart throwing by efficiently excluding areas of the domain that are already covered by existing darts. In the case of triangle meshes, our algorithm shows dramatic speed improvement over comparable sampling methods. 
The simplicity of our basic algorithm naturally extends to the sampling of other surface types, including spheres, NURBS, subdivision surfaces, and implicits. We further extend the method to handle variable density points, and the placement of arbitrary ellipsoids without overlap. Finally, we demonstrate how to adapt our algorithm to work with geodesic instead of Euclidean distance. Applications for our method include fur modeling, the placement of mosaic tiles and polygon remeshing.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1217--1226", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/cline-09-poisson/", } @article{weidlich-2009-dispersion, title = "Anomalous Dispersion in Predictive Rendering", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "In coloured media, the index of refraction does not decrease monotonically with increasing wavelength, but behaves in a quite non-monotonical way. This behaviour is called anomalous dispersion and results from the fact that the absorption of a material influences its index of refraction. So far, this interesting fact has not been widely acknowledged by the graphics community. In this paper, we demonstrate how to calculate the correct refractive index for a material based on its absorption spectrum with the Kramers-Kronig relation, and we discuss for which types of objects this effect is relevant in practice. 
", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1065--1072", keywords = "Predictive rendering, Spectral Rendering, Dispersion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-dispersion/", } @article{wilkie-2009-cc, title = "A Robust Illumination Estimate for Chromatic Adaptation in Rendered Images", author = "Alexander Wilkie and Andrea Weidlich", year = "2009", abstract = "We propose a method that improves automatic colour correction operations for rendered images. In particular, we propose a robust technique for estimating the visible and pertinent illumination in a given scene. We do this at very low computational cost by mostly re-using information that is already being computed during the image synthesis process. Conventional illuminant estimations either operate only on 2D image data, or, if they do go beyond pure image analysis, only use information on the luminaires found in the scene. The latter is usually done with little or no regard for how the light sources actually affect the part of the scene that is being viewed. Our technique goes beyond that, and also takes object reflectance into account, as well as the incident light that is actually responsible for the colour of the objects that one sees. It is therefore able to cope with difficult cases, such as scenes with mixed illuminants, complex scenes with many light sources of varying colour, or strongly coloured indirect illumination. 
", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1101--1109", keywords = "chromatic adaptation, predicitve rendering, colour constancy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/wilkie-2009-cc/", } @inproceedings{weidlich_2009_REL, title = "Rendering the Effect of Labradorescence", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "Labradorescence is a complex optical phenomenon that can be found in certain minerals, such as Labradorite or Spectrolite. Because of their unique colour properties these minerals are often used as gemstones and decorative objects. Since the phenomenon is strongly orientation dependent, such minerals need a special cut to make the most of their unique type of colourful sheen, which makes it desirable to be able to predict the final appearance of a given stone prior to the cutting process. Also, the peculiar properties of the effect make a believable replication with an ad-hoc shader dificult even for normal, non-predictive rendering purposes. We provide a reflectance model for labradorescence that is directly derived from the physical characteristics of such materials. Due to its inherent accuracy, it can be used for predictive rendering purposes, but also for generic rendering applications. ", month = may, isbn = "978-1-56881-470-4", publisher = "ACM", location = "Kelowna, British Columbia, Canada ", booktitle = "Proceedings of Graphics Interface 2009", pages = "79--85", keywords = "Predictive Rendering, Surface, Crystals", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich_2009_REL/", } @incollection{BITTNER-2009-GEFOC, title = "Game-Engine-Friendly Occlusion Culling", author = "Jir\'{i} Bittner and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "This article presents a method which minimizes the overhead associated with occlusion queries. 
The method reduces the number of required state changes and should integrate easily with most game engines. The key ideas are batching of the queries and interfacing with the game engine using a dedicated render queue. We also present some additional optimizations which reduce the number of queries issued as well as the number of rendered primitives. The algorithm is based on the well-known Coherent Hierarchical Culling algorithm.", month = mar, booktitle = "SHADERX7: Advanced Rendering Techniques", chapter = "8.3", editor = "Wolfgang Engel", isbn = "1-58450-598-2", publisher = "Charles River Media", volume = "7", keywords = "real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-GEFOC/", } @article{Habel_09_PGT, title = "Physically Guided Animation of Trees", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2009", abstract = "This paper presents a new method to animate the interaction of a tree with wind both realistically and in real time. The main idea is to combine statistical observations with physical properties in two major parts of tree animation. First, the interaction of a single branch with the forces applied to it is approximated by a novel efficient two step nonlinear deformation method, allowing arbitrary continuous deformations and circumventing the need to segment a branch to model its deformation behavior. Second, the interaction of wind with the dynamic system representing a tree is statistically modeled. By precomputing the response function of branches to turbulent wind in frequency space, the motion of a branch can be synthesized efficiently by sampling a 2D motion texture. 
Using a hierarchical form of vertex displacement, both methods can be combined in a single vertex shader, fully leveraging the power of modern GPUs to realistically animate thousands of branches and ten thousands of leaves at practically no cost.", month = mar, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2009)", volume = "28", number = "2", issn = "0167-7055", pages = "523--532", keywords = "Animation, Physically Guided animation, Vegetation, Trees", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_09_PGT/", } @inproceedings{GRELAUD-2009-EPA, title = "Efficient and Practical Audio-Visual Rendering for Games using Crossmodal Perception", author = "David Grelaud and Nicolas Bonneel and Michael Wimmer and Manuel Asselot and George Drettakis", year = "2009", abstract = "Interactive applications such as computer games, are inherently audio visual, requiring high-quality rendering of complex 3D audio soundscapes and graphics environments. A frequent source of audio events is impact sounds, typically generated with physics engines. In this paper, we first present an optimization allowing efficient usage of impact sounds in a unified audio rendering pipeline, also including prerecorded sounds. We also exploit a recent result on audio-visual crossmodal perception to introduce a new level-of-detail selection algorithm, which jointly chooses the quality level of audio and graphics rendering. 
We have integrated these two techniques as a comprehensive crossmodal audio-visual rendering pipeline in a home-grown game engine, thus demonstrating the potential utility of our approach.", month = feb, isbn = "978-1-60558-429-4", publisher = "ACM", location = "Boston, Massachusetts", address = "New York, NY, USA", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2009", pages = "177--182", keywords = "audio-visual rendering, crossmodal perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/GRELAUD-2009-EPA/", } @phdthesis{Habel_2009_PhD, title = "Real-time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display dense and short grass is introduced. In contrast to previous methods, the new approach is based on ray tracing to avoid the massive overdraw of billboard or explicit geometry representation techniques, achieving independence of the complexity of the grass without losing the visual characteristics of grass such as parallax and occlusion effects as the viewpoint moves. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering and special attention is given to the translucency of leaves, an integral part of leaf shading. 
The light transport through a leaf is precomputed and can be easily evaluated at runtime, making it possible to shade a massive amount of leaves while including the effects that occur due to the leaf structure such as varying albedo and thickness variations or self shadowing. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. This model does not require the branches to be segmented by joints as other methods, achieving smooth and accurate bending, and can be executed fully on a GPU. To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. This allows animating a highly detailed tree with thousands of branches and ten thousands of leaves efficiently. Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing to change the skylight parameters in real time at no considerable cost and memory footprint.", month = feb, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Animation, Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_2009_PhD/", } @phdthesis{weidlich-2009-thesis, title = "Pseudochromatic Colourisation of Crystals in Predictive Image Synthesis", author = "Andrea Weidlich", year = "2009", month = jan, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Predictive rendering, Crystal rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-thesis/", } @article{bhagvat-09-frusta, title = "GPU Rendering of Relief Mapped Conical Frusta", author = "Deepali Bhagvat and Stefan Jeschke and David 
Cline and Peter Wonka", year = "2009", abstract = "This paper proposes to use relief-mapped conical frusta (cones cut by planes) to skin skeletal objects. Based on this representation, current programmable graphics hardware can perform the rendering with only minimal communication between the CPU and GPU. A consistent definition of conical frusta including texture parametrization and a continuous surface normal is provided. Rendering is performed by analytical ray casting of the relief-mapped frusta directly on the GPU. We demonstrate both static and animated objects rendered using our technique and compare to polygonal renderings of similar quality.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "28", pages = "2131--2139", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bhagvat-09-frusta/", } @book{Habel-09-THB, title = "Real-time Rendering and Animation of Vegetation: Advances in displaying vegetation for interactive applications", author = "Ralf Habel", year = "2009", abstract = "Vegetation rendering and animation in real-time applications still pose a significant problem due to the inherent complexity of plants. Both the high geometric complexity and intricate light transport require specialized techniques to achieve high-quality rendering of vegetation in real time. This thesis presents new algorithms that address various areas of both vegetation rendering and animation. For grass rendering, an efficient algorithm to display grass is introduced. Also, a method to efficiently render leaves is introduced. Leaves exhibit a complex light transport behavior due to subsurface scattering and special attention is given to the translucency of leaves, an integral part of leaf shading. To animate a tree, a novel deformation method based on a structural mechanics model that incorporates the important physical properties of branches is introduced. 
To drive this deformation, an optimized spectral approach that also incorporates the physical properties of branches is used. Additionally, a method to use dynamic skylight models in spherical harmonics precomputed radiance transfer techniques is introduced, allowing to change the skylight parameters in real time. ", isbn = "978-3838104997", pages = "140", publisher = "Suedwestdeutscher Verlag fuer Hochschulschriften", keywords = "Real-time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel-09-THB/", } @talk{jeschke-09-praguetalk, title = "Diffusion Curve Images--- Rendering in 2 and 3 Dimensions", author = "Stefan Jeschke", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. The first part of the talk presents a new Laplacian surface solver for a stable rendering of DCIs. It consists of a robust rasterization technique to transform the algebraic curves to the discrete image domain, and a variable stencil size diffusion solver that solves the minimal surface problem. The solver is proven to converge to the right solution, it is at least as fast as commonly used multigrid solvers, but much simpler to implement, works for arbitrary image resolutions, as well as 8 bit data. The second part of the talk extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically allocates more texture memory for details close to the observer. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. 
The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. Our results show high quality renderings at interactive frame rates.", event = "Konversatorium Technical University of Prague ", location = "Prague", keywords = "Diffusion curves", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-praguetalk/", } @phdthesis{scherzer-thesis, title = "Applications of temporal coherence in real-time rendering", author = "Daniel Scherzer", year = "2009", abstract = "Real-time rendering imposes the challenging task of creating a new rendering of an input scene at least 60 times a second. Although computer graphics hardware has made staggering advances in terms of speed and freedom of programmability, there still exist a number of algorithms that are too expensive to be calculated in this time budget, like exact shadows or an exact global illumination solution. One way to circumvent this hard time limit is to capitalize on temporal coherence to formulate algorithms incremental in time. The main thesis of this work is that temporal coherence is a characteristic of real-time graphics that can be used to redesign well-known rendering methods to become faster, while exhibiting better visual fidelity. To this end we present our adaptations of algorithms from the fields of exact hard shadows, physically correct soft shadows and fast discrete LOD blending, in which we have successfully incorporated temporal coherence. 
Additionally, we provide a detailed context of previous work not only in the field of temporal coherence, but also in the respective fields of the presented algorithms.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "shadows, lod, real-time, image-space", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/scherzer-thesis/", } @talk{WIMMER-2009-ARTR, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. 
I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "7th Eurographics Italian Chapter Conference 2009", location = "Verona, Italy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR/", } @talk{WIMMER-2009-ARTR2, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects. In this talk, I will cover some of the recent advances we had in our group. First, we try to integrate procedural modeling techniques with the new parallel programming paradigms made commonly available through modern GPUs, and map L-system generation onto hardware to accelerate the generation of large L-systems. Then, I'll briefly show some results for really large scale visualization and editing of a huge point-based model consisting of over 1.2 Billion point samples of a Roman catacomb. Finally, I will treat a new approach to handle the classical visibility problem, where we show how to calculate visibility of a whole scene by exploiting the spatial coherence of visibility, thus accelerating the process so it becomes viable for interactive scene design. 
", event = "University of Erlangen Research Seminar", location = "Erlangen, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR2/", } @talk{WIMMER-2009-ARTR3, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2009", abstract = "Real-time rendering is a quickly developing field in computer graphics. Recent advances in graphics hardware make it possible to tackle completely new challenges, and to rethink old ones. While previously, the main focus of real-time rendering lay on classical problems like visibility and level-of-detail rendering, nowadays we see new challenges in the form of interactive procedural content generation, handling of massive amounts of data, and interactive simulation of extremely complex objects like trees. In this talk, I will try to broaden the definition of real-time rendering and give some insights how to address new research challenges. Starting with a few classical problems like rendering accurate shadows, achieving smooth transitions between different levels of detail, and global visibility computations, I will then show a few examples of recent advances in real-time rendering. One challenge is the ever-increasing size of models due to automatic acquisition methods like range scanners. In a new system we have developed, we are able to visualize and interact with datasets of over 1 Billion raw points. Another source of large models is procedural modeling, and we have developed a method to aid designers in creating these models interactively. Finally, vegetation plays an important role in interactive scenes. 
I will show a system to simulate both illumination and animation in such complex vegetation very realistically.", event = "25th Spring Conference on Computer Graphics (SCCG2009)", location = "Budmerice, Slovakia", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-ARTR3/", } @inproceedings{WIMMER-2009-CSR, title = "Casting Shadows in Real Time", author = "Elmar Eisemann and Ulf Assarsson and Michael Schwarz and Michael Wimmer", year = "2009", abstract = "Shadows are crucial for enhancing realism and provide important visual cues. In recent years, many important contributions have been made both for hard shadows and soft shadows. Often spurred by the tremendous increase of computational power and capabilities of graphics hardware, much progress has been made concerning visual quality and speed, making high-quality real-time shadows a reachable goal. But with the growing wealth of available choices, it is particularly difficult to pick the right solution and assess shortcomings. Because currently there is no ultimate approach available, algorithms should be selected in accordance to the context in which shadows are produced. The possibilities range across a wide spectrum; from very approximate but really efficient to slower but accurate, adapted only to smaller or only to larger sources, addressing directional lights or positional lights, or involving GPU- or CPU-heavy computations. This course tries to serve as a guide to better understand limitations and failure cases, advantages and disadvantages, and suitability of the algorithms for different application scenarios. 
We will focus on real-time to interactive solutions but also discuss offline approaches if needed for a better understanding.", booktitle = "ACM SIGGRAPH Asia 2009 Courses", location = "Yokohama, Japan", publisher = "ACM", note = "Lecturer: Daniel Scherzer", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-CSR/", } @talk{WIMMER-2009-VCCG, title = "Visibility Computation in Computer Graphics", author = "Michael Wimmer", year = "2009", abstract = "Visibility computation is an essential part of many computer graphics applications, for example for real-time rendering of very large scenes. Visibility can either be precomputed offline, which is a good strategy for static scenes, or calculated at runtime, which avoids precomputation and works well for dynamic scenes. In this presentation, I will cover the latest advances in both of these principal directions. For visibility precomputation, we have shown that sampling is superior to full geometric approaches for practical applications, due to its efficiency and robustness. For online visibility culling, we show how to make the best possible use of hardware occlusion queries without introducing latency and overhead.", event = "14th Computer Vision Winter Workshop (CVWW2009)", location = "Eibiswald, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/WIMMER-2009-VCCG/", } @article{guerrero-2008-sli, title = "Real-time Indirect Illumination and Soft Shadows in Dynamic Scenes Using Spherical Lights", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer", year = "2008", abstract = "We present a method for rendering approximate soft shadows and diffuse indirect illumination in dynamic scenes. The proposed method approximates the original scene geometry with a set of tightly fitting spheres. In previous work, such spheres have been used to dynamically evaluate the visibility function to render soft shadows. 
In this paper, each sphere also acts as a low-frequency secondary light source, thereby providing diffuse one-bounce indirect illumination. The method is completely dynamic and proceeds in two passes: In a first pass, the light intensity distribution on each sphere is updated based on sample points on the corresponding object surface and converted into the spherical harmonics basis. In a second pass, this radiance information and the visibility are accumulated to shade final image pixels. The sphere approximation allows us to compute visibility and diffuse reflections of an object at interactive frame rates of over 20 fps for moderately complex scenes.", month = oct, journal = "Computer Graphics Forum", number = "8", volume = "27", pages = "2154--2168", keywords = "global illumination, precomputed radiance transfer, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/guerrero-2008-sli/", } @techreport{TR-186-2-08-09, title = "Pixel Accurate Shadows with Shadow Mapping", author = "Christian Luksch", year = "2008", abstract = "High quality shadows generated by shadow mapping is still an extensive problem in realtime rendering. This work summarizes some state-of-the-art techniques to achieve pixel accurate shadows and points out the various problems of generating artifact free shadows. 
Further a demo application has been implemented to compare the different techniques and experiment with alternative approaches.", month = sep, number = "TR-186-2-08-09", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "Shadow Mapping, Deferred shading, Pixel Accurate Shadows, Real time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/TR-186-2-08-09/", } @article{CADIK-2008-EHD, title = "Evaluation of HDR Tone Mapping Methods Using Essential Perceptual Attributes", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2008", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color vision, physiological aspects, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this contribution, we present an overview about the effects of basic image attributes in HDR tone mapping. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical experiments that we have performed to prove the proposed relationship scheme. Moreover, we also present an evaluation of existing tone mapping methods (operators) with regard to these attributes. Finally, the execution of with-reference and without a real reference perceptual experiments gave us the opportunity to relate the obtained subjective results. 
Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping method, but it also sets the stage for well-founded quality comparisons between tone mapping methods. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible. ", month = jun, issn = "0097-8493", journal = "Computers \& Graphics", number = "3", volume = "32", pages = "330--349", keywords = "high dynamic range, tone mapping operators, tone mapping evaluation, image attributes", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/CADIK-2008-EHD/", } @article{SCHERZER-2008-FSR, title = "Frame Sequential Interpolation for Discrete Level-of-Detail Rendering", author = "Daniel Scherzer and Michael Wimmer", year = "2008", abstract = "In this paper we present a method for automatic interpolation between adjacent discrete levels of detail to achieve smooth LOD changes in image space. We achieve this by breaking the problem into two passes: We render the two LOD levels individually and combine them in a separate pass afterwards. The interpolation is formulated in a way that only one level has to be updated per frame and the other can be reused from the previous frame, thereby causing roughly the same render cost as with simple non interpolated discrete LOD rendering, only incurring the slight overhead of the final combination pass. Additionally we describe customized interpolation schemes using visibility textures. The method was designed with the ease of integration into existing engines in mind. It requires neither sorting nor blending of objects, nor does it introduce any constraints in the LOD used. The LODs can be coplanar, alpha masked, animated, impostors, and intersecting, while still interpolating smoothly. 
", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2008)", volume = "27", number = "4", issn = "0167-7055", pages = "1175--1181", keywords = "LOD blending, real-time rendering, levels of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SCHERZER-2008-FSR/", } @techreport{radax-2008-ir, title = "Instant Radiosity for Real-Time Global Illumination", author = "Ingo Radax", year = "2008", abstract = "Global illumination is necessary to achieve realistic images. Although there are plenty methods that focus on solving this problem, most of them are not fast enough for interactive environments. Instant radiosity is a method that approximates the indirect lighting, as part of global illumination, by creating additional light sources. Thereby it is very fast and does not need lot of preprocessing, so it is perfectly fit to be used within real-time requirements. Further techniques based on instant radiosity have extended the method to provide better image quality or faster rendering. So instant radiosity and its derivations can bring us global illumination in real-time.", month = may, number = "TR-186-2-08-15", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "Three-Dimensional Graphics and Realism, shading, radiosity, real time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/radax-2008-ir/", } @inproceedings{Habel_08_SSH, title = "Efficient Spherical Harmonics Lighting with the Preetham Skylight Model", author = "Ralf Habel and Bogdan Mustata and Michael Wimmer", year = "2008", abstract = "We present a fast and compact representation of a skylight model for spherical harmonics lighting, especially for outdoor scenes. This representation allows dynamically changing the sun position and weather conditions on a per frame basis. 
We chose the most used model in real-time graphics, the Preetham skylight model, because it can deliver both realistic colors and dynamic range and its extension into spherical harmonics can be used to realistically light a scene. We separate the parameters of the Preetham skylight models' spherical harmonics extension and perform a polynomial two-dimensional non-linear least squares fit for the principal parameters to achieve both negligible memory and computation costs. Additionally, we execute a domain specific Gibbs phenomena suppression to remove ringing artifacts.", month = apr, publisher = "Eurographics Association", location = "Crete, Greece", issn = "1017-4656", editor = "Katerina Mania and Erik Reinhard", booktitle = "Eurographics 2008 - Short Papers", pages = "119--122", keywords = "Natural Phenomena, Spherical Harmonics, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Habel_08_SSH/", } @article{mattausch-2008-CHC, title = "CHC++: Coherent Hierarchical Culling Revisited", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2008", abstract = "We present a new algorithm for efficient occlusion culling using hardware occlusion queries. The algorithm significantly improves on previous techniques by making better use of temporal and spatial coherence of visibility. This is achieved by using adaptive visibility prediction and query batching. As a result of the new optimizations the number of issued occlusion queries and the number of rendering state changes are significantly reduced. We also propose a simple method for determining tighter bounding volumes for occlusion queries and a method which further reduces the pipeline stalls. The proposed method provides up to an order of magnitude speedup over the previous state of the art. 
The new technique is simple to implement, does not rely on hardware calibration and integrates well with modern game engines.", month = apr, journal = "Computer Graphics Forum (Proceedings Eurographics 2008)", volume = "27", number = "2", issn = "0167-7055", pages = "221--230", keywords = "temporal coherence, dynamic occlusion culling, occlusion queries", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/mattausch-2008-CHC/", } @talk{WIMMER-2008-ART, title = "Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2008", abstract = "This talk will give a brief summary of recent research activities in the field of real-time rendering conducted by the real-time rendering group at the Vienna University of Technology. Highlights are interactive procedural architecture, physically based tree animation and visibility culling.", event = "INRIA Research Seminar", location = "INRIA, Grenoble, France", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-ART/", } @talk{WIMMER-2008-CAR, title = "Current Advances in Real-Time Rendering", author = "Michael Wimmer", year = "2008", abstract = "This talk will give a brief summary of recent research activities in the field of real-time rendering conducted by the real-time rendering group at the Vienna University of Technology. Highlights are interactive procedural architecture, physically based tree animation and visibility culling.", event = "Graphics Lunch", location = "ETH Z\"{u}rich, Switzerland", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-CAR/", } @talk{WIMMER-2008-PVR, title = "Precomputing Visibility for Real-Time Rendering of Large Scenes", author = "Michael Wimmer", year = "2008", abstract = "Visibility computation is an essential part of any real-time rendering pipeline for large scenes. 
Visibility can either be precomputed offline, which is a good strategy for static scenes, or calculated at runtime, which avoids precomputation and works well for dynamic scenes. In this presentation, I will cover the latest advances in both of these principal directions. For visibility precomputation, we have shown that sampling is superior to full geometric approaches for practical applications, due to its efficiency and robustness. For online visibility culling, we show how to make the best possible use of hardware occlusion queries without introducing latency and overhead.", event = "Graphical Visionday 2008", location = "Kopenhagen, Denmark", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/WIMMER-2008-PVR/", } @inproceedings{CHARALAMBOS-2007-HLOD, title = "Optimized HLOD Refinement Driven by Hardware Occlusion Queries", author = "Jean Pierre Charalambos and Jir\'{i} Bittner and Michael Wimmer and Eduardo Romero", year = "2007", abstract = "We present a new method for integrating hierarchical levels of detail (HLOD) with occlusion culling. The algorithm refines the HLOD hierarchy using geometric criteria as well as the occlusion information. For the refinement we use a simple model which takes into account the possible distribution of the visible pixels. The traversal of the HLOD hierarchy is optimized by a new algorithm which uses spatial and temporal coherence of visibility. We predict the HLOD refinement condition for the current frame based on the results from the last frame. This allows an efficient update of the front of termination nodes as well as an efficient scheduling of hardware occlusion queries. Compared to previous approaches, the new method improves on speed as well as image quality. 
The results indicate that the method is very close to the optimal scheduling of occlusion queries for driving the HLOD refinement.", month = nov, isbn = "978-3-540-76855-9", series = "Lecture Notes in Computer Science, volume 4841", publisher = "Springer", location = "Lake Tahoe, Nevada/California", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Paragios, N.; Tanveer, S.-M.; Ju, T.; Liu, Z.; Coquillart, S.; Cruz-Neira, C.; M\"{o}ller, T.; Malzbender, T.", booktitle = "Advances in Visual Computing (Third International Symposium on Visual Computing -- ISVC 2007)", pages = "106--117", keywords = "occlusion queries, levels of detail, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/CHARALAMBOS-2007-HLOD/", } @techreport{knecht-2007-ao, title = "State of the Art Report on Ambient Occlusion", author = "Martin Knecht", year = "2007", abstract = "Ambient occlusion is a shading method which takes light occluded by geometry into account. Since this technique needs to integrate over a hemisphere it was first only used in offline rendering tools. However, the increasing resources of modern graphics hardware, enable us to render ambient occlusion in realtime. The goal of this report is to describe the most popular techniques with respect to realtime rendering. First we introduce how ambient occlusion is defined and then we will explain and categorize the presented techniques. 
", month = nov, number = "TR-186-2-07-13", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "ambient occlusion, global illumination, real time", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/knecht-2007-ao/", } @techreport{TR-186-2-07-09, title = "Rendering Imperfections: Dust, Scratches, Aging,...", author = "Michael Schw\"{a}rzler and Michael Wimmer", year = "2007", abstract = "In order to increase the realism of an image or a scene in a computer graphics application, so-called imperfections are often used during rendering. These are techniques which add details like dirt, scratches, dust or aging effects to the models and textures. Realism is improved through imperfections since computer generated models are usually too perfect to be accepted as realistic by human observers. By making them, for example, dusty and scratched, people can imagine them being part of their real world much more easily. This article gives an overview of currently used imperfections techniques and algorithms. 
Topics like textures, scratches, aging, dust, weathering, lichen growth and terrain erosion are covered.", month = sep, number = "TR-186-2-07-09", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "scratches, dust, imperfections, aging", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-09/", } @inproceedings{Habel_2007_RTT, title = "Physically Based Real-Time Translucency for Leaves", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2007", abstract = "This paper presents a new shading model for real-time rendering of plant leaves that reproduces all important attributes of a leaf and allows for a large number of leaves to be shaded. In particular, we use a physically based model for accurate subsurface scattering on the translucent side of directly lit leaves. For real-time rendering of this model, we formulate it as an image convolution process and express the result in an efficient directional basis that is fast to evaluate. 
We also propose a data acquisition method for leaves that uses off-the-shelf devices.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "253--263", keywords = "Realtime Rendering, Natural Scene Rendering, Physically Based Rendering, Natural Phenomena", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_RTT/", } @inproceedings{JESCHKE-2007-ISC, title = "Interactive Smooth and Curved Shell Mapping", author = "Stefan Jeschke and Stephan Mantler and Michael Wimmer", year = "2007", abstract = "Shell mapping is a technique to represent three-dimensional surface details. This is achieved by extruding the triangles of an existing mesh along their normals, and mapping a 3D function (e.g., a 3D texture) into the resulting prisms. Unfortunately, such a mapping is nonlinear. Previous approaches perform a piece-wise linear approximation by subdividing the prisms into tetrahedrons. However, such an approximation often leads to severe artifacts. In this paper we present a correct (i.e., smooth) mapping that does not rely on a decomposition into tetrahedrons. We present an efficient GPU ray casting algorithm which provides correct parallax, self-occlusion, and silhouettes, at the cost of longer rendering times. The new formulation also allows modeling shells with smooth curvatures using Coons patches within the prisms. Tangent continuity between adjacent prisms is guaranteed, while the mapping itself remains local, i.e. every curved prism content is modeled at runtime in the GPU without the need for any precomputation. 
This allows instantly replacing animated triangular meshes with prism-based shells.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "351--360", keywords = "Display algorithms, Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/JESCHKE-2007-ISC/", } @inproceedings{Scherzer-2007-PCS, title = "Pixel-Correct Shadow Maps with Temporal Reprojection and Shadow Test Confidence", author = "Daniel Scherzer and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "Shadow mapping suffers from spatial aliasing (visible as blocky shadows) as well as temporal aliasing (visible as flickering). Several methods have already been proposed for reducing such artifacts, but so far none is able to provide satisfying results in real time. This paper extends shadow mapping by reusing information of previously rasterized images, stored efficiently in a so-called history buffer. This buffer is updated in every frame and then used for the shadow calculation. In combination with a special confidence-based method for the history buffer update (based on the current shadow map), temporal and spatial aliasing can be completely removed. The algorithm converges in about 10 to 60 frames and during convergence, shadow borders are sharpened over time. Consequently, in case of real-time frame rates, the temporal shadow adaption is practically imperceptible. The method is simple to implement and is as fast as uniform shadow mapping, incurring only the minor speed hit of the history buffer update. 
It works together with advanced filtering methods like percentage closer filtering and more advanced shadow mapping techniques like perspective or light space perspective shadow maps.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "45--50", keywords = "shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Scherzer-2007-PCS/", } @inproceedings{GIEGL-2007-FVS, title = "Fitted Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Too little shadow map resolution and resulting undersampling artifacts, perspective and projection aliasing, have long been a fundamental problem of shadowing scenes with shadow mapping. We present a new smart, real-time shadow mapping algorithm that virtually increases the resolution of the shadow map beyond the GPU hardware limit where needed. We first sample the scene from the eye-point on the GPU to get the needed shadow map resolution in different parts of the scene. We then process the resulting data on the CPU and finally arrive at a hierarchical grid structure, which we traverse in kd-tree fashion, shadowing the scene with shadow map tiles where needed. Shadow quality can be traded for speed through an intuitive parameter, with a homogeneous quality reduction in the whole scene, down to normal shadow mapping. This allows the algorithm to be used on a wide range of hardware.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. 
Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "159--168", keywords = "real-time shadowing, shadows, shadow maps, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-FVS/", } @inproceedings{MATTAUSCH-2007-OSP, title = "Optimized Subdivisions for Preprocessed Visibility", author = "Oliver Mattausch and Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2007", abstract = "This paper describes a new tool for preprocessed visibility. It puts together view space and object space partitioning in order to control the render cost and memory cost of the visibility description generated by a visibility solver. The presented method progressively refines view space and object space subdivisions while minimizing the associated render and memory costs. Contrary to previous techniques, both subdivisions are driven by actual visibility information. We show that treating view space and object space together provides a powerful method for controlling the efficiency of the resulting visibility data structures.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "335--342", keywords = "visibility preprocessing, potentially visible sets, view cells", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MATTAUSCH-2007-OSP/", } @habilthesis{WIMMER-2007-RTR, title = "Real-Time Rendering", author = "Michael Wimmer", year = "2007", abstract = "Real-time rendering is concerned with the display of computer-generated images at rates which let a human observer believe that she is looking at a smooth animation. This thesis deals with several contributions to the field of real-time rendering that improve either the performance of rendering algorithms or the quality of the displayed images. 
Light-Space Perspective Shadow Maps improve the quality of real-time rendering by providing better looking shadow rendering, one of the most popular research topics in real-time rendering. Conversely, Coherent Hierarchical Culling and Guided Visibility Sampling improve the performance of real-time rendering through visibility culling. One is designed for runtime computation and the other for preprocessing. Finally, real-time rendering is extended from traditional polygon rendering to a new type of dataset that has recently gained importance, namely point clouds, especially huge datasets that cannot be loaded into main memory. ", month = may, URL = "https://www.cg.tuwien.ac.at/research/publications/2007/WIMMER-2007-RTR/", } @inproceedings{GIEGL-2007-QV1, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Shadowing scenes by shadow mapping has long suffered from the fundamental problem of undersampling artifacts due to too low shadow map resolution, leading to so-called perspective and projection aliasing. In this paper we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. 
The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = apr, isbn = "978-1-59593-628-8", publisher = "ACM Press", location = "Seattle, WA", address = "New York, NY, USA", booktitle = "Proceedings of ACM SIGGRAPH 2007 Symposium on Interactive 3D Graphics and Games", pages = "65--72", keywords = "shadow maps, shadows, real-time shadowing, large environments", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-QV1/", } @misc{MANTLER-2007-DMBBC, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = apr, event = "Symposium on Interactive 3D Graphics and Games", note = "Poster presented at Symposium on Interactive 3D Graphics and Games (2007-04-30--2007-05-02)", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MANTLER-2007-DMBBC/", } @article{GIEGL-2007-UNP, title = "Unpopping: Solving the Image-Space Blend Problem for Smooth Discrete LOD Transitions", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "This paper presents a new, simple and practical algorithm to avoid artifacts when switching between discrete levels of detail (LOD) by smoothly blending LOD representations in image space. We analyze the alternatives of conventional alpha-blending and so-called late-switching (the switching of LODs ``far enough'' from the eye-point), widely thought to solve the LOD switching discontinuity problem, and conclude that they either do not work in practice, or defeat the concept of LODs. In contrast we show that our algorithm produces visually pleasing blends for static and animated discrete LODs, for discrete LODs with different types of LOD representations (e.g. 
billboards and meshes) and even to some extent totally different objects with similar spatial extent, with a very small runtime overhead.", month = mar, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "26", pages = "46--49", keywords = "popping, LOD switching, levels of detail, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-UNP/", } @article{Habel_2007_IAG, title = "Instant Animated Grass", author = "Ralf Habel and Michael Wimmer and Stefan Jeschke", year = "2007", abstract = "This paper introduces a technique for rendering animated grass in real time. The technique uses front-to-back compositing of implicitly defined grass slices in a fragment shader and therefore significantly reduces the overhead associated with common vegetation rendering systems. We also introduce a texture-based animation scheme that combines global wind movements with local turbulences. Since the technique is confined to a fragment shader, it can be easily integrated into any rendering system and used as a material in existing scenes. ", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", note = "ISBN 978-80-86943-00-8", issn = "1213-6972", pages = "123--128", keywords = "Real-time Rendering, Natural Scene Rendering, Natural Phenomena, GPU Programming", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_IAG/", } @techreport{TR-186-2-07-01, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. 
Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = jan, number = "TR-186-2-07-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-01/", } @inproceedings{zotti-2007-wscg, title = "A Critical Review of the Preetham Skylight Model", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2007", abstract = "The Preetham skylight model is currently one of the most widely used analytic models of skylight luminance in computer graphics. Despite its widespread use, very little work has been carried out to verify the results generated by the model, both in terms of the luminance patterns it generates, and in terms of numerical reliability and stability. We have implemented the model in Mathematica, visualise and discuss those parameter ranges which exhibit problematic behaviour, and compare the computed luminance values with references from literature, especially the 15 standard skylight distributions of the CIE 2003 Standard General Sky. 
We also performed luminance measurements on real cloudless skies, and compare these measurements to the predictions of the model.", month = jan, isbn = "978-80-86943-02-2", publisher = "University of West Bohemia", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG ", pages = "23--30", keywords = "Verification, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-wscg/", } @xmascard{Habel_07_xms, title = "X-mas 2007", author = "Ralf Habel", year = "2007", abstract = "Die gezeigte Szene ist mit einer Poisson-gefilterten Shadow Map dynamisch beleuchtet und wird mittels DirectX 10 dargestellt. Die Tanne besteht aus insgesamt 60.032 teilweise transparenten Dreiecken. Alle Oberfl\"{a}chen sind mit Normal Maps versehen, die bei der Tanne und dem schneebedeckten Gel\"{a}nde mit multi-bounce Ambient Occlusion kombiniert werden. Ein modifizierter Reinhard Tone Mapper mit Glare-Filter sorgt f\"{u}r eine realistische Darstellung bei 60 Bildern pro Sekunde. The shown scene is dynamically lit with a Poisson-filtered shadow map and displayed using DirectX 10. The fir tree consists of 60,032 partly transparent triangles. All surfaces are normal mapped, which is combined with multi-bounce ambient occlusion for the fir tree and the snow-covered terrain. A modified Reinhard tone mapper with a glare filter realistically displays the scene at 60 frames per second.", keywords = "x-mas", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_07_xms/", } @talk{mattausch-2007-iav, title = "1. Adaptive Visibility-Driven View Cell Construction", author = "Oliver Mattausch", year = "2007", abstract = "We present a new method for the automatic partitioning of view space into a multi-level view cell hierarchy. We use a cost-based model in order to minimize the average rendering time. 
Unlike previous methods, our model takes into account the actual visibility in the scene, and the partition is not restricted to planes given by the scene geometry. We show that the resulting view cell hierarchy works for different types of scenes and gives lower average rendering time than previously used methods.", event = "Konversatorium", location = "Prague University of Technology", keywords = "visibility, view cells", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/mattausch-2007-iav/", } @article{neumann-2006-gamma, title = "Accurate Display Gamma Functions based on Human Observation", author = "Attila Neumann and Alessandro Artusi and L\'{a}szl\'{o} Neumann and Georg Zotti and Werner Purgathofer", year = "2007", abstract = "This paper describes an accurate method to obtain the Tone Reproduction Curve (TRC) of display devices without using a measurement device. It is an improvement of an existing technique based on human observation, solving its problem of numerical instability and resulting in functions in log--log scale which correspond better to the nature of display devices. 
We demonstrate the efficiency of our technique on different monitor technologies, comparing it with direct measurements using a spectrophotometer.", issn = "0361-2317", journal = "Color Research \& Applications", note = "2006 angenommen, 2007 erschienen", number = "4", volume = "32", pages = "310--319", keywords = "Generalized Gamma Function, Colour Reproduction, Display Measurement, Human Visual System, Spatial Vision", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/neumann-2006-gamma/", } @talk{WIMMER-2007-GAR, title = "Gametools: Advanced Rendering Effects for Next-Gen Engines", author = "Michael Wimmer", year = "2007", abstract = "The GameTools Project is an EU project from the 6th Framework Programme that brings together leading European computer graphics experts from universities in Austria, France, Hungary and Spain with European industrial partners from the fields of computer game development and virtual reality to create next generation real-time 3D libraries for Geometry, Visibility and Global Illumination for the PC platform, with an extension to consoles PS2, XBox, PS3, XBox 360 planned. With the project now completed after 3 years, this talk will introduce you to the advanced technology available partly as Open Source, partly under licensing. The project comprises technologies such as continuous multiresolution models for animated characters, massive tree rendering, robust PVS generation for visibility determination of arbitrarily large game levels, and real-time global illumination effects such as soft shadows, real-time radiosity, caustics, cloud rendering, and many more. 
The effects created in the GameTools project are available as plugins that can be incorporated into any game engine, and are demonstrated with the Open Source Ogre engine.", event = "Games Convention Developers Conference 2007", location = "Leipzig, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/WIMMER-2007-GAR/", } @phdthesis{zotti-2007-PhD, title = "Computer Graphics in Historical and Modern Sky Observations", author = "Georg Zotti", year = "2007", abstract = "This work describes work done in three areas of research where sky observations meet computer graphics. The whole topic covers several millennia of human history and posed combined challenges from fields including archaeology, astronomy, cultural heritage, digital image processing and computer graphics. The first part presents interdisciplinary work done in the fields of archaeo-astronomy, visualisation and virtual reconstruction. A novel diagram has been developed which provides an intuitive, easy visualisation to investigate archaeological survey maps for evidence of astronomically motivated orientation of buildings. This visualisation was developed and first applied to a certain class of neolithic circular structures in Lower Austria in order to investigate the idea of solar orientation of access doorways. This diagram and its intuitive interpretation allowed the author to set up a new hypothesis about practical astronomical activities in the middle neolithic period in central Europe. How virtual reconstructions of these buildings characteristic for a short time during the neolithic epoch can be combined with the excellent sky simulation of a modern planetarium to communicate these results to a broader audience is described thereafter. The second part of this work describes a certain class of historical scientific instruments for sky observations and its reconstruction with methods of computer graphics. 
Long after the stone age, in the Middle Ages, the astrolabe was the most celebrated instrument for celestial observations and has been explained in contemporary literature, usually with the help of precomputed tables for a certain size or kind of instrument. Today, historical exhibitions frequently present one of these instruments, but its various applications are hard to explain to the general audience without hands-on demonstration. For this challenge from the cultural heritage domain, an approach using the idea of procedural modelling is presented. Here, a computer graphics model is not statically drawn but specified by parametrised plotting functions, which can then be repeatedly executed with different parameters to create the final model. This approach is demonstrated to provide a very flexible solution which can immediately be applied to specific needs just by tweaking a few parameters, instead of having to repetitively draw the whole model manually. From the two-dimensional procedural model, 3D models can be easily created, and even the production of wooden instruments on a Laser engraver/plotter is demonstrated. The third and longest part deals with methods of sky simulation and rendering in the domain of computer graphics. In this discipline, modelling of skylight and atmospheric effects has developed tremendously over the last two decades, which is covered by an extensive survey of literature from the computer graphics and also atmosphere physics domains. The requirements of physically correct or at least plausible rendering include realistic values for sky brightness. Measurements performed with a luminance meter on a clear sky in order to verify the currently most widely used analytic skylight model [Preetham 1999] shows however its limited applicability. There are two classical groups of clear-sky models: numerical simulations of scattering in the atmosphere, and fast analytical models. 
Recently, another method for more realistic looking skylight models has been developed: digital images taken with a fisheye lens are combined into high dynamic range images which can be used for scene illumination and as sky background. These images can be calibrated by photometric measurements of absolute luminance values. Long-time exposures allow to apply this system to quantitative investigations of sky brightness, sky colours, and also nocturnal light pollution by artificial illumination. Results and other applications of the system are described, and the pipeline for creating such images is described in the appendix. This work closes with some notes of future directions of research.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "Astrolabe, Archaeo-Astronomy, Skydome Visualisation, Cultural Heritage, Sky Measurements", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-PhD/", } @incollection{GIEGL-2006-QVS, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2006", abstract = "In this article we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. 
The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = dec, booktitle = "ShaderX 5 -- Advanced Rendering Techniques", editor = "Wolfgang Engel", isbn = "1-58450-499-4", publisher = "Charles River Media", series = "ShaderX", volume = "5", keywords = "shadows, shadow mapping, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/GIEGL-2006-QVS/", } @inproceedings{Mantler-06-landscape, title = "Interactive Landscape Visualization Using GPU Ray Casting", author = "Stephan Mantler and Stefan Jeschke", year = "2006", abstract = "This paper demonstrates the simple yet effective usage of height fields for interactive landscape visualizations using a ray casting approach implemented in the pixel shader of modern graphics cards. The rendering performance is output sensitive, i.e., it scales with the number of pixels rather than the complexity of the landscape. Given a height field of a terrain and a topographic map or similar data as input, the vegetation cover is extracted and stored on top of the height field in a preprocess, enhancing the terrain with forest canopies or other mesostructure. In addition, enhanced illumination models like shadowing and ambient occlusion can be calculated at runtime with reasonable computational cost, which greatly enhances the scene realism. 
Finally, including the presented technique into existing rendering systems is relatively simple, mainly consisting of data preparation and pixel shader programming.", month = nov, booktitle = "Proceedings of Graphite 2006", keywords = "real-time rendering, gpu ray casting", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Mantler-06-landscape/", } @inproceedings{wilkie-2006-dfs, title = "A Reflectance Model for Diffuse Fluorescent Surfaces", author = "Alexander Wilkie and Andrea Weidlich and Caroline Larboulette and Werner Purgathofer", year = "2006", abstract = "Fluorescence is an interesting and visually prominent effect, which has not been fully covered by Computer Graphics research so far. While the physical phenomenon of fluorescence has been addressed in isolation, the actual reflection behaviour of real fluorescent surfaces has never been documented, and no analytical BRDF models for such surfaces have been published yet. This paper aims to illustrate the reflection properties typical for diffuse fluorescent surfaces, and provides a BRDF model based on a layered microfacet approach that mimics them.", month = nov, isbn = "1-59593-564-9", location = "Kuala Lumpur, Malaysia", booktitle = "Proceedings of Graphite 2006", pages = "321--328", keywords = "Fluorescence, Analytical BRDF models", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/wilkie-2006-dfs/", } @inproceedings{zotti-2006-pla, title = "Using Virtual Reconstructions in a Planetarium for Demonstrations in Archaeo-Astronomy", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2006", abstract = "In the last decades, archaeologists in central Europe have found traces of enigmatic neolithic circular building structures buried in the soil. Recent studies indicate that the orientation of many of their doorways may have been chosen with an astronomical background in mind. 
This paper explains the use of virtual reconstructions of these buildings from archaeological data, in combination with a simulation of the sky of that time in a Planetarium, to present the astronomical findings to the public.", month = nov, isbn = "963-9495-89-1", publisher = "Pannonian University Press", location = "Eger", editor = "Cecilia Sik Lanyi ", booktitle = "Third Central European Multimedia and Virtual Reality Conference (Proc. CEMVRC2006)", pages = "43--51", keywords = "Virtual Reality, Public Dissemination, Archaeo-Astronomy", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/zotti-2006-pla/", } @inproceedings{CADIK-2006-IAQ, title = "Image Attributes and Quality for Evaluation of Tone Mapping Operators", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2006", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color science, physiology, neurology, psychology, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this paper, we present an overview of image quality attributes of different tone mapping methods. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical tests that we have performed to prove the proposed relationship scheme. We also present the evaluation of existing tone mapping methods with regard to these attributes. 
Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping operator, but it also sets the stage for well-founded quality comparisons between tone mapping operators. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible at all.", month = oct, publisher = "National Taiwan University Press", location = "Taipe, Taiwan", booktitle = "Proceedings of Pacific Graphics 2006 (14th Pacific Conference on Computer Graphics and Applications)", pages = "35--44", keywords = "tone mapping evaluation, tone mapping, high-dynamic range images", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/CADIK-2006-IAQ/", } @article{WONKA-2006-GVS, title = "Guided Visibility Sampling", author = "Peter Wonka and Michael Wimmer and Kaichi Zhou and Stefan Maierhofer and Gerd Hesina and Alexander Reshetov", year = "2006", abstract = "This paper addresses the problem of computing the triangles visible from a region in space. The proposed aggressive visibility solution is based on stochastic ray shooting and can take any triangular model as input. We do not rely on connectivity information, volumetric occluders, or the availability of large occluders, and can therefore process any given input scene. The proposed algorithm is practically memoryless, thereby alleviating the large memory consumption problems prevalent in several previous algorithms. The strategy of our algorithm is to use ray mutations in ray space to cast rays that are likely to sample new triangles. 
Our algorithm improves the sampling efficiency of previous work by over two orders of magnitude.", month = jul, journal = "ACM Transactions on Graphics", volume = "25", number = "3", note = "Proceedings ACM SIGGRAPH 2006", issn = "0730-0301", doi = "10.1145/1141911.1141914", pages = "494--502", keywords = "visibility, visibility sampling, occlusion culling, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WONKA-2006-GVS/", } @inproceedings{MATTAUSCH-2006-AVC, title = "Adaptive Visibility-Driven View Cell Construction", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2006", abstract = "We present a new method for the automatic partitioning of view space into a multi-level view cell hierarchy. We use a cost-based model in order to minimize the average rendering time. Unlike previous methods, our model takes into account the actual visibility in the scene, and the partition is not restricted to planes given by the scene geometry. We show that the resulting view cell hierarchy works for different types of scenes and gives lower average rendering time than previously used methods.", month = jun, isbn = "3-90567-335-5", publisher = "Eurographics Association", organization = "Eurographics", location = "Nicosia, Cyprus", editor = "Wolfgang Heidrich and Tomas Akenine-Moller", booktitle = "Rendering Techniques 2006 (Proceedings Eurographics Symposium on Rendering)", pages = "195--206", keywords = "view cells, real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/MATTAUSCH-2006-AVC/", } @talk{havran-2006-tut, title = "Efficient Sorting and Searching in Rendering Algorithms", author = "Vlastimil Havran and Jir\'{i} Bittner", year = "2006", abstract = "In this tutorial we would like to highlight the connection between rendering algorithms and sorting and searching as classical problems studied in computer science. 
We will provide both theoretical and empirical evidence that for many rendering techniques most time is spent by sorting and searching. In particular we will discuss problems and solutions for visibility computation, density estimation, and importance sampling. For each problem we mention its specific issues such as dimensionality of the search domain or online versus offline searching. We will present the underlying data structures and their enhancements in the context of specific rendering algorithms such as ray shooting, photon mapping, and hidden surface removal.", event = "Eurographics 2006 Tutorial T4", keywords = "Visibility, Sorting and Searching, Rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/havran-2006-tut/", } @talk{Purgathofer-2006-VRVis, title = "Some Rendering Research Results", author = "Werner Purgathofer", year = "2006", event = "VRVis-Forum", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Purgathofer-2006-VRVis/", } @talk{WIMMER-2006-PSM, title = "Practical Shadow Mapping", author = "Michael Wimmer", year = "2006", event = "Games Convention Developers Conference 2006", location = "Leipzig, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-PSM/", } @talk{WIMMER-2006-SIV, title = "Sampling in Visibility", author = "Michael Wimmer", year = "2006", event = "Ayia Napa Summer Seminar on Recent Results in Rendering and Modeling in Computer Graphics", location = "Aiya Napa, Cyprus", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-SIV/", } @inproceedings{jeschke-05-ISTAR, title = "Image-based Representations for Accelerated Rendering of Complex Scenes", author = "Stefan Jeschke and Michael Wimmer and Werner Purgathofer", year = "2005", abstract = "This paper gives an overview of image-based representations commonly used for reducing the geometric complexity of a scene description in order to accelerate the rendering process. 
Several different types of representations and ways for using them have been presented, which are classified and discussed here. Furthermore, the overview includes techniques for accelerating the rendering of static scenes or scenes with animations and/or dynamic lighting effects. The advantages and drawbacks of the different approaches are illuminated, and unsolved problems and roads for further research are shown.", month = aug, booktitle = "EUROGRAPHICS 2005 State of the Art Reports", editor = "Y. Chrysanthou and M. Magnor", location = "Dublin, Ireland", publisher = "The Eurographics Association and The Image Synthesis Group", organization = "EUROGRAPHICS", pages = "1--20", keywords = "Impostors, Display Algorithms, Three Dimensional Graphics and Realism, Color, Shading, Shadowing and Texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ISTAR/", } @inproceedings{bittner-2005-egsr, title = "Fast Exact From-Region Visibility in Urban Scenes", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2005", abstract = "We present a fast exact from-region visibility algorithm for 2.5D urban scenes. The algorithm uses a hierarchical subdivision of line-space for identifying visibility interactions in a 2D footprint of the scene. Visibility in the remaining vertical dimension is resolved by testing for the existence of lines stabbing sequences of virtual portals. Our results show that exact analytic from-region visibility in urban scenes can be computed at times comparable or even superior to recent conservative methods. 
", month = jun, isbn = "3-905673-23-1", publisher = "Eurographics Association", organization = "Eurographics", location = "Konstanz, Germany", editor = "Kavita Bala and Philip Dutr\'{e}", booktitle = "Rendering Techniques 2005 (Proceedings Eurographics Symposium on Rendering)", pages = "223--230", keywords = "real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bittner-2005-egsr/", } @inproceedings{havran-2005-sccg, title = "On Cross-Validation and Resampling of BRDF Data Measurements", author = "Vlastimil Havran and Attila Neumann and Georg Zotti and Werner Purgathofer and Hans-Peter Seidel", year = "2005", abstract = "We discuss the validation of BTF data measurements by means used for BRDF measurements. First, we show how to apply the Helmholtz reciprocity and isotropy for a single data set. Second, we discuss a cross-validation for BRDF measurement data obtained from two different measurement setups, where the measurements are not calibrated or the level of accuracy is not known. We show the practical problems encountered and the solutions we have used to validate physical setup for four material samples. We describe a novel coordinate system suitable for resampling the BRDF data from one data set to another data set. 
Further, we show how the perceptually uniform color space CIELab is used for cross-comparison of BRDF data measurements, which were not calibrated.", month = may, location = "Budmerice, Slovakia", booktitle = "Proceedings SCCG 2005", pages = "161--168", keywords = "reflectance function, BRDF data acquisition, BRDF data validation, predictive rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-sccg/", } @inproceedings{jeschke-05-AIP, title = "Automatic Impostor Placement for Guaranteed Frame Rates and Low Memory Requirements", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann and Werner Purgathofer", year = "2005", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to reduce the rendering time needed for displaying complex scenes. However, a big problem is the huge amount of memory required for impostors. This paper presents an algorithm that automatically places impostors into a scene so that a desired frame rate and image quality is always met, while at the same time not requiring enormous amounts of impostor memory. The low memory requirements are provided by a new placement method and through the simultaneous use of other acceleration techniques like visibility culling and geometric levels of detail.", month = apr, isbn = "1-59593-013-2", publisher = "ACM Press", organization = "ACM", location = "Washington DC", booktitle = "Proceedings of ACM SIGGRAPH 2005 Symposium on Interactive 3D Graphics and Games", pages = "103--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-AIP/", } @phdthesis{jeschke-05-ARI, title = "Accelerating the Rendering Process Using Impostors", author = "Stefan Jeschke", year = "2005", abstract = "The interactive rendering of three-dimensional geometric models is a research area of big interest in computer graphics. 
The generation of a fluent animation for complex models, consisting of multiple million primitives, with more than 60 frames per second is a special challenge. Possible applications include ship-, driving- and flight simulators, virtual reality and computer games. Although the performance of common computer graphics hardware has dramatically increased in recent years, the demand for more realism and complexity in common scenes is growing even faster. This dissertation is about one approach for accelerating the rendering of such complex scenes. We take advantage of the fact that the appearance of distant scene parts hardly changes for several successive output images. Those scene parts are replaced by precomputed image-based representations, so-called impostors. Impostors are very fast to render while maintaining the appearance of the scene part as long as the viewer moves within a bounded viewing region, a so-called view cell. However, unsolved problems of impostors are the support of a satisfying visual quality with reasonable computational effort for the impostor generation, as well as very high memory requirements for impostors for common scenes. Until today, these problems are the main reason why impostors are hardly used for rendering acceleration. This thesis presents two new impostor techniques that are based on partitioning the scene part to be represented into image layers with different distances to the observer. A new error metric allows a guarantee for a minimum visual quality of an impostor even for large view cells. Furthermore, invisible scene parts are efficiently excluded from the representation without requiring any knowledge about the scene structure, which provides a more compact representation. One of the techniques combines every image layer separately with geometric information. This allows a fast generation of memory-efficient impostors for distant scene parts. 
In the other technique, the geometry is independent from the depth layers, which allows a compact representation for near scene parts. The second part of this work is about the efficient usage of impostors for a given scene. The goal is to guarantee a minimum frame rate for every view within the scene while at the same time minimizing the memory requirements for all impostors. The presented algorithm automatically selects impostors and view cells so that for every view, only the most suitable scene parts are represented as impostors. Previous approaches generated numerous similar impostors for neighboring view cells, thus wasting memory. The new algorithm overcomes this problem. i The simultaneous use of additional acceleration techniques further reduces the required impostor memory and allows making best use of all available techniques at the same time. The approach is general in the sense that it can handle arbitrary scenes and a broad range of impostor techniques, and the acceleration provided by the impostors can be adapted to the bottlenecks of different rendering systems. In summary, the provided techniques and algorithms dramatically reduce the required impostor memory and simultaneously guarantee a minimum output image quality. 
This makes impostors useful for numerous scenes and applications where they could hardly be used before.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "image-based rendering, impostors, rendering acceleration", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ARI/", } @incollection{Wimmer-2005-HOQ, title = "Hardware Occlusion Queries Made Useful", author = "Michael Wimmer and Jir\'{i} Bittner", year = "2005", abstract = "Hardware occlusion queries make it possible for an application to ask the 3D API whether or not any pixels would be drawn if a particular object was rendered. With this feature, applications can check to see whether or not the bounding boxes of complex objects are visible; if the bounds are occluded, the application can skip drawing those objects. In this chapter, we present a simple and powerful algorithm to solve the problem of latency and CPU/GPU stall typically associated with a naive usage of hardware occlusion queries.", month = mar, booktitle = "GPU Gems 2: Programming Techniques for High-Performance Graphics and General-Purpose Computation", editor = "Matt Pharr and Randima Fernando", isbn = "0-32133-559-7", publisher = "Addison-Wesley", keywords = "occlusion culling, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Wimmer-2005-HOQ/", } @inproceedings{zotti-2005-lum, title = "Approximating Real-World Luminaires with OpenGL Lights", author = "Georg Zotti and Attila Neumann and Werner Purgathofer", year = "2005", abstract = "Dynamic illumination in real-time applications using OpenGL is still usually done with the classical light forms of point lights, directional lights and spot lights. For applications simulating real-world scenes, e.g. 
architectural planning, finding parameter sets for these simple lights to match real-world luminaires is required for realistic work. This paper describes a simple approach to process a luminaire data file in IESNA IES-LM63-95 format to create an approximation using at most 2 OpenGL lights to represent one luminaire.", month = feb, isbn = "80-903100-9-5", publisher = "UNION press", organization = "University of West Bohemia", note = "only on conference CD-ROM", location = "Plzen", address = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2005 Short Paper Proceedings", pages = "49--52", keywords = "interactive illumination planning, OpenGL, Real-world luminaires", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/zotti-2005-lum/", } @inproceedings{havran-2005-egsr, title = "Ray Maps for Global Illumination", author = "Vlastimil Havran and Jir\'{i} Bittner and Robert Herzog and Hans-Peter Seidel", year = "2005", abstract = "We describe a novel data structure for representing light transport called ray map. The ray map extends the concept of photon maps: it stores not only photon impacts but the whole photon paths. We demonstrate the utility of ray maps for global illumination by eliminating boundary bias and reducing topological bias of density estimation in global illumination. Thanks to the elimination of boundary bias we could use ray maps for fast direct visualization with the image quality being close to that obtained by the expensive final gathering step. We describe in detail our implementation of the ray map using a lazily constructed kD-tree. 
We also present several optimizations bringing the ray map query performance close to the performance of the photon map.", booktitle = "Eurographics Symposium on Rendering", pages = "43--54", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-egsr/", } @talk{Purgathofer-2005-Ros, title = "Realtime Rendering Algorithmen", author = "Werner Purgathofer", year = "2005", abstract = "Die enorme Entwicklung der Graphikkarten f\"{u}r PCs hat die Computergraphik-Programmierung weitreichend beeinflusst. Das effiziente Ausnutzen der sehr hohen Hardware-Leistung erm\"{o}glicht es, immer komplexere Szenen in Echtzeit zu rendern. Mehrere Strategien wurden entwickelt: die optimale Verteilung der Arbeit zwischen CPU und GPU, das fr\"{u}hzeitige Entfernen sicher nicht sichtbarer Objekte (Visibility Culling), geometrische Vereinfachungen, Texturen verschiedener Art und bildbasierte Bilderstellungsverfahren (Image-based Rendering). Der Vortrag gibt einen \"{U}berblick \"{u}ber diese Strategien und erkl\"{a}rt ausgew\"{a}hlte Algorithmen etwas detaillierter.", event = " ", location = "Universit\"{a}t Rostock", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Purgathofer-2005-Ros/", } @talk{Purgathofer-2005-Tub, title = "Some Speedup Techniques for Rendering", author = "Werner Purgathofer", year = "2005", abstract = "Recent developments of graphics hardware for PCs have heavily influenced computer graphics programming. Making optimal use of the very efficient hardware features enables real-time rendering of more and more complex scenes. Several strategies are exploited: optimal workload distribution between CPU and GPU, visibility culling, geometric simplifications, textures, and image-based rendering. 
The talk gives an overview of these strategies and describes some results from the Vienna Graphics Group in more detail.", event = " ", location = "Universit\"{a}t T\"{u}bingen", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Purgathofer-2005-Tub/", } @article{Bittner-2004-CHC, title = "Coherent Hierarchical Culling: Hardware Occlusion Queries Made Useful", author = "Jir\'{i} Bittner and Michael Wimmer and Harald Piringer and Werner Purgathofer", year = "2004", abstract = "We present a simple but powerful algorithm for optimizing the usage of hardware occlusion queries in arbitrary complex scenes. Our method minimizes the number of issued queries and reduces the delays due to the latency of query results. We reuse the results of the occlusion queries from the last frame in order to initiate and schedule the queries in the next frame. This is done by processing nodes of a spatial hierarchy in front-to-back order, interleaving occlusion queries with the rendering of certain previously visible nodes. The proposed scheduling of the queries makes use of spatial and temporal coherence of visibility. Despite its simplicity, the algorithm achieves good culling efficiency for scenes of various characteristics. The implementation of the algorithm is straightforward, and it can be easily integrated in existing real-time rendering packages using various spatial data structures.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", note = "Proceedings EUROGRAPHICS 2004", issn = "0167-7055", pages = "615--624", keywords = "occlusion query, visibility, real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Bittner-2004-CHC/", } @inproceedings{Wilkie-2004-AMS, title = "An Analytical Model for Skylight Polarisation", author = "Alexander Wilkie and Robert F. 
Tobler and Christiane Ulbricht and Georg Zotti and Werner Purgathofer", year = "2004", abstract = "Under certain circumstances the polarisation state of the illumination can have a significant influence on the appearance of scenes; outdoor scenes with specular surfaces -- such as water bodies or windows -- under clear, blue skies are good examples of such environments. In cases like that it can be essential to use a polarising renderer if a true prediction of nature is intended, but so far no polarising skylight models have been presented. This paper presents a plausible analytical model for the polarisation of the light emitted from a clear sky. Our approach is based on a suitable combination of several components with well-known characteristics, and yields acceptable results in considerably less time than an exhaustive simulation of the underlying atmospheric scattering phenomena would require.", month = jun, isbn = "3-905673-12-6", publisher = "Eurographics Association", editor = "Alexander Keller and Henrik Wann Jensen", booktitle = "Proceedings of the Eurographics Symposium on Rendering", pages = "387--399", keywords = "skylight rendering, polarisation", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wilkie-2004-AMS/", } @inproceedings{Wimmer-2004-LSPM, title = "Light Space Perspective Shadow Maps", author = "Michael Wimmer and Daniel Scherzer and Werner Purgathofer", year = "2004", abstract = "In this paper, we present a new shadow mapping technique that improves the quality of perspective and uniform shadow maps. Our technique uses a perspective transform specified in light space which allows treating all lights as directional lights and does not change the direction of the light sources. This gives all the benefits of the perspective mapping but avoids the problems inherent in perspective shadow mapping like singularities in post-perspective space, missed shadow casters etc. 
Furthermore, we show that both uniform and perspective shadow maps distribute the perspective aliasing error that occurs in shadow mapping unequally over the available z-range. We therefore propose a transform that equalizes this error and gives equally pleasing results for near and far viewing distances. Our method is simple to implement, requires no scene analysis and is therefore as fast as uniform shadow mapping.", month = jun, isbn = "3-905673-12-6", publisher = "Eurographics Association", organization = "Eurographics", location = "Norrk\"{o}ping, Sweden", editor = "Alexander Keller and Henrik W. Jensen", booktitle = "Rendering Techniques 2004 (Proceedings Eurographics Symposium on Rendering)", pages = "143--151", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wimmer-2004-LSPM/", } @talk{purgathofer-2004-real, title = "Real- time Rendering Algorithms", author = "Werner Purgathofer", year = "2004", event = "CGI' 2004", location = "Crete, Greece", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/purgathofer-2004-real/", } @talk{purgathofer-2004-some, title = "Some Commercial VR/AR Project", author = "Werner Purgathofer", year = "2004", event = "CONVR 2004", location = "Lisbon, Portugal", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/purgathofer-2004-some/", } @article{Bittner-2003-Vis, title = "Visibility in Computer Graphics", author = "Jir\'{i} Bittner and Peter Wonka", year = "2003", abstract = "Visibility computation is crucial for computer graphics from its very beginning. The first visibility algorithms in computer graphics aimed to determine visible surfaces in a synthesized image of a 3D scene. Nowadays there are many different visibility algorithms for various visibility problems. We propose a new taxonomy of visibility problems that is based on a classification according to the problem domain. 
We provide a broad overview of visibility problems and algorithms in computer graphics grouped by the proposed taxonomy. The paper surveys visible surface algorithms, visibility culling algorithms, visibility algorithms for shadow computation, global illumination, point-based and image-based rendering, and global visibility computations. Finally, we discuss common concepts of visibility algorithm design and several criteria for the classification of visibility algorithms.", month = sep, issn = "0265-8135", journal = "Environment and Planning B: Planning and Design", number = "5", volume = "30", pages = "729--756", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Bittner-2003-Vis/", } @inproceedings{Artusi-2003-Del, title = "Delivering Interactivity to Complex Tone Mapping Operators", author = "Alessandro Artusi and Jir\'{i} Bittner and Michael Wimmer and Alexander Wilkie", year = "2003", abstract = "The accurate display of high dynamic range images requires the application of complex tone mapping operators. These operators are computationally costly, which prevents their usage in interactive applications. We propose a general framework that delivers interactive performance to an important subclass of tone mapping operators, namely global tone mapping operators. The proposed framework consists of four steps: sampling the input image, applying the tone mapping operator, fitting the point-sampled tone mapping curve, and reconstructing the tone mapping curve for all pixels of the input image. We show how to make use of recent graphics hardware while keeping the advantage of generality by performing tone mapping in software. 
We demonstrate the capabilities of our method by accelerating several common global tone mapping operators and integrating the operators in a real-time rendering application.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "38--44", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-Del/", } @inproceedings{Wimmer-2003-RTE, title = "Rendering Time Estimation for Real-Time Rendering", author = "Michael Wimmer and Peter Wonka", year = "2003", abstract = "This paper addresses the problem of estimating the rendering time for a real-time simulation. We study different factors that contribute to the rendering time in order to develop a framework for rendering time estimation. Given a viewpoint (or view cell) and a list of potentially visible objects, we propose several algorithms that can give reasonable upper limits for the rendering time on consumer hardware. This paper also discusses several implementation issues and design choices that are necessary to make the rendering time predictable. 
Finally, we lay out two extensions to current rendering hardware which would allow implementing a system with constant frame rates.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "118--129", keywords = "graphics hardware, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wimmer-2003-RTE/", } @inproceedings{Jeschke-2002-TDMR, title = "Textured Depth Meshes for Real-Time Rendering of Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "This paper presents a new approach to generate textured depth meshes (TDMs), an impostor-based scene representation that can be used to accelerate the rendering of static polygonal models. The TDMs are precalculated for a fixed viewing region (view cell). The approach relies on a layered rendering of the scene to produce a voxel-based representation. Secondary, a highly complex polygon mesh is constructed that covers all the voxels. Afterwards, this mesh is simplified using a special error metric to ensure that all voxels stay covered. Finally, the remaining polygons are resampled using the voxel representation to obtain their textures. The contribution of our approach is manifold: first, it can handle polygonal models without any knowledge about their structure. Second, only scene parts that may become visible from within the view cell are represented, thereby cutting down on impostor complexity and storage costs. Third, an error metric guarantees that the impostors are practically indistinguishable compared to the original model (i.e. no rubber-sheet effects or holes appear as in most previous approaches). 
Furthermore, current graphics hardware is exploited for the construction and use of the impostors.", month = jun, isbn = "1-58133-534-3", publisher = "Eurographics Association", organization = "Eurographics", location = "Pisa, Italy", editor = "Paul Debevec and Simon Gibson", booktitle = "Rendering Techniques 2002 (Proceedings Eurographics Workshop on Rendering)", pages = "181--190", keywords = "Rendering, Walkthrough, Computer Graphics, Impostors", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-TDMR/", } @inproceedings{Jeschke-2002-LEMA, title = "Layered Environment-Map Impostors for Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann", year = "2002", abstract = "This paper presents a new impostor-based approach to accelerate the rendering of very complex static scenes. The scene is partitioned into viewing regions, and a layered impostor representation is precalculated for each of them. An optimal placement of impostor layers guarantees that our representation is indistinguishable from the original geometry. Furthermore the algorithm exploits common graphics hardware both during preprocessing and rendering. 
Moreover the impostor representation is compressed using several strategies to cut down on storage space.", month = may, isbn = "1-56881-183-7", publisher = "AK Peters Ltd.", location = "Calgary, CA", editor = "Wolfgang St\"{u}rzlinger and Michael McCool", booktitle = "Proceedings of Graphics Interface 2002", pages = "1--8", keywords = "virtual environments, environment maps, impostors, walkthroughs, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-LEMA/", } @techreport{TR-186-2-02-04, title = "An Error Metric for Layered Environment Map Impostors", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to accelerate the rendering of large virtual environments. This paper describes a ``layered impostor technique'' used for representing distant scene-parts when seen from a bounded viewing region. A special layer placement is derived which bounds the geometric error introduced by parallaxes to a defined value. In combination with a special technique for image generation, a high-quality impostor representation without image artifacts can be obtained.", month = feb, number = "TR-186-2-02-04", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "impostors, real-time rendering, virtual", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/TR-186-2-02-04/", } @inproceedings{Devlin-2002-STA, title = "STAR Report on Tone Reproduction and Physically Based Spectral Rendering", author = "K. Devlin and A. 
Chalmers and Alexander Wilkie and Werner Purgathofer", year = "2002", booktitle = "Eurographics 2002", publisher = "Eurographics Association", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Devlin-2002-STA/", } @inproceedings{Bittner-2001-Vis, title = "Visibility Preprocessing for Urban Scenes using Line Space Subdivision", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2001", abstract = "We present an algorithm for visibility preprocessing of urban environments. The algorithm uses a subdivision of line space to analytically calculate a conservative potentially visible set for a given region in the scene. We present a detailed evaluation of our method including a comparison to another recently published visibility preprocessing algorithm. To the best of our knowledge the proposed method is the first algorithm that scales to large scenes and efficiently handles large view cells.", month = oct, isbn = "0-7695-1227-5", publisher = "IEEE Computer Society Press", location = "Tokyo, Japan", editor = "Bob Werner", booktitle = "Proceedings of Pacific Graphics 2001 (Ninth Pacific Conference on Computer Graphics and Applications)", pages = "276--284", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Bittner-2001-Vis/", } @article{Wonka-2001-Ins, title = "Instant Visibility", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion", year = "2001", abstract = "We present an online occlusion culling system which computes visibility in parallel to the rendering pipeline. We show how to use point visibility algorithms to quickly calculate a tight potentially visible set (PVS) which is valid for several frames, by shrinking the occluders used in visibility calculations by an adequate amount. These visibility calculations can be performed on a visibility server, possibly a distinct computer communicating with the display host over a local network. 
The resulting system essentially combines the advantages of online visibility processing and region-based visibility calculations, allowing asynchronous processing of visibility and display operations. We analyze two different types of hardware-based point visibility algorithms and address the problem of bounded calculation time which is the basis for true real-time behavior. Our results show reliable, sustained 60 Hz performance in a walkthrough with an urban environment of nearly 2 million polygons, and a terrain flyover.", month = sep, journal = "Computer Graphics Forum", volume = "20", number = "3", note = "G\"{u}nther Enderle [Best Paper] Award, Best Student Paper Award. A. Chalmers and T.-M. Rhyne (eds.), Proceedings EUROGRAPHICS 2001", issn = "0167-7055", pages = "411--421", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wonka-2001-Ins/", } @inproceedings{Wimmer-2001-Poi, title = "Point-Based Impostors for Real-Time Visualization", author = "Michael Wimmer and Peter Wonka and Fran\c{c}ois Sillion", year = "2001", abstract = "We present a new data structure for encoding the appearance of a geometric model as seen from a viewing region (view cell). This representation can be used in interactive or real-time visualization applications to replace a complex model by an impostor, maintaining high quality rendering while cutting down rendering time. Our approach relies on an object-space sampled representation similar to a point cloud or a layered depth image, but introduces two fundamental additions to previous techniques. First, the sampling rate is controlled to provide sufficient density across all possible viewing conditions from the specified view cell. Second, a correct, antialiased representation of the plenoptic function is computed using Monte Carlo integration. Our system therefore achieves high quality rendering using a simple representation with bounded complexity. 
We demonstrate the method for an application in urban visualization.", month = jun, isbn = "3-211-83709-4", publisher = "Springer-Verlag", organization = "Eurographics", editor = "Steven J. Gortler and Karol Myszkowski", booktitle = "Rendering Techniques 2001 (Proceedings Eurographics Workshop on Rendering)", pages = "163--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-2001-Poi/", } @article{Brusi-2001-Opt, title = "Optimal Ray Shooting in Monte Carlo Radiosity", author = "A. Brusi and Mateu Sbert and Philippe Bekaert and Werner Purgathofer", year = "2001", journal = "Computers&Graphics", volume = "26", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Brusi-2001-Opt/", } @article{Wilkie-2001-Ori, title = "Orientation Lightmaps for Photon Radiosity in Complex Environments", author = "Alexander Wilkie and Robert F. Tobler and Werner Purgathofer", year = "2001", abstract = "We present a method that makes the use of photon tracing methods feasible for complex scenes when a totally accurate solution is not essential. This is accomplished by using orientation lightmaps, which average the illumination of complex objects depending on the surface normal. Through this averaging, they considerably reduce the variance of the stochastic solution. In order to use these specialised lightmaps, which consume comparatively small amounts of memory, no changes have to be made to the basic photon-tracing algorithm. Also, they can be freely mixed with normal lightmaps. This gives the user good control over the amount of inaccuracy he introduces by their application. The area computations necessary for their insertion are performed using a stochastic sampling method that performs well for highly complex objects.", journal = "The Visual Computer", note = "In The Visual Computer, Vol. 17, No. 5, pp. 
318--327, Springer, Heidelberg, 2001", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wilkie-2001-Ori/", } @inproceedings{wonka-2000-VisP, title = "Visibility Preprocessing with Occluder Fusion for Urban Walkthroughs", author = "Peter Wonka and Michael Wimmer and Dieter Schmalstieg", year = "2000", abstract = "This paper presents an efficient algorithm for occlusion culling of urban environments. It is conservative and accurate in finding all significant occlusion. It discretizes the scene into view cells, for which cell-to-object visibility is precomputed, making on-line overhead negligible. Unlike other precomputation methods for view cells, it is able to conservatively compute all forms of occluder interaction for an arbitrary number of occluders. To speed up preprocessing, standard graphics hardware is exploited and occluder occlusion is considered. A walkthrough application running an 8 million polygon model of the city of Vienna on consumer-level hardware illustrates our results.", month = jun, isbn = "3-211-83535-0", publisher = "Springer-Verlag Wien New York", organization = "Eurographics", location = "held in Brno, Czech Republic, June 26-28, 2000", editor = "Bernard P\'{e}roche and Holly Rushmeier", booktitle = "Rendering Techniques 2000 (Proceedings Eurographics Workshop on Rendering)", pages = "71--82", keywords = "Visibility determination, image-based rendering, occluder occlusion, occluder fusion, urban environments, walkthrough, real-time graphics, shadow algorithms, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2000/wonka-2000-VisP/", } @article{Wimmer-1999-FWIb, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. 
In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one order of magnitude.", month = dec, issn = "0097-8493", journal = "Computers and Graphics", number = "6", volume = "23", pages = "831--838", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIb/", } @inproceedings{Wimmer-1999-FWIa, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one order of magnitude.", month = jun, isbn = "3-211-83347-1", publisher = "Springer-Verlag Wien", organization = "Eurographics", editor = "Michael Gervautz and Dieter Schmalstieg and Axel Hildebrand", booktitle = "Virtual Environments '99. Proceedings of the 5th Eurographics Workshop on Virtual Environments", pages = "73--84", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIa/", } @inproceedings{tobler97_hsdasr, title = "A Hierarchical Subdivision Algorithm for Stochastic Radiosity Methods", author = "Robert F. 
Tobler and Alexander Wilkie and Martin Feda and Werner Purgathofer", year = "1997", abstract = "The algorithm proposed in this paper uses a stochastic approach to incrementally calculate the illumination function over a surface. By tracking the illumination function at different levels of meshing resolution, it is possible to get a measure for the quality of the current representation, and to adaptively subdivide in places with inadequate accuracy. With this technique a hierarchical mesh that is based on the stochastic evaluation of global illumination is generated.", month = jun, publisher = "Springer Wien", organization = "Eurographics", address = "St. Etienne, France", editor = "Julie Dorsey and Philipp Slusallek", booktitle = "Eurographics Rendering Workshop 1997", pages = "193--204", keywords = "radiosity, monte carlo methods", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/tobler97_hsdasr/", } @inproceedings{traxler-1997-TRA, title = "Efficient Ray Tracing of Complex Natural Scenes", author = "Christoph Traxler and Michael Gervautz", year = "1997", abstract = "In this paper we present a method for the consistent modelling and efficient ray tracing of complex natural scenes. Both plants and terrains are modelled and represented in the same way to allow mutual influences of their appearance and interdependencies of their geometry. Plants are generated together with a fractal terrain, so that they directly grow on it. This allows an accurate calculation of reflections and the cast of shadows. The scenes are modeled with a special kind of PL-Systems and are represented by cyclic object-instancing graphs. This is a very compact representation for ray tracing, which avoids restrictions to the complexity of the scenes. To significantly increase the efficiency of ray tracing with this representation an adaptation of conventional optimization techniques to cyclic graphs is necessary. 
In this paper we introduce methods for the calculation of a bounding box hierarchy and the use of a regular 3d-grid for cyclic graphs.", publisher = "World Scientific Publishers", location = "Denver, Colorado", editor = "M. M. Novak and T. G. Dewey", booktitle = "Proceedings of Fractal 97", keywords = "Cyclic Object Instancing Graphs, PL-systems, Natural Phenomena, Ray Tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/traxler-1997-TRA/", }