@article{birsak-2017-dpe, title = "Dynamic Path Exploration on Mobile Devices", author = "Michael Birsak and Przemyslaw Musialski and Peter Wonka and Michael Wimmer", year = "2018", abstract = "We present a novel framework for visualizing routes on mobile devices. Our framework is suitable for helping users explore their environment. First, given a starting point and a maximum route length, the system retrieves nearby points of interest (POIs). Second, we automatically compute an attractive walking path through the environment trying to pass by as many highly ranked POIs as possible. Third, we automatically compute a route visualization that shows the current user position, POI locations via pins, and detail lenses for more information about the POIs. The visualization is an animation of an orthographic map view that follows the current user position. We propose an optimization based on a binary integer program (BIP) that models multiple requirements for an effective placement of detail lenses. We show that our path computation method outperforms recently proposed methods and we evaluate the overall impact of our framework in two user studies.", month = may, doi = "10.1109/TVCG.2017.2690294", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "5", volume = "24", pages = "1784--1798", URL = "https://www.cg.tuwien.ac.at/research/publications/2018/birsak-2017-dpe/", } @article{forsythe-2016-ccm, title = "Resolution-independent superpixels based on convex constrained meshes without small angles", author = "Jeremy Forsythe and Vitaliy Kurlin and Andrew Fitzgibbon", year = "2016", abstract = "The over-segmentation problem for images is studied in the new resolution-independent formulation when a large image is approximated by a small number of convex polygons with straight edges at subpixel precision. These polygonal superpixels are obtained by refining and extending subpixel edge segments to a full mesh of convex polygons without small angles and with approximation guarantees. Another novelty is the objective error difference between an original pixel-based image and the reconstructed image with a best constant color over each superpixel, which does not need human segmentations. The experiments on images from the Berkeley Segmentation Database show that new meshes are smaller and provide better approximations than the state-of-the-art.", month = dec, journal = "Lecture Notes in Computer Science (LNCS)", volume = "10072", issn = "0302-9743", pages = "223--233", keywords = "superpixels, polygonal mesh, Delaunay triangulation, constrained triangulation, edge detection", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/forsythe-2016-ccm/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances of the acquisition technology over the last years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps. In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images. At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @inproceedings{ilcik-2016-cmssg, title = "Collaborative Modeling with Symbolic Shape Grammars", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2016", abstract = "Generative design based on symbolic grammars is oriented on individual artists. Team work is not supported since single scripts produced by various artists have to be linked and maintained manually with a lot of effort. The main motivation for a collaborative modeling framework was to reduce the script management required for large projects. We achieved even more by extending the design paradigm to a cloud environment where everyone is part of a huge virtual team. The main contribution of the presented work is a web-based modeling system with a specialized variant of a symbolic shape grammar.", location = "Oulu, Finland", booktitle = "Proceedings of eCAADe 2016", pages = "417--426", keywords = "collaboration, procedural modeling, procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ilcik-2016-cmssg/", } @article{steiner_2016_isad, title = "Integrated Structural-Architectural Design for Interactive Planning", author = "Bernhard Steiner and Elham Mousavian and Fatemeh Mehdizadeh Saradj and Michael Wimmer and Przemyslaw Musialski", year = "2017", abstract = "Traditionally, building floorplans are designed by architects with their usability, functionality, and architectural aesthetics in mind, however, the structural properties of the distribution of load-bearing walls and columns are usually not taken into account at this stage. In this paper we propose a novel approach for the design of architectural floorplans by integrating structural layout analysis directly into the planning process. In order to achieve this, we introduce a planning tool which interactively enforces checks for structural stability of the current design, and which on demand proposes how to stabilize it if necessary. Technically, our solution contains an interactive architectural modeling framework as well as a constrained optimization module where both are based on respective architectural rules. Using our tool, an architect can predict already in a very early planning stage which designs are structurally sound such that later changes due to stability reasons can be prevented. We compare manually computed solutions with optimal results of our proposed automated design process in order to show how much our proposed system can help architects to improve the process of laying out structural models optimally.", month = dec, doi = "10.1111/cgf.12996", issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "36", pages = "80--94", URL = "https://www.cg.tuwien.ac.at/research/publications/2017/steiner_2016_isad/", } @article{musialski_2016_sosp, title = "Non-Linear Shape Optimization Using Local Subspace Projections", author = "Przemyslaw Musialski and Christian Hafner and Florian Rist and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2016", abstract = "In this paper we present a novel method for non-linear shape optimization of 3d objects given by their surface representation. Our method takes advantage of the fact that various shape properties of interest give rise to underdetermined design spaces implying the existence of many good solutions. Our algorithm exploits this by performing iterative projections of the problem to local subspaces where it can be solved much more efficiently using standard numerical routines. We demonstrate how this approach can be utilized for various shape optimization tasks using different shape parameterizations. In particular, we show how to efficiently optimize natural frequencies, mass properties, as well as the structural yield strength of a solid body. Our method is flexible, easy to implement, and very fast.", journal = "ACM Transactions on Graphics", volume = "35", number = "4", issn = "0730-0301", doi = "10.1145/2897824.2925886", pages = "87:1--87:13", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/musialski_2016_sosp/", } @article{ohrhallinger-2016-sgp, title = "Curve Reconstruction with Many Fewer Samples", author = "Stefan Ohrhallinger and Scott A. Mitchell and Michael Wimmer", year = "2016", abstract = "We consider the problem of sampling points from a collection of smooth curves in the plane, such that the Crust family of proximity-based reconstruction algorithms can rebuild the curves. Reconstruction requires a dense sampling of local features, i.e., parts of the curve that are close in Euclidean distance but far apart geodesically. We show that epsilon<0.47-sampling is sufficient for our proposed HNN-CRUST variant, improving upon the state-of-the-art requirement of epsilon<1/3-sampling. Thus we may reconstruct curves with many fewer samples. We also present a new sampling scheme that reduces the required density even further than epsilon<0.47-sampling. We achieve this by better controlling the spacing between geodesically consecutive points. Our novel sampling condition is based on the reach, the minimum local feature size along intervals between samples. This is mathematically closer to the reconstruction density requirements, particularly near sharp-angled features. We prove lower and upper bounds on reach rho-sampling density in terms of lfs epsilon-sampling and demonstrate that we typically reduce the required number of samples for reconstruction by more than half. ", journal = "Computer Graphics Forum", volume = "35", number = "5", issn = "1467-8659", pages = "167--176", keywords = "sampling condition, curve reconstruction, curve sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ohrhallinger-2016-sgp/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @article{guerrero-2015-lsp, title = "Learning Shape Placements by Example", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2015", abstract = "We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.", month = aug, journal = "ACM Transactions on Graphics", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766933", pages = "108:1--108:13", keywords = "modeling by example, complex model generation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/guerrero-2015-lsp/", } @article{musialski-2015-souos, title = "Reduced-Order Shape Optimization Using Offset Surfaces", author = "Przemyslaw Musialski and Thomas Auzinger and Michael Birsak and Michael Wimmer and Leif Kobbelt", year = "2015", abstract = "Given the 2-manifold surface of a 3d object, we propose a novel method for the computation of an offset surface with varying thickness such that the solid volume between the surface and its offset satisfies a set of prescribed constraints and at the same time minimizes a given objective functional. Since the constraints as well as the objective functional can easily be adjusted to specific application requirements, our method provides a flexible and powerful tool for shape optimization. We use manifold harmonics to derive a reduced-order formulation of the optimization problem, which guarantees a smooth offset surface and speeds up the computation independently from the input mesh resolution without affecting the quality of the result. The constrained optimization problem can be solved in a numerically robust manner with commodity solvers. Furthermore, the method allows simultaneously optimizing an inner and an outer offset in order to increase the degrees of freedom. We demonstrate our method in a number of examples where we control the physical mass properties of rigid objects for the purpose of 3d printing. ", month = aug, journal = "ACM Transactions on Graphics (ACM SIGGRAPH 2015)", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766955", pages = "102:1--102:9", keywords = "reduced-order models, shape optimization, computational geometry, geometry processing, physical mass properties", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/musialski-2015-souos/", } @article{Jimenez_SSS_2015, title = "Separable Subsurface Scattering", author = "Jorge Jimenez and Karoly Zsolnai-Feh\'{e}r and Adrian Jarabo and Christian Freude and Thomas Auzinger and Xian-Chun Wu and Javier van der Pahlen and Michael Wimmer and Diego Gutierrez", year = "2015", abstract = "In this paper we propose two real-time models for simulating subsurface scattering for a large variety of translucent materials, which need under 0.5 milliseconds per frame to execute. This makes them a practical option for real-time production scenarios. Current state-of-the-art, real-time approaches simulate subsurface light transport by approximating the radially symmetric non-separable diffusion kernel with a sum of separable Gaussians, which requires multiple (up to twelve) 1D convolutions. In this work we relax the requirement of radial symmetry to approximate a 2D diffuse reflectance profile by a single separable kernel. We first show that low-rank approximations based on matrix factorization outperform previous approaches, but they still need several passes to get good results. To solve this, we present two different separable models: the first one yields a high-quality diffusion simulation, while the second one offers an attractive trade-off between physical accuracy and artistic control. Both allow rendering subsurface scattering using only two 1D convolutions, reducing both execution time and memory consumption, while delivering results comparable to techniques with higher cost. Using our importance-sampling and jittering strategies, only seven samples per pixel are required. Our methods can be implemented as simple post-processing steps without intrusive changes to existing rendering pipelines. https://www.youtube.com/watch?v=P0Tkr4HaIVk", month = jun, journal = "Computer Graphics Forum", volume = "34", number = "6", issn = "1467-8659", pages = "188--197", keywords = "separable, realtime rendering, subsurface scattering, filtering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Jimenez_SSS_2015/", } @article{Ilcik_2015_LAY, title = "Layer-Based Procedural Design of Facades", author = "Martin Il\v{c}\'{i}k and Przemyslaw Musialski and Thomas Auzinger and Michael Wimmer", year = "2015", abstract = "We present a novel procedural framework for interactively modeling building fa\c{c}ades. Common procedural approaches, such as shape grammars, assume that building fa\c{c}ades are organized in a tree structure, while in practice this is often not the case. Consequently, the complexity of their layout description becomes unmanageable for interactive editing. In contrast, we obtain a fa\c{c}ade by composing multiple overlapping layers, where each layer contains a single rectilinear grid of fa\c{c}ade elements described by two simple generator patterns. This way, the design process becomes more intuitive and the editing effort for complex layouts is significantly reduced. To achieve this, we present a method for the automated merging of different layers in the form of a mixed discrete and continuous optimization problem. Finally, we provide several modeling examples and a comparison to shape grammars in order to highlight the advantages of our method when designing realistic building fa\c{c}ades. You can find the paper video at https://vimeo.com/118400233 .", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "205--216", keywords = "procedural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/Ilcik_2015_LAY/", } @article{MATTAUSCH-2015-CHCRT, title = "CHC+RT: Coherent Hierarchical Culling for Ray Tracing", author = "Oliver Mattausch and Jir\'{i} Bittner and Alberto Jaspe and Enrico Gobbetti and Michael Wimmer and Renato Pajarola", year = "2015", abstract = "We propose a new technique for in-core and out-of-core GPU ray tracing using a generalization of hierarchical occlusion culling in the style of the CHC++ method. Our method exploits the rasterization pipeline and hardware occlusion queries in order to create coherent batches of work for localized shader-based ray tracing kernels. By combining hierarchies in both ray space and object space, the method is able to share intermediate traversal results among multiple rays. We exploit temporal coherence among similar ray sets between frames and also within the given frame. A suitable management of the current visibility state makes it possible to benefit from occlusion culling for less coherent ray types like diffuse reflections. Since large scenes are still a challenge for modern GPU ray tracers, our method is most useful for scenes with medium to high complexity, especially since our method inherently supports ray tracing highly complex scenes that do not fit in GPU memory. For in-core scenes our method is comparable to CUDA ray tracing and performs up to 5.94 × better than pure shader-based ray tracing.", month = may, journal = "Computer Graphics Forum", volume = "34", number = "2", issn = "1467-8659", pages = "537--548", keywords = "occlusion culling, ray tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/MATTAUSCH-2015-CHCRT/", } @inproceedings{WEBER-2015-PRA, title = "Parallel Reyes-style Adaptive Subdivision with Bounded Memory Usage", author = "Thomas Weber and Michael Wimmer and John Owens", year = "2015", abstract = "Recent advances in graphics hardware have made it a desirable goal to implement the Reyes algorithm on current graphics cards. One key component in this algorithm is the bound-and-split phase, where surface patches are recursively split until they are smaller than a given screen-space bound. While this operation has been successfully parallelized for execution on the GPU using a breadth-first traversal, the resulting implementations are limited by their unpredictable worst-case memory consumption and high global memory bandwidth utilization. In this paper, we propose an alternate strategy that allows limiting the amount of necessary memory by controlling the number of assigned worker threads. The result is an implementation that scales to the performance of the breadth-first approach while offering three advantages: significantly decreased memory usage, a smooth and predictable tradeoff between memory usage and performance, and increased locality for surface processing. This allows us to render scenes that would require too much memory to be processed by the breadth-first method.", month = feb, isbn = "978-1-4503-3392-4", publisher = "ACM", organization = "ACM", location = "San Francisco, CA", booktitle = "Proceedings of the 19th Symposium on Interactive 3D Graphics and Games (i3D 2015)", pages = "39--45", keywords = "micro-rasterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/WEBER-2015-PRA/", } @article{fan-2014-scfl, title = "Structure Completion for Facade Layouts", author = "Lubin Fan and Przemyslaw Musialski and Ligang Liu and Peter Wonka", year = "2014", abstract = "We present a method to complete missing structures in facade layouts. Starting from an abstraction of the partially observed layout as a set of shapes, we can propose one or multiple possible completed layouts. Structure completion with large missing parts is an ill-posed problem. Therefore, we combine two sources of information to derive our solution: the observed shapes and a database of complete layouts. The problem is also very difficult, because shape positions and attributes have to be estimated jointly. Our proposed solution is to break the problem into two components: a statistical model to evaluate layouts and a planning algorithm to generate candidate layouts. This ensures that the completed result is consistent with the observation and the layouts in the database.", month = nov, journal = "ACM Transactions on Graphics (ACM SIGGRAPH Asia 2014)", volume = "33", number = "6", pages = "210:1--210:11", keywords = "facade modeling, facade completion, structure completion, urban modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/fan-2014-scfl/", } @article{Guerrero-2014-TPS, title = "Partial Shape Matching using Transformation Parameter Similarity", author = "Paul Guerrero and Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2014", abstract = "In this paper, we present a method for non-rigid, partial shape matching in vector graphics. Given a user-specified query region in a 2D shape, similar regions are found, even if they are non-linearly distorted. Furthermore, a non-linear mapping is established between the query regions and these matches, which allows the automatic transfer of editing operations such as texturing. This is achieved by a two-step approach. First, point-wise correspondences between the query region and the whole shape are established. The transformation parameters of these correspondences are registered in an appropriate transformation space. For transformations between similar regions, these parameters form surfaces in transformation space, which are extracted in the second step of our method. The extracted regions may be related to the query region by a non-rigid transform, enabling non-rigid shape matching.", month = nov, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "1--14", keywords = "Shape Matching, Texture Transfer, Non-Rigid, Deformable, Edit Propagation, Partial", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-TPS/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{preiner2014clop, title = "Continuous Projection for Fast L1 Reconstruction", author = "Reinhold Preiner and Oliver Mattausch and Murat Arikan and Renato Pajarola and Michael Wimmer", year = "2014", abstract = "With better and faster acquisition devices comes a demand for fast robust reconstruction algorithms, but no L1-based technique has been fast enough for online use so far. In this paper, we present a novel continuous formulation of the weighted locally optimal projection (WLOP) operator based on a Gaussian mixture describing the input point density. Our method is up to 7 times faster than an optimized GPU implementation of WLOP, and achieves interactive frame rates for moderately sized point clouds. We give a comprehensive quality analysis showing that our continuous operator achieves a generally higher reconstruction quality than its discrete counterpart. Additionally, we show how to apply our continuous formulation to spherical mixtures of normal directions, to also achieve a fast robust normal reconstruction. Project Page: https://www.cg.tuwien.ac.at/~preiner/projects/clop/", month = aug, journal = "ACM Transactions on Graphics (Proc. of ACM SIGGRAPH 2014)", volume = "33", number = "4", issn = "0730-0301", doi = "10.1145/2601097.2601172", pages = "47:1--47:13", keywords = "point set, Gaussian mixture, Hierarchical EM, upsampling, dynamic reconstruction, L1 reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/preiner2014clop/", } @article{bernhard-2014-GTOM, title = "Gaze-To-Object Mapping During Visual Search in 3D Virtual Environments ", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Hecher and Michael Wimmer", year = "2014", abstract = "Stimuli obtained from highly dynamic 3D virtual environments and synchronous eye-tracking data are commonly used by algorithms that strive to correlate gaze to scene objects, a process referred to as Gaze-To-Object Mapping (GTOM). We propose to address this problem with a probabilistic approach using Bayesian inference. The desired result of the inference is a predicted probability density function (PDF) specifying for each object in the scene a probability to be attended by the user. To evaluate the quality of a predicted attention PDF, we present a methodology to assess the information value (i.e., likelihood) in the predictions of dierent approaches that can be used to infer object attention. To this end, we propose an experiment based on a visual search task which allows us to determine the object of attention at a certain point in time under controlled conditions. We perform this experiment with a wide range of static and dynamic visual scenes to obtain a ground-truth evaluation data set, allowing us to assess GTOM techniques in a set of 30 particularly challenging cases.", month = aug, journal = "ACM Transactions on Applied Perception (Special Issue SAP 2014)", volume = "11", number = "3", issn = "1544-3558", pages = "14:1--14:17", keywords = "object-based attention, eye-tracking, virtual environments, visual attention", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-GTOM/", } @article{hecher-2014-MH, title = "A Comparative Perceptual Study of Soft Shadow Algorithms", author = "Michael Hecher and Matthias Bernhard and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2014", abstract = "We performed a perceptual user study of algorithms that approximate soft shadows in real time. Although a huge body of soft-shadow algorithms have been proposed, to our knowledge this is the first methodical study for comparing different real-time shadow algorithms with respect to their plausibility and visual appearance. We evaluated soft-shadow properties like penumbra overlap with respect to their relevance to shadow perception in a systematic way, and we believe that our results can be useful to guide future shadow approaches in their methods of evaluation. In this study, we also capture the predominant case of an inexperienced user observing shadows without comparing to a reference solution, such as when watching a movie or playing a game. One important result of this experiment is to scientifically verify that real-time soft-shadow algorithms, despite having become physically based and very realistic, can nevertheless be intuitively distinguished from a correct solution by untrained users.", month = jun, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "5", volume = "11", pages = "5:1--5:21", keywords = "Perception Studies, Soft Shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/hecher-2014-MH/", } @article{LUKSCH-2014-RTR, title = "Real-Time Rendering of Glossy Materials with Regular Sampling", author = "Christian Luksch and Robert F. Tobler and Thomas M\"{u}hlbacher and Michael Schw\"{a}rzler and Michael Wimmer", year = "2014", abstract = "Rendering view-dependent, glossy surfaces to increase the realism in real-time applications is a computationally complex task, that can only be performed by applying some approximations—especially when immediate changes in the scene in terms of material settings and object placement are a necessity. The use of environment maps is a common approach to this problem, but implicates performance problems due to costly pre-filtering steps or expensive sampling. We, therefore, introduce a regular sampling scheme for environment maps that relies on an efficient MIP-map-based filtering step, and minimizes the number of necessary samples for creating a convincing real-time rendering of glossy BRDF materials.", month = jun, journal = "The Visual Computer", volume = "30", number = "6-8", issn = "0178-2789", pages = "717--727", keywords = "real-time rendering , BRDFs", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/LUKSCH-2014-RTR/", } @inproceedings{Radwan-2014-CDR, title = "Efficient Collision Detection While Rendering Dynamic Point Clouds", author = "Mohamed Radwan and Stefan Ohrhallinger and Michael Wimmer", year = "2014", abstract = "A recent trend in interactive environments is the use of unstructured and temporally varying point clouds. This is driven by both affordable depth cameras and augmented reality simulations. One research question is how to perform collision detection on such point clouds. State-of-the-art methods for collision detection create a spatial hierarchy in order to capture dynamic point cloud surfaces, but they require O(NlogN) time for N points. We propose a novel screen-space representation for point clouds which exploits the property of the underlying surface being 2D. In order for dimensionality reduction, a 3D point cloud is converted into a series of thickened layered depth images. This data structure can be constructed in O(N) time and allows for fast surface queries due to its increased compactness and memory coherency. On top of that, parts of its construction come for free since they are already handled by the rendering pipeline. As an application we demonstrate online collision detection between dynamic point clouds. It shows superior accuracy when compared to other methods and robustness to sensor noise since uncertainty is hidden by the thickened boundary.", month = may, isbn = "978-1-4822-6003-8", publisher = "Canadian Information Processing Society", location = "Montreal, Quebec, Canada ", issn = "0713-5424", event = "Graphics Interface 2014", booktitle = "Proceedings of the 2014 Graphics Interface Conference", pages = "25--33", keywords = "bounding volumes, layered depth images, collision detection, point cloud, dynamic", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Radwan-2014-CDR/", } @inproceedings{charpenay-2014-sgn, title = "Sampling Gabor Noise in the Spatial Domain", author = "Victor Charpenay and Bernhard Steiner and Przemyslaw Musialski", year = "2014", abstract = "Gabor noise is a powerful technique for procedural texture generation. Contrary to other types of procedural noise, its sparse convolution aspect makes it easily controllable locally. In this paper, we demonstrate this property by explicitly introducing spatial variations. We do so by linking the sparse convolution process to the parametrization of the underlying surface. Using this approach, it is possible to provide control maps for the parameters in a natural and convenient way. In order to derive intuitive control of the resulting textures, we accomplish a small study of the influence of the parameters of the Gabor kernel with respect to the outcome and we introduce a solution where we bind values such as the frequency or the orientation of the Gabor kernel to a user-provided control map in order to produce novel visual effects.", month = may, isbn = "978-80-223-3601-7", publisher = "ACM Press", location = "Smolenice castle, Slovakia", editor = "Diego Gutierrez", booktitle = "Proceedings of the 30th Spring Conference on Computer Graphics - SCCG ", pages = "79--82", keywords = "texture synthesis, Gabor noise, procedural texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/charpenay-2014-sgn/", } @article{birsak-2014-agtb, title = "Automatic Generation of Tourist Brochures", author = "Michael Birsak and Przemyslaw Musialski and Peter Wonka and Michael Wimmer", year = "2014", abstract = "We present a novel framework for the automatic generation of tourist brochures that include routing instructions and additional information presented in the form of so-called detail lenses. The first contribution of this paper is the automatic creation of layouts for the brochures. Our approach is based on the minimization of an energy function that combines multiple goals: positioning of the lenses as close as possible to the corresponding region shown in an overview map, keeping the number of lenses low, and an efficient numbering of the lenses. The second contribution is a route-aware simplification of the graph of streets used for traveling between the points of interest (POIs). This is done by reducing the graph consisting of all shortest paths through the minimization of an energy function. The output is a subset of street segments that enable traveling between all the POIs without considerable detours, while at the same time guaranteeing a clutter-free visualization. Video: http://www.youtube.com/watch?v=t3w7uxzSR-Y", month = apr, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2014)", volume = "33", number = "2", issn = "1467-8659", pages = "449--458", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/birsak-2014-agtb/", } @article{Guerrero-2014-GRF, title = "Edit Propagation using Geometric Relationship Functions", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2014", abstract = "We propose a method for propagating edit operations in 2D vector graphics, based on geometric relationship functions. These functions quantify the geometric relationship of a point to a polygon, such as the distance to the boundary or the direction to the closest corner vertex. The level sets of the relationship functions describe points with the same relationship to a polygon. For a given query point we ?rst determine a set of relationships to local features, construct all level sets for these relationships and accumulate them. The maxima of the resulting distribution are points with similar geometric relationships. We show extensions to handle mirror symmetries, and discuss the use of relationship functions as local coordinate systems. Our method can be applied for example to interactive ?oor-plan editing, and is especially useful for large layouts, where individual edits would be cumbersome. We demonstrate populating 2D layouts with tens to hundreds of objects by propagating relatively few edit operations.", month = mar, journal = "ACM Transactions on Graphics", volume = "33", number = "2", issn = "0730-0301", doi = "10.1145/2591010", pages = "15:1--15:15", keywords = "Shape Modeling, Floor Plans, Edit Propagation, Geometric Relationship Functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-GRF/", } @inproceedings{bernhard-2014-EFD, title = "The Effects of Fast Disparity Adjustments in Gaze-Controlled Stereoscopic Applications", author = "Matthias Bernhard and Camillo Dellmour and Michael Hecher and Efstathios Stavrakis and Michael Wimmer", year = "2014", abstract = "With the emergence of affordable 3D displays, stereoscopy is becoming a commodity. However, often users report discomfort even after brief exposures to stereo content. One of the main reasons is the conflict between vergence and accommodation that is caused by 3D displays. We investigate dynamic adjustment of stereo parameters in a scene using gaze data in order to reduce discomfort. In a user study, we measured stereo fusion times after abrupt manipulation of disparities using gaze data. We found that gaze-controlled manipulation of disparities can lower fusion times for large disparities. In addition we found that gaze-controlled disparity adjustment should be applied in a personalized manner and ideally performed only at the extremities or outside the comfort zone of subjects. These results provide important insight on the problems associated with fast disparity manipulation and are essential for developing appealing gaze-contingent and gaze-controlled applications.", month = mar, isbn = "978-1-4503-2751-0", publisher = "ACM", location = "Safety Harbor, FL, USA", editor = "Pernilla Qvarfordt and Dan Witzner Hansen", booktitle = "Proceedings of the Symposium on Eye Tracking Research and Applications (ETRA 2014)", pages = "111--118", keywords = "stereoscopic rendering, comfort models, fusion time, eye tracking", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/bernhard-2014-EFD/", } @inproceedings{Calatrava_2013_GPGPUSOA, title = "General-Purpose Graphics Processing Units in Service-Oriented Architectures", author = "Mar\'{i}a del Carmen Calatrava Moreno and Thomas Auzinger", year = "2013", abstract = "Over the last decades, graphics processing units have developed from special-purpose graphics accelerators to general-purpose massively parallel co-processors. In recent years they gained increased traction in high performance computing as they provide superior computational performance in terms of runtime and energy consumption for a wide range of problems. In this survey, we review their employment in distributed computing for a broad range of application scenarios. Common characteristics and a classification of the most relevant use cases are described. Furthermore, we discuss possible future developments of the use of general purpose graphics processing units in the area of service-oriented architecture. The aim of this work is to inspire future research in this field and to give guidelines on when and how to incorporate this new hardware technology.", month = dec, isbn = "978-1-4799-2701-2", series = "SOCA ", publisher = "IEEE Computer Society", organization = "IEEE Computer Society", location = "Kauai", booktitle = "Proceedings of the 6th IEEE International Conference on Service Oriented Computing and Applications", keywords = "GPGPU, SOA, parallel, graphics processors, GPU, service-oriented architectures, throughput computing, survey, future", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Calatrava_2013_GPGPUSOA/", } @article{ohrhallinger_stefan-2013-c2d, title = "An Efficient Algorithm for Determining an Aesthetic Shape Connecting Unorganised 2D Points", author = "Stefan Ohrhallinger and Sudhir Mudur", year = "2013", abstract = "We present an efficient algorithm for determining an aesthetically pleasing shape boundary connecting all the points in a given unorganised set of 2D points, with no other information than point coordinates. By posing shape construction as a minimisation problem which follows the Gestalt laws, our desired shape Bmin is non-intersecting, interpolates all points and minimises a criterion related to these laws. The basis for our algorithm is an initial graph, an extension of the Euclidean minimum spanning tree but with no leaf nodes, called as the minimum boundary complex BCmin. BCmin and Bmin can be expressed similarly by parametrising a topological constraint. A close approximation of BCmin, termed BC0 can be computed fast using a greedy algorithm. BC0 is then transformed into a closed interpolating boundary Bout in two steps to satisfy Bmin’s topological and minimization requirements. Computing Bmin exactly is an NP-hard problem, whereas Bout is computed in linearithmic time. We present many examples showing considerable improvement over previous techniques, especially for shapes with sharp corners. Source code is available online.", month = dec, journal = "Computer Graphics Forum", volume = "32", number = "8", pages = "72--88", keywords = "curve reconstruction, boundary representation, sampling condition, computational geometry", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ohrhallinger_stefan-2013-c2d/", } @inproceedings{birsak-2013-sta, title = "Seamless Texturing of Archaeological Data", author = "Michael Birsak and Przemyslaw Musialski and Murat Arikan and Michael Wimmer", year = "2013", abstract = "In this paper we propose a framework for out-of-core real-time rendering of high-quality textured archaeological data-sets. Our input is a triangle mesh and a set of calibrated and registered photographs. Our system performs the actual mapping of the photos to the mesh for high-quality reconstructions, which is a task referred to as the labeling problem. Another problem of such mappings are seams that arise on junctions between triangles that contain information from different photos. These are are approached with blending methods, referred to as leveling. We address both problems and introduce a novel labeling approach based on occlusion detection using depth maps that prevents texturing of parts of the model with images that do not contain the expected region. Moreover, we propose an improved approach for seam-leveling that penalizes too large values and helps to keep the resulting colors in a valid range. For high-performance visualization of the 3D models with a huge amount of textures, we make use of virtual texturing, and present an application that generates the needed texture atlas in significantly less time than existing scripts. Finally, we show how the mentioned components are integrated into a visualization application for digitized archaeological site.", month = oct, isbn = "978-1-4799-3168-2 ", publisher = "IEEE", note = "DOI: 10.1109/DigitalHeritage.2013.6743749", location = "Marseille, France", booktitle = "Digital Heritage International Congress (DigitalHeritage), 2013", pages = "265--272 ", keywords = "digital cultural heritage, out-of-core real-time rendering, seamless texturing, virtual texturing", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/birsak-2013-sta/", } @article{ohrhallinger_stefan-2013-smi, title = "Minimizing Edge Length to Connect Sparsely Sampled Unorganized Point Sets", author = "Stefan Ohrhallinger and Sudhir Mudur and Michael Wimmer", year = "2013", abstract = "Most methods for interpolating unstructured point clouds handle densely sampled point sets quite well but get into trouble when the point set contains regions with much sparser sampling, a situation often encountered in practice. In this paper, we present a new method that provides a better interpolation of sparsely sampled features. We pose the surface construction problem as finding the triangle mesh which minimizes the sum of all triangles’ longest edge. The output is a closed manifold triangulated surface Bmin. Exact computation of Bmin for sparse sampling is most probably NP-hard, and therefore we introduce suitable heuristics for its computing. The algorithm first connects the points by triangles chosen in order of their longest edge and with the requirement that all edges must have at least 2 incident triangles. This yields a closed non-manifold shape which we call the Boundary Complex. Then we transform it into a manifold triangulation using topological operations. We show that in practice, runtime is linear to that of the Delaunay triangulation of the points.", month = oct, journal = "Computers & Graphics (Proceedings of Shape Modeling International 2013)", volume = "37", number = "6", issn = "0097-8493", pages = "645--658", keywords = "point cloud, reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ohrhallinger_stefan-2013-smi/", } @inproceedings{Auzinger_2013_NSAA, title = "Non-Sampled Anti-Aliasing", author = "Thomas Auzinger and Przemyslaw Musialski and Reinhold Preiner and Michael Wimmer", year = "2013", abstract = "In this paper we present a parallel method for high-quality edge anti-aliasing. In contrast to traditional graphics hardware methods, which rely on massive oversampling to combat aliasing issues in the rasterization process, we evaluate a closed-form solution of the associated prefilter convolution. This enables the use of a wide range of filter functions with arbitrary kernel sizes, as well as general shading methods such as texture mapping or complex illumination models. Due to the use of analytic solutions, our results are exact in the mathematical sense and provide objective ground-truth for other anti-aliasing methods and enable the rigorous comparison of different models and filters. An efficient implementation on general purpose graphics hardware is discussed and several comparisons to existing techniques and of various filter functions are given.", month = sep, isbn = "978-3-905674-51-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Lugano, Switzerland", event = "Vision, Modelin, Visualization (VMV)", editor = "Michael Bronstein and Jean Favre and Kai Hormann", booktitle = "Proceedings of the 18th International Workshop on Vision, Modeling and Visualization (VMV 2013)", pages = "169--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_NSAA/", } @article{musialski-2013-surcgf, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2013", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still under very active research. The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field. Further, we would like to bring the mentioned research communities to even more interdisciplinary work, since the reconstruction problem itself is by far not solved.", month = sep, issn = "1467-8659", journal = "Computer Graphics Forum", number = "6", volume = "32", pages = "146--177", keywords = "facade modeling, state-of-the-art report, multi-view stereo, structure from motion, urban modeling, urban reconstruction, inverse-procedural modeling, facade reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/musialski-2013-surcgf/", } @inproceedings{STURN-2013-LSI, title = "Landspotting: A Serious iPad Game for Improving Global Land Cover", author = "Tobias Sturn and Dietmar Pangerl and Linda See and Steffen Fritz and Michael Wimmer", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial research and policy-related questions, show huge disagreements. In this paper we present a serious game for the iPad with the purpose of improving global land cover data. We describe the game, discuss the design decisions made and outline the challenges faced while developing the game. We evaluate how well the players are able to annotate land cover by comparing the game against expert validations collected using the Geo-Wiki tool and provide evidence that games can be a useful way to increase the quality of global land cover.", month = jul, isbn = "978-3-87907-532-4", publisher = "Verlag der \"{O}sterreichischen Akademie der Wissenschaften Austrian Academy of Sciences Press ", organization = "Z_GIS - Department of Geoinformatics", location = "University of Salzburg", booktitle = "Proceedings of the GI-Forum 2013 -- Creating the GISociety", pages = "81--90", keywords = "Landspotting, Serious Game, Improving Global Land Cover", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LSI/", } @inproceedings{JAHRMANN-2013-IGR, title = "Interactive Grass Rendering Using Real-Time Tessellation", author = "Klemens Jahrmann and Michael Wimmer", year = "2013", abstract = "Grass rendering is needed for many outdoor scenes, but for real-time applications, rendering each blade of grass as geometry has been too expensive so far. This is why grass is most often drawn as a texture mapped onto the ground or grass patches rendered as transparent billboard quads. Recent approaches use geometry for blades that are near the camera and flat geometry for rendering further away. In this paper, we present a technique which is capable of rendering whole grass fields in real time as geometry by exploiting the capabilities of the tessellation shader. Each single blade of grass is rendered as a two-dimensional tessellated quad facing its own random direction. This enables each blade of grass to be influenced by wind and to interact with its environment. In order to adapt the grass field to the current scene, special textures are developed which encode on the one hand the density and height of the grass and on the other hand its look and composition.", month = jun, isbn = "978-80-86943-74-9", location = "Plzen, CZ", editor = "Manuel Oliveira and Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "114--122", keywords = "grass rendering, real-time rendering, billboards", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/JAHRMANN-2013-IGR/", } @inproceedings{scheiblauer-2013-wscg, title = "Analysis of Interactive Editing Operations for Out-of-Core Point-Cloud Hierarchies", author = "Claus Scheiblauer and Michael Wimmer", year = "2013", abstract = "In this paper we compare the time and space complexity of editing operations on two data structures which are suitable for visualizing huge point clouds. The first data structure was introduced by Scheiblauer and Wimmer [SW11] and uses only the original points from a source data set for building a level-of-detail hierarchy that can be used for rendering points clouds. The second data structure introduced by Wand et al. [WBB+07] requires additional points for the level-of-detail hierarchy and therefore needs more memory when stored on disk. Both data structures are based on an octree hierarchy and allow for deleting and inserting points. Besides analyzing and comparing these two data structures we also introduce an improvement to the points deleting algorithm for the data structure of Wand et al. [WBB+07], which thus allows for a more efficient node loading strategy during rendering.", month = jun, isbn = "978-80-86943-74-9", publisher = "Union Agency", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2013 Full Paper Proceedings", pages = "123--132", keywords = "complexity analysis, point clouds, data structures, viewing algorithms", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/scheiblauer-2013-wscg/", } @inproceedings{STURN-2013-LGI, title = "Landspotting - Games for Improving Global Land Cover", author = "Tobias Sturn and Michael Wimmer and Peter Purgathofer and Steffen Fritz", year = "2013", abstract = "Current satellite-derived land cover products, which are very important for answering many crucial questions, show huge disagreements. In this paper, we introduce four serious game prototypes - a Facebook strategy game played on Google Maps, a Facebook tagging game, a tower-defense game, and an aesthetic tile game for the iPad - with the purpose of improving global land cover data. We describe the games in detail and discuss the design decisions we made and challenges we faced while developing the games. We evaluate how much the players have already been able to improve global land cover data and provide evidence that games can be a useful way to increase the quality of this data. Finally, we discuss how the main game is being perceived by the players and what has to be further improved to attract a bigger audience.", month = may, location = "Chania, Greece", booktitle = "Proceedings of Foundations of Digital Games Conference 2013 (FDG 2013)", pages = "117--125", keywords = "Improving Global Land Cover, Serious Games, Landspotting", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/STURN-2013-LGI/", } @article{Reisner_2013_RSB, title = "Reconstruction of Shape Boundaries with Multimodal Constrains", author = "Irene Reisner-Kollmann and Stefan Maierhofer and Werner Purgathofer", year = "2013", abstract = "Shape primitives are a valuable input for reconstructing 3D models from point clouds. In this paper we present a method for clipping simple shape primitives at reasonable boundaries. The shape primitives, e.g. planes or cylinders, are 2D manifolds which are automatically detected in unstructured point clouds. Shape boundaries are necessary for generating valid 3D models from multiple shape primitives, because shape primitives possibly have dimensions of infinite extent or they are only partially present in the scene. Hints for reasonable boundaries of shape primitives are indicated by different input sources and constraints. Point clouds and range images provide information where shape primitives coincide with measured surface points. Edge detectors offer cues for surface boundaries in color images. The set of shape primitives is analyzed for constraints such as intersections. Due to an iterative approach, intermediate results provide additional constraints such as coplanar boundary points over multiple shape primitives. We present a framework for extracting and optimizing shape boundaries based on the given input data and multiple constraints. Further, we provide a simple user interface for manually adding constraints in order to improve the results. Our approach generates structurally simple 3D models from shape primitives and point clouds. It is useful for reconstructing scenes containing man-made objects, such as buildings, interior scenes, or engineering objects. The application of multiple constraints enables the reconstruction of proper 3D models despite noisy or incomplete point clouds. ", month = may, journal = "Computer & Graphics", number = "3", volume = "37", pages = "137--147", keywords = "Shape boundaries, Shape primitives; , Reconstruction;", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Reisner_2013_RSB/", } @inproceedings{ilcik-2013-cipmi, title = "Challenges and Ideas in Procedural Modeling of Interiors", author = "Martin Il\v{c}\'{i}k and Michael Wimmer", year = "2013", abstract = "While the creation of convincing cityscapes from the outside is already possible, there is a lack of robust and efficient techniques for modeling the interior of buildings. In particular, we focus on challenges for the subdivision of the interior space into rooms and for placement of furniture in those rooms.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "Vincent Tourre and Gonzalo Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "29--30", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/ilcik-2013-cipmi/", } @article{MATTAUSCH-2013-FSBE, title = "Freeform Shadow Boundary Editing", author = "Oliver Mattausch and Takeo Igarashi and Michael Wimmer", year = "2013", abstract = "We present an algorithm for artistically modifying physically based shadows. With our tool, an artist can directly edit the shadow boundaries in the scene in an intuitive fashion similar to freeform curve editing. Our algorithm then makes these shadow edits consistent with respect to varying light directions and scene configurations, by creating a shadow mesh from the new silhouettes. The shadow mesh helps a modified shadow volume algorithm cast shadows that conform to the artistic shadow boundary edits, while providing plausible interaction with dynamic environments, including animation of both characters and light sources. Our algorithm provides significantly more fine-grained local and direct control than previous artistic light editing methods, which makes it simple to adjust the shadows in a scene to reach a particular effect, or to create interesting shadow shapes and shadow animations. All cases are handled with a single intuitive interface, be it soft shadows, or (self-)shadows on arbitrary receivers.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "0167-7055", pages = "175--184", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/MATTAUSCH-2013-FSBE/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, close-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @inproceedings{Musialski-2013-ipmum, title = "Inverse-Procedural Methods for Urban Models", author = "Przemyslaw Musialski and Michael Wimmer", year = "2013", abstract = "Procedural modeling is an elegant and fast way to generate huge complex and realistically looking urban sites. Due to its generative nature it can also be referred to as forward-procedural modeling. Its major drawback is the usually quite complicated way of control. To overcome this difficulty a novel modeling paradigm has been introduced: it is commonly referred to as inverse procedural modeling, and its goal is to generate compact procedural descriptions of existing models---in the best case in an automatic manner as possible. These compact procedural representations can be used as a source for the synthesis of identical or similar objects, applied in various simulations and other studies of urban environments. We believe that this technology is still a widely unexplored ground and that it will prove itself as a very important tool in the reconstruction process. In this paper we sketch how inverse procedural modeling can be applied in the urban modeling field.", month = may, isbn = "978-3-905674-46-0", publisher = "Eurographics Association", location = "Girona, Spain", issn = "2307-8251", editor = "V. Tourre and G. Besuievsky", booktitle = "Proceedings of Eurographics Workshop on Urban Data Modelling and Visualisation (UDMV 2013)", pages = "31--32", keywords = "inverse procedural modeling, urban modeling, urban reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Musialski-2013-ipmum/", } @inproceedings{LUKSCH-2013-FLM, title = "Fast Light-Map Computation with Virtual Polygon Lights", author = "Christian Luksch and Robert F. Tobler and Ralf Habel and Michael Schw\"{a}rzler and Michael Wimmer", year = "2013", abstract = "We propose a new method for the fast computation of light maps using a many-light global-illumination solution. A complete scene can be light mapped on the order of seconds to minutes, allowing fast and consistent previews for editing or even generation at loading time. In our method, virtual point lights are clustered into a set of virtual polygon lights, which represent a compact description of the illumination in the scene. The actual light-map generation is performed directly on the GPU. Our approach degrades gracefully, avoiding objectionable artifacts even for very short computation times. ", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "87--94", keywords = "instant radiosity, global illumination, light-maps", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/LUKSCH-2013-FLM/", } @inproceedings{SCHWAERZLER-2013-FPCSS, title = "Fast Percentage Closer Soft Shadows using Temporal Coherence", author = "Michael Schw\"{a}rzler and Christian Luksch and Daniel Scherzer and Michael Wimmer", year = "2013", abstract = "We propose a novel way to efficiently calculate soft shadows in real-time applications by overcoming the high computational effort involved with the complex corresponding visibility estimation each frame: We exploit the temporal coherence prevalent in typical scene movement, making the estimation of a new shadow value only necessary whenever regions are newly disoccluded due to camera adjustment, or the shadow situation changes due to object movement. By extending the typical shadow mapping algorithm by an additional light-weight buffer for the tracking of dynamic scene objects, we can robustly and efficiently detect all screen space fragments that need to be updated, including not only the moving objects themselves, but also the soft shadows they cast. By applying this strategy to the popular Percentage Closer Soft Shadow algorithm (PCSS), we double rendering performance in scenes with both static and dynamic objects - as prevalent in various 3D game levels - while maintaining the visual quality of the original approach.", month = mar, isbn = "978-1-4503-1956-0", publisher = "ACM", location = "Orlando, Florida", address = "New York, NY, USA", booktitle = "Proceedings of ACM Symposium on Interactive 3D Graphics and Games 2013", pages = "79--86", keywords = "real-time, temporal coherence, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/SCHWAERZLER-2013-FPCSS/", } @article{knecht_martin_2013_ReflRefrObjsMR, title = "Reflective and Refractive Objects for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Christoph Winklhofer and Michael Wimmer", year = "2013", abstract = "In this paper, we present a novel rendering method which integrates reflective or refractive objects into a differential instant radiosity (DIR) framework usable for mixed-reality (MR) applications. This kind of objects are very special from the light interaction point of view, as they reflect and refract incident rays. Therefore they may cause high-frequency lighting effects known as caustics. Using instant-radiosity (IR) methods to approximate these high-frequency lighting effects would require a large amount of virtual point lights (VPLs) and is therefore not desirable due to real-time constraints. Instead, our approach combines differential instant radiosity with three other methods. One method handles more accurate reflections compared to simple cubemaps by using impostors. Another method is able to calculate two refractions in real-time, and the third method uses small quads to create caustic effects. Our proposed method replaces parts in light paths that belong to reflective or refractive objects using these three methods and thus tightly integrates into DIR. In contrast to previous methods which introduce reflective or refractive objects into MR scenarios, our method produces caustics that also emit additional indirect light. The method runs at real-time frame rates, and the results show that reflective and refractive objects with caustics improve the overall impression for MR scenarios.", month = mar, journal = "IEEE Transactions on Visualization and Computer Graphics (Proceedings of IEEE VR 2013)", volume = "19", number = "4", issn = "1077-2626", pages = "576--582", keywords = "Mixed Reality, Caustics, Reflections, Refractions", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/knecht_martin_2013_ReflRefrObjsMR/", } @article{fink-2013-cag, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2013", abstract = "This paper presents the syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. Teaching shader programming in concert with the low-level graphics pipeline makes it easier for our students to learn modern OpenGL with shaders in our follow-up intermediate course. We also show how to create attractive course material by using COLLADA, an open standard for 3D content exchange, and our approach to organizing the practical course.", month = feb, issn = "0097-8493", journal = "Computers & Graphics", number = "1--2", volume = "37", pages = "12--20", keywords = "teaching, programmable shading, CG education, course organization, COLLADA", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/fink-2013-cag/", } @article{arikan-2013-osn, title = "O-Snap: Optimization-Based Snapping for Modeling Architecture", author = "Murat Arikan and Michael Schw\"{a}rzler and Simon Fl\"{o}ry and Michael Wimmer and Stefan Maierhofer", year = "2013", abstract = "In this paper, we introduce a novel reconstruction and modeling pipeline to create polygonal models from unstructured point clouds. We propose an automatic polygonal reconstruction that can then be interactively refined by the user. An initial model is automatically created by extracting a set of RANSAC-based locally fitted planar primitives along with their boundary polygons, and then searching for local adjacency relations among parts of the polygons. The extracted set of adjacency relations is enforced to snap polygon elements together, while simultaneously fitting to the input point cloud and ensuring the planarity of the polygons. This optimization-based snapping algorithm may also be interleaved with user interaction. This allows the user to sketch modifications with coarse and loose 2D strokes, as the exact alignment of the polygons is automatically performed by the snapping. The generated models are coarse, offer simple editing possibilities by design and are suitable for interactive 3D applications like games, virtual environments etc. The main innovation in our approach lies in the tight coupling between interactive input and automatic optimization, as well as in an algorithm that robustly discovers the set of adjacency relations.", month = jan, journal = "ACM Transactions on Graphics", volume = "32", number = "1", issn = "0730-0301", doi = "10.1145/2421636.2421642", pages = "6:1--6:15", keywords = "interactive modeling, surface reconstruction, geometric optimization", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/arikan-2013-osn/", } @article{SCHERZER-2012-TCM, title = "Temporal Coherence Methods in Real-Time Rendering", author = "Daniel Scherzer and Lei Yang and Oliver Mattausch and Diego Nehab and Pedro V. Sander and Michael Wimmer and Elmar Eisemann", year = "2012", abstract = "Nowadays, there is a strong trend towards rendering to higher-resolution displays and at high frame rates. This development aims at delivering more detail and better accuracy, but it also comes at a significant cost. Although graphics cards continue to evolve with an ever-increasing amount of computational power, the speed gain is easily counteracted by increasingly complex and sophisticated shading computations. For real-time applications, the direct consequence is that image resolution and temporal resolution are often the first candidates to bow to the performance constraints (e.g., although full HD is possible, PS3 and XBox often render at lower resolutions). In order to achieve high-quality rendering at a lower cost, one can exploit temporal coherence (TC). The underlying observation is that a higher resolution and frame rate do not necessarily imply a much higher workload, but a larger amount of redundancy and a higher potential for amortizing rendering over several frames. In this survey, we investigate methods that make use of this principle and provide practical and theoretical advice on how to exploit temporal coherence for performance optimization. These methods not only allow incorporating more computationally intensive shading effects into many existing applications, but also offer exciting opportunities for extending high-end graphics applications to lower-spec consumer-level hardware. To this end, we first introduce the notion and main concepts of TC, including an overview of historical methods. We then describe a general approach, image-space reprojection, with several implementation algorithms that facilitate reusing shading information across adjacent frames. We also discuss data-reuse quality and performance related to reprojection techniques. Finally, in the second half of this survey, we demonstrate various applications that exploit TC in real-time rendering. ", month = dec, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "31", pages = "2378--2408", keywords = "remote rendering; sampling, perception-based rendering, occlusion culling, non-photo-realistic rendering, level-of-detail, large data visualization, image-based rendering, global illumination, frame interpolation, anti-aliasing, shadows, streaming, temporal coherance, upsampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHERZER-2012-TCM/", } @article{musialski_2012_fice, title = "A Framework for Interactive Image Color Editing", author = "Przemyslaw Musialski and Ming Cui and Jieping Ye and Anshuman Razdan and Peter Wonka", year = "2012", abstract = "We propose a new method for interactive image color replacement that creates smooth and naturally looking results with minimal user interaction. Our system expects as input a source image and rawly scribbled target color values and generates high quality results in interactive rates. To achieve this goal we introduce an algorithm that preserves pairwise distances of the signatures in the original image and simultaneously maps the color to the user defined target values. We propose efficient sub-sampling in order to reduce the computational load and adapt semi-supervised locally linear embedding to optimize the constraints in one objective function. We show the application of the algorithm on typical photographs and compare the results to other color replacement methods.", month = nov, journal = "The Visual Computer", number = "11", volume = "29", pages = "1173--1186", keywords = "interactive image editing, color manipulation, image processing, recoloring, computational photography", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski_2012_fice/", } @inproceedings{SCHWAERZLER-2012-FAS, title = "Fast Accurate Soft Shadows with Adaptive Light Source Sampling", author = "Michael Schw\"{a}rzler and Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2012", abstract = "Physically accurate soft shadows in 3D applications can be simulated by taking multiple samples from all over the area light source and accumulating them. Due to the unpredictability of the size of the penumbra regions, the required sampling density has to be high in order to guarantee smooth shadow transitions in all cases. Hence, several hundreds of shadow maps have to be evaluated in any scene configuration, making the process computationally expensive. Thus, we suggest an adaptive light source subdivision approach to select the sampling points adaptively. The main idea is to start with a few samples on the area light, evaluating there differences using hardware occlusion queries, and adding more sampling points if necessary. Our method is capable of selecting and rendering only the samples which contribute to an improved shadow quality, and hence generate shadows of comparable quality and accuracy. Even though additional calculation time is needed for the comparison step, this method saves valuable rendering time and achieves interactive to real-time frame rates in many cases where a brute force sampling method does not. ", month = nov, isbn = "978-3-905673-95-1", publisher = "Eurographics Association", location = "Magdeburg, Germany", booktitle = "Proceedings of the 17th International Workshop on Vision, Modeling, and Visualization (VMV 2012)", pages = "39--46", keywords = "soft shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/SCHWAERZLER-2012-FAS/", } @inproceedings{scheiblauer-2012-chnt, title = "Graph-based Guidance in Huge Point Clouds", author = "Claus Scheiblauer and Michael Wimmer", year = "2012", abstract = "In recent years the use of remote sensing devices like laser scanners in the documentation of excavation sites or cultural heritage sites has led to huge point cloud models from these sites. These data sets may cover complete sites including galleries, corridors, halls, and open places. Orienting oneself in the point cloud becomes a challenge, if one is not familiar with the layout of the site. Therefore we propose a graph-based guidance system to show tourists round the point cloud models. The tourists can navigate interactively through the point cloud, but they are tied to a predefined 3D graph which represents the possible ways, and which connects the points of interest.", month = nov, isbn = "978-3-200-03281-1", location = "Vienna, Austria", booktitle = "Proceedings of the 17th International Conference on Cultural Heritage and New Technologies", keywords = "user interface, navigation, point rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/scheiblauer-2012-chnt/", } @article{knecht_martin_2012_RSMR, title = "Reciprocal Shading for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Michael Wimmer", year = "2012", abstract = "In this paper we present a novel plausible rendering method for mixed reality systems, which is useful for many real-life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates. The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. In addition, we avoid artifacts like double shadows or inconsistent color bleeding which appear in previous work. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. The scene gets illuminated by virtual point lights, which use imperfect shadow maps to calculate visibility. A sufficiently fast scene reconstruction is done at run-time with Microsoft's Kinect sensor. Thus a time-consuming manual pre-modeling step of the real scene is not necessary. Our results show that the presented method highly improves the illusion in mixed-reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = nov, issn = "0097-8493", journal = "Computers & Graphics", number = "7", volume = "36", pages = "846--856", keywords = "Differential rendering, Reconstruction, Instant radiosity, Microsoft Kinect, Real-time global illumination, Mixed reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_RSMR/", } @article{schedl-2012-dof, title = "A layered depth-of-field method for solving partial occlusion", author = "David Schedl and Michael Wimmer", year = "2012", abstract = "Depth of field (DoF) represents a distance range around a focal plane, where objects on an image are crisp. DoF is one of the effects which significantly contributes to the photorealism of images and therefore is often simulated in rendered images. Various methods for simulating DoF have been proposed so far, but little tackle the issue of partial occlusion: Blurry objects near the camera are semi-transparent and result in partially visible background objects. This effect is strongly apparent in miniature and macro photography. In this work a DoF method is presented which simulates partial occlusion. The contribution of this work is a layered method where the scene is rendered into layers. Blurring is done efficiently with recursive Gaussian filters. Due to the usage of Gaussian filters big artifact-free blurring radii can be simulated at reasonable costs.", month = jun, journal = "Journal of WSCG", volume = "20", number = "3", issn = "1213-6972", pages = "239--246", keywords = "realtime, rendering, depth-of-field, layers, depth peeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/schedl-2012-dof/", } @article{MATTAUSCH-2012-TIS, title = "Tessellation-Independent Smooth Shadow Boundaries", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer and Takeo Igarashi", year = "2012", abstract = "We propose an efficient and light-weight solution for rendering smooth shadow boundaries that do not reveal the tessellation of the shadow-casting geometry. Our algorithm reconstructs the smooth contours of the underlying mesh and then extrudes shadow volumes from the smooth silhouettes to render the shadows. For this purpose we propose an improved silhouette reconstruction using the vertex normals of the underlying smooth mesh. Then our method subdivides the silhouette loops until the contours are sufficiently smooth and project to smooth shadow boundaries. This approach decouples the shadow smoothness from the tessellation of the geometry and can be used to maintain equally high shadow quality for multiple LOD levels. It causes only a minimal change to the fill rate, which is the well-known bottleneck of shadow volumes, and hence has only small overhead. ", month = jun, journal = "Computer Graphics Forum", volume = "4", number = "31", issn = "1467-8659", pages = "1465--1470", keywords = "real-time rendering, shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/MATTAUSCH-2012-TIS/", } @article{knecht_martin_2012_BRDFEstimation, title = "Interactive BRDF Estimation for Mixed-Reality Applications", author = "Martin Knecht and Georg Tanzmeister and Christoph Traxler and Michael Wimmer", year = "2012", abstract = "Recent methods in augmented reality allow simulating mutual light interactions between real and virtual objects. These methods are able to embed virtual objects in a more sophisticated way than previous methods. However, their main drawback is that they need a virtual representation of the real scene to be augmented in the form of geometry and material properties. In the past, this representation had to be modeled in advance, which is very time consuming and only allows for static scenes. We propose a method that reconstructs the surrounding environment and estimates its Bidirectional Reflectance Distribution Function (BRDF) properties at runtime without any preprocessing. By using the Microsoft Kinect sensor and an optimized hybrid CPU & GPU-based BRDF estimation method, we are able to achieve interactive frame rates. The proposed method was integrated into a differential instant radiosity rendering system to demonstrate its feasibility.", month = jun, journal = "Journal of WSCG", volume = "20", number = "1", issn = "1213-6972", pages = "47--56", keywords = "Augmented Reality, BRDF Estimation, Reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/knecht_martin_2012_BRDFEstimation/", } @inproceedings{fink-2012-cg1, title = "Teaching a Modern Graphics Pipeline Using a Shader-based Software Renderer", author = "Heinrich Fink and Thomas Weber and Michael Wimmer", year = "2012", abstract = "Shaders are a fundamental pattern of the modern graphics pipeline. This paper presents a syllabus for an introductory computer graphics course that emphasizes the use of programmable shaders while teaching raster-level algorithms at the same time. We describe a Java-based framework that is used for programming assignments in this course. This framework implements a shader-enabled software renderer and an interactive 3D editor. We also show how to create attractive course materials by using COLLADA, an open standard for 3D content exchange.", month = may, publisher = "Eurographics Association", location = "Cagliari, Italy", issn = "1017-4656", event = "Eurographics 2012", editor = "Giovanni Gallo and Beatriz Sousa Santos", booktitle = "Eurographics 2012 -- Education Papers", pages = "73--80", keywords = "Education, Collada, Java, Introductory Computer Graphics, Software Rasterizer", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/fink-2012-cg1/", } @article{Habel_2012_PSP, title = "Practical Spectral Photography", author = "Ralf Habel and Michael Kudenov and Michael Wimmer", year = "2012", abstract = "We introduce a low-cost and compact spectral imaging camera design based on unmodified consumer cameras and a custom camera objective. The device can be used in a high-resolution configuration that measures the spectrum of a column of an imaged scene with up to 0.8 nm spectral resolution, rivalling commercial non-imaging spectrometers, and a mid-resolution hyperspectral mode that allows the spectral measurement of a whole image, with up to 5 nm spectral resolution and 120x120 spatial resolution. We develop the necessary calibration methods based on halogen/fluorescent lamps and laser pointers to acquire all necessary information about the optical system. We also derive the mathematical methods to interpret and reconstruct spectra directly from the Bayer array images of a standard RGGB camera. This objective design introduces accurate spectral remote sensing to computational photography, with numerous applications in color theory, colorimetry, vision and rendering, making the acquisition of a spectral image as simple as taking a high-dynamic-range image.", month = may, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "449--458", keywords = "Computational Photography, Spectroscopy, Computed Tomography Imaging Spectrometer, Practical", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Habel_2012_PSP/", } @inproceedings{preiner_2012_AS, title = "Auto Splats: Dynamic Point Cloud Visualization on the GPU", author = "Reinhold Preiner and Stefan Jeschke and Michael Wimmer", year = "2012", abstract = "Capturing real-world objects with laser-scanning technology has become an everyday task. Recently, the acquisition of dynamic scenes at interactive frame rates has become feasible. A high-quality visualization of the resulting point cloud stream would require a per-frame reconstruction of object surfaces. Unfortunately, reconstruction computations are still too time-consuming to be applied interactively. In this paper we present a local surface reconstruction and visualization technique that provides interactive feedback for reasonably sized point clouds, while achieving high image quality. Our method is performed entirely on the GPU and in screen pace, exploiting the efficiency of the common rasterization pipeline. The approach is very general, as no assumption is made about point connectivity or sampling density. This naturally allows combining the outputs of multiple scanners in a single visualization, which is useful for many virtual and augmented reality applications.", month = may, isbn = " 978-3-905674-35-4", organization = "Eurographics Association 2012", location = "Cagliari", editor = "H. Childs and T. Kuhlen", booktitle = "Proceedings of Eurographics Symposium on Parallel Graphics and Visualization", pages = "139--148", keywords = "point clouds, surface reconstruction, point rendering, Auto Splats, KNN search, GPU rendering, point based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/preiner_2012_AS/", } @article{Auzinger_2012_AAA, title = "Analytic Anti-Aliasing of Linear Functions on Polytopes", author = "Thomas Auzinger and Michael Guthe and Stefan Jeschke", year = "2012", abstract = "This paper presents an analytic formulation for anti-aliased sampling of 2D polygons and 3D polyhedra. Our framework allows the exact evaluation of the convolution integral with a linear function defined on the polytopes. The filter is a spherically symmetric polynomial of any order, supporting approximations to refined variants such as the Mitchell-Netravali filter family. This enables high-quality rasterization of triangles and tetrahedra with linearly interpolated vertex values to regular and non-regular grids. A closed form solution of the convolution is presented and an efficient implementation on the GPU using DirectX and CUDA C is described.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "335--344", keywords = "Polytope, Filter Design, Analytic Anti-Aliasing, Sampling, Integral Formula, Spherically Symmetric Filter, CUDA, Closed Form Solution, 2D 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_AAA/", } @article{musialski-2012-icb, title = "Interactive Coherence-Based Fa\c{c}ade Modeling", author = "Przemyslaw Musialski and Michael Wimmer and Peter Wonka", year = "2012", abstract = "We propose a novel interactive framework for modeling building fa\c{c}ades from images. Our method is based on the notion of coherence-based editing which allows exploiting partial symmetries across the fa\c{c}ade at any level of detail. The proposed workflow mixes manual interaction with automatic splitting and grouping operations based on unsupervised cluster analysis. In contrast to previous work, our approach leads to detailed 3d geometric models with up to several thousand regions per fa\c{c}ade. We compare our modeling scheme to others and evaluate our approach in a user study with an experienced user and several novice users.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "661--670", keywords = "facade modeling, urban modeling, facade reconstruction, image-based modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-icb/", } @inproceedings{musialski-2012-sur, title = "A Survey of Urban Reconstruction", author = "Przemyslaw Musialski and Peter Wonka and Daniel G. Aliaga and Michael Wimmer and Luc van Gool and Werner Purgathofer", year = "2012", abstract = "This paper provides a comprehensive overview of urban reconstruction. While there exists a considerable body of literature, this topic is still under very active research. The work reviewed in this survey stems from the following three research communities: computer graphics, computer vision, and photogrammetry and remote sensing. Our goal is to provide a survey that will help researchers to better position their own work in the context of existing solutions, and to help newcomers and practitioners in computer graphics to quickly gain an overview of this vast field. Further, we would like to bring the mentioned research communities to even more interdisciplinary work, since the reconstruction problem itself is by far not solved. ", month = may, booktitle = "EUROGRAPHICS 2012 State of the Art Reports", location = "Cagliari, Sardinia, Italy", publisher = "Eurographics Association", series = "EG STARs", pages = "1--28", keywords = "facade modeling, structure from motion, multi-view stereo, urban reconstruction, inverse-procedural modeling, urban modeling, image-based modeling, city reconstruction, state-of-the-art report", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/musialski-2012-sur/", } @inproceedings{Purgathofer_2012_IVA, title = "INTERACTIVE VISUAL ANALYSIS OF INTENSIVE CARE UNIT DATA: Relationship Between Serum Sodium Concentration, its Rate of Change, and Survival Outcome", author = "Kresimir Matkovic and H. Gan and Andreas Ammer and D. Bennett and Werner Purgathofer and Marius Terblanche", year = "2012", abstract = "In this paper we present a case study of interactive visual analysis and exploration of a large ICU data set. The data consists of patients’ records containing scalar data representing various patients’ parameters (e.g. gender, age, weight), and time series data describing logged parameters over time (e.g. heart rate, blood pressure). Due to the size and complexity of the data, coupled with limited time and resources, such ICU data is often not utilized to its full potential, although its analysis could contribute to a better understanding of physiological, pathological and therapeutic processes, and consequently lead to an improvement of medical care. During the exploration of this data we identified several analysis tasks and adapted and improved a coordinated multiple views system accordingly. Besides a curve view which also supports time series with gaps, we introduced a summary view which allows an easy comparison of subsets of the data and a box plot view in a coordinated multiple views setup. Furthermore, we introduced an inverse brush, a secondary brush which automatically selects non-brushed items, and updates itself accordingly when the original brush is modified. The case study describes how we used the system to analyze data from 1447 patients from the ICU at Guy’s & St. Thomas’ NHS Foundation Trust in London. We were interested in the relationship between serum sodium concentration, its rate of change and their effect on ICU mortality rates. The interactive visual analysis led us to findings which were fascinating for medical experts, and which would be very difficult to discover using conventional analysis methods usually applied in the medical field. The overall feedback from domain experts (coauthors of the paper) is very positive.", month = feb, location = "Rome, Italy", event = "IVAPP 2012", booktitle = "Proceeding of IVAAP 2012", journal = "Proceedings of IVAPP 2012 / SciTePress", pages = "648--659", keywords = "Coordinated Multiple Views, Intensive Care Unit Data, Interactive Visual Analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Purgathofer_2012_IVA/", } @article{liu-2012-tcvd, title = "Tensor Completion for Estimating Missing Values in Visual Data", author = "Ji Liu and Przemyslaw Musialski and Peter Wonka and Jieping Ye", year = "2012", abstract = "In this paper we propose an algorithm to estimate missing values in tensors of visual data. Our methodology is built on recent studies about matrix completion using the matrix trace norm. The contribution of our paper is to extend the matrix case to the tensor case by proposing the first definition of the trace norm for tensors and then by building a working algorithm. First, we propose a definition for the tensor trace norm, that generalizes the established definition of the matrix trace norm. Second, similar to matrix completion, the tensor completion is formulated as a convex optimization problem. We developed three algorithms: SiLRTC, FaLRTC, and HaLRTC. The SiLRTC algorithm is simple to implement and employs a relaxation technique to separate the dependant relationships and uses the block coordinate descent (BCD) method to achieve a globally optimal solution; The FaLRTC algorithm utilizes a smoothing scheme to transform the original nonsmooth problem into a smooth one; The HaLRTC algorithm applies the alternating direction method of multipliers (ADMM) to our problem. Our experiments show potential applications of our algorithms and the quantitative evaluation indicates that our methods are more accurate and robust than heuristic approaches.", month = jan, issn = "0162-8828", journal = "IEEE Transactions on Pattern Analysis & Machine Intelligence (PAMI)", number = "1", volume = "35", pages = "208--220", keywords = "matrix completion, trace norm, tensor completion", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/liu-2012-tcvd/", } @article{bernhard-2011-bmtf, title = "Bi-modal Task Faciliation in a Virtual Traffic Scenario through Spatialized Sound Rendering ", author = "Matthias Bernhard and Karl Grosse and Michael Wimmer", year = "2011", abstract = "Audio rendering is generally used to increase the realism of Virtual Environments (VE). In addition, audio rendering may also improve the performance in specific tasks carried out in interactive applications such as games or simulators. In this paper we investigate the effect of the quality of sound rendering on task performance in a task which is inherently vision dominated. The task is a virtual traffic gap crossing scenario with two elements: first, to discriminate crossable and uncrossable gaps in oncoming traffic, and second, to find the right timing to start crossing the street without an accident. A study was carried out with 48 participants in an immersive Virtual Environment setup with a large screen and headphones. Participants were grouped into three different conditions. In the first condition, spatialized audio rendering with head-related transfer function (HRTF) filtering was used. The second group was tested with conventional stereo rendering, and the remaining group ran the experiment in a mute condition. Our results give a clear evidence that spatialized audio improves task performance compared to the unimodal mute condition. Since all task-relevant information was in the participants' field-of-view, we conclude that an enhancement of task performance results from a bimodal advantage due to the integration of visual and auditory spatial cues.", month = nov, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", note = "Article No. 24", number = "4", volume = "8", pages = "1--22", keywords = "bimodal task faciliation, pedestrian safety, virtual environments, audio-visual perception, head related transfer functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-bmtf/", } @inproceedings{scheiblauer-2011-chnt, title = "Consolidated Visualization of Enormous 3D Scan Point Clouds with Scanopy", author = "Claus Scheiblauer and Michael Pregesbauer", year = "2011", abstract = "Terrestrial laser scanners are used at many excavations for documentation and inventory taking. For the documentation of the excavation Amphitheatre 1 in Bad Deutsch-Altenburg, Austria, the terrestrial laser scanner Riegl LMS Z420i was used. Overall, the entire excavation was covered by about 200 positions. With each scan position the amount of data recorded increases and this leads to performance problems in the visualization of the data. Due to the enormous number of points a consolidated representation of the entire point cloud is not possible with conventional software. The software Scanopy was developed for presenting and editing huge amounts of point data. Furthermore, it is possible to load polygonal models and display them together with point clouds in one scene. This allows an exact documentation of large archaeological excavation sites. Another application is the visualization of polygonal models of different excavation levels. The simplest visualization of point clouds on screen is a representation of the points as square rectangles. This, however, creates geometric inaccuracies, and colored point clouds are presented with sudden color changes. When the points are displayed by using semi-transparent circles the 3D points can be blended and lead to a more homogenous visual representation of the scanned objects. Thus the visual impression of the point cloud will be improved considerably. The developed software is demonstrated on the excavation mentioned above.", month = nov, isbn = "978-3-200-02740-4", location = "Vienna, Austria", booktitle = "Proceedings of the 16th International Conference on Cultural Heritage and New Technologies", pages = "242--247", keywords = "point rendering, resampling, out-of-core", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scheiblauer-2011-chnt/", } @inproceedings{KUE11, title = "BRDF approximation and estimation for Augmented Reality", author = "Patrick K\"{u}htreiber and Martin Knecht and Christoph Traxler", year = "2011", abstract = "In Augmented Reality applications it is important to have a good description of the surfaces of real objects if a consistent shading between real and virtual object is required. If such a description of a surface is not vailable it has to be estimated or approximated. In our paper we will present certain methods that deal with real-time bi-directional reflectance distribution function (BRDF) approximation in augmented reality. Of course an important thing to discuss is whether the applications we present all work in real-time and compute good (and real)looking results. There are different methods on how to achieve this goal. All of the methods we are going to present work via image based lighting and some require a 3D polygonal mesh representation of the object whose BRDF shall be approximated. Some methods estimate the BRDF parameters via error values and provide results at each iteration.", month = oct, organization = ""Gheorghe Asachi" Technical University of Iasi, Faculty of Automatic Control and Computer Engineering", location = "Sinaia, Romania", booktitle = "15th International Conference on System Theory, Control and Computing", pages = "318--324", keywords = "Mixed Reality, BRDF Estimation", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/KUE11/", } @inproceedings{knecht-2011-CBCM, title = "Adaptive Camera-Based Color Mapping For Mixed-Reality Applications", author = "Martin Knecht and Christoph Traxler and Werner Purgathofer and Michael Wimmer", year = "2011", abstract = "We present a novel adaptive color mapping method for virtual objects in mixed-reality environments. In several mixed-reality applications, added virtual objects should be visually indistinguishable from real objects. Recent mixed-reality methods use global-illumination algorithms to approach this goal. However, simulating the light distribution is not enough for visually plausible images. Since the observing camera has its very own transfer function from real-world radiance values to RGB colors, virtual objects look artificial just because their rendered colors do not match with those of the camera. Our approach combines an on-line camera characterization method with a heuristic to map colors of virtual objects to colors as they would be seen by the observing camera. Previous tone-mapping functions were not designed for use in mixed-reality systems and thus did not take the camera-specific behavior into account. In contrast, our method takes the camera into account and thus can also handle changes of its parameters during runtime. The results show that virtual objects look visually more plausible than by just applying tone-mapping operators.", month = oct, isbn = "978-1-4577-2183-0 ", publisher = "IEEE/IET Electronic Library (IEL), IEEE-Wiley eBooks Library, VDE VERLAG Conference Proceedings", note = "E-ISBN: 978-1-4577-2184-7", location = "Basel, Switzerland", booktitle = "Proceedings of the 2011 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2011)", pages = "165--168", keywords = "Color Matching, Differential Rendering, Mixed Reality, Tone Mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht-2011-CBCM/", } @article{mayer_2011_cipa, title = "Virtual Texturing in the Documentation of Cultural Heritage", author = "Irmengard Mayer and Claus Scheiblauer and Albert Julian Mayer", year = "2011", abstract = "In the last decade the documentation of cultural heritage by means of laser range scanning and photogrammetric techniques has gained ever more importance. The amount of data collected by these means may be huge, and adequate presentation of 3D documented cultural heritage is still a challenge. For small and limited projects consisting of only a few range scans, the software provided with the laser scanner can be used for viewing and presenting the data. Large projects, consisting of hundreds of scan positions as well as projects where more and more data are collected over time, still have to deal with a massive reduction of the 3D data for presentation. Public demonstrations in museums, as for example shown by the Digital Michelangelo project, are already state of the art. The combination of huge point-base models and mesh models with high resolution textures in one viewer, the first type of models resulting from the data of laser range scans and the second type of models resulting from a photogrammetric reconstruction process, is still not available. Currently viewers are mostly limited to show models that are based on only one geometric primitive – either points or polygons – at once. In the FWF funded START project “The Domitilla Catacomb in Rome. Archaeology, Architecture and Art History of a Late Roman Cemetery” – which is running for 5 years now – 3D point data was collected for the geometrical documentation of the vast gallery system of the Domitilla Catacomb, resulting in point data of some 2 billion (10^9) point samples. Furthermore high quality textured mesh models of the nearly 90 late Roman / early Christian paintings were generated with photogrammetric tools. In close cooperation with the Institute of Computer Graphics and Algorithms of the Vienna University of Technology the point cloud viewer Scanopy was improved for the combined presentation of huge point clouds and high quality textured mesh models in the same viewer. Our viewer is already capable of rendering huge point clouds, so for this a method to manage the vast amount of textures had to be found. Therefore we integrated a virtual texturing algorithm, which allows using the original photographs of the paintings taken on site to be mapped to the mesh models, resulting in a high quality texture for all mesh models. The photographs have a resolution of 11 Megapixels. Due to shortcomings in the programs used in the photogrammetric processing pipeline we scaled down the photographs to a 7.3 Megapixel resolution. Currently 608 of these images are used for texturing 29 mesh models. The work on the mesh models is still ongoing, and when all mesh models will be completed, we will use some 2000 images for texturing about 90 mesh models. These virtually textured models can show the details of each painting in the Domitilla Catacomb. When used in a virtual walkthrough the paintings in the catacomb can be presented to a broad audience under best lighting conditions, even the paintings normally not accessible by the public.", month = sep, journal = "Geoinformatics FCE CTU", volume = "7", issn = "1802-2669", keywords = "virtual texturing, out-of-core, streaming, point clouds", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/mayer_2011_cipa/", } @inproceedings{bernhard-2011-maicg, title = "Manipulating Attention in Computer Games", author = "Matthias Bernhard and Le Zhang and Michael Wimmer", year = "2011", abstract = "In computer games, a user’s attention is focused on the current task, and task-irrelevant details remain unnoticed. This behavior, known as inattentional blindness, is a main problem for the optimal placement of information or advertisements. We propose a guiding principle based on Wolfe’s theory of Guided Search, which predicts the saliency of objects during a visual search task. Assuming that computer games elicit visual search tasks frequently, we applied this model in a “reverse” direction: Given a target item (e.g., advertisement) which should be noticed by the user, we choose a frequently searched game item and modify it so that it shares some perceptual features (e.g., color or orientation) with the target item. A memory experiment with 36 participants showed that in an action video game, advertisements were more noticeable to users when this method is applied.", month = jun, isbn = "9781457712852", publisher = "IEEE", location = "Ithaca, NY", booktitle = "Proceedings of the IEEE IVMSP Workshop on Perception and Visual Signal Analysis", pages = "153--158", keywords = "saliency, attention guidance, inattentional blindness, in-game advertising, guided search", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bernhard-2011-maicg/", } @article{lipp2011a, title = "Interactive Modeling of City Layouts using Layers of Procedural Content", author = "Markus Lipp and Daniel Scherzer and Peter Wonka and Michael Wimmer", year = "2011", abstract = "In this paper, we present new solutions for the interactive modeling of city layouts that combine the power of procedural modeling with the flexibility of manual modeling. Procedural modeling enables us to quickly generate large city layouts, while manual modeling allows us to hand-craft every aspect of a city. We introduce transformation and merging operators for both topology preserving and topology changing transformations based on graph cuts. In combination with a layering system, this allows intuitive manipulation of urban layouts using operations such as drag and drop, translation, rotation etc. In contrast to previous work, these operations always generate valid, i.e., intersection-free layouts. Furthermore, we introduce anchored assignments to make sure that modifications are persistent even if the whole urban layout is regenerated. ", month = apr, journal = "Computer Graphics Forum (Proceedings EG 2011)", volume = "30", number = "2", issn = "0167-7055", pages = "345--354", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/lipp2011a/", } @article{jeschke-2011-est, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texure, and we provide intuitive manual controls to edit the parameters of the Gabor noise. Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling an hatching applications.", month = apr, journal = "Computer Graphics Forum", volume = "30", number = "2", note = "This paper won the 2nd best paper award at Eurographics 2011.", issn = "0167-7055", pages = "523--532", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-est/", } @article{scheiblauer-2011-cag, title = "Out-of-Core Selection and Editing of Huge Point Clouds", author = "Claus Scheiblauer and Michael Wimmer", year = "2011", abstract = "In this paper we present an out-of-core editing system for point clouds, which allows selecting and modifying arbitrary parts of a huge point cloud interactively. We can use the selections to segment the point cloud, to delete points, or to render a preview of the model without the points in the selections. Furthermore we allow for inserting points into an already existing point cloud. All operations are conducted on a rendering optimized data structure that uses the raw point cloud from a laser scanner, and no additionally created points are needed for an ecient level-of-detail (LOD) representation using this data structure. We also propose an algorithm to alleviate the artifacts when rendering a point cloud with large discrepancies in density in dierent areas by estimating point sizes heuristically. These estimated point sizes can be used to mimic a closed surface on the raw point cloud, also when the point cloud is composed of several raw laser scans.", month = apr, issn = "0097-8493", journal = "Computers & Graphics", number = "2", volume = "35", pages = "342--351", keywords = "Graphics data structures and data types, Viewing algorithms, Point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scheiblauer-2011-cag/", } @inproceedings{knecht_martin-2011-FPSPAR, title = "A Framework For Perceptual Studies In Photorealistic Augmented Reality", author = "Martin Knecht and Andreas D\"{u}nser and Christoph Traxler and Michael Wimmer and Raphael Grasset", year = "2011", abstract = "In photorealistic augmented reality virtual objects are integrated in the real world in a seamless visual manner. To obtain a perfect visual augmentation these objects must be rendered indistinguishable from real objects and should be perceived as such. In this paper we propose a research test bed framework to study the different unresolved perceptual issues in photorealistic augmented reality and its application to different disciplines. The framework computes a global illumination approximation in real-time and therefore leverages a new class of experimental research topics.", month = mar, location = "Singapore", editor = "Frank Steinicke, Pete Willemsen", booktitle = "Proceedings of the 3rd IEEE VR 2011 Workshop on Perceptual Illusions in Virtual Environments", pages = "27--32", keywords = "photorealistic augmented reality, real-time global illumination, human perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/knecht_martin-2011-FPSPAR/", } @article{scherzer2011d, title = "A Survey of Real-Time Hard Shadow Mapping Methods", author = "Daniel Scherzer and Michael Wimmer and Werner Purgathofer", year = "2011", abstract = "Due to its versatility, speed and robustness, shadow mapping has always been a popular algorithm for fast hard shadow generation since its introduction in 1978, first for off-line film productions and later increasingly so in real-time graphics. So it is not surprising that recent years have seen an explosion in the number of shadow map related publications. The last survey that encompassed shadow mapping approaches, but was mainly focused on soft shadow generation, dates back to 2003~cite{HLHS03}, while the last survey for general shadow generation dates back to 1990~cite{Woo:1990:SSA}. No survey that describes all the advances made in hard shadow map generation in recent years exists. On the other hand, shadow mapping is widely used in the game industry, in production, and in many other applications, and it is the basis of many soft shadow algorithms. Due to the abundance of articles on the topic, it has become very hard for practitioners and researchers to select a suitable shadow algorithm, and therefore many applications miss out on the latest high-quality shadow generation approaches. The goal of this survey is to rectify this situation by providing a detailed overview of this field. We provide a detailed analysis of shadow mapping errors and derive a comprehensive classification of the existing methods. We discuss the most influential algorithms, consider their benefits and shortcomings and thereby provide the readers with the means to choose the shadow algorithm best suited to their needs. ", month = feb, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "30", pages = "169--186", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/scherzer2011d/", } @inproceedings{bittner-2011-scc, title = "Shadow Caster Culling for Efficient Shadow Mapping", author = "Jir\'{i} Bittner and Oliver Mattausch and Ari Silvennoinen and Michael Wimmer", year = "2011", abstract = "We propose a novel method for efficient construction of shadow maps by culling shadow casters which do not contribute to visible shadows. The method uses a mask of potential shadow receivers to cull shadow casters using a hierarchical occlusion culling algorithm. We propose several variants of the receiver mask implementations with different culling efficiency and computational costs. For scenes with statically focused shadow maps we designed an efficient strategy to incrementally update the shadow map, which comes close to the rendering performance for unshadowed scenes. We show that our method achieves 3x-10x speedup for rendering large city like scenes and 1.5x-2x speedup for rendering an actual game scene.", month = feb, isbn = "978-1-4503-0565-5", publisher = "ACM", organization = "ACM SIGGRAPH", location = "San Francisco", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2011", pages = "81--88", keywords = "occlusion culling, shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/bittner-2011-scc/", } @article{ohrhallinger_stefan-2011-001, title = "Interpolating an unorganized 2D point cloud with a single closed shape", author = "Stefan Ohrhallinger and Sudhir Mudur", year = "2011", abstract = "Given an unorganized two-dimensional point cloud, we address the problem of efficiently constructing a single aesthetically pleasing closed interpolating shape, without requiring dense or uniform spacing. Using Gestalt’s laws of proximity, closure and good continuity as guidance for visual aesthetics, we require that our constructed shape be a minimal perimeter, non-self intersecting manifold. We find that this yields visually pleasing results. Our algorithm is distinct from earlier shape reconstruction approaches, in that it exploits the overlap between the desired shape and a related minimal graph, the Euclidean Minimum Spanning Tree (EMST). Our algorithm segments the EMST to retain as much of it as required and then locally partitions and solves the problem efficiently. Comparison with some of the best currently known solutions shows that our algorithm yields better results. ", month = jan, issn = "0010-4485", journal = "Computer-Aided Design", number = "1", volume = "43", pages = "1629--1638", keywords = "EMST, Curve, Point cloud, Reconstruction, Shape Construction, Boundary, Computational geometry, Point set", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/ohrhallinger_stefan-2011-001/", } @article{mattausch-2010-tao, title = "High-Quality Screen-Space Ambient Occlusion using Temporal Coherence", author = "Oliver Mattausch and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "Ambient occlusion is a cheap but effective approximation of global illumination. Recently, screen-space ambient occlusion (SSAO) methods, which sample the frame buffer as a discretization of the scene geometry, have become very popular for real-time rendering. We present temporal SSAO (TSSAO), a new algorithm which exploits temporal coherence to produce high-quality ambient occlusion in real time. Compared to conventional SSAO, our method reduces both noise as well as blurring artifacts due to strong spatial filtering, faithfully representing fine-grained geometric structures. Our algorithm caches and reuses previously computed SSAO samples, and adaptively applies more samples and spatial filtering only in regions that do not yet have enough information available from previous frames. The method works well for both static and dynamic scenes.", month = dec, issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "29", pages = "2492--2503", keywords = "temporal coherence, ambient occlusion, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/mattausch-2010-tao/", } @inproceedings{scherzer2010e, title = "An Overview of Temporal Coherence Methods in Real-Time Rendering ", author = "Daniel Scherzer", year = "2010", abstract = "Most of the power of modern graphics cards is put into the acceleration of shading tasks because here lies the major bottleneck for most sophisticated real-time algorithms. By using temporal coherence, i.e. reusing shading information from a previous frame, this problem can be alleviated. This paper gives an overview of the concepts of temporal coherence in real-time rendering and should give the reader the working practical and theoretical knowledge to exploit temporal coherence in his own algorithms. ", month = oct, organization = "IEEE", location = "Sinaia, Romania", issn = "2068-0465", booktitle = " 14th International Conference on System Theory and Control 2010", pages = "497--502", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/scherzer2010e/", } @inproceedings{Habel_RAV_2010, title = "Real-Time Rendering and Animation of Vegetation", author = "Ralf Habel", year = "2010", abstract = "Vegetation in all its different forms is almost always part of a scenery, be it fully natural or urban. Even in completely cultivated areas or indoor scenes, though not very dominant, potted plants or alley trees and patches of grass are usually part of a surrounding. Rendering and animating vegetation is substantially different from rendering and animating geometry with less geometric complexity such as houses, manufactured products or other objects consisting of largely connected surfaces. In this paper we will discuss several challenges posed by vegetation in real-time applications such as computer games and virtual reality applications and show efficient solutions to the problems.", month = oct, location = "Sinaia", issn = "2068-0465", booktitle = "14th International Conference on System Theory and Control (Joint conference of SINTES14, SACCS10, SIMSIS14)", pages = "231--236", keywords = "Animation, Real-Time Rendering, Vegetation", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel_RAV_2010/", } @article{LIPP-2010-PGMS, title = "Parallel Generation of Multiple L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2010", abstract = "This paper introduces a solution to compute L-systems on parallel architectures like GPUs and multi-core CPUs. Our solution can split the derivation of the L-system as well as the interpretation and geometry generation into thousands of threads running in parallel. We introduce a highly parallel algorithm for L-system evaluation that works on arbitrary L-systems, including parametric productions, context sensitive productions, stochastic production selection, and productions with side effects. This algorithm is further extended to allow evaluation of multiple independent L-systems in parallel. In contrast to previous work, we directly interpret the productions defined in plain-text, without requiring any compilation or transformation step (e.g., into shaders). Our algorithm is efficient in the sense that it requires no explicit inter-thread communication or atomic operations, and is thus completely lock free.", month = oct, issn = "0097-8493", journal = "Computers & Graphics", number = "5", volume = "34", pages = "585--593", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/LIPP-2010-PGMS/", } @article{bernhard-2010-gph, title = "An Empirical Pipeline to Derive Gaze Prediction Heuristics for 3D Action Games", author = "Matthias Bernhard and Efstathios Stavrakis and Michael Wimmer", year = "2010", abstract = "Gaze analysis and prediction in interactive virtual environments, such as games, is a challenging topic since the 3D perspective and variations of the viewpoint as well as the current task introduce many variables that affect the distribution of gaze. In this article, we present a novel pipeline to study eye-tracking data acquired from interactive 3D applications. The result of the pipeline is an importance map which scores the amount of gaze spent on each object. This importance map is then used as a heuristic to predict a user’s visual attention according to the object properties present at runtime. The novelty of this approach is that the analysis is performed in object space and the importance map is defined in the feature space of high-level properties. High-level properties are used to encode task relevance and other attributes, such as eccentricity, which may have an impact on gaze behavior. The pipeline has been tested with an exemplary study on a first-person shooter game. In particular, a protocol is presented describing the data acquisition procedure, the learning of different importance maps from the data, and finally an evaluation of the performance of the derived gaze predictors. A metric measuring the degree of correlation between attention predicted by the importance map and the actual gaze yielded clearly positive results. The correlation becomes particularly strong when the player is attentive to an in-game task.", month = oct, issn = "1544-3558", journal = "ACM Transactions on Applied Perception", number = "1", volume = "8", pages = "4:1--4:30", keywords = "gaze predictor, video games, virtual environments, eye-tracking, gaze analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bernhard-2010-gph/", } @inproceedings{knecht_martin_2010_DIR, title = "Differential Instant Radiosity for Mixed Reality", author = "Martin Knecht and Christoph Traxler and Oliver Mattausch and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "In this paper we present a novel plausible realistic rendering method for mixed reality systems, which is useful for many real life application scenarios, like architecture, product visualization or edutainment. To allow virtual objects to seamlessly blend into the real environment, the real lighting conditions and the mutual illumination effects between real and virtual objects must be considered, while maintaining interactive frame rates (20-30fps). The most important such effects are indirect illumination and shadows cast between real and virtual objects. Our approach combines Instant Radiosity and Differential Rendering. In contrast to some previous solutions, we only need to render the scene once in order to find the mutual effects of virtual and real scenes. The dynamic real illumination is derived from the image stream of a fish-eye lens camera. We describe a new method to assign virtual point lights to multiple primary light sources, which can be real or virtual. We use imperfect shadow maps for calculating illumination from virtual point lights and have significantly improved their accuracy by taking the surface normal of a shadow caster into account. Temporal coherence is exploited to reduce flickering artifacts. Our results show that the presented method highly improves the illusion in mixed reality applications and significantly diminishes the artificial look of virtual objects superimposed onto real scenes.", month = oct, note = "Best Paper Award!", location = "Seoul", booktitle = "Proceedings of the 2010 IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2010)", pages = "99--107", keywords = "Instant Radiosity, Differential Rendering, Real-time Global Illumination, Mixed Reality", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/knecht_martin_2010_DIR/", } @article{bagar2010, title = "A Layered Particle-Based Fluid Model for Real-Time Rendering of Water", author = "Florian Bagar and Daniel Scherzer and Michael Wimmer", year = "2010", abstract = "We present a physically based real-time water simulation and rendering method that brings volumetric foam to the real-time domain, significantly increasing the realism of dynamic fluids. We do this by combining a particle-based fluid model that is capable of accounting for the formation of foam with a layered rendering approach that is able to account for the volumetric properties of water and foam. Foam formation is simulated through Weber number thresholding. For rendering, we approximate the resulting water and foam volumes by storing their respective boundary surfaces in depth maps. This allows us to calculate the attenuation of light rays that pass through these volumes very efficiently. We also introduce an adaptive curvature flow filter that produces consistent fluid surfaces from particles independent of the viewing distance.", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2010)", volume = "29", number = "4", issn = "0167-7055", pages = "1383--1389", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/bagar2010/", } @inproceedings{ilcik-2010-ps, title = "Procedural Skeletons: Kinematic Extensions to CGA-Shape Grammars", author = "Martin Il\v{c}\'{i}k and Stefan Fiedler and Werner Purgathofer and Michael Wimmer", year = "2010", abstract = "Procedural modeling for architectural scenes was as yet limited to static objects only. We introduce a novel extension layer for shape grammars which creates a skeletal system for posing and interactive manipulation of generated models. Various models can be derived with the same set of parametrized rules for geometric operations. Separation of geometry generation and pose synthesis improves design efficiency and reusability. Moreover, by formal analysis of production rules we show how to efficiently update complex kinematic hierarchies created by the skeletons, allowing state-of-the-art interactive visual rule editing.", month = may, isbn = "978-80-223-2644-5", publisher = "Comenius University, Bratislava", booktitle = "Proceedings of the Spring Conference on Computer Graphics 2010", pages = "177--184", keywords = "procedural modeling, shape grammars, architecture, skeletal animation", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/ilcik-2010-ps/", } @inproceedings{Habel-2010-EIN, title = "Efficient Irradiance Normal Mapping", author = "Ralf Habel and Michael Wimmer", year = "2010", abstract = "Irradiance normal mapping is a method to combine two popular techniques, light mapping and normal mapping, and is used in games such as Half-Life 2 or Halo 3. This combination allows using low-resolution light caching on surfaces with only a few coefficients which are evaluated by normal maps to render spatial high-frequency changes in the lighting. Though there are dedicated bases for this purpose such as the Half-Life 2 basis, higher order basis functions such as quadratic Spherical Harmonics are needed for an accurate representation. However, a full spherical basis is not needed since the irradiance is stored on the surface of a scene. In order to represent the irradiance signals efficiently, we propose a novel polynomial, hemispherically orthonormal basis function set that is specifically designed to carry a directional irradiance signal on the hemisphere and which makes optimal use of the number of coefficients. To compare our results with previous work, we analyze the relations and attributes of previously proposed basis systems and show that 6 coefficients are sufficient to accurately represent an irradiance signal on the hemisphere. To create the necessary irradiance signals, we use Spherical Harmonics as an intermediate basis due to their fast filtering capabilities.", month = feb, isbn = "978-1-60558-939-8", publisher = "ACM", location = "Washington D.C.", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2010", pages = "189--195", keywords = "irradiance, real-time rendering, normal mapping, lightmap", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/Habel-2010-EIN/", } @article{preiner_2010_GIPC, title = "Real-Time Global Illumination for Point Cloud Scenes", author = "Reinhold Preiner and Michael Wimmer", year = "2010", abstract = "In this paper we present a real-time global illumination approach for illuminating scenes containing large point clouds. Our approach is based on the distribution of Virtual Point Lights (VPLs) in the scene, which are then used for the indirect illumination of the visible surfaces, using Imperfect Shadow Maps for visibility calculation of the VPLs. We are able to render multiple indirect light bounces, where each light bounce accounts for the transport of both the diffuse and the specular fraction of the reflected light.", journal = "Computer Graphics & Geometry", number = "1", volume = "12", pages = "2--16", keywords = "virtual point lights, imperfect shadow maps, point rendering, point clouds, global illumination, VPL, ISM", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/preiner_2010_GIPC/", } @article{purgathofer-2010-cgtrends, title = "Current Trends in Computer Graphics", author = "Werner Purgathofer and Robert F. Tobler", year = "2010", abstract = "In this paper we give an overview of the current research trends and explore the challenges in several subfields of the scientific discipline of computer graphics: interactive and photorealistic rendering, scientific and information visualization, and visual analytics. Five challenges are extracted that play a role in each of these areas: scalability, semantics, fusion, interaction, acquisition. Of course, not all of these issues are disjunct to each other, however the chosen structure allows for a easy to follow overview of the concrete future challenges.", issn = "1220 - 2169", journal = "Buletinul Institutului Politehnic din Iaşi", number = "2", pages = "9--24", keywords = "visualization, rendering, computer graphics, challenges, computer vision", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/purgathofer-2010-cgtrends/", } @inproceedings{SSMW09, title = "Real-Time Soft Shadows Using Temporal Coherence", author = "Daniel Scherzer and Michael Schw\"{a}rzler and Oliver Mattausch and Michael Wimmer", year = "2009", abstract = "A vast amount of soft shadow map algorithms have been presented in recent years. Most use a single sample hard shadow map together with some clever filtering technique to calculate perceptually or even physically plausible soft shadows. On the other hand there is the class of much slower algorithms that calculate physically correct soft shadows by taking and combining many samples of the light. In this paper we present a new soft shadow method that combines the benefits of these approaches. It samples the light source over multiple frames instead of a single frame, creating only a single shadow map each frame. Where temporal coherence is low we use spatial filtering to estimate additional samples to create correct and very fast soft shadows. ", month = dec, isbn = "978-3642103308", series = "Lecture Notes in Computer Science", publisher = "Springer", location = "Las Vegas, Nevada, USA", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Kuno, Y.; Wang, J.; Pajarola, R.; Lindstrom, P.; Hinkenjann, A.; Encarnacao, M.; Silva, C.; Coming, D.", booktitle = "Advances in Visual Computing: 5th International Symposium on Visual Computing (ISVC 2009)", pages = "13--24", keywords = "real-time rendering, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SSMW09/", } @article{jeschke-09-solver, title = "A GPU Laplacian Solver for Diffusion Curves and Poisson Image Editing", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "We present a new Laplacian solver for minimal surfaces—surfaces having a mean curvature of zero everywhere except at some fixed (Dirichlet) boundary conditions. Our solution has two main contributions: First, we provide a robust rasterization technique to transform continuous boundary values (diffusion curves) to a discrete domain. Second, we define a variable stencil size diffusion solver that solves the minimal surface problem. We prove that the solver converges to the right solution, and demonstrate that it is at least as fast as commonly proposed multigrid solvers, but much simpler to implement. It also works for arbitrary image resolutions, as well as 8 bit data. We show examples of robust diffusion curve rendering where our curve rasterization and diffusion solver eliminate the strobing artifacts present in previous methods. We also show results for real-time seamless cloning and stitching of large image panoramas.", month = dec, journal = "Transaction on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Poisson equation, Line and Curve rendering , Diffusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-solver/", } @article{jeschke-09-rendering, title = "Rendering Surface Details with Diffusion Curves", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. This paper extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically reallocates texture space so that object parts that appear large on screen get more texture for increased detail. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. Our results show high quality renderings of diffusion curve textures, displacements, and geometry images, all rendered interactively.", month = dec, journal = "Transaction on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Geometry images, Displacement mapping, Diffusion curves, Line and Curve rendering ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-rendering/", } @inproceedings{LIPP-2009-PGL, title = "Parallel Generation of L-Systems", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2009", abstract = "This paper introduces a solution to compute L-systems on parallel architectures like GPUs and multi-core CPUs. Our solution can split the derivation of the L-system as well as the interpretation and geometry generation into thousands of threads running in parallel. We introduce a highly parallel algorithm for L-system evaluation that works on arbitrary L-systems, including parametric productions, context sensitive productions, stochastic production selection, and productions with side effects. Further we directly interpret the productions defined in plain-text, without requiring any compilation or transformation step (e.g., into shaders). Our algorithm is efficient in the sense that it requires no explicit inter-thread communication or atomic operations, and is thus completely lock free.", month = nov, isbn = "978-3980487481", location = "Braunschweig", editor = "Marcus Magnor, Bodo Rosenhahn, Holger Theisel", booktitle = "Vision, Modeling, and Visualization Workshop (VMV) 2009 ", pages = "205--214", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/LIPP-2009-PGL/", } @inproceedings{SCHEIBLAUER-2009-IDCE, title = "Interactive Domitilla Catacomb Exploration", author = "Claus Scheiblauer and Norbert Zimmermann and Michael Wimmer", year = "2009", abstract = "In this paper we present an approach for interactive visualization and manipulation of huge point clouds. Archaeological monuments like the Domitilla Catacomb in Rome lead to data sets surpassing 1 Billion points or 20GB of storage space, which makes standard techniques like mesh conversion or in-core point-based rendering infeasible. Our system uses an out-of-core octree structure and a number of interactive editing tools to enable many archaeological tasks to be carried out on the whole point cloud that would not be possible using traditional methods. We allow fast selection, insertion and deletion of points, and through out-of-core rendering, the frame rate always stays above 20 frames per second on a fast workstation. To the best of our knowledge, this is the first interactive visualization of the complete data set of a large subterranean catacomb, and we show that direct point cloud visualization on the complete data set of a scan campaign is an important tool in archaeological practice.", month = sep, isbn = "978-3-905674-18-7", publisher = "Eurographics Association", location = "Malta", booktitle = "10th VAST International Symposium on Virtual Reality, Archaeology and Cultural Heritage (VAST09)", pages = "65--72", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/SCHEIBLAUER-2009-IDCE/", } @article{BITTNER-2009-AGVS, title = "Adaptive Global Visibility Sampling", author = "Jir\'{i} Bittner and Oliver Mattausch and Peter Wonka and Vlastimil Havran and Michael Wimmer", year = "2009", abstract = "In this paper we propose a global visibility algorithm which computes from-region visibility for all view cells simultaneously in a progressive manner. We cast rays to sample visibility interactions and use the information carried by a ray for all view cells it intersects. The main contribution of the paper is a set of adaptive sampling strategies based on ray mutations that exploit the spatial coherence of visibility. Our method achieves more than an order of magnitude speedup compared to per-view cell sampling. This provides a practical solution to visibility preprocessing and also enables a new type of interactive visibility analysis application, where it is possible to quickly inspect and modify a coarse global visibility solution that is constantly refined. ", month = aug, journal = "ACM Transactions on Graphics", volume = "28", number = "3", issn = "0730-0301", pages = "94:1--94:10", keywords = "occlusion culling, visibility sampling, visibility, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/BITTNER-2009-AGVS/", } @article{wilkie-2009-cc, title = "A Robust Illumination Estimate for Chromatic Adaptation in Rendered Images", author = "Alexander Wilkie and Andrea Weidlich", year = "2009", abstract = "We propose a method that improves automatic colour correction operations for rendered images. In particular, we propose a robust technique for estimating the visible and pertinent illumination in a given scene. We do this at very low computational cost by mostly re-using information that is already being computed during the image synthesis process. Conventional illuminant estimations either operate only on 2D image data, or, if they do go beyond pure image analysis, only use information on the luminaires found in the scene. The latter is usually done with little or no regard for how the light sources actually affect the part of the scene that is being viewed. Our technique goes beyond that, and also takes object reflectance into account, as well as the incident light that is actually responsible for the colour of the objects that one sees. It is therefore able to cope with difficult cases, such as scenes with mixed illuminants, complex scenes with many light sources of varying colour, or strongly coloured indirect illumination. ", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1101--1109", keywords = "chromatic adaptation, predicitve rendering, colour constancy", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/wilkie-2009-cc/", } @article{weidlich-2009-dispersion, title = "Anomalous Dispersion in Predictive Rendering", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "In coloured media, the index of refraction does not decrease monotonically with increasing wavelength, but behaves in a quite non-monotonical way. This behaviour is called anomalous dispersion and results from the fact that the absorption of a material influences its index of refraction. So far, this interesting fact has not been widely acknowledged by the graphics community. In this paper, we demonstrate how to calculate the correct refractive index for a material based on its absorption spectrum with the Kramers-Kronig relation, and we discuss for which types of objects this effect is relevant in practice. ", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1065--1072", keywords = "Predictive rendering, Spectral Rendering, Dispersion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich-2009-dispersion/", } @inproceedings{weidlich_2009_REL, title = "Rendering the Effect of Labradorescence", author = "Andrea Weidlich and Alexander Wilkie", year = "2009", abstract = "Labradorescence is a complex optical phenomenon that can be found in certain minerals, such as Labradorite or Spectrolite. Because of their unique colour properties these minerals are often used as gemstones and decorative objects. Since the phenomenon is strongly orientation dependent, such minerals need a special cut to make the most of their unique type of colourful sheen, which makes it desirable to be able to predict the final appearance of a given stone prior to the cutting process. Also, the peculiar properties of the effect make a believable replication with an ad-hoc shader dificult even for normal, non-predictive rendering purposes. We provide a reflectance model for labradorescence that is directly derived from the physical characteristics of such materials. Due to its inherent accuracy, it can be used for predictive rendering purposes, but also for generic rendering applications. ", month = may, isbn = "978-1-56881-470-4", publisher = "ACM", location = "Kelowna, British Columbia, Canada ", booktitle = "Proceedings of Graphics Interface 2009", pages = "79--85", keywords = "Predictive Rendering, Surface, Crystals", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/weidlich_2009_REL/", } @article{Habel_09_PGT, title = "Physically Guided Animation of Trees", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2009", abstract = "This paper presents a new method to animate the interaction of a tree with wind both realistically and in real time. The main idea is to combine statistical observations with physical properties in two major parts of tree animation. First, the interaction of a single branch with the forces applied to it is approximated by a novel efficient two step nonlinear deformation method, allowing arbitrary continuous deformations and circumventing the need to segment a branch to model its deformation behavior. Second, the interaction of wind with the dynamic system representing a tree is statistically modeled. By precomputing the response function of branches to turbulent wind in frequency space, the motion of a branch can be synthesized efficiently by sampling a 2D motion texture. Using a hierarchical form of vertex displacement, both methods can be combined in a single vertex shader, fully leveraging the power of modern GPUs to realistically animate thousands of branches and ten thousands of leaves at practically no cost.", month = mar, journal = "Computer Graphics Forum (Proceedings EUROGRAPHICS 2009)", volume = "28", number = "2", issn = "0167-7055", pages = "523--532", keywords = "Animation, Physically Guided animation, Vegetation, Trees", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Habel_09_PGT/", } @inproceedings{GRELAUD-2009-EPA, title = "Efficient and Practical Audio-Visual Rendering for Games using Crossmodal Perception", author = "David Grelaud and Nicolas Bonneel and Michael Wimmer and Manuel Asselot and George Drettakis", year = "2009", abstract = "Interactive applications such as computer games, are inherently audio visual, requiring high-quality rendering of complex 3D audio soundscapes and graphics environments. A frequent source of audio events is impact sounds, typically generated with physics engines. In this paper, we first present an optimization allowing efficient usage of impact sounds in a unified audio rendering pipeline, also including prerecorded sounds. We also exploit a recent result on audio-visual crossmodal perception to introduce a new level-of-detail selection algorithm, which jointly chooses the quality level of audio and graphics rendering. We have integrated these two techniques as a comprehensive crossmodal audio-visual rendering pipeline in a home-grown game engine, thus demonstrating the potential utility of our approach.", month = feb, isbn = "978-1-60558-429-4", publisher = "ACM", location = "Boston, Massachusetts", address = "New York, NY, USA", booktitle = "Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2009", pages = "177--182", keywords = "audio-visual rendering, crossmodal perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/GRELAUD-2009-EPA/", } @article{bhagvat-09-frusta, title = "GPU Rendering of Relief Mapped Conical Frusta", author = "Deepali Bhagvat and Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "This paper proposes to use relief-mapped conical frusta (cones cut by planes) to skin skeletal objects. Based on this representation, current programmable graphics hardware can perform the rendering with only minimal communication between the CPU and GPU. A consistent definition of conical frusta including texture parametrization and a continuous surface normal is provided. Rendering is performed by analytical ray casting of the relief-mapped frusta directly on the GPU. We demonstrate both static and animated objects rendered using our technique and compare to polygonal renderings of similar quality.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "28", volume = "8", pages = "2131--2139", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bhagvat-09-frusta/", } @inproceedings{Scheiblauer-2008-DCW, title = "Domitilla Catacomb Walkthrough - dealing with more than 1 billion points", author = "Claus Scheiblauer", year = "2008", abstract = "Range laser scanning evolved as a means for documenting buildings or archeological excavation sites. Point clouds resulting from these laser scans can consist of hundreds of millions of points. To get a clean model from this vast amount of data needs several person months. Instead we try to visualize the data directly, so archeologists can have a quick overview of the already scanned areas (e.g., during a scanning campaign). The models in our viewer are combined point clouds from several scan positions, and it is possible to add new point clouds to an existing model. We also allow for deleting points from the model. Furthermore we developed a heuristic to estimate point sizes, which enables the viewer to display surfaces as closed objects without a special preprocessing step. For the point size heuristic it suffices to know the positions of the points.", month = nov, isbn = "978-3-85161-016-1", series = "Tagungsreihe "Kulturelles Erbe und Neue Technologien" - Workshop "Arch\"{a}ologie und Computer"", note = "Cultural Heritage & New Technologies", location = "Vienna City Hall", booktitle = "Proceedings of the 13th International Congress on Cultural Heritage and New Technologies", keywords = "laser scanning, virtual reconstruction, point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Scheiblauer-2008-DCW/", } @article{guerrero-2008-sli, title = "Real-time Indirect Illumination and Soft Shadows in Dynamic Scenes Using Spherical Lights", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer", year = "2008", abstract = "We present a method for rendering approximate soft shadows and diffuse indirect illumination in dynamic scenes. The proposed method approximates the original scene geometry with a set of tightly fitting spheres. In previous work, such spheres have been used to dynamically evaluate the visibility function to render soft shadows. In this paper, each sphere also acts as a low-frequency secondary light source, thereby providing diffuse one-bounce indirect illumination. The method is completely dynamic and proceeds in two passes: In a first pass, the light intensity distribution on each sphere is updated based on sample points on the corresponding object surface and converted into the spherical harmonics basis. In a second pass, this radiance information and the visibility are accumulated to shade final image pixels. The sphere approximation allows us to compute visibility and diffuse reflections of an object at interactive frame rates of over 20 fps for moderately complex scenes.", month = oct, journal = "Computer Graphics Forum", number = "8", volume = "27", pages = "2154--2168", keywords = "global illumination, precomputed radiance transfer, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/guerrero-2008-sli/", } @article{LIPP-2008-IEV, title = "Interactive Visual Editing of Grammars for Procedural Architecture", author = "Markus Lipp and Peter Wonka and Michael Wimmer", year = "2008", abstract = "We introduce a real-time interactive visual editing paradigm for shape grammars, allowing the creation of rulebases from scratch without text file editing. In previous work, shape-grammar based procedural techniques were successfully applied to the creation of architectural models. However, those methods are text based, and may therefore be difficult to use for artists with little computer science background. Therefore the goal was to enable a visual workflow combining the power of shape grammars with traditional modeling techniques. We extend previous shape grammar approaches by providing direct and persistent local control over the generated instances, avoiding the combinatorial explosion of grammar rules for modifications that should not affect all instances. The resulting visual editor is flexible: All elements of a complex state-of-the-art grammar can be created and modified visually.", month = aug, journal = "ACM Transactions on Graphics", volume = "27", number = "3", note = "Article No. 102", issn = "0730-0301", doi = "10.1145/1360612.1360701", pages = "102:1--10", keywords = "procedural modeling, shape grammars, architectural modeling", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/LIPP-2008-IEV/", } @inproceedings{SUNDSTEDT-2008-ASF, title = "A Psychophysical Study of Fixation Behavior in a Computer Game", author = "Veronica Sundstedt and Efstathios Stavrakis and Michael Wimmer and Erik Reinhard", year = "2008", abstract = "Prediction of gaze behavior in gaming environments can be a tremendously useful asset to game designers, enabling them to improve gameplay, selectively increase visual fidelity, and optimize the distribution of computing resources. The use of saliency maps is currently being advocated as the method of choice for predicting visual attention, crucially under the assumption that no specific task is present. This is achieved by analyzing images for low-level features such as motion, contrast, luminance, etc. However, the majority of computer games are designed to be easily understood and pose a task readily apparent to most players. Our psychophysical experiment shows that in a task-oriented context such as gaming, the predictive power of saliency maps at design time can be weak. Thus, we argue that a more involved protocol utilizing eye tracking, as part of the computer game design cycle, can be sufficiently robust to succeed in predicting fixation behavior of players.", month = aug, isbn = "978-1-59593-981-4", publisher = "ACM", location = "Los Angeles, California", editor = "Sarah Creem-Regehr and Karol Myszkowski", doi = "10.1145/1394281.1394288", booktitle = "ACM Symposium on Applied Perception in Graphics and Visualization 2008", pages = "43--50", keywords = "saliency, eye tracking, electronic games, visual attention, psychophysics", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SUNDSTEDT-2008-ASF/", } @article{CADIK-2008-EHD, title = "Evaluation of HDR Tone Mapping Methods Using Essential Perceptual Attributes", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2008", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color vision, physiological aspects, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this contribution, we present an overview about the effects of basic image attributes in HDR tone mapping. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical experiments that we have performed to prove the proposed relationship scheme. Moreover, we also present an evaluation of existing tone mapping methods (operators) with regard to these attributes. Finally, the execution of with-reference and without a real reference perceptual experiments gave us the opportunity to relate the obtained subjective results. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping method, but it also sets the stage for well-founded quality comparisons between tone mapping methods. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible. ", month = jun, issn = "0097-8493", journal = "Computers & Graphics", number = "3", volume = "32", pages = "330--349", keywords = "high dynamic range, tone mapping operators, tone mapping evaluation, image attributes", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/CADIK-2008-EHD/", } @article{SCHERZER-2008-FSR, title = "Frame Sequential Interpolation for Discrete Level-of-Detail Rendering", author = "Daniel Scherzer and Michael Wimmer", year = "2008", abstract = "In this paper we present a method for automatic interpolation between adjacent discrete levels of detail to achieve smooth LOD changes in image space. We achieve this by breaking the problem into two passes: We render the two LOD levels individually and combine them in a separate pass afterwards. The interpolation is formulated in a way that only one level has to be updated per frame and the other can be reused from the previous frame, thereby causing roughly the same render cost as with simple non interpolated discrete LOD rendering, only incurring the slight overhead of the final combination pass. Additionally we describe customized interpolation schemes using visibility textures. The method was designed with the ease of integration into existing engines in mind. It requires neither sorting nor blending of objects, nor does it introduce any constrains in the LOD used. The LODs can be coplanar, alpha masked, animated, impostors, and intersecting, while still interpolating smoothly. ", month = jun, journal = "Computer Graphics Forum (Proceedings EGSR 2008)", volume = "27", number = "4", issn = "0167-7055", pages = "1175--1181", keywords = "LOD blending, real-time rendering, levels of detail", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/SCHERZER-2008-FSR/", } @inproceedings{Habel_08_SSH, title = "Efficient Spherical Harmonics Lighting with the Preetham Skylight Model", author = "Ralf Habel and Bogdan Mustata and Michael Wimmer", year = "2008", abstract = "We present a fast and compact representation of a skylight model for spherical harmonics lighting, especially for outdoor scenes. This representation allows dynamically changing the sun position and weather conditions on a per frame basis. We chose the most used model in real-time graphics, the Preetham skylight model, because it can deliver both realistic colors and dynamic range and its extension into spherical harmonics can be used to realistically light a scene. We separate the parameters of the Preetham skylight models' spherical harmonics extension and perform a polynomial two-dimensional non-linear least squares fit for the principal parameters to achieve both negligible memory and computation costs. Additionally, we execute a domain specific Gibbs phenomena suppression to remove ringing artifacts.", month = apr, publisher = "Eurographics Association", location = "Crete, Greece", issn = "1017-4656", editor = "Katerina Mania and Erik Reinhard", booktitle = "Eurographics 2008 - Short Papers", pages = "119--122", keywords = "Natural Phenomena, Spherical Harmonics, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/Habel_08_SSH/", } @article{mattausch-2008-CHC, title = "CHC++: Coherent Hierarchical Culling Revisited", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2008", abstract = "We present a new algorithm for efficient occlusion culling using hardware occlusion queries. The algorithm significantly improves on previous techniques by making better use of temporal and spatial coherence of visibility. This is achieved by using adaptive visibility prediction and query batching. As a result of the new optimizations the number of issued occlusion queries and the number of rendering state changes are significantly reduced. We also propose a simple method for determining tighter bounding volumes for occlusion queries and a method which further reduces the pipeline stalls. The proposed method provides up to an order of magnitude speedup over the previous state of the art. The new technique is simple to implement, does not rely on hardware calibration and integrates well with modern game engines.", month = apr, journal = "Computer Graphics Forum (Proceedings Eurographics 2008)", volume = "27", number = "2", issn = "0167-7055", pages = "221--230", keywords = "temporal coherence, dynamic occlusion culling, occlusion queries", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/mattausch-2008-CHC/", } @inproceedings{CHARALAMBOS-2007-HLOD, title = "Optimized HLOD Refinement Driven by Hardware Occlusion Queries", author = "Jean Pierre Charalambos and Jir\'{i} Bittner and Michael Wimmer and Eduardo Romero", year = "2007", abstract = "We present a new method for integrating hierarchical levels of detail (HLOD) with occlusion culling. The algorithm refines the HLOD hierarchy using geometric criteria as well as the occlusion information. For the refinement we use a simple model which takes into account the possible distribution of the visible pixels. The traversal of the HLOD hierarchy is optimized by a new algorithm which uses spatial and temporal coherence of visibility. We predict the HLOD refinement condition for the current frame based on the results from the last frame. This allows an efficient update of the front of termination nodes as well as an efficient scheduling of hardware occlusion queries. Compared to previous approaches, the new method improves on speed as well as image quality. The results indicate that the method is very close to the optimal scheduling of occlusion queries for driving the HLOD refinement.", month = nov, isbn = "978-3-540-76855-9", series = "Lecture Notes in Computer Science, volume 4841", publisher = "Springer", location = "Lake Tahoe, Nevada/California", editor = "Bebis, G.; Boyle, R.; Parvin, B.; Koracin, D.; Paragios, N.; Tanveer, S.-M.; Ju, T.; Liu, Z.; Coquillart, S.; Cruz-Neira, C.; M\"{o}ller, T.; Malzbender, T.", booktitle = "Advances in Visual Computing (Third International Symposium on Visual Computing -- ISVC 2007)", pages = "106--117", keywords = "occlusion queries, levels of detail, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/CHARALAMBOS-2007-HLOD/", } @inproceedings{zotti-2007-hdr, title = "Measuring Light Pollution with a Calibrated High Dynamic Range All-Sky Image Acquisition System", author = "Georg Zotti", year = "2007", abstract = "Combining series of exposures with different exposure times made with a digital SLR camera and a fish-eye lens allow to create high-resolution images that contain the full dynamic range of daylight, including the Sun, usable as scene background and for skylight illumination computation purposes. These High Dynamic Range (HDR) images can also be calibrated with a luminance meter, so the image data contain meaningful values, and the system becomes a measuring device. At night, long-time exposures can be combined, and the setup, once fine-calibrated with a Sky Quality Meter or another low-level light measuring device, becomes a valuable tool to provide absolute values of sky brightness. Using data from the astronomical literature, false-colour plots of sky luminance can be created that closely match visual estimation of visible stellar magnitudes.", month = oct, location = "Bled", address = "Bled", editor = "Andrej Mohar", booktitle = "DARKSKY2007 - 7th European Symposium for the Protection of the Night Sky", keywords = "HDR imaging, Light Pollution, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-hdr/", } @inproceedings{zotti-2007-eg, title = "Tangible Heritage: Production of Astrolabes on a Laser Engraver", author = "Georg Zotti", year = "2007", abstract = "The astrolabe, an analog computing device, used to be the iconic instrument of astronomers during the Middle Ages. It allowed a multitude of operations of practical astronomy which were otherwise cumbersome to perform in an epoch when mathematics had apparently almost been forgotten. Usually made from wood or sheet metal, a few hundred instruments, mostly from brass, survived until today and are valuable museum showpieces. This paper explains a procedural modelling approach for the construction of the classical kinds of astrolabes, which allows a wide variety of applications from plain explanatory illustrations to 3D models, and even the production of working physical astrolabes usable for public or classroom demonstrations.", month = sep, publisher = "Eurographics", organization = "Eurographics", location = "Prague", issn = "1017-4656", editor = "David B. Arnold and Andrej Ferko", booktitle = "EG2007 Cultural Heritage Papers", pages = "41--48 (colorplate:p60)", keywords = "Cultural Heritage, Astronomy, Astrolabe, Procedural Modelling", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-eg/", } @inproceedings{Habel_2007_RTT, title = "Physically Based Real-Time Translucency for Leaves", author = "Ralf Habel and Alexander Kusternig and Michael Wimmer", year = "2007", abstract = "This paper presents a new shading model for real-time rendering of plant leaves that reproduces all important attributes of a leaf and allows for a large number of leaves to be shaded. In particular, we use a physically based model for accurate subsurface scattering on the translucent side of directly lit leaves. For real-time rendering of this model, we formulate it as an image convolution process and express the result in an efficient directional basis that is fast to evaluate. We also propose a data acquisition method for leaves that uses off-the-shelf devices.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "253--263", keywords = "Realtime Rendering, Natural Scene Rendering, Physically Based Rendering, Natural Phenomena", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_RTT/", } @inproceedings{JESCHKE-2007-ISC, title = "Interactive Smooth and Curved Shell Mapping", author = "Stefan Jeschke and Stephan Mantler and Michael Wimmer", year = "2007", abstract = "Shell mapping is a technique to represent three-dimensional surface details. This is achieved by extruding the triangles of an existing mesh along their normals, and mapping a 3D function (e.g., a 3D texture) into the resulting prisms. Unfortunately, such a mapping is nonlinear. Previous approaches perform a piece-wise linear approximation by subdividing the prisms into tetrahedrons. However, such an approximation often leads to severe artifacts. In this paper we present a correct (i.e., smooth) mapping that does not rely on a decomposition into tetrahedrons. We present an efficient GPU ray casting algorithm which provides correct parallax, self-occlusion, and silhouettes, at the cost of longer rendering times. The new formulation also allows modeling shells with smooth curvatures using Coons patches within the prisms. Tangent continuity between adjacent prisms is guaranteed, while the mapping itself remains local, i.e. every curved prism content is modeled at runtime in the GPU without the need for any precomputation. This allows instantly replacing animated triangular meshes with prism-based shells.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "10", pages = "351--360", keywords = "Display algorithms, Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/JESCHKE-2007-ISC/", } @inproceedings{Scherzer-2007-PCS, title = "Pixel-Correct Shadow Maps with Temporal Reprojection and Shadow Test Confidence", author = "Daniel Scherzer and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "Shadow mapping suffers from spatial aliasing (visible as blocky shadows) as well as temporal aliasing (visible as flickering). Several methods have already been proposed for reducing such artifacts, but so far none is able to provide satisfying results in real time. This paper extends shadow mapping by reusing information of previously rasterized images, stored efficiently in a so-called history buffer. This buffer is updated in every frame and then used for the shadow calculation. In combination with a special confidence-based method for the history buffer update (based on the current shadow map), temporal and spatial aliasing can be completely removed. The algorithm converges in about 10 to 60 frames and during convergence, shadow borders are sharpened over time. Consequently, in case of real-time frame rates, the temporal shadow adaption is practically imperceptible. The method is simple to implement and is as fast as uniform shadow mapping, incurring only the minor speed hit of the history buffer update. It works together with advanced filtering methods like percentage closer filtering and more advanced shadow mapping techniques like perspective or light space perspective shadow maps.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "45--50", keywords = "shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Scherzer-2007-PCS/", } @inproceedings{GIEGL-2007-FVS, title = "Fitted Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Too little shadow map resolution and resulting undersampling artifacts, perspective and projection aliasing, have long been a fundamental problem of shadowing scenes with shadow mapping. We present a new smart, real-time shadow mapping algorithm that virtually increases the resolution of the shadow map beyond the GPU hardware limit where needed. We first sample the scene from the eye-point on the GPU to get the needed shadow map resolution in different parts of the scene. We then process the resulting data on the CPU and finally arrive at a hierarchical grid structure, which we traverse in kd-tree fashion, shadowing the scene with shadow map tiles where needed. Shadow quality can be traded for speed through an intuitive parameter, with a homogeneous quality reduction in the whole scene, down to normal shadow mapping. This allows the algorithm to be used on a wide range of hardware.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "159--168", keywords = "real-time shadowing, shadows, shadow maps, large environemnts", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-FVS/", } @inproceedings{MATTAUSCH-2007-OSP, title = "Optimized Subdivisions for Preprocessed Visibility", author = "Oliver Mattausch and Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2007", abstract = "This paper describes a new tool for preprocessed visibility. It puts together view space and object space partitioning in order to control the render cost and memory cost of the visibility description generated by a visibility solver. The presented method progressively refines view space and object space subdivisions while minimizing the associated render and memory costs. Contrary to previous techniques, both subdivisions are driven by actual visibility information. We show that treating view space and object space together provides a powerful method for controlling the efficiency of the resulting visibility data structures.", month = may, isbn = "978-1-56881-337-0", publisher = "Canadian Human-Computer Communications Society", location = "Montreal, Canada", editor = "Christopher G. Healey and Edward Lank", booktitle = "Proceedings of Graphics Interface 2007", pages = "335--342", keywords = "visibility preprocessing, potentially visible sets, view cells", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MATTAUSCH-2007-OSP/", } @inproceedings{GIEGL-2007-QV1, title = "Queried Virtual Shadow Maps", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "Shadowing scenes by shadow mapping has long suffered from the fundamental problem of undersampling artifacts due to too low shadow map resolution, leading to so-called perspective and projection aliasing. In this paper we present a new real-time shadow mapping algorithm capable of shadowing large scenes by virtually increasing the resolution of the shadow map beyond the GPU hardware limit. We start with a brute force approach that uniformly increases the resolution of the whole shadow map. We then introduce a smarter version which greatly increases runtime performance while still being GPU-friendly. The algorithm contains an easy to use performance/quality-tradeoff parameter, making it tunable to a wide range of graphics hardware.", month = apr, isbn = "978-1-59593-628-8", publisher = "ACM Press", location = "Seattle, WA", address = "New York, NY, USA", booktitle = "Proceedings of ACM SIGGRAPH 2007 Symposium on Interactive 3D Graphics and Games", pages = "65--72", keywords = "shadow maps, shadows, real-time shadowing, large environemnts", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-QV1/", } @article{GIEGL-2007-UNP, title = "Unpopping: Solving the Image-Space Blend Problem for Smooth Discrete LOD Transitions", author = "Markus Giegl and Michael Wimmer", year = "2007", abstract = "This paper presents a new, simple and practical algorithm to avoid artifacts when switching between discrete levels of detail (LOD) by smoothly blending LOD representations in image space. We analyze the alternatives of conventional alpha-blending and so-called late-switching (the switching of LODs markusquote{far enough} from the eye-point), widely thought to solve the LOD switching discontinuity problem, and conclude that they either do not work in practice, or defeat the concept of LODs. In contrast we show that our algorithm produces visually pleasing blends for static and animated discrete LODs, for discrete LODs with different types of LOD representations (e.g. billboards and meshes) and even to some extent totally different objects with similar spatial extent, with a very small runtime overhead.", month = mar, issn = "0167-7055", journal = "Computer Graphics Forum", number = "1", volume = "26", pages = "46--49", keywords = "popping, LOD switching, levels of detail, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/GIEGL-2007-UNP/", } @article{Habel_2007_IAG, title = "Instant Animated Grass", author = "Ralf Habel and Michael Wimmer and Stefan Jeschke", year = "2007", abstract = "This paper introduces a technique for rendering animated grass in real time. The technique uses front-to-back compositing of implicitly defined grass slices in a fragment shader and therefore significantly reduces the overhead associated with common vegetation rendering systems. We also introduce a texture-based animation scheme that combines global wind movements with local turbulences. Since the technique is confined to a fragment shader, it can be easily integrated into any rendering system and used as a material in existing scenes. ", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", note = "ISBN 978-80-86943-00-8", issn = "1213-6972", pages = "123--128", keywords = "Real-time Rendering, Natural Scene Rendering, Natural Phenomena, GPU Programming", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_IAG/", } @inproceedings{zotti-2007-wscg, title = "A Critical Review of the Preetham Skylight Model", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2007", abstract = "The Preetham skylight model is currently one of the most widely used analytic models of skylight luminance in computer graphics. Despite its widespread use, very little work has been carried out to verify the results generated by the model, both in terms of the luminance patterns it generates, and in terms of numerical reliability and stability. We have implemented the model in Mathematica, visualise and discuss those parameter ranges which exhibit problematic behaviour, and compare the computed luminance values with references from literature, especially the 15 standard skylight distributions of the CIE 2003 Standard General Sky. We also performed luminance measurements on real cloudless skies, and compare these measurements to the predictions of the model.", month = jan, isbn = "978-80-86943-02-2", publisher = "University of West Bohemia", location = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG ", pages = "23--30", keywords = "Verification, Skylight", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/zotti-2007-wscg/", } @article{neumann-2006-gamma, title = "Accurate Display Gamma Functions based on Human Observation", author = "Attila Neumann and Alessandro Artusi and L\'{a}szl\'{o} Neumann and Georg Zotti and Werner Purgathofer", year = "2007", abstract = "This paper describes an accurate method to obtain the Tone Reproduction Curve (TRC) of display devices without using a measurement device. It is an improvement of an existing technique based on human observation, solving its problem of numerical instability and resulting in functions in log--log scale which correspond better to the nature of display devices. We demonstrate the effiency of our technique on different monitor technologies, comparing it with direct measurements using a spectrophotometer.", issn = "0361-2317", journal = "Color Research & Applications", note = "2006 angenommen, 2007 erschienen", number = "4", volume = "32", pages = "310--319", keywords = "Generalized Gamma Function, Colour Reproduction, Display Measurement, Human Visual System, Spatial Vision", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/neumann-2006-gamma/", } @inproceedings{zotti-2006-pla, title = "Using Virtual Reconstructions in a Planetarium for Demonstrations in Archaeo-Astronomy", author = "Georg Zotti and Alexander Wilkie and Werner Purgathofer", year = "2006", abstract = "In the last decades, archaeologists in central Europe have found traces of enigmatic neolithic circular building structures buried in the soil. Recent studies indicate that the orientation of many of their doorways may have been chosen with an astronomical background in mind. This paper explains the use of virtual reconstructions of these buildings from archaeological data, in combination with a simulation of the sky of that time in a Planetarium, to present the astronomical findings to the public.", month = nov, isbn = "963-9495-89-1", publisher = "Pannonian University Press", location = "Eger", editor = "Cecilia Sik Lanyi ", booktitle = "Third Central European Multimedia and Virtual Reality Conference (Proc. CEMVRC2006)", pages = "43--51", keywords = "Virtual Reality, Public Dissemination, Archaeo-Astronomy", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/zotti-2006-pla/", } @inproceedings{wilkie-2006-dfs, title = "A Reflectance Model for Diffuse Fluorescent Surfaces", author = "Alexander Wilkie and Andrea Weidlich and Caroline Larboulette and Werner Purgathofer", year = "2006", abstract = "Fluorescence is an interesting and visually prominent effect, which has not been fully covered by Computer Graphics research so far. While the physical phenomenon of fluorescence has been addressed in isolation, the actual reflection behaviour of real fluorescent surfaces has never been documented, and no analytical BRDF models for such surfaces have been published yet. This paper aims to illustrate the reflection properties typical for diffuse fluorescent surfaces, and provides a BRDF model based on a layered microfacet approach that mimics them.", month = nov, isbn = "1-59593-564-9", location = "Kuala Lumpur, Malaysia", booktitle = "Proceedings of Graphite 2006", pages = "8", pages = "321--328", keywords = "Fluorescence, Analytical BRDF models", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/wilkie-2006-dfs/", } @inproceedings{Mantler-06-landscape, title = "Interactive Landscape Visualization Using GPU Ray Casting", author = "Stephan Mantler and Stefan Jeschke", year = "2006", abstract = "This paper demonstrates the simple yet effective usage of height fields for interactive landscape visualizations using a ray casting approach implemented in the pixel shader of modern graphics cards. The rendering performance is output sensitive, i.e., it scales with the number of pixels rather than the complexity of the landscape. Given a height field of a terrain and a topographic map or similar data as input, the vegetation cover is extracted and stored on top of the height field in a preprocess, enhancing the terrain with forest canopies or other mesostructure. In addition, enhanced illumination models like shadowing and ambient occlusion can be calculated at runtime with reasonable computational cost, which greatly enhances the scene realism. Finally, including the presented technique into existing rendering systems is relatively simple, mainly consisting of data preparation and pixel shader programming.", month = nov, booktitle = "Proceedings of Graphite 2006", keywords = "real-time rendering, gpu ray casting", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Mantler-06-landscape/", } @article{vis-foa, title = "Importance-Driven Focus of Attention", author = "Ivan Viola and Miquel Feixas and Mateu Sbert and Eduard Gr\"{o}ller", year = "2006", abstract = "This paper introduces a concept for automatic focusing on features within a volumetric data set. The user selects a focus, i.e., object of interest, from a set of pre-defined features. Our system automatically determines the most expressive view on this feature. A characteristic viewpoint is estimated by a novel information-theoretic framework which is based on the mutual information measure. Viewpoints change smoothly by switching the focus from one feature to another one. This mechanism is controlled by changes in the importance distribution among features in the volume. The highest importance is assigned to the feature in focus. Apart from viewpoint selection, the focusing mechanism also steers visual emphasis by assigning a visually more prominent representation. To allow a clear view on features that are normally occluded by other parts of the volume, the focusing for example incorporates cut-away views.", month = oct, journal = "IEEE Transactions on Visualization and Computer Graphics", volume = "12", number = "5", pages = "933--940", keywords = "illustrative visualization, interacting with volumetric datasets, optimal viewpoint estimation, focus+context techniques, volume visualization", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/vis-foa/", } @inproceedings{CADIK-2006-IAQ, title = "Image Attributes and Quality for Evaluation of Tone Mapping Operators", author = "Martin \v{C}ad\'{i}k and Michael Wimmer and L\'{a}szl\'{o} Neumann and Alessandro Artusi", year = "2006", abstract = "The problem of reproducing high dynamic range images on devices with restricted dynamic range has gained a lot of interest in the computer graphics community. There exist various approaches to this issue, which span several research areas including computer graphics, image processing, color science, physiology, neurology, psychology, etc. These approaches assume a thorough knowledge of both the objective and subjective attributes of an image. However, no comprehensive overview and analysis of such attributes has been published so far. In this paper, we present an overview of image quality attributes of different tone mapping methods. Furthermore, we propose a scheme of relationships between these attributes, leading to the definition of an overall image quality measure. We present results of subjective psychophysical tests that we have performed to prove the proposed relationship scheme. We also present the evaluation of existing tone mapping methods with regard to these attributes. Our effort is not just useful to get into the tone mapping field or when implementing a tone mapping operator, but it also sets the stage for well-founded quality comparisons between tone mapping operators. By providing good definitions of the different attributes, user-driven or fully automatic comparisons are made possible at all.", month = oct, publisher = "National Taiwan University Press", location = "Taipe, Taiwan", booktitle = "Proceedings of Pacific Graphics 2006 (14th Pacific Conference on Computer Graphics and Applications)", pages = "35--44", keywords = "tone mapping evaluation, tone mapping, high-dynamic range images", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/CADIK-2006-IAQ/", } @inproceedings{WIMMER-2006-DWN, title = "Do We Need Accurate Reconstruction?", author = "Michael Wimmer", year = "2006", abstract = "The accurate reconstruction of high-quality representations from range scanning devices for archaeology is very time consuming and costly. The objective of this paper is to show that this accurate reconstruction step can be avoided in many cases. Instead, we present a method to make range scanning data instantly available to archaeologists and other scientists, so that they can immediately experiment and work with the data.", month = oct, location = "Vienna City Hall, Vienna, Austria", booktitle = "Proceedings of 11th International Congress on Cultural Heritage and New Technologies", keywords = "point-based rendering, laser scanning, virtual reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-DWN/", } @inproceedings{WIMMER-2006-IP, title = "Instant Points: Fast Rendering of Unprocessed Point Clouds", author = "Michael Wimmer and Claus Scheiblauer", year = "2006", abstract = "We present an algorithm to display enormous unprocessed point clouds at interactive rates without requiring long preprocessing. The novelty here is that we do not make any assumptions about sampling density or availability of normal vectors for the points. This is very important because such information is available only after lengthy postprocessing of scanned datasets, whereas users want to interact with the dataset immediately. Instant Points is an out-of-core algorithm that makes use of nested octrees and an enhanced version of sequential point trees.", month = jul, isbn = "3-90567-332-0", publisher = "Eurographics Association", organization = "Eurographics", location = "Boston, USA", booktitle = "Proceedings Symposium on Point-Based Graphics 2006", pages = "129--136", keywords = "unprocessed point clouds, point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WIMMER-2006-IP/", } @article{WONKA-2006-GVS, title = "Guided Visibility Sampling", author = "Peter Wonka and Michael Wimmer and Kaichi Zhou and Stefan Maierhofer and Gerd Hesina and Alexander Reshetov", year = "2006", abstract = "This paper addresses the problem of computing the triangles visible from a region in space. The proposed aggressive visibility solution is based on stochastic ray shooting and can take any triangular model as input. We do not rely on connectivity information, volumetric occluders, or the availability of large occluders, and can therefore process any given input scene. The proposed algorithm is practically memoryless, thereby alleviating the large memory consumption problems prevalent in several previous algorithms. The strategy of our algorithm is to use ray mutations in ray space to cast rays that are likely to sample new triangles. Our algorithm improves the sampling efficiency of previous work by over two orders of magnitude.", month = jul, journal = "ACM Transactions on Graphics", volume = "25", number = "3", note = "Proceedings ACM SIGGRAPH 2006", issn = "0730-0301", doi = "10.1145/1141911.1141914", pages = "494--502", keywords = "visibility, visibility sampling, occlusion culling, PVS", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/WONKA-2006-GVS/", } @inproceedings{MATTAUSCH-2006-AVC, title = "Adaptive Visibility-Driven View Cell Construction", author = "Oliver Mattausch and Jir\'{i} Bittner and Michael Wimmer", year = "2006", abstract = "We present a new method for the automatic partitioning of view space into a multi-level view cell hierarchy. We use a cost-based model in order to minimize the average rendering time. Unlike previous methods, our model takes into account the actual visibility in the scene, and the partition is not restricted to planes given by the scene geometry. We show that the resulting view cell hierarchy works for different types of scenes and gives lower average rendering time than previously used methods.", month = jun, isbn = "3-90567-335-5", publisher = "Eurographics Association", organization = "Eurographics", location = "Nicosia, Cyprus", editor = "Wolfgang Heidrich and Tomas Akenine-Moller", booktitle = "Rendering Techniques 2006 (Proceedings Eurographics Symposium on Rendering)", pages = "195--206", keywords = "view cells, real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/MATTAUSCH-2006-AVC/", } @article{zotti-2006-dgm, title = "A Sky Dome Visualisation for Identification of Astronomical Orientations", author = "Georg Zotti", year = "2006", abstract = "It has long been known that ancient temples were frequently oriented along the cardinal directions or to certain points along the horizon where the Sun or the Moon rises or sets on special days of the year. In the last decades, archaeologists have found evidence of even older building structures buried in the soil, with doorways that also appear to have distinct orientations. This paper presents a novel diagram combining archaeological maps with a folded-apart, flattened view of the whole sky, showing the local horizon and the daily paths of the Sun, Moon and brighter stars. By use of this diagram, interesting groupings of astronomical orientation directions, e.g. to certain sunrise and sunset points could be identified, which were evidently used to mark certain days of the year. Orientations towards rising and setting points of a few significant stars very likely indicated the beginning of the agricultural year in the middle neolithic period.", issn = "1473-8716", journal = "Information Visualization", number = "5", volume = "2006", pages = "152--166", keywords = "data mining, astronomy, archaeology", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/zotti-2006-dgm/", } @inproceedings{Zotti-2005-vis, title = "A Sky Dome Visualisation for Identification of Astronomical Orientations", author = "Georg Zotti and Eduard Gr\"{o}ller", year = "2005", abstract = " It has long been known that ancient temples were frequently oriented along the cardinal directions or to certain points along the horizon where Sun or Moon rise or set on special days of the year. In the last decades, archaeologists have found evidence of even older building structures buried in the soil, with doorways that also appear to have distinct orientations. This paper presents a novel diagram combining archaeological maps with a folded-apart, flattened view of the whole sky, showing the local horizon and the daily paths of sun, moon and brighter stars. By use of this diagram, interesting groupings of astronomical orientation directions, e.g. to certain sunrise and sunset points could be identified, which were evidently used to mark certain days of the year. Orientations to a few significant stars very likely indicated the beginning of the agricultural year in the middle neolithic period.", month = oct, isbn = "0-7803-9464-X", publisher = "IEEE", location = "Minneapolis", editor = "John Stasko and Matt Ward", booktitle = "Proceedings IEEE Symposium on Information Visualization", pages = "9--16", keywords = "data mining, Astronomy, Archaeology", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/Zotti-2005-vis/", } @inproceedings{bittner-2005-egsr, title = "Fast Exact From-Region Visibility in Urban Scenes", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2005", abstract = "We present a fast exact from-region visibility algorithm for 2.5D urban scenes. The algorithm uses a hierarchical subdivision of line-space for identifying visibility interactions in a 2D footprint of the scene. Visibility in the remaining vertical dimension is resolved by testing for the existence of lines stabbing sequences of virtual portals. Our results show that exact analytic from-region visibility in urban scenes can be computed at times comparable or even superior to recent conservative methods. ", month = jun, isbn = "3-905673-23-1", publisher = "Eurographics Association", organization = "Eurographics", location = "Konstanz, Germany", editor = "Kavita Bala and Philip Dutr\'{e}", booktitle = "Rendering Techniques 2005 (Proceedings Eurographics Symposium on Rendering)", pages = "223--230", keywords = "real-time rendering, visibility", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/bittner-2005-egsr/", } @inproceedings{havran-2005-sccg, title = "On Cross-Validation and Resampling of BRDF Data Measurements", author = "Vlastimil Havran and Attila Neumann and Georg Zotti and Werner Purgathofer and Hans-Peter Seidel", year = "2005", abstract = "We discuss the validation of BTF data measurements by means used for BRDF measurements. First, we show how to apply the Helmholtz reciprocity and isotropy for a single data set. Second, we discuss a cross-validation for BRDF measurement data obtained from two different measurement setups, where the measurements are not calibrated or the level of accuracy is not known. We show the practical problems encountered and the solutions we have used to validate physical setup for four material samples. We describe a novel coordinate system suitable for resampling the BRDF data from one data set to another data set. Further, we show how the perceptually uniform color space CIELab is used for cross-comparison of BRDF data measurements, which were not calibrated.", month = may, location = "Budmerice, Slovakia", booktitle = "Proceedings SCCG 2005", pages = "161--168", keywords = "reflectance function, BRDF data acquisition, BRDF data validation, predictive rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-sccg/", } @inproceedings{jeschke-05-AIP, title = "Automatic Impostor Placement for Guaranteed Frame Rates and Low Memory Requirements", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann and Werner Purgathofer", year = "2005", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to reduce the rendering time needed for displaying complex scenes. However, a big problem is the huge amount of memory required for impostors. This paper presents an algorithm that automatically places impostors into a scene so that a desired frame rate and image quality is always met, while at the same time not requiring enormous amounts of impostor memory. The low memory requirements are provided by a new placement method and through the simultaneous use of other acceleration techniques like visibility culling and geometric levels of detail.", month = apr, isbn = "1-59593-013-2", publisher = "ACM Press", organization = "ACM", location = "Washington DC", booktitle = "Proceedings of ACM SIGGRAPH 2005 Symposium on Interactive 3D Graphics and Games", pages = "103--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-AIP/", } @inproceedings{zotti-2005-lum, title = "Approximating Real-World Luminaires with OpenGL Lights", author = "Georg Zotti and Attila Neumann and Werner Purgathofer", year = "2005", abstract = "Dynamic illumination in real-time applications using OpenGL is still usually done with the classical light forms of point lights, directional lights and spot lights. For applications simulating real-world scenes, e.g. architectural planning, finding parameter sets for these simple lights to match real-world luminaires is required for realistic work. This paper describes a simple approach to process a luminaire data file in IESNA IES-LM63-95 format to create an approximation using at most 2 OpenGL lights to represent one luminaire.", month = feb, isbn = "80-903100-9-5", publisher = "UNION press", organization = "University of West Bohemia", note = "only on conference CD-ROM", location = "Plzen", address = "Plzen", editor = "Vaclav Skala", booktitle = "WSCG 2005 Short Paper Proceedings", pages = "49--52", keywords = "interactive illumination planning, OpenGL, Real-world luminaires", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/zotti-2005-lum/", } @inproceedings{havran-2005-egsr, title = "Ray Maps for Global Illumination", author = "Vlastimil Havran and Jir\'{i} Bittner and Robert Herzog and Hans-Peter Seidel", year = "2005", abstract = "We describe a novel data structure for representing light transport called ray map. The ray map extends the concept of photon maps: it stores not only photon impacts but the whole photon paths. We demonstrate the utility of ray maps for global illumination by eliminating boundary bias and reducing topological bias of density estimation in global illumination. Thanks to the elimination of boundary bias we could use ray maps for fast direct visualization with the image quality being close to that obtained by the expensive nal gathering step. We describe in detail our implementation of the ray map using a lazily constructed kD-tree. We also present several optimizations bringing the ray map query performance close to the performance of the photon map.", booktitle = "Eurographics Symposium on Rendering", pages = "43--54", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/havran-2005-egsr/", } @article{Bittner-2004-CHC, title = "Coherent Hierarchical Culling: Hardware Occlusion Queries Made Useful", author = "Jir\'{i} Bittner and Michael Wimmer and Harald Piringer and Werner Purgathofer", year = "2004", abstract = "We present a simple but powerful algorithm for optimizing the usage of hardware occlusion queries in arbitrary complex scenes. Our method minimizes the number of issued queries and reduces the delays due to the latency of query results. We reuse the results of the occlusion queries from the last frame in order to initiate and schedule the queries in the next frame. This is done by processing nodes of a spatial hierarchy in front-to-back order, interleaving occlusion queries with the rendering of certain previously visible nodes. The proposed scheduling of the queries makes use of spatial and temporal coherence of visibility. Despite its simplicity, the algorithm achieves good culling efficiency for scenes of various characteristics. The implementation of the algorithm is straightforward, and it can be easily integrated in existing real-time rendering packages using various spatial data structures.", month = sep, journal = "Computer Graphics Forum", volume = "23", number = "3", note = "Proceedings EUROGRAPHICS 2004", issn = "0167-7055", pages = "615--624", keywords = "occlusion query, visibility, real-time rendering, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Bittner-2004-CHC/", } @inproceedings{Wilkie-2004-AMS, title = "An Analytical Model for Skylight Polarisation", author = "Alexander Wilkie and Robert F. Tobler and Christiane Ulbricht and Georg Zotti and Werner Purgathofer", year = "2004", abstract = "Under certain circumstances the polarisation state of the illumination can have a significant influence on the appearance of scenes; outdoor scenes with specular surfaces -- such as water bodies or windows -- under clear, blue skies are good examples of such environments. In cases like that it can be essential to use a polarising renderer if a true prediction of nature is intended, but so far no polarising skylight models have been presented. This paper presents a plausible analytical model for the polarisation of the light emitted from a clear sky. Our approach is based on a suitable combination of several components with well-known characteristics, and yields acceptable results in considerably less time than an exhaustive simulation of the underlying atmospheric scattering phenomena would require.", month = jun, isbn = "3-905673-12-6", publisher = "?", editor = "Alexander Keller and Henrik Wann Jensen ", booktitle = "Proceedings of the Eurographics Symposium on Rendering", pages = "387--399", keywords = "skylight rendering, polarisation", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wilkie-2004-AMS/", } @inproceedings{Wimmer-2004-LSPM, title = "Light Space Perspective Shadow Maps", author = "Michael Wimmer and Daniel Scherzer and Werner Purgathofer", year = "2004", abstract = "In this paper, we present a new shadow mapping technique that improves the quality of perspective and uniform shadow maps. Our technique uses a perspective transform specified in light space which allows treating all lights as directional lights and does not change the direction of the light sources. This gives all the benefits of the perspective mapping but avoids the problems inherent in perspective shadow mapping like singularities in post-perspective space, missed shadow casters etc. Furthermore, we show that both uniform and perspective shadow maps distribute the perspective aliasing error that occurs in shadow mapping unequally over the available z-range. We therefore propose a transform that equalizes this error and gives equally pleasing results for near and far viewing distances. Our method is simple to implement, requires no scene analysis and is therefore as fast as uniform shadow mapping.", month = jun, isbn = "3-905673-12-6", publisher = "Eurographics Association", organization = "Eurographics", location = "Norrk\"{o}ping, Sweden", editor = "Alexander Keller and Henrik W. Jensen", booktitle = "Rendering Techniques 2004 (Proceedings Eurographics Symposium on Rendering)", pages = "143--151", keywords = "shadows, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Wimmer-2004-LSPM/", } @inproceedings{neumann-2004-ipm, title = "Interactive Perception Based Model for Characterization of Display device", author = "Attila Neumann and Alessandro Artusi and Georg Zotti and L\'{a}szl\'{o} Neumann and Werner Purgathofer", year = "2004", abstract = "This paper describes a simple to use, yet accurate way to obtain the Tone Reproduction Curve (TRC) of display devices without the use of a measurement device. Human vision is used to compare a series of dithered color patches against interactively changeable homogeneously colored display areas. Results comparing this method with spectrophotometer measurements are given for three monitors.", month = jan, isbn = "0819451967", organization = "IST&SPIE", location = "San Jose, California, USA", booktitle = "Color Imaging IX: Processing, Hardcopy, and Applications IX", keywords = "Display Measurement, Human Visual System", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/neumann-2004-ipm/", } @inproceedings{Neum04GCM, title = "Gamut Clipping and Mapping based on Coloroid System", author = "L\'{a}szl\'{o} Neumann and Attila Neumann", year = "2004", abstract = "The paper presents novel techniques based on the Coloroid color system which hitherto has not been applied to gamut mapping. The introduced methods described are hue preserving and use three gravity centers in a hue-dependent way. Two of the gravity centers are shifted toward the direction of ‘negative saturation’, and can be used for regions of bright and dark colors, while a third gravity center is used for saturated colors. After a short survey of gamut mapping an introduction to the Coloroid describes concisely its features and formulas. Then a simple color clipping rule is presented applicable to any rendering applications. A gamut mapping method will be defined mapping from a generic input image to an RGB display. Other variations of the method describe RGB-CYMK transformation and also cross-media mapping for known input and output gamut boundaries. The introduced methods apply hue and chroma dependent lightness compression. They can be applied in realistic computer graphics and digital photography as well as in printing technologies. They are straightforward to implement and have low computational costs.", booktitle = "Proceedings of Second European Conference on Colour in Graphics, Imaging and Vision", pages = "548--555", keywords = "Coloroid Color System, Gamut Clipping, Gamut Mapping, Human Perception", URL = "https://www.cg.tuwien.ac.at/research/publications/2004/Neum04GCM/", } @article{Bittner-2003-Vis, title = "Visibility in Computer Graphics", author = "Jir\'{i} Bittner and Peter Wonka", year = "2003", abstract = "Visibility computation is crucial for computer graphics from its very beginning. The first visibility algorithms in computer graphics aimed to determine visible surfaces in a synthesized image of a 3D scene. Nowadays there are many different visibility algorithms for various visibility problems. We propose a new taxonomy of visibility problems that is based on a classification according to the problem domain. We provide a broad overview of visibility problems and algorithms in computer graphics grouped by the proposed taxonomy. The paper surveys visible surface algorithms, visibility culling algorithms, visibility algorithms for shadow computation, global illumination, point-based and image-based rendering, and global visibility computations. Finally, we discuss common concepts of visibility algorithm design and several criteria for the classification of visibility algorithms.", month = sep, issn = "0265-8135", journal = "Environment and Planning B: Planning and Design", number = "5", volume = "30", pages = "729--756", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Bittner-2003-Vis/", } @article{Wonka-2003-Ins, title = "Instant Architecture", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion and William Ribarsky", year = "2003", abstract = "This paper presents a new method for the automatic modeling of architecture. Building designs are derived using split grammars, a new type of parametric set grammar based on the concept of shape. The paper also introduces an attribute matching system and a separate control grammar, which offer the flexibility required to model buildings using a large variety of different styles and design ideas. Through the adaptive nature of the design grammar used, the created building designs can either be generic or adhere closely to a specified goal, depending on the amount of data available.", month = jul, journal = "ACM Transaction on Graphics", volume = "22", number = "3", note = "Proceedings ACM SIGGRAPH 2003", issn = "0730-0301", doi = "10.1145/882262.882324", pages = "669--677", keywords = "architecture, shape grammars, urban environments, modeling, real-time simulation, building design", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wonka-2003-Ins/", } @article{Artusi-2003-CCG, title = "L’importanza del Colore in Computer Graphics", author = "Alessandro Artusi", year = "2003", abstract = "Il colore rappresenta un’importante caratteristica, della realt\'{a}, di particolare interesse per molte applicazioni come: fotografia e stampa digitale, computer graphics, film restoration, color science ecc. Nonostante questo, molto spesso non viene presa in considerazione nel modo adeguato. La conseguenza di questo, \'{e} una rappresentazione non corretta della scena, dal punto di vista qualitativo, che molto spesso non \'{e} adeguata per gli scopi prefissati. La Computer Graphics, \'{e} una di quelle applicazioni in cui la componente colore \'{e} di particolare importanza. Ma per molto tempo una sua non corretta rappresentazione \'{e} stata usata. Solo negli ultimi anni sono state intorodotte diverse metodologie, gi\'{a} in uso in Colore Science. In questo articolo verr\'{a} presentato il processo di integrazione della componente colore in Computer Graphics, e i suoi aspetti importanti.", month = jul, journal = "DDD_IL COLORE Rivista trimestrale Disegno e Design Digitale", number = "5", volume = "2", keywords = "Colorimetric characterization, Computer Graphics, Colour, Gamut Mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-CCG/", } @article{Artusi-2003-Col, title = "Novel Colour Printer Characterization Model", author = "Alessandro Artusi and Alexander Wilkie", year = "2003", abstract = "A key problem in multimedia systems is the faithful reproduction of color. One of the main reasons why this is a complicated issue are the different color reproduction technologies used by the various devices; displays use easily modeled additive color mixing, while printers use a subtractive process, the characterization of which is much more complex than that of self--luminous displays. In order to resolve these problems several processing steps are necessary, one of which is accurate device characterization. Our study examines different learning algorithms for one particular neural network technique which already has been found to be useful in related contexts -- namely radial basis function network models -- and proposes a modified learning algorithm which improves the colorimetric characterization process of printers. In particular our results show that is possible to obtain good performance by using a learning algorithm that is trained on only small sets of color samples, and use it to generate a larger look--up table (LUT) through use of multiple polynomial regression or an interpolation algorithm. We deem our findings to be a good start point for further studies on learning algorithms used in conjunction with this problem.", month = jul, issn = "1017-9909", journal = "Journal of Electronic Imaging", number = "3", volume = "12", pages = "448--458", keywords = "neural network, Colorimetric characterization, Radial basis function", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-Col/", } @inproceedings{Artusi-2003-Del, title = "Delivering Interactivity to Complex Tone Mapping Operators", author = "Alessandro Artusi and Jir\'{i} Bittner and Michael Wimmer and Alexander Wilkie", year = "2003", abstract = "The accurate display of high dynamic range images requires the application of complex tone mapping operators. These operators are computationally costly, which prevents their usage in interactive applications. We propose a general framework that delivers interactive performance to an important subclass of tone mapping operators, namely global tone mapping operators. The proposed framework consists of four steps: sampling the input image, applying the tone mapping operator, tting the point-sampled tone mapping curve, and reconstructing the tone mapping curve for all pixels of the input image. We show how to make use of recent graphics hardware while keeping the advantage of generality by performing tone mapping in software. We demonstrate the capabilities of our method by accelerating several common global tone mapping operators and integrating the operators in a real-time rendering application.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "38--44", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Artusi-2003-Del/", } @inproceedings{Wimmer-2003-RTE, title = "Rendering Time Estimation for Real-Time Rendering", author = "Michael Wimmer and Peter Wonka", year = "2003", abstract = "This paper addresses the problem of estimating the rendering time for a real-time simulation. We study different factors that contribute to the rendering time in order to develop a framework for rendering time estimation. Given a viewpoint (or view cell) and a list of potentially visible objects, we propose several algorithms that can give reasonable upper limits for the rendering time on consumer hardware. This paper also discusses several implementation issues and design choices that are necessary to make the rendering time predictable. Finally, we lay out two extensions to current rendering hardware which would allow implementing a system with constant frame rates.", month = jun, isbn = "3-905673-03-7", publisher = "Eurographics Association", organization = "Eurographics", location = "Leuven, Belgium", editor = "Per Christensen and Daniel Cohen-Or", booktitle = "Rendering Techniques 2003 (Proceedings Eurographics Symposium on Rendering)", pages = "118--129", keywords = "graphics hardware, real-time rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/Wimmer-2003-RTE/", } @inproceedings{zotti-2003-ash, title = "The ASH Virtual Reality Model of the Solar System (VRMoSS)", author = "Georg Zotti and Christoph Traxler", year = "2003", abstract = "This paper describes the development of the 3D graphics part of an astronomical education installation for school children, called the Virtual Control Room. The content is a very detailed model of the Solar System, which can be used with an augmented reality interface or remote controlled with an external XML capable control application.", isbn = "0-88986-382-2", publisher = "ACTA Press", organization = "IASTED", address = "Anaheim, Calgary, Zurich", editor = "M. H. Hamza", booktitle = "Proc. of the Third IASTED Int. Conf. on Visualization, Imaging, and Image Processing", pages = "964--969", keywords = "Augmented Reality, Educational Application, Astronomy and Space Education", URL = "https://www.cg.tuwien.ac.at/research/publications/2003/zotti-2003-ash/", } @inproceedings{Jeschke-2002-TDMR, title = "Textured Depth Meshes for Real-Time Rendering of Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "This paper presents a new approach to generate textured depth meshes (TDMs), an impostor-based scene representation that can be used to accelerate the rendering of static polygonal models. The TDMs are precalculated for a fixed viewing region (view cell). The approach relies on a layered rendering of the scene to produce a voxel-based representation. Secondary, a highly complex polygon mesh is constructed that covers all the voxels. Afterwards, this mesh is simplified using a special error metric to ensure that all voxels stay covered. Finally, the remaining polygons are resampled using the voxel representation to obtain their textures. The contribution of our approach is manifold: first, it can handle polygonal models without any knowledge about their structure. Second, only scene parts that may become visible from within the view cell are represented, thereby cutting down on impostor complexity and storage costs. Third, an error metric guarantees that the impostors are practically indistinguishable compared to the original model (i.e. no rubber-sheet effects or holes appear as in most previous approaches). Furthermore, current graphics hardware is exploited for the construction and use of the impostors.", month = jun, isbn = "1-58133-534-3", publisher = "Eurographics Association", organization = "Eurographics", location = "Pisa, Italy", editor = "Paul Debevec and Simon Gibson", booktitle = "Rendering Techniques 2002 (Proceedings Eurographics Workshop on Rendering)", pages = "181--190", keywords = "Rendering, Walkthrough, Computer Graphics, Impostors", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-TDMR/", } @inproceedings{Jeschke-2002-LEMA, title = "Layered Environment-Map Impostors for Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann", year = "2002", abstract = "This paper presents a new impostor-based approach to accelerate the rendering of very complex static scenes. The scene is partitioned into viewing regions, and a layered impostor representation is precalculated for each of them. An optimal placement of impostor layers guarantees that our representation is indistinguishable from the original geometry. Furthermore the algorithm exploits common graphics hardware both during preprocessing and rendering. Moreover the impostor representation is compressed using several strategies to cut down on storage space.", month = may, isbn = "1-56881-183-7", publisher = "AK Peters Ltd.", location = "Calgary, CA", editor = "Wolfgang St\"{u}rzlinger and Michael McCool", booktitle = "Proceedings of Graphics Interface 2002", pages = "1--8", keywords = "virtual environments, environment maps, impostors, walkthroughs, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-LEMA/", } @inproceedings{tobler02_amrmg, title = "A Multiresolution Mesh Generation Approach for Procedural Definition of Complex Geometry", author = "Robert F. Tobler and Stefan Maierhofer and Alexander Wilkie", year = "2002", isbn = "0-7695-1546-0", publisher = "IEEE", location = "Banff, Alberta, Canada", booktitle = "Shape Modeling International 2002", pages = "35--42", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/tobler02_amrmg/", } @article{tobler02_mbpls, title = "Mesh-Based Parametrized L-Systems and Generalized Subdivision for Generating Complex Geometry", author = "Robert F. Tobler and Stefan Maierhofer and Alexander Wilkie", year = "2002", journal = "International Journal of Shape Modeling", number = "2", volume = "8", pages = "173--191", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/tobler02_mbpls/", } @inproceedings{Bittner-2001-Vis, title = "Visibility Preprocessing for Urban Scenes using Line Space Subdivision", author = "Jir\'{i} Bittner and Peter Wonka and Michael Wimmer", year = "2001", abstract = "We present an algorithm for visibility preprocessing of urban environments. The algorithm uses a subdivision of line space to analytically calculate a conservative potentially visible set for a given region in the scene. We present a detailed evaluation of our method including a comparison to another recently published visibility preprocessing algorithm. To the best of our knowledge the proposed method is the first algorithm that scales to large scenes and efficiently handles large view cells.", month = oct, isbn = "0-7695-1227-5", publisher = "IEEE Computer Society Press", location = "Tokyo, Japan", editor = "Bob Werner", booktitle = "Proceedings of Pacific Graphics 2001 (Ninth Pacific Conference on Computer Graphics and Applications)", pages = "276--284", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Bittner-2001-Vis/", } @article{Wonka-2001-Ins, title = "Instant Visibility", author = "Peter Wonka and Michael Wimmer and Fran\c{c}ois Sillion", year = "2001", abstract = "We present an online occlusion culling system which computes visibility in parallel to the rendering pipeline. We show how to use point visibility algorithms to quickly calculate a tight potentially visible set (PVS) which is valid for several frames, by shrinking the occluders used in visibility calculations by an adequate amount. These visibility calculations can be performed on a visibility server, possibly a distinct computer communicating with the display host over a local network. The resulting system essentially combines the advantages of online visibility processing and region-based visibility calculations, allowing asynchronous processing of visibility and display operations. We analyze two different types of hardware-based point visibility algorithms and address the problem of bounded calculation time which is the basis for true real-time behavior. Our results show reliable, sustained 60 Hz performance in a walkthrough with an urban environment of nearly 2 million polygons, and a terrain flyover.", month = sep, journal = "Computer Graphics Forum", volume = "20", number = "3", note = "G\"{u}nther Enderle [Best Paper] Award, Best Student Paper Award. A. Chalmers and T.-M. Rhyne (eds.), Proceedings EUROGRAPHICS 2001", issn = "0167-7055", pages = "411--421", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wonka-2001-Ins/", } @inproceedings{Wimmer-2001-Poi, title = "Point-Based Impostors for Real-Time Visualization", author = "Michael Wimmer and Peter Wonka and Fran\c{c}ois Sillion", year = "2001", abstract = "We present a new data structure for encoding the appearance of a geometric model as seen from a viewing region (view cell). This representation can be used in interactive or real-time visualization applications to replace a complex model by an impostor, maintaining high quality rendering while cutting down rendering time. Our approach relies on an object-space sampled representation similar to a point cloud or a layered depth image, but introduces two fundamental additions to previous techniques. First, the sampling rate is controlled to provide sufficient density across all possible viewing conditions from the specified view cell. Second, a correct, antialiased representation of the plenoptic function is computed using Monte Carlo integration. Our system therefore achieves high quality rendering using a simple representation with bounded complexity. We demonstrate the method for an application in urban visualization.", month = jun, isbn = "3-211-83709-4", publisher = "Springer-Verlag", organization = "Eurographics", editor = "Steven J. Gortler and Karol Myszkowski", booktitle = "Rendering Techniques 2001 (Proceedings Eurographics Workshop on Rendering)", pages = "163--176", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wimmer-2001-Poi/", } @inproceedings{Artusi-2001-Col, title = "Color Printer Characterization Using Radial Basis Function Networks", author = "Alessandro Artusi and Alexander Wilkie", year = "2001", abstract = "Colorimetric characterization is one step in the colorimetric reproduction process that permits faithful image reproduction across different devices. Its goal is to define a mapping function between the device--dependent color spaces in question (such as RGB or CMYK) and device--independent colour spaces (such as CIELAB or CIEXYZ), and vice versa. The work presented in this paper is an application study of utilizing radial basis function networks for the problem of colorimetric characterization of printer devices. The work we present is novel in seven ways: to begin with, this is the first work that uses radial basis function networks to resolve the colorimetric characterization of printers. Second, we used a new learning model to train such networks; our approach is based on a proposal by Carozza. Third, we use only 125 measured samples for the training of the network. Fourth, the computational costs for this training are very low when compared to previous techniques and allow to use this model in consumer products. Fifth, it is a general model which one can also use to define other transformations between color spaces. Sixth, it is possible to have a fast recharacterization of the device because the computational cost of the training phase and the number of training samples are low. Finally, it improves on the performance of multiple polynomials regression and tetrahedral interpolation. ", month = jan, isbn = "0-8194-3978-9", publisher = "SPIE", note = "SPIE Conference, San Jose California, January 2001", location = "San Jose (USA)", booktitle = "Proceedings Colour Imaging Conference: Device-Independent Colour, Colour Hardcopy, and Graphics Arts VI, IST&SPIE, Electronic Imaging", pages = "70--80", keywords = "neural network, Colorimetric characterization", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Artusi-2001-Col/", } @article{Brusi-2001-Opt, title = "Optimal Ray Shooting in Monte Carlo Radiosity", author = "A. Brusi and Mateu Sbert and Philippe Bekaert and Werner Purgathofer", year = "2001", journal = "Computers&Graphics", volume = "26", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Brusi-2001-Opt/", } @article{Wilkie-2001-Ori, title = "Orientation Lightmaps for Photon Radiosity in Complex Environments", author = "Alexander Wilkie and Robert F. Tobler and Werner Purgathofer", year = "2001", abstract = "We present a method that makes the use of photon tracing methods feasible for complex scenes when a totally accurate solution is not essential. This is accomplished by using orientation lightmaps, which average the illumination of complex objects depending on the surface normal. Through this averaging, they considerably reduce the variance of the stochastic solution. In order to use these specialised lightmaps, which consume comparatively small amounts of memory, no changes have to be made to the basic photon-tracing algorithm. Also, they can be freely mixed with normal lightmaps. This gives the user good control over the amount of inaccuracy he introduces by their application. The area computations necessary for their insertion are performed using a stochastic sampling method that performs well for highly complex objects.", journal = "The Visual Computer", note = "In The Visual Computer, Vol. 17, No. 5, pp. 318-327, Springer, Heidelberg, 2001", URL = "https://www.cg.tuwien.ac.at/research/publications/2001/Wilkie-2001-Ori/", } @inproceedings{wonka-2000-VisP, title = "Visibility Preprocessing with Occluder Fusion for Urban Walkthroughs", author = "Peter Wonka and Michael Wimmer and Dieter Schmalstieg", year = "2000", abstract = "This paper presents an efficient algorithm for occlusion culling of urban environments. It is conservative and accurate in finding all significant occlusion. It discretizes the scene into view cells, for which cell-to-object visibility is precomputed, making on-line overhead negligible. Unlike other precomputation methods for view cells, it is able to conservatively compute all forms of occluder interaction for an arbitrary number of occluders. To speed up preprocessing, standard graphics hardware is exploited and occluder occlusion is considered. A walkthrough application running an 8 million polygon model of the city of Vienna on consumer-level hardware illustrates our results.", month = jun, isbn = "3-211-83535-0", publisher = "Springer-Verlag Wien New York", organization = "Eurographics", location = "held in Brno, Czech Republic, June 26-28, 2000", editor = "Bernard P\'{e}roche and Holly Rushmeier", booktitle = "Rendering Techniques 2000 (Proceedings Eurographics Workshop on Rendering)", pages = "71--82", keywords = "Visibility determination, image-based rendering., occluder occlusion, occluder fusion, urban environments, walkthrough, real-time graphics, shadow algorithms, occlusion culling", URL = "https://www.cg.tuwien.ac.at/research/publications/2000/wonka-2000-VisP/", } @article{Wimmer-1999-FWIb, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one oder of magnitude.", month = dec, issn = "0097-8493", journal = "Computers and Graphics", number = "6", volume = "23", pages = "831--838", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIb/", } @inproceedings{Wimmer-1999-FWIa, title = "Fast Walkthroughs with Image Caches and Ray Casting", author = "Michael Wimmer and Markus Giegl and Dieter Schmalstieg", year = "1999", abstract = "We present an output-sensitive rendering algorithm for accelerating walkthroughs of large, densely occluded virtual environments using a multi-stage Image Based Rendering Pipeline. In the first stage, objects within a certain distance are rendered using the traditional graphics pipeline, whereas the remaining scene is rendered by a pixel-based approach using an Image Cache, horizon estimation to avoid calculating sky pixels, and finally, ray casting. The time complexity of this approach does not depend on the total number of primitives in the scene. We have measured speedups of up to one oder of magnitude.", month = jun, isbn = "3-211-83347-1", publisher = "Springer-Verlag Wien", organization = "Eurographics", editor = "Michael Gervautz and Dieter Schmalstieg and Axel Hildebrand", booktitle = "Virtual Environments '99. Proceedings of the 5th Eurographics Workshop on Virtual Environments", pages = "73--84", URL = "https://www.cg.tuwien.ac.at/research/publications/1999/Wimmer-1999-FWIa/", } @inproceedings{tobler97_hsdasr, title = "A Hierarchical Subdivision Algorithm for Stochastic Radiosity Methods", author = "Robert F. Tobler and Alexander Wilkie and Martin Feda and Werner Purgathofer", year = "1997", abstract = "The algorithm proposed in this paper uses a stochastic approach to incrementally calculate the illumination function over a surface. By tracking the illumination function at different levels of meshing resolution, it is possible to get a measure for the quality of the current representation, and to adoptively subdivide in places with inadequate accuracy. With this technique a hierarchical mesh that is based on the stochastic evaluation of global illumination is generated.", month = jun, publisher = "Springer Wien", organization = "Eurographics", address = "St. Etienne, France", editor = "Julie Dorsey and Philipp Slusallek", booktitle = "Eurographics Rendering Workshop 1997", pages = "193--204", keywords = "radiosity, monte carlo methods", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/tobler97_hsdasr/", } @inproceedings{traxler-1997-TRA, title = "Efficient Ray Tracing of Complex Natural Scenes", author = "Christoph Traxler and Michael Gervautz", year = "1997", abstract = "In this paper we present a method for the consistent modelling and efficient ray tracing of complex natural scenes. Both plants and terrains are modelled and represented in the same way to allow mutual influences of their appearance and interdependencies of their geometry. Plants are generated together with a fractal terrain, so that they directly grow on it. This allows an accurate calculation of reflections and the cast of shadows. The scenes are modeled with a special kind of PL-Systems and are represented by cyclic object-instancing graphs. This is a very compact representation for ray tracing, which avoids restrictions to the complexity of the scenes. To significantly increase the efficiency of ray tracing with this representation an adaptation of conventional optimization techniques to cyclic graphs is necessary. In this paper we introduce methods for the calculation of a bounding box hierarchy and the use of a regular 3d-grid for cyclic graphs.", publisher = "World Scientific Publishers", location = "Denver, Colorado", editor = "M. M. Novak and T. G. Dewey", booktitle = "Proceedings of Fractal 97", keywords = "Cyclic Object Instancing Graphs, PL-systems, Natural Phenomena , Ray Tracing", URL = "https://www.cg.tuwien.ac.at/research/publications/1997/traxler-1997-TRA/", } @inproceedings{purgathofer-1988-simple, title = "A Simple Method for Color Quantization: Octree Quantization", author = "Michael Gervautz and Werner Purgathofer", year = "1988", publisher = "Springer", location = "Genf, Schweiz", event = "Computer Graphics International", editor = "Nadia Magnenat-Thalmann & Daniel Thalmann", booktitle = "New Trends in Computer Graphics", pages = "219--231", URL = "https://www.cg.tuwien.ac.at/research/publications/1988/purgathofer-1988-simple/", }