@article{guerrero-2015-lsp, title = "Learning Shape Placements by Example", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2015", abstract = "We present a method to learn and propagate shape placements in 2D polygonal scenes from a few examples provided by a user. The placement of a shape is modeled as an oriented bounding box. Simple geometric relationships between this bounding box and nearby scene polygons define a feature set for the placement. The feature sets of all example placements are then used to learn a probabilistic model over all possible placements and scenes. With this model we can generate a new set of placements with similar geometric relationships in any given scene. We introduce extensions that enable propagation and generation of shapes in 3D scenes, as well as the application of a learned modeling session to large scenes without additional user interaction. These concepts allow us to generate complex scenes with thousands of objects with relatively little user interaction.", month = aug, journal = "ACM Transactions on Graphics", volume = "34", number = "4", issn = "0730-0301", doi = "10.1145/2766933", pages = "108:1--108:13", keywords = "modeling by example, complex model generation", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/guerrero-2015-lsp/", } @article{Guerrero-2014-TPS, title = "Partial Shape Matching using Transformation Parameter Similarity", author = "Paul Guerrero and Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2014", abstract = "In this paper, we present a method for non-rigid, partial shape matching in vector graphics. Given a user-specified query region in a 2D shape, similar regions are found, even if they are non-linearly distorted. Furthermore, a non-linear mapping is established between the query regions and these matches, which allows the automatic transfer of editing operations such as texturing. This is achieved by a two-step approach. 
First, point-wise correspondences between the query region and the whole shape are established. The transformation parameters of these correspondences are registered in an appropriate transformation space. For transformations between similar regions, these parameters form surfaces in transformation space, which are extracted in the second step of our method. The extracted regions may be related to the query region by a non-rigid transform, enabling non-rigid shape matching.", month = nov, issn = "1467-8659", journal = "Computer Graphics Forum", number = "8", volume = "33", pages = "1--14", keywords = "Shape Matching, Texture Transfer, Non-Rigid, Deformable, Edit Propagation, Partial", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-TPS/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. 
As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{Guerrero-2014-GRF, title = "Edit Propagation using Geometric Relationship Functions", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer and Peter Wonka", year = "2014", abstract = "We propose a method for propagating edit operations in 2D vector graphics, based on geometric relationship functions. These functions quantify the geometric relationship of a point to a polygon, such as the distance to the boundary or the direction to the closest corner vertex. The level sets of the relationship functions describe points with the same relationship to a polygon. For a given query point we ?rst determine a set of relationships to local features, construct all level sets for these relationships and accumulate them. The maxima of the resulting distribution are points with similar geometric relationships. We show extensions to handle mirror symmetries, and discuss the use of relationship functions as local coordinate systems. Our method can be applied for example to interactive ?oor-plan editing, and is especially useful for large layouts, where individual edits would be cumbersome. 
We demonstrate populating 2D layouts with tens to hundreds of objects by propagating relatively few edit operations.", month = mar, journal = "ACM Transactions on Graphics", volume = "33", number = "2", issn = "0730-0301", doi = "10.1145/2591010", pages = "15:1--15:15", keywords = "Shape Modeling, Floor Plans, Edit Propagation, Geometric Relationship Functions", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Guerrero-2014-GRF/", } @article{Auzinger_2013_AnaVis, title = "Analytic Visibility on the GPU", author = "Thomas Auzinger and Michael Wimmer and Stefan Jeschke", year = "2013", abstract = "This paper presents a parallel, implementation-friendly analytic visibility method for triangular meshes. Together with an analytic filter convolution, it allows for a fully analytic solution to anti-aliased 3D mesh rendering on parallel hardware. Building on recent works in computational geometry, we present a new edge-triangle intersection algorithm and a novel method to complete the boundaries of all visible triangle regions after a hidden line elimination step. All stages of the method are embarrassingly parallel and easily implementable on parallel hardware. 
A GPU implementation is discussed and performance characteristics of the method are shown and compared to traditional sampling-based rendering methods.", month = may, journal = "Computer Graphics Forum (Proceeding of EUROGRAPHICS 2013)", volume = "32", number = "2", issn = "1467-8659", pages = "409--418", keywords = "GPU, anti-aliasing, SIMD, filter, rendering, analytic, visibility, close-form, hidden surface elimination, hidden surface removal, GPGPU", URL = "https://www.cg.tuwien.ac.at/research/publications/2013/Auzinger_2013_AnaVis/", } @article{Auzinger_2012_AAA, title = "Analytic Anti-Aliasing of Linear Functions on Polytopes", author = "Thomas Auzinger and Michael Guthe and Stefan Jeschke", year = "2012", abstract = "This paper presents an analytic formulation for anti-aliased sampling of 2D polygons and 3D polyhedra. Our framework allows the exact evaluation of the convolution integral with a linear function defined on the polytopes. The filter is a spherically symmetric polynomial of any order, supporting approximations to refined variants such as the Mitchell-Netravali filter family. This enables high-quality rasterization of triangles and tetrahedra with linearly interpolated vertex values to regular and non-regular grids. 
A closed form solution of the convolution is presented and an efficient implementation on the GPU using DirectX and CUDA C is described.", month = may, journal = "Computer Graphics Forum (Proceedings of EUROGRAPHICS 2012)", volume = "31", number = "2", pages = "335--344", keywords = "Polytope, Filter Design, Analytic Anti-Aliasing, Sampling, Integral Formula, Spherically Symmetric Filter, CUDA, Closed Form Solution, 2D 3D", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/Auzinger_2012_AAA/", } @inproceedings{preiner_2012_AS, title = "Auto Splats: Dynamic Point Cloud Visualization on the GPU", author = "Reinhold Preiner and Stefan Jeschke and Michael Wimmer", year = "2012", abstract = "Capturing real-world objects with laser-scanning technology has become an everyday task. Recently, the acquisition of dynamic scenes at interactive frame rates has become feasible. A high-quality visualization of the resulting point cloud stream would require a per-frame reconstruction of object surfaces. Unfortunately, reconstruction computations are still too time-consuming to be applied interactively. In this paper we present a local surface reconstruction and visualization technique that provides interactive feedback for reasonably sized point clouds, while achieving high image quality. Our method is performed entirely on the GPU and in screen pace, exploiting the efficiency of the common rasterization pipeline. The approach is very general, as no assumption is made about point connectivity or sampling density. This naturally allows combining the outputs of multiple scanners in a single visualization, which is useful for many virtual and augmented reality applications.", month = may, isbn = " 978-3-905674-35-4", organization = "Eurographics Association 2012", location = "Cagliari", editor = "H. Childs and T. 
Kuhlen", booktitle = "Proceedings of Eurographics Symposium on Parallel Graphics and Visualization", pages = "139--148", keywords = "point clouds, surface reconstruction, point rendering, Auto Splats, KNN search, GPU rendering, point based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2012/preiner_2012_AS/", } @article{jeschke-2011-est, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texure, and we provide intuitive manual controls to edit the parameters of the Gabor noise. 
Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling an hatching applications.", month = apr, journal = "Computer Graphics Forum", volume = "30", number = "2", note = "This paper won the 2nd best paper award at Eurographics 2011.", issn = "0167-7055", pages = "523--532", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-est/", } @talk{jeschke-2011-esttalk, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texure, and we provide intuitive manual controls to edit the parameters of the Gabor noise. 
Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling an hatching applications.", event = "Kolloquium of the Institute for Computer Graphics at University of Rostock", location = "Rostock, Germany", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-esttalk/", } @xmascard{jeschke-2011-xMasCard, title = "X-Mas Card 2011", author = "Stefan Jeschke", year = "2011", abstract = "Happy 2012!", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-xMasCard/", } @talk{jeschke-2011-talkPrague, title = "Estimating Color and Texture Parameters for Vector Graphics", author = "Stefan Jeschke", year = "2011", abstract = "Diffusion curves are a powerful vector graphic representation that stores an image as a set of 2D Bezier curves with colors defined on either side. These colors are diffused over the image plane, resulting in smooth color regions as well as sharp boundaries. In this paper, we introduce a new automatic diffusion curve coloring algorithm. We start by defining a geometric heuristic for the maximum density of color control points along the image curves. Following this, we present a new algorithm to set the colors of these points so that the resulting diffused image is as close as possible to a source image in a least squares sense. We compare our coloring solution to the existing one which fails for textured regions, small features, and inaccurately placed curves. The second contribution of the paper is to extend the diffusion curve representation to include texture details based on Gabor noise. Like the curves themselves, the defined texture is resolution independent, and represented compactly. We define methods to automatically make an initial guess for the noise texure, and we provide intuitive manual controls to edit the parameters of the Gabor noise. 
Finally, we show that the diffusion curve representation itself extends to storing any number of attributes in an image, and we demonstrate this functionality with image stippling an hatching applications.", event = "Academy of Sciences of the Czech Republic in Prague", location = "Prague (Czech Republic)", URL = "https://www.cg.tuwien.ac.at/research/publications/2011/jeschke-2011-talkPrague/", } @talk{jeschke-2010-diff, title = "Rendering Diffusion Curves in 2 and 3 Dimensions", author = "Stefan Jeschke", year = "2010", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. The first part of the talk presents a new Laplacian surface solver for a stable rendering of DCIs. It consists of a robust rasterization technique to transform the algebraic curves to the discrete image domain, and a variable stencil size diffusion solver that solves the minimal surface problem. The solver is proven to converge to the right solution, it is at least as fast as commonly used multigrid solvers, but much simpler to implement, works for arbitrary image resolutions, as well as 8 bit data. The second part of the talk extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically allocates more texture memory for details close to the observer. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. 
Our results show high quality renderings at interactive frame rates.", event = "Visit of MPII in Saarbruecken", location = "MPII Saarbruecken", keywords = "Diffusion curves", URL = "https://www.cg.tuwien.ac.at/research/publications/2010/jeschke-2010-diff/", } @article{jeschke-09-solver, title = "A GPU Laplacian Solver for Diffusion Curves and Poisson Image Editing", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "We present a new Laplacian solver for minimal surfaces—surfaces having a mean curvature of zero everywhere except at some fixed (Dirichlet) boundary conditions. Our solution has two main contributions: First, we provide a robust rasterization technique to transform continuous boundary values (diffusion curves) to a discrete domain. Second, we define a variable stencil size diffusion solver that solves the minimal surface problem. We prove that the solver converges to the right solution, and demonstrate that it is at least as fast as commonly proposed multigrid solvers, but much simpler to implement. It also works for arbitrary image resolutions, as well as 8 bit data. We show examples of robust diffusion curve rendering where our curve rasterization and diffusion solver eliminate the strobing artifacts present in previous methods. 
We also show results for real-time seamless cloning and stitching of large image panoramas.", month = dec, journal = "Transaction on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Poisson equation, Line and Curve rendering , Diffusion", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-solver/", } @article{jeschke-09-rendering, title = "Rendering Surface Details with Diffusion Curves", author = "Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. This paper extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically reallocates texture space so that object parts that appear large on screen get more texture for increased detail. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. 
Our results show high quality renderings of diffusion curve textures, displacements, and geometry images, all rendered interactively.", month = dec, journal = "Transaction on Graphics (Siggraph Asia 2009)", volume = "28", number = "5", issn = "0730-0301", booktitle = "Transactions on Graphics (Siggraph Asia 2009)", organization = "ACM", publisher = "ACM Press", pages = "1--8", keywords = "Geometry images, Displacement mapping, Diffusion curves, Line and Curve rendering ", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-rendering/", } @article{cline-09-poisson, title = "Dart Throwing on Surfaces", author = "David Cline and Stefan Jeschke and Anshuman Razdan and Kenric White and Peter Wonka", year = "2009", abstract = "In this paper we present dart throwing algorithms to generate maximal Poisson disk point sets directly on 3D surfaces. We optimize dart throwing by efficiently excluding areas of the domain that are already covered by existing darts. In the case of triangle meshes, our algorithm shows dramatic speed improvement over comparable sampling methods. The simplicity of our basic algorithm naturally extends to the sampling of other surface types, including spheres, NURBS, subdivision surfaces, and implicits. We further extend the method to handle variable density points, and the placement of arbitrary ellipsoids without overlap. Finally, we demonstrate how to adapt our algorithm to work with geodesic instead of Euclidean distance. 
Applications for our method include fur modeling, the placement of mosaic tiles and polygon remeshing.", month = jun, journal = "Computer Graphics Forum", volume = "28", number = "4", pages = "1217--1226", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/cline-09-poisson/", } @article{bhagvat-09-frusta, title = "GPU Rendering of Relief Mapped Conical Frusta", author = "Deepali Bhagvat and Stefan Jeschke and David Cline and Peter Wonka", year = "2009", abstract = "This paper proposes to use relief-mapped conical frusta (cones cut by planes) to skin skeletal objects. Based on this representation, current programmable graphics hardware can perform the rendering with only minimal communication between the CPU and GPU. A consistent definition of conical frusta including texture parametrization and a continuous surface normal is provided. Rendering is performed by analytical ray casting of the relief-mapped frusta directly on the GPU. We demonstrate both static and animated objects rendered using our technique and compare to polygonal renderings of similar quality.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "28", volume = "8", pages = "2131--2139", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/bhagvat-09-frusta/", } @talk{jeschke-09-praguetalk, title = "Diffusion Curve Images--- Rendering in 2 and 3 Dimensions", author = "Stefan Jeschke", year = "2009", abstract = "Diffusion curve images (DCI) provide a powerful tool for efficient 2D image generation, storage and manipulation. A DCI consist of curves with colors defined on either side. By diffusing these colors over the image, the final result includes sharp boundaries along the curves with smoothly shaded regions between them. The first part of the talk presents a new Laplacian surface solver for a stable rendering of DCIs. 
It consists of a robust rasterization technique to transform the algebraic curves to the discrete image domain, and a variable stencil size diffusion solver that solves the minimal surface problem. The solver is proven to converge to the right solution, it is at least as fast as commonly used multigrid solvers, but much simpler to implement, works for arbitrary image resolutions, as well as 8 bit data. The second part of the talk extends the application of diffusion curves to render high quality surface details on 3D objects. The first extension is a view dependent warping technique that dynamically allocates more texture memory for details close to the observer. The second extension is a dynamic feature embedding technique that retains crisp, anti-aliased curve details even in extreme closeups. The third extension is the application of dynamic feature embedding to displacement mapping and geometry images. Our results show high quality renderings at interactive frame rates.", event = "Konversatorium Technical University of Prague ", location = "Prague", keywords = "Diffusion curves", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/jeschke-09-praguetalk/", } @article{Karnik-2010-routevis, title = "Route Visualization using Detail Lenses", author = "Pushpak Karnik and David Cline and Stefan Jeschke and Anshuman Razdan and Peter Wonka", year = "2009", abstract = "We present a method designed to address some limitations of typical route map displays of driving directions. The main goal of our system is to generate a printable version of a route map that shows the overview and detail views of the route within a single, consistent visual frame. Our proposed visualization provides a more intuitive spatial context than a simple list of turns. We present a novel multi-focus technique to achieve this goal, where the foci are defined by points-of-interest (POI) along the route. 
A detail lens that encapsulates the POI at a finer geospatial scale is created for each focus. The lenses are laid out on the map to avoid occlusion with the route and each other, and to optimally utilize the free space around the route. We define a set of layout metrics to evaluate the quality of a lens layout for a given route map visualization. We compare standard lens layout methods to our proposed method and demonstrate the effectiveness of our method in generating aesthetically pleasing layouts. Finally, we perform a user study to evaluate the effectiveness of our layout choices.", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "2", volume = "16", pages = "235--247", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/Karnik-2010-routevis/", } @article{karnik-09-shapegrammar, title = "A Shape Grammar for Developing Glyph-based Visualizations", author = "Pushpak Karnik and Stefan Jeschke and David Cline and Anshuman Razdan and E. Wentz and Peter Wonka", year = "2009", abstract = "In this paper we address the question of how to quickly model glyph-based GIS visualizations. Our solution is based on using shape grammars to set up the different aspects of a visualization, including the geometric content of the visualization, methods for resolving layout conflicts and interaction methods. 
Our approach significantly increases modeling efficiency over similarly flexible systems currently in use.", issn = "0167-7055", journal = "Computer Graphics Forum", number = "8", volume = "28", pages = "2176--2188", URL = "https://www.cg.tuwien.ac.at/research/publications/2009/karnik-09-shapegrammar/", } @article{guerrero-2008-sli, title = "Real-time Indirect Illumination and Soft Shadows in Dynamic Scenes Using Spherical Lights", author = "Paul Guerrero and Stefan Jeschke and Michael Wimmer", year = "2008", abstract = "We present a method for rendering approximate soft shadows and diffuse indirect illumination in dynamic scenes. The proposed method approximates the original scene geometry with a set of tightly fitting spheres. In previous work, such spheres have been used to dynamically evaluate the visibility function to render soft shadows. In this paper, each sphere also acts as a low-frequency secondary light source, thereby providing diffuse one-bounce indirect illumination. The method is completely dynamic and proceeds in two passes: In a first pass, the light intensity distribution on each sphere is updated based on sample points on the corresponding object surface and converted into the spherical harmonics basis. In a second pass, this radiance information and the visibility are accumulated to shade final image pixels. 
The sphere approximation allows us to compute visibility and diffuse reflections of an object at interactive frame rates of over 20 fps for moderately complex scenes.", month = oct, journal = "Computer Graphics Forum", number = "8", volume = "27", pages = "2154--2168", keywords = "global illumination, precomputed radiance transfer, soft shadows", URL = "https://www.cg.tuwien.ac.at/research/publications/2008/guerrero-2008-sli/", } @inproceedings{Scherzer-2007-PCS, title = "Pixel-Correct Shadow Maps with Temporal Reprojection and Shadow Test Confidence", author = "Daniel Scherzer and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "Shadow mapping suffers from spatial aliasing (visible as blocky shadows) as well as temporal aliasing (visible as flickering). Several methods have already been proposed for reducing such artifacts, but so far none is able to provide satisfying results in real time. This paper extends shadow mapping by reusing information of previously rasterized images, stored efficiently in a so-called history buffer. This buffer is updated in every frame and then used for the shadow calculation. In combination with a special confidence-based method for the history buffer update (based on the current shadow map), temporal and spatial aliasing can be completely removed. The algorithm converges in about 10 to 60 frames and during convergence, shadow borders are sharpened over time. Consequently, in case of real-time frame rates, the temporal shadow adaption is practically imperceptible. The method is simple to implement and is as fast as uniform shadow mapping, incurring only the minor speed hit of the history buffer update. 
It works together with advanced filtering methods like percentage closer filtering and more advanced shadow mapping techniques like perspective or light space perspective shadow maps.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "45--50", keywords = "shadow mapping", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Scherzer-2007-PCS/", } @inproceedings{JESCHKE-2007-ISC, title = "Interactive Smooth and Curved Shell Mapping", author = "Stefan Jeschke and Stephan Mantler and Michael Wimmer", year = "2007", abstract = "Shell mapping is a technique to represent three-dimensional surface details. This is achieved by extruding the triangles of an existing mesh along their normals, and mapping a 3D function (e.g., a 3D texture) into the resulting prisms. Unfortunately, such a mapping is nonlinear. Previous approaches perform a piece-wise linear approximation by subdividing the prisms into tetrahedrons. However, such an approximation often leads to severe artifacts. In this paper we present a correct (i.e., smooth) mapping that does not rely on a decomposition into tetrahedrons. We present an efficient GPU ray casting algorithm which provides correct parallax, self-occlusion, and silhouettes, at the cost of longer rendering times. The new formulation also allows modeling shells with smooth curvatures using Coons patches within the prisms. Tangent continuity between adjacent prisms is guaranteed, while the mapping itself remains local, i.e. every curved prism content is modeled at runtime in the GPU without the need for any precomputation. 
This allows instantly replacing animated triangular meshes with prism-based shells.", month = jun, isbn = "978-3-905673-52-4", publisher = "Eurographics Association", organization = "Eurographics", location = "Grenoble, France", editor = "Jan Kautz and Sumanta Pattanaik", booktitle = "Rendering Techniques 2007 (Proceedings Eurographics Symposium on Rendering)", pages = "10", pages = "351--360", keywords = "Display algorithms, Shading", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/JESCHKE-2007-ISC/", } @misc{MANTLER-2007-DMBBC, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = apr, event = "Symposium on Interactive 3D Graphics and Games", note = "Poster presented at Symposium on Interactive 3D Graphics and Games (2007-04-30--2007-05-02)", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/MANTLER-2007-DMBBC/", } @techreport{TR-186-2-07-01, title = "Displacement Mapped Billboard Clouds", author = "Stephan Mantler and Stefan Jeschke and Michael Wimmer", year = "2007", abstract = "This paper introduces displacement mapped billboard clouds (DMBBC), a new image-based rendering primitive for the fast display of geometrically complex objects at medium to far distances. The representation is based on the well-known billboard cloud (BBC) technique, which represents an object as several textured rectangles in order to dramatically reduce its geometric complexity. Our new method uses boxes instead of rectangles, each box representing a volumetric part of the model. Rendering the contents of a box is done entirely on the GPU using ray casting. DMBBCs will often obviate the need to switch to full geometry for closer distances, which is especially helpful for scenes that are densely populated with complex objects, e.g. for vegetation scenes. We show several ways to store the volumetric information, with different tradeoffs between memory requirements and image quality. 
In addition we discuss techniques to accelerate the ray casting algorithm, and a way for smoothly switching between DMBBCs for medium distances and BBCs for far distances.", month = jan, number = "TR-186-2-07-01", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "rendering acceleration, billboard clouds, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/TR-186-2-07-01/", } @article{Habel_2007_IAG, title = "Instant Animated Grass", author = "Ralf Habel and Michael Wimmer and Stefan Jeschke", year = "2007", abstract = "This paper introduces a technique for rendering animated grass in real time. The technique uses front-to-back compositing of implicitly defined grass slices in a fragment shader and therefore significantly reduces the overhead associated with common vegetation rendering systems. We also introduce a texture-based animation scheme that combines global wind movements with local turbulences. Since the technique is confined to a fragment shader, it can be easily integrated into any rendering system and used as a material in existing scenes. ", month = jan, journal = "Journal of WSCG", volume = "15", number = "1-3", note = "ISBN 978-80-86943-00-8", issn = "1213-6972", pages = "123--128", keywords = "Real-time Rendering, Natural Scene Rendering, Natural Phenomena, GPU Programming", URL = "https://www.cg.tuwien.ac.at/research/publications/2007/Habel_2007_IAG/", } @inproceedings{Mantler-06-landscape, title = "Interactive Landscape Visualization Using GPU Ray Casting", author = "Stephan Mantler and Stefan Jeschke", year = "2006", abstract = "This paper demonstrates the simple yet effective usage of height fields for interactive landscape visualizations using a ray casting approach implemented in the pixel shader of modern graphics cards. 
The rendering performance is output sensitive, i.e., it scales with the number of pixels rather than the complexity of the landscape. Given a height field of a terrain and a topographic map or similar data as input, the vegetation cover is extracted and stored on top of the height field in a preprocess, enhancing the terrain with forest canopies or other mesostructure. In addition, enhanced illumination models like shadowing and ambient occlusion can be calculated at runtime with reasonable computational cost, which greatly enhances the scene realism. Finally, including the presented technique into existing rendering systems is relatively simple, mainly consisting of data preparation and pixel shader programming.", month = nov, booktitle = "Proceedings of Graphite 2006", keywords = "real-time rendering, gpu ray casting", URL = "https://www.cg.tuwien.ac.at/research/publications/2006/Mantler-06-landscape/", } @inproceedings{jeschke-05-ISTAR, title = "Image-based Representations for Accelerated Rendering of Complex Scenes", author = "Stefan Jeschke and Michael Wimmer and Werner Purgathofer", year = "2005", abstract = "This paper gives an overview of image-based representations commonly used for reducing the geometric complexity of a scene description in order to accelerate the rendering process. Several different types of representations and ways for using them have been presented, which are classified and discussed here. Furthermore, the overview includes techniques for accelerating the rendering of static scenes or scenes with animations and/or dynamic lighting effects. The advantages and drawbacks of the different approaches are illuminated, and unsolved problems and roads for further research are shown.", month = aug, booktitle = "EUROGRAPHICS 2005 State of the Art Reports", editor = "Y. Chrysanthou and M. 
Magnor", location = "Dublin, Ireland", publisher = "The Eurographics Association and The Image Synthesis Group", organization = "EUROGRAPHICS", pages = "1--20", keywords = "Impostors, Display Algorithms, Three Dimensional Graphics and Realism, Color, Shading, Shadowing and Texture", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ISTAR/", } @inproceedings{jeschke-05-AIP, title = "Automatic Impostor Placement for Guaranteed Frame Rates and Low Memory Requirements", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann and Werner Purgathofer", year = "2005", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to reduce the rendering time needed for displaying complex scenes. However, a big problem is the huge amount of memory required for impostors. This paper presents an algorithm that automatically places impostors into a scene so that a desired frame rate and image quality is always met, while at the same time not requiring enormous amounts of impostor memory. The low memory requirements are provided by a new placement method and through the simultaneous use of other acceleration techniques like visibility culling and geometric levels of detail.", month = apr, isbn = "1-59593-013-2", publisher = "ACM Press", organization = "ACM", location = "Washington DC", booktitle = "Proceedings of ACM SIGGRAPH 2005 Symposium on Interactive 3D Graphics and Games", pages = "103--110", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-AIP/", } @phdthesis{jeschke-05-ARI, title = "Accelerating the Rendering Process Using Impostors", author = "Stefan Jeschke", year = "2005", abstract = "The interactive rendering of three-dimensional geometric models is a research area of big interest in computer graphics. The generation of a fluent animation for complex models, consisting of multiple million primitives, with more than 60 frames per second is a special challenge. 
Possible applications include ship-, driving- and flight simulators, virtual reality and computer games. Although the performance of common computer graphics hardware has dramatically increased in recent years, the demand for more realism and complexity in common scenes is growing even faster. This dissertation is about one approach for accelerating the rendering of such complex scenes. We take advantage of the fact that the appearance of distant scene parts hardly changes for several successive output images. Those scene parts are replaced by precomputed image-based representations, so-called impostors. Impostors are very fast to render while maintaining the appearance of the scene part as long as the viewer moves within a bounded viewing region, a so-called view cell. However, unsolved problems of impostors are the support of a satisfying visual quality with reasonable computational effort for the impostor generation, as well as very high memory requirements for impostors for common scenes. Until today, these problems are the main reason why impostors are hardly used for rendering acceleration. This thesis presents two new impostor techniques that are based on partitioning the scene part to be represented into image layers with different distances to the observer. A new error metric allows a guarantee for a minimum visual quality of an impostor even for large view cells. Furthermore, invisible scene parts are efficiently excluded from the representation without requiring any knowledge about the scene structure, which provides a more compact representation. One of the techniques combines every image layer separately with geometric information. This allows a fast generation of memory-efficient impostors for distant scene parts. In the other technique, the geometry is independent from the depth layers, which allows a compact representation for near scene parts. The second part of this work is about the efficient usage of impostors for a given scene. 
The goal is to guarantee a minimum frame rate for every view within the scene while at the same time minimizing the memory requirements for all impostors. The presented algorithm automatically selects impostors and view cells so that for every view, only the most suitable scene parts are represented as impostors. Previous approaches generated numerous similar impostors for neighboring view cells, thus wasting memory. The new algorithm overcomes this problem. The simultaneous use of additional acceleration techniques further reduces the required impostor memory and allows making best use of all available techniques at the same time. The approach is general in the sense that it can handle arbitrary scenes and a broad range of impostor techniques, and the acceleration provided by the impostors can be adapted to the bottlenecks of different rendering systems. In summary, the provided techniques and algorithms dramatically reduce the required impostor memory and simultaneously guarantee a minimum output image quality. This makes impostors useful for numerous scenes and applications where they could hardly be used before.", month = mar, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", keywords = "image-based rendering, impostors, rendering acceleration", URL = "https://www.cg.tuwien.ac.at/research/publications/2005/jeschke-05-ARI/", } @inproceedings{Jeschke-2002-TDMR, title = "Textured Depth Meshes for Real-Time Rendering of Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "This paper presents a new approach to generate textured depth meshes (TDMs), an impostor-based scene representation that can be used to accelerate the rendering of static polygonal models. The TDMs are precalculated for a fixed viewing region (view cell). The approach relies on a layered rendering of the scene to produce a voxel-based representation. 
Secondary, a highly complex polygon mesh is constructed that covers all the voxels. Afterwards, this mesh is simplified using a special error metric to ensure that all voxels stay covered. Finally, the remaining polygons are resampled using the voxel representation to obtain their textures. The contribution of our approach is manifold: first, it can handle polygonal models without any knowledge about their structure. Second, only scene parts that may become visible from within the view cell are represented, thereby cutting down on impostor complexity and storage costs. Third, an error metric guarantees that the impostors are practically indistinguishable compared to the original model (i.e. no rubber-sheet effects or holes appear as in most previous approaches). Furthermore, current graphics hardware is exploited for the construction and use of the impostors.", month = jun, isbn = "1-58133-534-3", publisher = "Eurographics Association", organization = "Eurographics", location = "Pisa, Italy", editor = "Paul Debevec and Simon Gibson", booktitle = "Rendering Techniques 2002 (Proceedings Eurographics Workshop on Rendering)", pages = "181--190", keywords = "Rendering, Walkthrough, Computer Graphics, Impostors", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-TDMR/", } @inproceedings{Jeschke-2002-LEMA, title = "Layered Environment-Map Impostors for Arbitrary Scenes", author = "Stefan Jeschke and Michael Wimmer and Heidrun Schumann", year = "2002", abstract = "This paper presents a new impostor-based approach to accelerate the rendering of very complex static scenes. The scene is partitioned into viewing regions, and a layered impostor representation is precalculated for each of them. An optimal placement of impostor layers guarantees that our representation is indistinguishable from the original geometry. Furthermore the algorithm exploits common graphics hardware both during preprocessing and rendering. 
Moreover the impostor representation is compressed using several strategies to cut down on storage space.", month = may, isbn = "1-56881-183-7", publisher = "AK Peters Ltd.", location = "Calgary, CA", editor = "Wolfgang St{\"u}rzlinger and Michael McCool", booktitle = "Proceedings of Graphics Interface 2002", pages = "1--8", keywords = "virtual environments, environment maps, impostors, walkthroughs, image-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/Jeschke-2002-LEMA/", } @techreport{TR-186-2-02-04, title = "An Error Metric for Layered Environment Map Impostors", author = "Stefan Jeschke and Michael Wimmer", year = "2002", abstract = "Impostors are image-based primitives commonly used to replace complex geometry in order to accelerate the rendering of large virtual environments. This paper describes a ``layered impostor technique'' used for representing distant scene-parts when seen from a bounded viewing region. A special layer placement is derived which bounds the geometric error introduced by parallaxes to a defined value. In combination with a special technique for image generation, a high-quality impostor representation without image artifacts can be obtained.", month = feb, number = "TR-186-2-02-04", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", institution = "Institute of Computer Graphics and Algorithms, Vienna University of Technology ", note = "human contact: technical-report@cg.tuwien.ac.at", keywords = "impostors, real-time rendering, virtual", URL = "https://www.cg.tuwien.ac.at/research/publications/2002/TR-186-2-02-04/", }