@mastersthesis{SCHUETZ-2016-POT, title = "Potree: Rendering Large Point Clouds in Web Browsers", author = "Markus Sch\"{u}tz", year = "2016", abstract = "This thesis introduces Potree, a web-based renderer for large point clouds. It allows users to view data sets with billions of points, from sources such as LIDAR or photogrammetry, in real time in standard web browsers. One of the main advantages of point cloud visualization in web browsers is that it allows users to share their data sets with clients or the public without the need to install third-party applications or transfer huge amounts of data in advance. The focus on large point clouds, and a variety of measuring tools, also allows users to use Potree to look at, analyze and validate raw point cloud data, without the need for a time-intensive and potentially costly meshing step. The streaming and rendering of billions of points in web browsers, without the need to load large amounts of data in advance, is achieved with a hierarchical structure that stores subsamples of the original data at different resolutions. A low resolution is stored in the root node, and with each level the resolution gradually increases. The structure allows Potree to cull regions of the point cloud that are outside the view frustum, and to render distant regions at a lower level of detail. The result is an open source point cloud viewer, which was able to render point cloud data sets of up to 597 billion points, roughly 1.6 terabytes after compression, in real time in a web browser.", month = sep, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "point cloud rendering, WebGL, LIDAR", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/SCHUETZ-2016-POT/", } @article{arikan-2015-dmrt, title = "Multi-Depth-Map Raytracing for Efficient Large-Scene Reconstruction", author = "Murat Arikan and Reinhold Preiner and Michael Wimmer", year = "2016", abstract = "With the enormous advances in acquisition technology over recent years, fast processing and high-quality visualization of large point clouds have gained increasing attention. Commonly, a mesh surface is reconstructed from the point cloud and a high-resolution texture is generated over the mesh from the images taken at the site to represent surface materials. However, this global reconstruction and texturing approach becomes impractical with increasing data sizes. Recently, due to its potential for scalability and extensibility, a method for texturing a set of depth maps in a preprocessing step and stitching them at runtime has been proposed to represent large scenes. However, the rendering performance of this method is strongly dependent on the number of depth maps and their resolution. Moreover, for the proposed scene representation, every single depth map has to be textured by the images, which in practice heavily increases processing costs. In this paper, we present a novel method to break these dependencies by introducing an efficient raytracing of multiple depth maps. In a preprocessing phase, we first generate high-resolution textured depth maps by rendering the input points from image cameras and then perform a graph-cut based optimization to assign a small subset of these points to the images.
At runtime, we use the resulting point-to-image assignments (1) to identify for each view ray which depth map contains the closest ray-surface intersection and (2) to efficiently compute this intersection point. The resulting algorithm accelerates both the texturing and the rendering of the depth maps by an order of magnitude.", month = feb, doi = "10.1109/TVCG.2015.2430333", issn = "1077-2626", journal = "IEEE Transactions on Visualization and Computer Graphics", number = "2", volume = "22", pages = "1127--1137", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/arikan-2015-dmrt/", } @bachelorsthesis{Mayrhauser-2016-Cnc, title = "Migration of Surface Curve to Most Concave Isoline", author = "Maximilian Mayrhauser", year = "2016", abstract = "In this paper, I present a solution for migrating a curve on a three-dimensional surface to the most concave isoline in its vicinity. Essentially, this problem statement tackles mesh segmentation from a different angle. The search for a suitable segmentation boundary is reduced to a shortest path problem. First, a graph is built using the mesh’s vertices and edges near the input curve. Then, the shortest path is found using the Dijkstra algorithm, where a modified weighting scheme that, among other factors, makes passing through concave edges cheaper results in a path suitable as a segmentation boundary. The final algorithm provides segmentation boundaries of a quality similar to existing segmentation algorithms. The runtime generally lies below one second, making it viable for on-the-fly optimization of the user’s input.", month = dec, address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/Mayrhauser-2016-Cnc/", } @article{ohrhallinger-2016-sgp, title = "Curve Reconstruction with Many Fewer Samples", author = "Stefan Ohrhallinger and Scott A. Mitchell and Michael Wimmer", year = "2016", abstract = "We consider the problem of sampling points from a collection of smooth curves in the plane, such that the Crust family of proximity-based reconstruction algorithms can rebuild the curves. Reconstruction requires a dense sampling of local features, i.e., parts of the curve that are close in Euclidean distance but far apart geodesically. We show that epsilon<0.47-sampling is sufficient for our proposed HNN-CRUST variant, improving upon the state-of-the-art requirement of epsilon<1/3-sampling. Thus we may reconstruct curves with many fewer samples. We also present a new sampling scheme that reduces the required density even further than epsilon<0.47-sampling. We achieve this by better controlling the spacing between geodesically consecutive points. Our novel sampling condition is based on the reach, the minimum local feature size along intervals between samples. This is mathematically closer to the reconstruction density requirements, particularly near sharp-angled features. We prove lower and upper bounds on reach rho-sampling density in terms of lfs epsilon-sampling and demonstrate that we typically reduce the required number of samples for reconstruction by more than half.
", journal = "Computer Graphics Forum", volume = "35", number = "5", issn = "1467-8659", pages = "167--176", keywords = "sampling condition, curve reconstruction, curve sampling", URL = "https://www.cg.tuwien.ac.at/research/publications/2016/ohrhallinger-2016-sgp/", } @inproceedings{SCHUETZ-2015-HQP, title = "High-Quality Point Based Rendering Using Fast Single Pass Interpolation", author = "Markus Sch\"{u}tz and Michael Wimmer", year = "2015", abstract = "We present a method to improve the visual quality of point cloud renderings through a nearest-neighbor-like interpolation of points. This allows applications to render points at larger sizes in order to reduce holes, without reducing the readability of fine details due to occluding points. The implementation requires only few modifications to existing shaders, making it eligible to be integrated in software applications without major design changes.", month = sep, location = "Granada, Spain", booktitle = "Proceedings of Digital Heritage 2015 Short Papers", pages = "369--372", keywords = "point-based rendering", URL = "https://www.cg.tuwien.ac.at/research/publications/2015/SCHUETZ-2015-HQP/", } @article{arikan-2014-pcvis, title = "Large-Scale Point-Cloud Visualization through Localized Textured Surface Reconstruction", author = "Murat Arikan and Reinhold Preiner and Claus Scheiblauer and Stefan Jeschke and Michael Wimmer", year = "2014", abstract = "In this paper, we introduce a novel scene representation for the visualization of large-scale point clouds accompanied by a set of high-resolution photographs. Many real-world applications deal with very densely sampled point-cloud data, which are augmented with photographs that often reveal lighting variations and inaccuracies in registration. Consequently, the high-quality representation of the captured data, i.e., both point clouds and photographs together, is a challenging and time-consuming task. We propose a two-phase approach, in which the first (preprocessing) phase generates multiple overlapping surface patches and handles the problem of seamless texture generation locally for each patch. The second phase stitches these patches at render-time to produce a high-quality visualization of the data. As a result of the proposed localization of the global texturing problem, our algorithm is more than an order of magnitude faster than equivalent mesh-based texturing techniques. Furthermore, since our preprocessing phase requires only a minor fraction of the whole dataset at once, we provide maximum flexibility when dealing with growing datasets.", month = sep, issn = "1077-2626", journal = "IEEE Transactions on Visualization & Computer Graphics", number = "9", volume = "20", pages = "1280--1292", keywords = "image-based rendering, large-scale models, color, surface representation", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/arikan-2014-pcvis/", } @article{preiner2014clop, title = "Continuous Projection for Fast L1 Reconstruction", author = "Reinhold Preiner and Oliver Mattausch and Murat Arikan and Renato Pajarola and Michael Wimmer", year = "2014", abstract = "With better and faster acquisition devices comes a demand for fast robust reconstruction algorithms, but no L1-based technique has been fast enough for online use so far. In this paper, we present a novel continuous formulation of the weighted locally optimal projection (WLOP) operator based on a Gaussian mixture describing the input point density. 
Our method is up to 7 times faster than an optimized GPU implementation of WLOP, and achieves interactive frame rates for moderately sized point clouds. We give a comprehensive quality analysis showing that our continuous operator achieves a generally higher reconstruction quality than its discrete counterpart. Additionally, we show how to apply our continuous formulation to spherical mixtures of normal directions, to also achieve a fast robust normal reconstruction. Project Page: https://www.cg.tuwien.ac.at/~preiner/projects/clop/", month = aug, journal = "ACM Transactions on Graphics (Proc. of ACM SIGGRAPH 2014)", volume = "33", number = "4", issn = "0730-0301", doi = "10.1145/2601097.2601172", pages = "47:1--47:13", keywords = "point set, Gaussian mixture, Hierarchical EM, upsampling, dynamic reconstruction, L1 reconstruction", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/preiner2014clop/", } @inproceedings{Radwan-2014-CDR, title = "Efficient Collision Detection While Rendering Dynamic Point Clouds", author = "Mohamed Radwan and Stefan Ohrhallinger and Michael Wimmer", year = "2014", abstract = "A recent trend in interactive environments is the use of unstructured and temporally varying point clouds. This is driven by both affordable depth cameras and augmented reality simulations. One research question is how to perform collision detection on such point clouds. State-of-the-art methods for collision detection create a spatial hierarchy in order to capture dynamic point cloud surfaces, but they require O(N log N) time for N points. We propose a novel screen-space representation for point clouds which exploits the property of the underlying surface being 2D. To reduce dimensionality, a 3D point cloud is converted into a series of thickened layered depth images. This data structure can be constructed in O(N) time and allows for fast surface queries due to its increased compactness and memory coherency. On top of that, parts of its construction come for free since they are already handled by the rendering pipeline. As an application, we demonstrate online collision detection between dynamic point clouds. It shows superior accuracy when compared to other methods and robustness to sensor noise, since uncertainty is hidden by the thickened boundary.", month = may, isbn = "978-1-4822-6003-8", publisher = "Canadian Information Processing Society", location = "Montreal, Quebec, Canada", issn = "0713-5424", event = "Graphics Interface 2014", booktitle = "Proceedings of the 2014 Graphics Interface Conference", pages = "25--33", keywords = "bounding volumes, layered depth images, collision detection, point cloud, dynamic", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/Radwan-2014-CDR/", } @talk{WIMMER-2014-DWNT, title = "Do we need the full reconstruction pipeline?", author = "Michael Wimmer", year = "2014", abstract = "The traditional cultural heritage documentation pipeline from acquisition using a range scanner to interactive display to the user is a tedious and labor-intensive process. In particular, reconstructing high-quality meshes from large point clouds can be time consuming. In this talk, I will present shortcuts to this pipeline. The first idea is not to reconstruct a mesh at all, but to keep the original point cloud as long as possible. I will discuss the challenges in maintaining interactivity and high quality when dealing with the display and manipulation of huge point clouds.
The second idea is to reconstruct extremely simple models for regular and man-made structures, using shape analysis and user guidance. These models can be shown in end-user installations and require very few resources for display.", event = "EU-Korea Conference on Science and Technology", location = "Vienna, Austria", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/WIMMER-2014-DWNT/", } @phdthesis{scheiblauer-thesis, title = "Interactions with Gigantic Point Clouds", author = "Claus Scheiblauer", year = "2014", abstract = "During the last decade, the increased use of laser range-scanners for sampling the environment has led to gigantic point cloud data sets. Due to the size of such data sets, tasks like viewing, editing, or presenting the data have become a challenge per se, as the point data is too large to fit completely into the main memory of a customary computer system. In order to accomplish these tasks and enable the interaction with gigantic point clouds on consumer grade computer systems, this thesis presents novel methods and data structures for efficiently dealing with point cloud data sets consisting of more than 10^9 point samples. To be able to quickly access point samples that are stored on disk or in memory, they have to be spatially ordered, and for this a data structure is proposed which organizes the point samples in a level-of-detail hierarchy. Point samples stored in this hierarchy can not only be rendered quickly, but also edited; for example, existing points can be deleted from the hierarchy or new points can be inserted. Furthermore, the data structure is memory efficient, as it only uses the point samples from the original data set. Therefore, the memory consumption of the point samples on disk, when stored in this data structure, is comparable to the original data set. A second data structure is proposed for selecting points. This data structure describes a volume inside which point samples are considered to be selected, and this has the advantage that the information about a selection does not have to be stored at the point samples. In addition to these two previously mentioned data structures, which represent novel contributions for point data visualization and manipulation, methods for supporting the presentation of point data sets are proposed. With these methods the user experience can be enhanced when navigating through the data. One possibility to do this is by using regional meshes that employ an out-of-core texturing method to show details at the mesoscopic scale on the surface of sampled objects, and which are displayed together with point clouds. Another possibility to increase the user experience is to use graphs in 3D space, which help users orient themselves inside point cloud models of large sites, where otherwise it would be difficult to find the places of interest. Furthermore, the quality of the displayed point cloud models can be increased by using a point size heuristic that can mimic a closed surface in areas that would otherwise appear undersampled, by utilizing the density of the rendered points in the different areas of the point cloud model. Finally, the use of point cloud models as a tool for archaeological work is proposed. Since it is becoming increasingly common to document archaeologically interesting monuments with laser scanners, the number of application areas for the resulting point clouds is rising as well.
These include, but are not limited to, new views of the monument that are impossible when studying the monument on-site, creating cuts and floor plans, or performing virtual anastylosis. All these previously mentioned methods and data structures are implemented in a single software application that has been developed during the course of this thesis and can be used to interactively explore gigantic point clouds.", address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria", school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology", keywords = "point-based rendering, out-of-core rendering, data structures, complexity analysis", URL = "https://www.cg.tuwien.ac.at/research/publications/2014/scheiblauer-thesis/", }