@xmascard{ilcik-2021-xmas,
  title = "X-Mas Card 2021",
  author = "Martin Il\v{c}\'{i}k and Philipp Erler",
  year = "2021",
  month = nov,
  keywords = "christmas",
  URL = "https://www.cg.tuwien.ac.at/research/publications/2021/ilcik-2021-xmas/",
}

@inproceedings{erler-2020-p2s,
  title = "Points2Surf: Learning Implicit Surfaces from Point Clouds",
  author = "Philipp Erler and Paul Guerrero and Stefan Ohrhallinger and Michael Wimmer and Niloy Mitra",
  year = "2020",
  abstract = "A key step in any scanning-based asset creation workflow is to convert unordered point clouds to a surface. Classical methods (e.g., Poisson reconstruction) start to degrade in the presence of noisy and partial scans. Hence, deep learning based methods have recently been proposed to produce complete surfaces, even from partial scans. However, such data-driven methods struggle to generalize to new shapes with large geometric and topological variations. We present Points2Surf, a novel patch-based learning framework that produces accurate surfaces directly from raw scans without normals. Learning a prior over a combination of detailed local patches and coarse global information improves generalization performance and reconstruction accuracy. Our extensive comparison on both synthetic and real data demonstrates a clear advantage of our method over state-of-the-art alternatives on previously unseen classes (on average, Points2Surf brings down reconstruction error by 30% over SPR and by 270%+ over deep learning based SotA methods) at the cost of longer computation times and a slight increase in small-scale topological noise in some cases. Our source code, pre-trained model, and dataset are available at: https://github.com/ErlerPhilipp/points2surf",
  month = oct,
  isbn = "978-3-030-58558-7",
  series = "Lecture Notes in Computer Science",
  publisher = "Springer International Publishing",
  location = "Glasgow, UK (online)",
  address = "Cham",
  event = "ECCV 2020",
  editor = "Vedaldi, Andrea and Bischof, Horst and Brox, Thomas and Frahm, Jan-Michael",
  doi = "10.1007/978-3-030-58558-7_7",
  booktitle = "Computer Vision -- ECCV 2020",
  volume = "12350",
  pages = "108--124",
  keywords = "surface reconstruction, implicit surfaces, point clouds, patch-based, local and global, deep learning, generalization",
  URL = "https://www.cg.tuwien.ac.at/research/publications/2020/erler-2020-p2s/",
}

@mastersthesis{ERLER-2017-HVR,
  title = "Haptic Feedback in Room-Scale VR",
  author = "Philipp Erler",
  year = "2017",
  abstract = "Virtual reality (VR) is now becoming a mainstream medium. Current systems like the HTC Vive offer accurate tracking of the HMD and controllers, which allows for highly immersive interactions with the virtual environment. The interactions can be further enhanced by adding feedback. As an example, a controller can vibrate when it is close to a grabbable ball. As such interactions are not exhaustively researched, we conducted a user study. Specifically, we examine: - grabbing and throwing with controllers in a simple basketball game. - the influence of haptic and optical feedback on performance, presence, task load, and usability. - the advantages of VR over desktop for point-cloud editing. Several new techniques emerged from the point-cloud editor for VR. The bi-manual pinch gesture, which extends the handlebar metaphor, is a novel viewing method used to translate, rotate, and scale the point cloud. Our new rendering technique uses the geometry shader to draw sparse point clouds quickly. The selection volumes at the controllers are our new technique to efficiently select points in point clouds. The resulting selection is visualized in real time. The results of the user study show that: - grabbing with a controller button is intuitive, but throwing is not. Releasing a button is a bad metaphor for releasing a grabbed virtual object in order to throw it. - any feedback is better than none. Adding haptic, optical, or both feedback types to the grabbing improves user performance and presence. However, only sub-scores like accuracy and predictability are significantly improved. Usability and task load are mostly unaffected by feedback. - point-cloud editing is significantly better in VR with the bi-manual pinch gesture and selection volumes than on the desktop with the orbiting camera and lasso selections.",
  month = jul,
  address = "Favoritenstrasse 9-11/E193-02, A-1040 Vienna, Austria",
  school = "Institute of Computer Graphics and Algorithms, Vienna University of Technology",
  keywords = "virtual reality, room-scale VR, throwing, grabbing, physics, basketball, haptic feedback, optical feedback, controllers, point cloud, point-cloud editing, presence, performance, usability, task load",
  URL = "https://www.cg.tuwien.ac.at/research/publications/2017/ERLER-2017-HVR/",
}

@studentproject{erler_philipp-2016-femfluid-prakt,
  title = "Finite Element Fluids in Matlab",
  author = "Philipp Erler",
  year = "2016",
  abstract = "Implementation of a 2D finite-element free-surface liquid simulation with solid obstacles in Matlab.",
  month = oct,
  keywords = "Finite Element Method, FEM, Fluid Simulation",
  URL = "https://www.cg.tuwien.ac.at/research/publications/2016/erler_philipp-2016-femfluid-prakt/",
}