[
    {
        "id": "Meier_2024_WIALT",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Simulating Windows for Lighting Design Optimization",
        "date": "2024-12-20",
        "abstract": "For lighting design, optimizing windows plays a major role, especially for office and living spaces, as natural light is important for focus, productivity and also mood. The rendering framework Tamashii, which is currently in development at the research unit of computer graphics at TU Wien, offers a feature to automatically optimize multiple parameters of light sources like the position, intensity or rotation for a predefined lighting target. This thesis aims to expand the possibilities Tamashii offers for lighting design by simulating windows through area lights. Tamashii’s automatic light parameter optimization relies on light tracing, which unlike path tracing, casts the light rays from the light sources instead of the camera. This is why implementing environment maps in a classical sense is not feasible, as emitting light rays from each pixel of the environment map only for a small percentage to go through the window is very inefficient. We implement a new type of light that combines area lights with Illuminating Engineering Society (IES) lights in order to simulate windows. The IES standard is a file format commonly used by luminaire manufacturers to describe the physical properties of a luminaire for simulation in software. To accurately mimic the light that shines through real windows, we convert High Dynamic Range (HDR) files into IES profiles, which our lights can then use. Our new light type can also be attached to models in the scene, such as walls or roofs, which constrains the manual movement of the windows to the connected object and makes their usage more intuitive. In our tests, we find that our implementation is able to realistically simulate real windows when compared to the same combination of scenes and HDR files in Blender’s path tracing renderer Cycles. 
To ensure that the light parameter optimization algorithm only moves the window lights inside the model its connected to, we implement a constraint that gets evaluated repeatedly while optimizing. We realize this by calculating penalties when the light reaches the edges of the model, in order to encourage the algorithm to keep the window light inside. When evaluating our implementation we find that with the activated constraint, the algorithm is able to find valid positions for the window lights when optimizing.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5362
        ],
        "date_end": "2024-12-20",
        "date_start": "2024-05-02",
        "matrikelnr": "e11908106",
        "supervisor": [
            193,
            1946
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "Meier_2024_WIALT-thesis.pdf",
                "type": "application/pdf",
                "size": 80250870,
                "path": "Publication:Meier_2024_WIALT",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Meier_2024_WIALT/Meier_2024_WIALT-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Meier_2024_WIALT/Meier_2024_WIALT-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend",
            "d4314"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Meier_2024_WIALT/",
        "__class": "Publication"
    },
    {
        "id": "cardoso-thesis",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/209309",
        "title": "Approaching Under-Explored Image-Space Problems with Optimization",
        "date": "2024-12-19",
        "abstract": "This doctoral dissertation delves into three distinct yet interconnected problems in the realm of interactive image-space computing in computer graphics, each of which has not been tackled by existing literature.The first problem centers on the prediction of visual error metrics in real-time applications, specifically in the context of content-adaptive shading and shading reuse. Utilizing convolutional neural networks, this research aims to estimate visual errors without requiring reference or rendered images. The models developed can account for 70%–90% of the variance and achieve computation times that are an order of magnitude faster than existing methods. This enables a balance between resource-saving and visual quality, particularly in deferred shading pipelines, and can achieve up to twice the performance compared to state-of-the-art methods depending on the portion of unseen image regions. The second problem focuses on the burgeoning field of light-field cameras and the challenges associated with depth prediction. This research argues for the refinement of cost volumes rather than depth maps to increase the accuracy of depth predictions. A set of cost-volume refinement algorithms is proposed, which dynamically operate at runtime to find optimal solutions, thereby enhancing the accuracy and reliability of depth estimation in light fields.The third problem tackles the labor-intensive nature of hand-drawn animation, specifically in the detailing of character eyes. An unsupervised network is introduced that blends inpainting and image-to-image translation techniques. This network employs a novel style-aware clustering method and a dual-discriminator optimization strategy with a triple-reconstruction loss. 
The result is an improvement in the level of detail and artistic consistency in hand-drawn animation, preferred over existing work 95.16% of the time according to a user study.Optimization techniques are the common thread that ties these problems together. While dynamic optimization at runtime is employed for cost volume refinement, deep-learning methods are used offline to train global solutions for the other two problems. This research not only fills gaps in the existing literature but also paves the way for future explorations in the field of computer graphics and optimization, offering new avenues for both academic research and practical applications.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "date",
        "repositum_presentation_id": null,
        "authors": [
            1639
        ],
        "ac_number": "AC17414787",
        "date_end": "2024-12-19",
        "date_start": "2019-04",
        "doi": "10.34726/hss.2025.128664",
        "matrikelnr": "11937133",
        "open_access": "yes",
        "pages": "110",
        "reviewer_1": [
            1825
        ],
        "reviewer_2": [
            5420
        ],
        "rigorosum": "2024-12-19",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Perception",
            "Rendering"
        ],
        "keywords": [
            "variable-rate shading",
            "light-fields",
            "limited animation",
            "anime",
            "convolutional neural networks"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "cardoso-thesis-thesis.pdf",
                "type": "application/pdf",
                "size": 47447576,
                "path": "Publication:cardoso-thesis",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/cardoso-thesis-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/cardoso-thesis-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "EVOCATION"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-thesis/",
        "__class": "Publication"
    },
    {
        "id": "groeller-2024-cui",
        "type_id": "talk",
        "tu_id": null,
        "repositum_id": "20.500.12708/210589",
        "title": "Certain Uncertainties in Visual Data Science",
        "date": "2024-12-10",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            166
        ],
        "date_from": "2024-12-10",
        "date_to": "2024-12-10",
        "event": "Faculty Colloquium",
        "lecturer": [
            166
        ],
        "location": "Passau",
        "research_areas": [],
        "keywords": [
            "uncertainty visualization"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/groeller-2024-cui/",
        "__class": "Publication"
    },
    {
        "id": "marin-2024-sing",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/208566",
        "title": "SING: Stability-Incorporated Neighborhood Graph",
        "date": "2024-12",
        "abstract": "We introduce the Stability-Incorporated Neighborhood Graph (SING), a novel density-aware structure designed to capture the intrinsic geometric properties of a point set. We improve upon the spheres-of-influence graph by incorporating additional features to offer more flexibility and control in encoding proximity information and capturing local density variations. Through persistence analysis on our proximity graph, we propose a new clustering technique and explore additional variants incorporating extra features for the proximity criterion. Alongside the detailed analysis and comparison to evaluate its performance on various datasets, our experiments demonstrate that the proposed method can effectively extract meaningful clusters from diverse datasets with variations in density and correlation. Our application scenarios underscore the advantages of the proposed graph over classical neighborhood graphs, particularly in terms of parameter tuning.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2607,
            "image_height": 709,
            "name": "marin-2024-sing-image.png",
            "type": "image/png",
            "size": 897420,
            "path": "Publication:marin-2024-sing",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date",
        "repositum_presentation_id": null,
        "authors": [
            1848,
            1825,
            948,
            193,
            5431,
            5297
        ],
        "booktitle": "SA '24: SIGGRAPH Asia 2024 Conference Papers",
        "date_from": "2024-12-03",
        "date_to": "2024-12-06",
        "doi": "10.1145/3680528.3687674",
        "editor": "Igarashi, Takeo and Shamir, Ariel and Zhang, Hao",
        "event": "SA '24: SIGGRAPH Asia 2024",
        "isbn": "979-8-4007-1131-2",
        "lecturer": [
            1848
        ],
        "location": "Tokyo",
        "pages": "10",
        "pages_from": "1",
        "pages_to": "10",
        "publisher": "Association for Computing Machinery",
        "research_areas": [
            "Geometry"
        ],
        "keywords": [
            "Proximity graphs",
            "Stipple art editing",
            "Pattern design",
            "Network topology",
            "clustering",
            "point patterns",
            "similarity metric",
            "discrete distributions",
            "persistence analysis",
            "Neighborhood graph",
            "topological data analysis",
            "K-means",
            "Rips complexes"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2607,
                "image_height": 709,
                "name": "marin-2024-sing-image.png",
                "type": "image/png",
                "size": 897420,
                "path": "Publication:marin-2024-sing",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "marin-2024-sing-paper.pdf",
                "type": "application/pdf",
                "size": 12994577,
                "path": "Publication:marin-2024-sing",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/marin-2024-sing-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-sing/",
        "__class": "Publication"
    },
    {
        "id": "xmas-2024",
        "type_id": "xmascard",
        "tu_id": null,
        "repositum_id": null,
        "title": "X-Mas Card 2024",
        "date": "2024-12",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1863,
            "image_height": 1318,
            "name": "xmas-2024-.PNG",
            "type": "image/png",
            "size": 6124629,
            "path": "Publication:xmas-2024",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-.PNG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1937
        ],
        "research_areas": [
            "Modeling"
        ],
        "keywords": [
            "Christmas",
            "Digital Elevation Models",
            "L-system-based modeling"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1863,
                "image_height": 1318,
                "name": "xmas-2024-.PNG",
                "type": "image/png",
                "size": 6124629,
                "path": "Publication:xmas-2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-.PNG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-:thumb{{size}}.png"
            },
            {
                "description": "Christmas Card as PDF",
                "filetitle": "Card",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "xmas-2024-Card.pdf",
                "type": "application/pdf",
                "size": 3113633,
                "path": "Publication:xmas-2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-Card.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/xmas-2024-Card:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "xmas"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/xmas-2024/",
        "__class": "Publication"
    },
    {
        "id": "sakai-2024-asa",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/209940",
        "title": "A Statistical Approach to Monte Carlo Denoising",
        "date": "2024-12",
        "abstract": "The stochastic nature of modern Monte Carlo (MC) rendering methods inevitably produces noise in rendered images for a practical number of samples per pixel. The problem of denoising these images has been widely studied, with most recent methods relying on data-driven, pretrained neural networks. In contrast, in this paper we propose a statistical approach to the denoising problem, treating each pixel as a random variable and reasoning about its distribution. Considering a pixel of the noisy rendered image, we formulate fast pair-wise statistical tests—based on online estimators—to decide which of the nearby pixels to exclude from the denoising filter. We show that for symmetric pixel weights and normally distributed samples, the classical Welch t-test is optimal in terms of mean squared error. We then show how to extend this result to handle non-normal distributions, using more recent confidence-interval formulations in combination with the Box-Cox transformation. Our results show that our statistical denoising approach matches the performance of state-of-the-art neural image denoising without having to resort to any computation-intensive pretraining. Furthermore, our approach easily generalizes to other quantities besides pixel intensity, which we demonstrate by showing additional applications to Russian roulette path termination and multiple importance sampling.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Image illustrating the proposed denoising method, created by the paper authors. The “Wooden Staircase” scene has been created by Wig42 (https://blendswap.com/profile/130393) under the CC BY 3.0 license.",
            "filetitle": "Representative Image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1980,
            "image_height": 1320,
            "name": "sakai-2024-asa-Representative Image.jpg",
            "type": "image/jpeg",
            "size": 3344845,
            "path": "Publication:sakai-2024-asa",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Representative Image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Representative Image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,articleno",
        "repositum_presentation_id": null,
        "authors": [
            1129,
            1128,
            808,
            1946,
            193
        ],
        "articleno": "68",
        "booktitle": "SA '24: SIGGRAPH Asia 2024 Conference Papers",
        "date_from": "2024-12-03",
        "date_to": "2024-12-06",
        "doi": "10.1145/3680528.3687591",
        "event": "SA '24: SIGGRAPH Asia 2024",
        "isbn": "979-8-4007-1131-2",
        "lecturer": [
            1129
        ],
        "location": "Tokyo",
        "open_access": "yes",
        "pages": "11",
        "publisher": "Association for Computing Machinery",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "Monte Carlo rendering",
            "path tracing",
            "denoising",
            "image filtering",
            "statistics"
        ],
        "weblinks": [
            {
                "href": "https://www.cg.tuwien.ac.at/StatMC",
                "caption": "Project Page",
                "description": null,
                "main_file": true
            }
        ],
        "files": {
            "0": {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sakai-2024-asa-paper.pdf",
                "type": "application/pdf",
                "size": 11789710,
                "path": "Publication:sakai-2024-asa",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-paper:thumb{{size}}.png"
            },
            "1": {
                "description": "Image illustrating the proposed denoising method, created by the paper authors. The “Wooden Staircase” scene has been created by Wig42 (https://blendswap.com/profile/130393) under the CC BY 3.0 license.",
                "filetitle": "Representative Image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1980,
                "image_height": 1320,
                "name": "sakai-2024-asa-Representative Image.jpg",
                "type": "image/jpeg",
                "size": 3344845,
                "path": "Publication:sakai-2024-asa",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Representative Image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Representative Image:thumb{{size}}.png"
            },
            "3": {
                "description": null,
                "filetitle": "Supplementary Document",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sakai-2024-asa-Supplementary Document.pdf",
                "type": "application/pdf",
                "size": 10739991,
                "path": "Publication:sakai-2024-asa",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Supplementary Document.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/sakai-2024-asa-Supplementary Document:thumb{{size}}.png"
            }
        },
        "projects_workgroups": [
            "d9259"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/sakai-2024-asa/",
        "__class": "Publication"
    },
    {
        "id": "pahr-2024-squishicalization",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/208691",
        "title": "Squishicalization: Exploring Elastic Volume Physicalization",
        "date": "2024-12",
        "abstract": "We introduce Squishicalization , a pipeline for generating physicalizations of volumetric data that encode scalar information through their physical characteristics—specifically, by varying their “squishiness” or local elasticity. Data physicalization research is increasingly exploring multisensory information encoding, with a particular focus on enhancing direct interactivity. With Squishicalization , we leverage the tactile dimension of physicalization as a means of direct interactivity. Inspired by conventional volume rendering, we adapt the concept of transfer functions to encode scalar values from volumetric data into local elasticity levels. In this way, volumetric scalar data are transformed into sculptures, where the elasticity represents physical properties such as the material's density distribution within the volume. In our pipeline, scalar values guide the weighted sampling of the scalar field. The sampled data is then processed through Voronoi tessellation to create a sponge-like structure, which can be printed with consumer-grade 3D printers and readily available filament. To validate our pipeline, we conduct a computational and mechanical evaluation, as well as a two-stage perceptual study of the capabilities of our generated squishicalizations. To further investigate potential application scenarios, we interview experts across several domains. Finally, we summarize actionable insights and future avenues for the application of our All supplemental materials are available at https://osf.io/35gnv/?view_only=605e5085061f40439a98545f0c447cf3 .",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "A hand coming from the left side of the picture squeezes the face of a printed representation of an MRI.",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2177,
            "image_height": 1639,
            "name": "pahr-2024-squishicalization-teaser.png",
            "type": "image/png",
            "size": 3380920,
            "path": "Publication:pahr-2024-squishicalization",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-squishicalization/pahr-2024-squishicalization-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-squishicalization/pahr-2024-squishicalization-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,open_access",
        "repositum_presentation_id": null,
        "authors": [
            1813,
            5430,
            1464,
            1410
        ],
        "doi": "10.1109/TVCG.2024.3516481",
        "issn": "1941-0506",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "open_access": "yes",
        "pages": "14",
        "pages_from": "1",
        "pages_to": "14",
        "publisher": "IEEE COMPUTER SOC",
        "research_areas": [
            "Fabrication"
        ],
        "keywords": [
            "Elasticity",
            "Three Dimensional Printing",
            "Pipelines",
            "Fabrication",
            "Microstructures",
            "Rendering Computer Graphics",
            "Encoding",
            "Printing",
            "Data Physicalization",
            "Data Visualization",
            "Digital Fabrication"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "A hand coming from the left side of the picture squeezes the face of a printed representation of an MRI.",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2177,
                "image_height": 1639,
                "name": "pahr-2024-squishicalization-teaser.png",
                "type": "image/png",
                "size": 3380920,
                "path": "Publication:pahr-2024-squishicalization",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-squishicalization/pahr-2024-squishicalization-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-squishicalization/pahr-2024-squishicalization-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-squishicalization/",
        "__class": "Publication"
    },
    {
        "id": "ehlers-2024-mmm",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/205726",
        "title": "Me! Me! Me! Me! A study and comparison of ego network representations",
        "date": "2024-12",
        "abstract": "From social networks to brain connectivity, ego networks are a simple yet powerful approach to visualizing parts of a larger graph, i.e. those related to a selected focal node — the so-called “ego”. While surveys and comparisons of general graph visualization approaches exist in the literature, we note (i) the many conflicting results of comparisons of adjacency matrices and node-link diagrams, thus motivating further study, as well as (ii) the absence of such systematic comparisons for ego networks specifically. In this paper, we propose the development of empirical recommendations for ego network visualization strategies. First, we survey the literature across application domains and collect examples of network visualizations to identify the most common visual encodings, namely straight-line, radial, and layered node-link diagrams, as well as adjacency matrices. These representations are then applied to a representative, intermediate-sized network and subsequently compared in a large-scale, crowd-sourced user study in a mixed-methods analysis setup to investigate their impact on both user experience and performance. Within the limits of this study, and contrary to previous comparative investigations of adjacency matrices and node-link diagrams (outside of ego networks specifically), participants performed systematically worse when using adjacency matrices than those using node-link diagrammatic representations. Similar to previous comparisons of different node-link diagrams, we do not detect any notable differences in participant performance between the three node-link diagrams. Lastly, our quantitative and qualitative results indicate that participants found adjacency matrices harder to learn, use, and understand than node-link diagrams. We conclude that in terms of both participant experience and performance, a layered node-link diagrammatic representation appears to be the most preferable for ego network visualization purposes.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1850,
            1813,
            5417,
            1464,
            1410
        ],
        "articleno": "104123",
        "doi": "10.1016/j.cag.2024.104123",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "open_access": "yes",
        "pages": "15",
        "pages_from": "1",
        "pages_to": "15",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "125",
        "research_areas": [
            "NetVis"
        ],
        "keywords": [
            "Adjacency matrix",
            "Ego network visualization",
            "Layered node-link diagram",
            "Radial node-link diagram",
            "Straight-line node-link diagram",
            "User study"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "ehlers-2024-mmm-paper.pdf",
                "type": "application/pdf",
                "size": 2475868,
                "path": "Publication:ehlers-2024-mmm",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ehlers-2024-mmm/ehlers-2024-mmm-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ehlers-2024-mmm/ehlers-2024-mmm-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ehlers-2024-mmm/",
        "__class": "Publication"
    },
    {
        "id": "schultes_lampi",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Live Ambient Physicalization Interface for dynamic Data - LAMPI",
        "date": "2024-11",
        "abstract": "Data physicalizations are becoming increasingly popular as a means of connecting people to abstract data and may help integrate the flood of information collected by modern technology into our everyday lives. In this thesis, I describe the design process for a software framework facilitating the physicalization of a stream of live data as well as the prototype of a dynamic shape and color-changing data physicalization for said data. I simulated elderly patients sharing their data using a recorded dataset to show the capabilities of the software framework and physicalization. The proposed concept provides a new method for communicating data in remote monitoring scenarios that can be built from accessible materials. It is also capable of showcasing data for other use cases with minimal adaptations, further expanding the possibilities for data physicalization.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1750,
            "image_height": 1800,
            "name": "schultes_lampi-teaser.png",
            "type": "image/png",
            "size": 2507235,
            "path": "Publication:schultes_lampi",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5416
        ],
        "date_end": "2024-11",
        "date_start": "2024-06",
        "matrikelnr": "12025959",
        "supervisor": [
            1813
        ],
        "research_areas": [
            "Fabrication",
            "IllVis",
            "MedVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1750,
                "image_height": 1800,
                "name": "schultes_lampi-teaser.png",
                "type": "image/png",
                "size": 2507235,
                "path": "Publication:schultes_lampi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "schultes_lampi-thesis.pdf",
                "type": "application/pdf",
                "size": 27678879,
                "path": "Publication:schultes_lampi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/schultes_lampi-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/schultes_lampi/",
        "__class": "Publication"
    },
    {
        "id": "lucio-2024-yfy",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/208563",
        "title": "Your Face, Your Anatomy: Flashcard Lenses Enriched with Knowledge Maps for Anatomy Education",
        "date": "2024-11",
        "abstract": "Traditional anatomy flashcards, with their recognizable static illustrations on the front side and comprehensive lists of concepts on the back, are a long-standing tool for memorizing and refreshing anatomical concepts. This study repurposes such established tool by introducing two key elements: (i) Augmented Reality (AR) lenses acting as magic mirrors enabling users to view anatomical illustrations mapped onto their own faces, and (ii) a knowledge map layer acting as the card’s backside to visually and explicitly illustrate conceptual connections between anatomical reference points. Using Snapchat’s Lens Studio, we crafted a deck of interactive facial anatomy flashcards to assess the potential of AR and knowledge maps for retaining and refreshing anatomical concepts. We conducted a user study involving 44 university-level students. Divided into two groups, participants utilized either flashcard lenses with knowledge maps or traditional flashcards to quickly grasp and refresh anatomical concepts. By employing an approach that integrates anatomical quizzes for objective assessment with surveys and interviews for subjective feedback, our results indicate that anatomy flashcard lenses with knowledge maps offer a more engaging educational experience, yielding higher user preferences and satisfaction levels compared to traditional flashcards. While both approaches showed similar effectiveness in quiz scores, anatomy flashcard lenses with knowledge maps were favored for their usability, significantly reducing temporal demand. These findings underscore the engaging and effective nature of anatomy flashcard lenses with knowledge maps, highlighting them as an alternative tool for the quick retention and review of anatomical concepts.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5407,
            1410,
            5412,
            5413
        ],
        "booktitle": "Proceedings 2024 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
        "date_from": "2024-10-21",
        "date_to": "2024-10-25",
        "editor": "Eck, Ulrich and Sra, Misha and Stefanucci, Jeanine and Sugimoto, Maki and Tatzgern, Markus and Williams, Ian",
        "event": "2024 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)",
        "isbn": "979-8-3315-1647-5",
        "lecturer": [
            5407
        ],
        "location": "Seattle",
        "pages": "10",
        "pages_from": "495",
        "pages_to": "504",
        "research_areas": [],
        "keywords": [
            "flashcards",
            "anatomy education",
            "mobile augmented reality",
            "embodied learning",
            "knowledge maps",
            "snapchat"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/lucio-2024-yfy/",
        "__class": "Publication"
    },
    {
        "id": "thesis-kronsteiner",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Einfluss des Seitenverhältnisses auf Parallele Koordinaten",
        "date": "2024-10-10",
        "abstract": "Parallel coordinates are a unique visualization technique that presents promising opportunities for the visualization of large and diverse multivariate datasets. Applications\nsuch as web-based visualizations and dashboards are common use cases for this type\nof data. Prevalent concepts in the modern web are responsive design - the ability of\na web page to fit any screen resolution - as well as interactivity and customizability,\nrequiring us to consider the role of aspect ratio in the design of visual displays. We\nimplemented a web-based tool and conducted a statistical analysis of angle parameters\nin parallel coordinates plots. Our results indicate a significant influence of aspect ratio\non the display of parallel coordinates, and show that landscape orientations are more\nconsistent across different aspect ratios than portrait orientations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1159,
            "image_height": 814,
            "name": "thesis-kronsteiner-image.jpeg",
            "type": "image/jpeg",
            "size": 400170,
            "path": "Publication:thesis-kronsteiner",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image.jpeg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5398
        ],
        "date_end": "2024-10-10",
        "date_start": "2024-04-10",
        "matrikelnr": "11808233",
        "supervisor": [
            166
        ],
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1159,
                "image_height": 814,
                "name": "thesis-kronsteiner-image.jpeg",
                "type": "image/jpeg",
                "size": 400170,
                "path": "Publication:thesis-kronsteiner",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image.jpeg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "thesis-kronsteiner-thesis.pdf",
                "type": "application/pdf",
                "size": 7513311,
                "path": "Publication:thesis-kronsteiner",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/thesis-kronsteiner-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-kronsteiner/",
        "__class": "Publication"
    },
    {
        "id": "furmanova-2024-bvp",
        "type_id": "book",
        "tu_id": null,
        "repositum_id": "20.500.12708/204500",
        "title": "BioMedical Visualization : Past Work, Current Trends, and Open Challenges",
        "date": "2024-10-01",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5406,
            1248,
            1819,
            166,
            563,
            1410
        ],
        "doi": "10.1007/978-3-031-66789-3",
        "isbn": "978-3-031-66788-6",
        "pages": "159",
        "publisher": "Springer Nature",
        "research_areas": [],
        "keywords": [
            "biomedical visualization"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/furmanova-2024-bvp/",
        "__class": "Publication"
    },
    {
        "id": "shilo-2024-vnt",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/200043",
        "title": "Visual narratives to edutain against misleading visualizations in healthcare",
        "date": "2024-10",
        "abstract": "We propose an interactive game based on visual narratives to edutain, i.e., to educate while entertaining, broad audiences against misleading visualizations in healthcare. Uncertainty at various stages of the visualization pipeline may give rise to misleading visual representations. These comprise misleading elements that may negatively impact the audiences by contributing to misinformed decisions, delayed treatments, and a lack of trust in medical information. We investigate whether visual narratives within the setting of an educational game support recognizing and addressing misleading elements in healthcare-related visualizations. Our methodological approach focuses on three key aspects: (i) identifying uncertainty types in the visualization pipeline which could serve as the origin of misleading elements, (ii) designing fictional visual narratives that comprise several misleading elements linking to these uncertainties, and (iii) proposing an interactive game that aids the communication of these misleading visualization elements to broad audiences. The game features eight fictional visual narratives built around misleading visualizations, each with specific assumptions linked to uncertainties. Players assess the correctness of these assumptions to earn points and rewards. In case of incorrect assessments, interactive explanations are provided to enhance understanding For an initial assessment of our game, we conducted a user study with 21 participants. Our study indicates that when participants incorrectly assess assumptions, they also spend more time elaborating on the reasons for their mistakes, indicating a willingness to learn more. The study also provided positive indications on game aspects such as memorability, reinforcement, and engagement, while it gave us pointers for future improvement.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5288,
            1410
        ],
        "articleno": "104011",
        "doi": "10.1016/j.cag.2024.104011",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "pages": "11",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "123",
        "research_areas": [],
        "keywords": [
            "Healthcare edutainment",
            "Interactive game",
            "Misleading visualizations",
            "Uncertainty",
            "Visual narratives"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/shilo-2024-vnt/",
        "__class": "Publication"
    },
    {
        "id": "meka-2024-lpi",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/202164",
        "title": "Line Perception in Parallel Coordinates under different Aspect Ratios",
        "date": "2024-10",
        "abstract": "This thesis investigates the impact of different aspect ratios on the perception of angles and lines in parallel coordinates. Parallel coordinates are a visualization technique for representing multivariate data where each variable is drawn as a parallel axis, and data points are connected by lines across these axes. This method allows for the simultaneous visualization of more than two variables and enables the interpretation of correlation patterns within a given dataset.However, the reliability and accuracy of this interpretation can be significantly influenced by the aspect ratio of the plot. This thesis aims to explore how variations in aspect ratios affect the accuracy and confidence of users in perceiving correlations within parallel coordinates.The methodological approach comprises three components: the development of a web-based visualization tool, a statistical analysis of line and angle parameters, and an empirical user study. The visualization tool enables users to display parallel coordinates in various aspect ratios and analyze the geometric properties of the lines in the plot. The statistical analysis reveals that aspect ratios significantly correlate with the minimum and maximum angles in parallel coordinates, which in turn affects the visual perception and interpretation of the data. These findings are validated through a web-based user study, demonstrating that specific aspect ratios lead to more accurate and reliable correlation estimates. The results underscore considerate usage of flexible aspect ratios to minimize distortion and ensure the reliability of visual data interpretation in parallel coordinates.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "Teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1199,
            "image_height": 556,
            "name": "meka-2024-lpi-Teaser.png",
            "type": "image/png",
            "size": 738921,
            "path": "Publication:meka-2024-lpi",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            5397
        ],
        "co_supervisor": [
            950
        ],
        "date_end": "2024-10",
        "date_start": "2024-02",
        "diploma_examina": "2024-10-09",
        "doi": "10.34726/hss.2024.119265",
        "matrikelnr": "12045662",
        "open_access": "yes",
        "pages": "113",
        "supervisor": [
            166
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Parallel Coordinates",
            "Aspect Ratio",
            "Perception"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "Teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1199,
                "image_height": 556,
                "name": "meka-2024-lpi-Teaser.png",
                "type": "image/png",
                "size": 738921,
                "path": "Publication:meka-2024-lpi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "meka-2024-lpi-Thesis.pdf",
                "type": "application/pdf",
                "size": 5693734,
                "path": "Publication:meka-2024-lpi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/meka-2024-lpi-Thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/meka-2024-lpi/",
        "__class": "Publication"
    },
    {
        "id": "kimmersdorfer-2024-vcpr",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Vertex Compression with Mesh Shaders for Skinned Meshes",
        "date": "2024-10",
        "abstract": "Vertex compression helps to enhance the performance of real-time rendering applications, making it a valuable technique in modern computer graphics. In this work, we investigate current state-of-the-art methods for the compression of positions, normals, texture coordinates and blend attributes. Our primary objective is to efficiently compress blend attributes in rigged meshes, particularly focusing on bone weights and indices. We leverage a recent hardware advancement: the mesh shading pipeline. This pipeline enables us to propose a novel compression scheme for blend attributes, which achieves a significant reduction in memory usage of up to 92.75% compared to existing state-of-theart methods using a traditional rendering pipeline. Additionally, we briefly discuss and compare different meshlet building algorithms, meshlet buffer structures, and meshlet extensions within the Vulkan framework. Finally, the proposed codecs are validated through a series of benchmarks focused on resource utilization and performance.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "name": "kimmersdorfer-2024-vcpr-image.png",
            "type": "image/png",
            "size": 1866926,
            "path": "Publication:kimmersdorfer-2024-vcpr",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1869
        ],
        "date_end": "2024-10",
        "date_start": "2023-01",
        "matrikelnr": "01326608",
        "supervisor": [
            848,
            1650,
            193
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "compression",
            "vertex data",
            "vertex compression",
            "meshlets",
            "mesh shaders"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "name": "kimmersdorfer-2024-vcpr-image.png",
                "type": "image/png",
                "size": 1866926,
                "path": "Publication:kimmersdorfer-2024-vcpr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-image:thumb{{size}}.png"
            },
            {
                "description": "Bachelor thesis",
                "filetitle": "thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "kimmersdorfer-2024-vcpr-thesis.pdf",
                "type": "application/pdf",
                "size": 2269890,
                "path": "Publication:kimmersdorfer-2024-vcpr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/kimmersdorfer-2024-vcpr-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kimmersdorfer-2024-vcpr/",
        "__class": "Publication"
    },
    {
        "id": "Petersen_Viktoria-2024-SBO",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Global optimization and learning for lighting design",
        "date": "2024-09-30",
        "abstract": "Global optimisation and learning for lighting design is a work in extension of the Tamashii rendering framework by the Rendering and Modeling Group at TU Wien. Tamashii offers a user interface and implementation to perform a local optimisation task on a scene to find the position of the scene’s light object in order to recreate a given target illumination. We extend the existing framework by implementing a global search for the optimum and additional surrogate models to provide machine learning alternatives to apply various optimisation algorithms. We examine the performance of several different optimisation methods with respect to efficiency, accuracy, and versatility. Furthermore, we compare the use of algorithms in combination with surrogate models versus optimising on the Tamashii model directly.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5337
        ],
        "date_end": "2024-09-30",
        "date_start": "2024-03-15",
        "matrikelnr": "e11924496",
        "supervisor": [
            1946,
            193
        ],
        "research_areas": [],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "BSc Thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "Petersen_Viktoria-2024-SBO-BSc Thesis.pdf",
                "type": "application/pdf",
                "size": 12586508,
                "path": "Publication:Petersen_Viktoria-2024-SBO",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Petersen_Viktoria-2024-SBO/Petersen_Viktoria-2024-SBO-BSc Thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Petersen_Viktoria-2024-SBO/Petersen_Viktoria-2024-SBO-BSc Thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Petersen_Viktoria-2024-SBO/",
        "__class": "Publication"
    },
    {
        "id": "Dhanoa2024",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "From Analysis to Communication: Supporting Users in Understanding Complex Spreadsheets and Dashboards",
        "date": "2024-09-26",
        "abstract": "Advancements in big data processing and interactive visualization tools have led to significant changes in how users analyze and explore their data. This thesis aims to\naddress the challenges resulting from these changes through a two-step approach to support users. We first address the issues at the spreadsheet level before moving on to more complex visual representations in a dashboard environment. We use the Fuzzy Spreadsheet approach at the spreadsheet level to include uncertain information in the decision-making process. Our approach augments traditional spreadsheets with uncertain\ninformation where a cell can hold and display a distribution of values, in addition to other contextually relevant information, such as impact and relationship between cells, to convey sensitivity and robustness information to the user. When users transition from spreadsheet representations to advanced visualization tools such as interactive dashboards, they often face challenges related to their use that can lead them to revert to their old, familiar static analysis tools. With the help of dashboard onboarding, authors can communicate the intended use and purpose of their dashboards, along with the\nworkings of visualizations present on the dashboards, to fill the user’s knowledge gap.\nWe created a process model for dashboard onboarding that formalizes and unifies different onboarding strategies for dashboards and facilitates the design and implementation of new\nonboarding approaches. Using this process model as a base and drawing inspiration from the fields of data storytelling and open-world game design, we developed an approach for\ncrafting semi-automated interactive dashboard tours (D-Tours) to produce an onboarding experience tailored to individual users while preserving their agency. We implemented\nthis concept in a tool called D-Tour Prototype which allows authors to create D-Tours from scratch or using automatic templates. 
Finally, we provide future directions based\non the insights from this thesis to explore the role of AI in the design and development of dashboard onboarding.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 717,
            "image_height": 624,
            "name": "Dhanoa2024-image.JPG",
            "type": "image/jpeg",
            "size": 57594,
            "path": "Publication:Dhanoa2024",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-image.JPG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1892
        ],
        "co_supervisor": [
            1059
        ],
        "date_end": "2024-09-26",
        "date_start": "2020-10-11",
        "open_access": "yes",
        "reviewer_1": [
            166
        ],
        "rigorosum": "2024-10-01",
        "supervisor": [
            1896
        ],
        "research_areas": [
            "IllVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 717,
                "image_height": 624,
                "name": "Dhanoa2024-image.JPG",
                "type": "image/jpeg",
                "size": 57594,
                "path": "Publication:Dhanoa2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-image.JPG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "PhD Thesis",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "name": "Dhanoa2024-PhD Thesis.pdf",
                "type": "application/pdf",
                "size": 5493271,
                "path": "Publication:Dhanoa2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-PhD Thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/Dhanoa2024-PhD Thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Dhanoa2024/",
        "__class": "Publication"
    },
    {
        "id": "klaffenboeck-2024-rva",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/209323",
        "title": "RSVP for VPSA : A Meta Design Study on Rapid Suggestive Visualization Prototyping for Visual Parameter Space Analysis",
        "date": "2024-09-12",
        "abstract": "Visual Parameter Space Analysis (VPSA) enables domain scientists to explore input-output relationships of computational models. Existing VPSA applications often feature multi-view visualizations designed by visualization experts for a specific scenario, making it hard for domain scientists to adapt them to their problems without professional help. We present RSVP, the Rapid Suggestive Visualization Prototyping system encoding VPSA knowledge to enable domain scientists to prototype custom visualization dashboards tailored to their specific needs. The system implements a task-oriented, multi-view visualization recommendation strategy over a visualization design space optimized for VPSA to guide users in meeting their analytical demands. We derived the VPSA knowledge implemented in the system by conducting an extensive meta design study over the body of work on VPSA. We show how this process can be used to perform a data and task abstraction, extract a common visualization design space, and derive a task-oriented VisRec strategy. User studies indicate that the system is user-friendly and can uncover novel insights.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 586,
            "image_height": 568,
            "name": "klaffenboeck-2024-rva-image.png",
            "type": "image/png",
            "size": 169141,
            "path": "Publication:klaffenboeck-2024-rva",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1598,
            5247,
            1072,
            193,
            196
        ],
        "doi": "10.1109/TVCG.2024.3431930",
        "first_published": "2024-09-12",
        "issn": "1941-0506",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "open_access": "yes",
        "pages": "18",
        "publisher": "IEEE COMPUTER SOC",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "input-output model",
            "literature analysis",
            "mixed-initiative system",
            "unobtrusive visualization recommendation",
            "user study"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 586,
                "image_height": 568,
                "name": "klaffenboeck-2024-rva-image.png",
                "type": "image/png",
                "size": 169141,
                "path": "Publication:klaffenboeck-2024-rva",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "klaffenboeck-2024-rva-paper.pdf",
                "type": "application/pdf",
                "size": 12681537,
                "path": "Publication:klaffenboeck-2024-rva",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/klaffenboeck-2024-rva-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/klaffenboeck-2024-rva/",
        "__class": "Publication"
    },
    {
        "id": "wolf-2024-jhd",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Joint Human-Machine Data Exploration Sandbox",
        "date": "2024-09",
        "abstract": "Data analysis exploration is becoming increasingly challenging as datasets grow in scale and complexity. The Joint Human-Machine Data Exploration (JDE) framework offers a novel solution for analyzing large, unstructured datasets by integrating human insight with machine learning. The framework facilitates dynamic user interaction and visual exploration through three interconnected views: the data view, frame view, and knowledge view. These views enable users to align data exploration with evolving knowledge models. Implemented as a flexible, modular system using a client-server architecture, the JDE framework supports interactive data manipulation and real-time feedback. This project developed a functional prototype, the JDE sandbox, showcasing the system's potential for enhancing exploratory data analysis. Future work will focus on expanding the framework's capabilities and improving accessibility for a broader audience.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 7680,
            "image_height": 3915,
            "name": "wolf-2024-jhd-teaser.png",
            "type": "image/png",
            "size": 2648227,
            "path": "Publication:wolf-2024-jhd",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5238
        ],
        "date_end": "2024-09",
        "date_start": "2023-09",
        "matrikelnr": "00925239",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://gitlab.tuwien.ac.at/e193-02-jde/jde-sandbox",
                "caption": "GitLab",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://sandbox.jde.cg.tuwien.ac.at/sandbox",
                "caption": "demo",
                "description": "Online demo of the sandbox",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "report",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wolf-2024-jhd-report.pdf",
                "type": "application/pdf",
                "size": 8752213,
                "path": "Publication:wolf-2024-jhd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-report.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-report:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 7680,
                "image_height": 3915,
                "name": "wolf-2024-jhd-teaser.png",
                "type": "image/png",
                "size": 2648227,
                "path": "Publication:wolf-2024-jhd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/",
        "__class": "Publication"
    },
    {
        "id": "hoffer-toth-com",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "CycleSafely on mobile",
        "date": "2024-08-12",
        "abstract": "SFA3D:\n\nZeitplan: Auf Mobile portiert bis Ende März / Anfang April.\n\nSAF3D ist mit Pytorch implementiert worden. Folgende Möglichkeiten gibt es um Pytorch Modelle auf Mobile/Android zu portieren:\n\n- Pytorch Mobile: https://pytorch.org/mobile/home/\n\n- Pytorch Flutter Plugins: https://pub.dev/packages/flutter_pytorch oder https://pub.dev/packages/pytorch_mobile\n\n- Deep Java Library: https://djl.ai/\n\n \n\n3D-Multi-Object-Tracker:\n\nZeitplan: Auf Mobile implementiert bis Ende April.\n\nDa 3DMOT ein non-ml Algorithmus ist, kann er in der dann gewählten Sprache implementiert werden.\n\n \n\nPrecog:\n\nZeitplan: Auf Mobile portiert bis Ende Mai.\n\nPrecog habe ich leider nicht zum Laufen bekommen, da ich nicht alle Daten gefunden habe bzw. nicht sicher war welche benötigt werden und wie sie konfiguriert werden sollen.\n\nPrecog verwendet Tensorflow. Dazu habe ich folgendes gefunden:\n\n- Tensorflow Lite: https://www.tensorflow.org/lite\n\n- Tflite Flutter: https://pub.dev/packages/tflite_flutter\n\n \n\nVergleich zwischen Server und Mobile:\n\nZeitplan: Server und Mobile werden Anfang Juni verglichen, wenn bis dahin alles nach Plan läuft.\n\n \n\nSchriftliche Arbeit:\n\nZeitplan: Mit dem Schreiben wird nach dem Vergleichen angefangen und dafür kann der Rest von Juni verwendet werden. Falls sich eines der Schritte herauszögert, dann kann ich die vorlesungsfreie Zeit im Sommer auch verwenden.\n\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 614,
            "image_height": 613,
            "name": "hoffer-toth-com-image.png",
            "type": "image/png",
            "size": 164978,
            "path": "Publication:hoffer-toth-com",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5353
        ],
        "date_end": "2024-08-12",
        "date_start": "2024-03-11",
        "matrikelnr": "12122086",
        "supervisor": [
            948
        ],
        "research_areas": [
            "Geometry"
        ],
        "keywords": [
            "bicycle",
            "mobile",
            "scanning"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 614,
                "image_height": 613,
                "name": "hoffer-toth-com-image.png",
                "type": "image/png",
                "size": 164978,
                "path": "Publication:hoffer-toth-com",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "hoffer-toth-com-thesis.pdf",
                "type": "application/pdf",
                "size": 6093633,
                "path": "Publication:hoffer-toth-com",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/hoffer-toth-com-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/hoffer-toth-com/",
        "__class": "Publication"
    },
    {
        "id": "goel-2024-rdr",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/204000",
        "title": "Real-Time Decompression and Rasterization of Massive Point Clouds",
        "date": "2024-08-09",
        "abstract": "Large-scale capturing of real-world scenes as 3D point clouds (e.g., using LIDAR scanning) generates billions of points that are challenging to visualize. High storage requirements prevent the quick and easy inspection of captured datasets on user-grade hardware. The fastest real-time rendering methods are limited by the available GPU memory and render only around 1 billion points interactively. We show that we can achieve state-of-the-art in both while simultaneously supporting datasets that surpass the capabilities of other methods. We present an on-the-fly point cloud decompression scheme that tightly integrates with software rasterization to reduce on-chip memory requirements by more than 4×. Our method compresses geometry losslessly and provides high visual quality at real-time framerates. We use a GPU-friendly, clipped Huffman encoding for compression. Point clouds are divided into equal-sized batches, which are Huffman-encoded independently. Batches are further subdivided to form easy-to-consume streams of data for massively parallel execution. The compressed point clouds are stored in an access-aware manner to achieve coherent GPU memory access and a high L1 cache hit rate at render time. Our approach can decompress and rasterize up to 120 million Huffman-encoded points per millisecond on-the-fly. We evaluate the quality and performance of our approach on various large datasets against the fastest competing methods. Our approach renders massive 3D point clouds at competitive frame rates and visual quality while consuming significantly less memory, thus unlocking unprecedented performance for the visualization of challenging datasets on commodity GPUs.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "cover",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1695,
            "image_height": 961,
            "name": "goel-2024-rdr-cover.jpg",
            "type": "image/jpeg",
            "size": 276042,
            "path": "Publication:goel-2024-rdr",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/goel-2024-rdr/goel-2024-rdr-cover.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/goel-2024-rdr/goel-2024-rdr-cover:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,date_from,date_to,event,lecturer,location",
        "repositum_presentation_id": null,
        "authors": [
            5402,
            1116,
            5403,
            1650
        ],
        "articleno": "48",
        "date_from": "2024-06-26",
        "date_to": "2024-06-28",
        "doi": "10.1145/3675373",
        "event": "High Performance Graphics",
        "issn": "2577-6193",
        "journal": "Proceedings of the ACM on Computer Graphics and Interactive Techniques",
        "lecturer": [
            1116
        ],
        "location": "Denver, USA",
        "number": "3",
        "pages": "15",
        "pages_from": "1",
        "pages_to": "15",
        "publisher": "Association for Computing Machinery (ACM)",
        "volume": "7",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "compression",
            "point cloud",
            "rasterization",
            "real-time rendering"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "cover",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1695,
                "image_height": 961,
                "name": "goel-2024-rdr-cover.jpg",
                "type": "image/jpeg",
                "size": 276042,
                "path": "Publication:goel-2024-rdr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/goel-2024-rdr/goel-2024-rdr-cover.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/goel-2024-rdr/goel-2024-rdr-cover:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend",
            "d9275"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/goel-2024-rdr/",
        "__class": "Publication"
    },
    {
        "id": "cardoso-2024-r-c",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/209907",
        "title": "Re:Draw - Context Aware Translation as a Controllable Method for Artistic Production",
        "date": "2024-08",
        "abstract": "We introduce context-aware translation, a novel method that combines the benefits of inpainting and image-to-image translation, respecting simultaneously the original input and contextual relevance – where existing methods fall short. By doing so, our method opens new avenues for the controllable use of AI within artistic creation, from animation to digital art.\nAs an use case, we apply our method to redraw any hand-drawn animated character eyes based on any design specifications – eyes serve as a focal point that captures viewer attention and conveys a range of emotions; however, the labor-intensive na-\nture of traditional animation often leads to compromises in the complexity and consistency of eye design. Furthermore, we remove the need for production data for training and introduce a new character recognition method that surpasses existing work\nby not requiring fine-tuning to specific productions.\nThis proposed use case could help maintain consistency throughout production and unlock bolder and\nmore detailed design choices without the production cost drawbacks. A user study shows contextaware translation is preferred over existing work 95.16% of the time.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 343,
            "image_height": 294,
            "name": "cardoso-2024-r-c-image.bmp",
            "type": "image/bmp",
            "size": 303462,
            "path": "Publication:cardoso-2024-r-c",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-image.bmp",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            1639,
            5437,
            1519,
            193
        ],
        "booktitle": "Proceedings of the Thirty-Third International Joint Conference on Artificial Intelligence (IJCAI-24)",
        "date_from": "2024-08-03",
        "date_to": "2024-08-09",
        "doi": "10.24963/ijcai.2024/842",
        "event": "33rd International Joint Conference on Artificial Intelligence (IJCAI 2024)",
        "isbn": "978-1-956792-04-1",
        "lecturer": [
            1639
        ],
        "location": "Jeju Island",
        "pages": "9",
        "pages_from": "7609",
        "pages_to": "7617",
        "publisher": "International Joint Conferences on Artificial Intelligence",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "Application domains: Images, movies and visual arts",
            "Application domains: Computer Graphics and Animation",
            "Methods and resources: AI systems for collaboration and co-creation",
            "Methods and resources: Machine learning, deep learning, neural models, reinforcement learning",
            "Theory and philosophy of arts and creativity in AI systems: Social (multi-agent) creativity and human-computer co-creation"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 343,
                "image_height": 294,
                "name": "cardoso-2024-r-c-image.bmp",
                "type": "image/bmp",
                "size": 303462,
                "path": "Publication:cardoso-2024-r-c",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-image.bmp",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "cardoso-2024-r-c-paper.pdf",
                "type": "application/pdf",
                "size": 2952059,
                "path": "Publication:cardoso-2024-r-c",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/cardoso-2024-r-c-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "EVOCATION"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/cardoso-2024-r-c/",
        "__class": "Publication"
    },
    {
        "id": "kerbl-2024-ah3",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/220026",
        "title": "A Hierarchical 3D Gaussian Representation for Real-Time Rendering of Very Large Datasets",
        "date": "2024-07-19",
        "abstract": "Novel view synthesis has seen major advances in recent years, with 3D Gaussian splatting offering an excellent level of visual quality, fast training and real-time rendering. However, the resources needed for training and rendering inevitably limit the size of the captured scenes that can be represented with good visual quality. We introduce a hierarchy of 3D Gaussians that preserves visual quality for very large scenes, while offering an efficient Level-of-Detail (LOD) solution for efficient rendering of distant content with effective level selection and smooth transitions between levels. We introduce a divide-and-conquer approach that allows us to train very large scenes in independent chunks. We consolidate the chunks into a hierarchy that can be optimized to further improve visual quality of Gaussians merged into intermediate nodes. Very large captures typically have sparse coverage of the scene, presenting many challenges to the original 3D Gaussian splatting training method; we adapt and regularize training to account for these issues. We present a complete solution, that enables real-time rendering of very large scenes and can adapt to available resources thanks to our LOD method. We show results for captured scenes with up to tens of thousands of images with a simple and affordable rig, covering trajectories of up to several kilometers and lasting up to one hour.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "date_from,date_to,event,lecturer,location",
        "repositum_presentation_id": null,
        "authors": [
            1650,
            5532,
            5372,
            193,
            5373,
            5503
        ],
        "articleno": "62",
        "date_from": "2024-07-28",
        "date_to": "2024-08-01",
        "doi": "10.1145/3658160",
        "event": "ACM SIGGRAPH 2024",
        "issn": "1557-7368",
        "journal": "ACM Transactions on Graphics",
        "lecturer": [
            5532
        ],
        "location": "Denver, USA",
        "number": "4",
        "pages": "15",
        "pages_from": "1",
        "pages_to": "15",
        "publisher": "ASSOC COMPUTING MACHINERY",
        "volume": "43",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "real-time rendering",
            "3d gaussian splatting",
            "level-of-detail",
            "Large Scenes"
        ],
        "weblinks": [
            {
                "href": "https://repo-sam.inria.fr/fungraph/hierarchical-3d-gaussians/",
                "caption": "project page",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kerbl-2024-ah3-paper.pdf",
                "type": "application/pdf",
                "size": 10836570,
                "path": "Publication:kerbl-2024-ah3",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kerbl-2024-ah3/kerbl-2024-ah3-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/kerbl-2024-ah3/kerbl-2024-ah3-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9275"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kerbl-2024-ah3/",
        "__class": "Publication"
    },
    {
        "id": "koenigsberger-2024-msu",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/206831",
        "title": "Micromechanics stiffness upscaling of plant fiber-reinforced composites",
        "date": "2024-07-15",
        "abstract": "Fiber-reinforced green composites made from natural plant fibers are an increasingly popular sustainable alternative to conventional high-performance composite materials. Given the variety of natural fibers themselves, and the even larger variety of possible composites with specific fiber dosage, fiber orientation distribution, fiber length distribution, and fiber–matrix bond characteristics, micromechanics-based modeling is essential for characterizing the macroscopic response of these composites. Herein, an analytical multiscale micromechanics model for elastic homogenization is developed, capable of capturing the variety. The model features (i) a nanoscopic representation of the natural fibers to predict the fiber stiffness from the universal stiffness of the fiber constituents, mainly cellulose, (ii) a spring-interface model to quantify the compliance of the fiber–matrix bond, and (iii) the ability to model any (and any combination of) orientation distribution and aspect ratio distribution. Validation is performed by comparing the predicted stiffness to experimental results for as many as 73 composites available in the literature. Extensive sensitivity analyses quantify the composite stiffening upon increasing fiber volume fraction, fiber alignment, fiber length, and fiber–matrix interface stiffness, respectively.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5423,
            5424,
            5425,
            193,
            5209
        ],
        "articleno": "111571",
        "doi": "10.1016/j.compositesb.2024.111571",
        "issn": "1879-1069",
        "journal": "COMPOSITES PART B-ENGINEERING",
        "open_access": "yes",
        "pages": "20",
        "pages_from": "1",
        "pages_to": "20",
        "publisher": "ELSEVIER SCI LTD",
        "volume": "281",
        "research_areas": [
            "Modeling"
        ],
        "keywords": [
            "Biocomposite",
            "Elasticity",
            "Fiber orientation",
            "Multiscale modeling",
            "Natural fibers",
            "Weak interface",
            "Young's modulus"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "koenigsberger-2024-msu-paper.pdf",
                "type": "application/pdf",
                "size": 2290463,
                "path": "Publication:koenigsberger-2024-msu",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/koenigsberger-2024-msu/koenigsberger-2024-msu-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/koenigsberger-2024-msu/koenigsberger-2024-msu-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d4314"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/koenigsberger-2024-msu/",
        "__class": "Publication"
    },
    {
        "id": "Klaus2024",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Multidimensional Clustering for Machine Data Analysis",
        "date": "2024-07-11",
        "abstract": "Machine data analysis is an important aspect in modern industrial facilities, as stakeholders want their machinery to be as efficient as possible. To this end, they utilize the IIoT, enabling the analysis of gathered machine data. To gain useful information through the aggregated data, Big Data analytics are invaluable to the domain experts conducting\nmachine data analysis. The insights gained through Big Data analytics allow for a better efficiency of the facility by enabling data-driven decisions.\nThis thesis sets out to explore the feasibility of multidimensional clustering for machine data analysis in a web-based environment. To do this, we developed an application that\ncombines statistical methods and several visualization techniques into a web interface.\nWe evaluated the tool based on its real-world applicability and performance. The developed application has produced promising results, when employed on multivariate time series from industrial machinery, and thereby provides a robust foundation for future improvements.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 917,
            "image_height": 520,
            "name": "Klaus2024-image.JPG",
            "type": "image/jpeg",
            "size": 79072,
            "path": "Publication:Klaus2024",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-image.JPG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5376
        ],
        "date_end": "2024-07-11",
        "date_start": "2023-11-11",
        "matrikelnr": "12120487",
        "supervisor": [
            166
        ],
        "research_areas": [
            "NetVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "Bachelor thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Klaus2024-Bachelor thesis.pdf",
                "type": "application/pdf",
                "size": 7283210,
                "path": "Publication:Klaus2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-Bachelor thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-Bachelor thesis:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 917,
                "image_height": 520,
                "name": "Klaus2024-image.JPG",
                "type": "image/jpeg",
                "size": 79072,
                "path": "Publication:Klaus2024",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-image.JPG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/Klaus2024-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/Klaus2024/",
        "__class": "Publication"
    },
    {
        "id": "brandmair-2024-rust",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Rust Tutorial",
        "date": "2024-07",
        "abstract": "A 2-day Rust tutorial was prepared and given which introduces the Rust programming language to Computer Graphics students and assistants. Its contents are based on Google's \"Comprehensive Rust\" tutorial, trimmed down to two days, and adapted to the target audience (expected to have a solid background in systems programming and a strong background in graphics programming).",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2048,
            "image_height": 1024,
            "name": "brandmair-2024-rust-image.jpg",
            "type": "image/jpeg",
            "size": 136698,
            "path": "Publication:brandmair-2024-rust",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5377
        ],
        "date_end": "2024-07",
        "date_start": "2024-06",
        "matrikelnr": "12024754",
        "supervisor": [
            848,
            193
        ],
        "research_areas": [],
        "keywords": [
            "Rust",
            "Systems Programming",
            "Tutorial"
        ],
        "weblinks": [
            {
                "href": "https://github.com/stefnotch/rust-tutorial",
                "caption": "Rust Tutorial on GitHub",
                "description": "A Rust tutorial, based and adapted from Google's Comprehensive Rust tutorial",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2048,
                "image_height": 1024,
                "name": "brandmair-2024-rust-image.jpg",
                "type": "image/jpeg",
                "size": 136698,
                "path": "Publication:brandmair-2024-rust",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-image:thumb{{size}}.png"
            },
            {
                "description": "Partial video recording of day 1, forenoon",
                "filetitle": "video recording 1",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "brandmair-2024-rust-video recording 1.mp4",
                "type": "video/mp4",
                "size": 127989764,
                "path": "Publication:brandmair-2024-rust",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 1.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 1:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 1:video.mp4"
            },
            {
                "description": "Partial video recording of day 1, afternoon",
                "filetitle": "video recording 2",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "brandmair-2024-rust-video recording 2.mp4",
                "type": "video/mp4",
                "size": 83261837,
                "path": "Publication:brandmair-2024-rust",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 2.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 2:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 2:video.mp4"
            },
            {
                "description": "Partial video recording of day 2, forenoon",
                "filetitle": "video recording 3",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "brandmair-2024-rust-video recording 3.mp4",
                "type": "video/mp4",
                "size": 326893062,
                "path": "Publication:brandmair-2024-rust",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 3.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 3:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/brandmair-2024-rust-video recording 3:video.mp4"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/brandmair-2024-rust/",
        "__class": "Publication"
    },
    {
        "id": "melcher-2024-asi",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Acoustics Simulation in the Browser - Making Roommode Calculation with the Finite Element Method Accessible for Non-Experts",
        "date": "2024-06-27",
        "abstract": "Rooms that are used for communication or entertainment need suitable room acoustics.\nOften acoustics simulation is a fundamental part of the design of such rooms. While\nexpensive and/or hard to use software packages exist that have a wide range of scientific\napplications, cheap and easy to use possibilities have not been available in the past.\nIn the course of this work, a prototype was developed that aims to simplify the specific\nuse case of calculating and visualizing room modes, a specific undesirable phenomenon in\nroom acoustics.\nIf possible, meaningful values are automatically assumed without user input. High\nautomation and good user guidance are the focus.\nInterviews with test users have shown that not only do they manage to visualize room\nmodes without instruction, but also that they only need 1-2 minutes during their first\nuse to obtain their first calculation result.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "name": "melcher-2024-asi-image.png",
            "type": "image/png",
            "size": 206234,
            "path": "Publication:melcher-2024-asi",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5447
        ],
        "date_end": "2024-06",
        "date_start": "2024-01",
        "matrikelnr": "00004876",
        "supervisor": [
            166
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "bachelor-thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "melcher-2024-asi-bachelor-thesis.pdf",
                "type": "application/pdf",
                "size": 2621026,
                "path": "Publication:melcher-2024-asi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-bachelor-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-bachelor-thesis:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "melcher-2024-asi-image.png",
                "type": "image/png",
                "size": 206234,
                "path": "Publication:melcher-2024-asi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/melcher-2024-asi-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/melcher-2024-asi/",
        "__class": "Publication"
    },
    {
        "id": "pahr-2024-ieo",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/199161",
        "title": "Investigating the Effect of Operation Mode and Manifestation on Physicalizations of Dynamic Processes",
        "date": "2024-06",
        "abstract": "We conducted a study to systematically investigate the communication of complex dynamic processes along a two-dimensional design space, where the axes represent a representation's manifestation (physical or virtual) and operation (manual or automatic). We exemplify the design space on a model embodying cardiovascular pathologies, represented by a mechanism where a liquid is pumped into a draining vessel, with complications illustrated through modifications to the model. The results of a mixed-methods lab study with 28 participants show that both physical manifestation and manual operation have a strong positive impact on the audience's engagement. The study does not show a measurable knowledge increase with respect to cardiovascular pathologies using manually operated physical representations. However, subjectively, participants report a better understanding of the process—mainly through non-visual cues like haptics, but also auditory cues. The study also indicates an increased task load when interacting with the process, which, however, seems to play a minor role for the participants. Overall, the study shows a clear potential of physicalization for the communication of complex dynamic processes, which only fully unfolds if observers have the chance to interact with the process.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2634,
            "image_height": 1232,
            "name": "pahr-2024-ieo-teaser.png",
            "type": "image/png",
            "size": 172079,
            "path": "Publication:pahr-2024-ieo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,date_from,date_to,event,lecturer,pages_from,pages_to",
        "repositum_presentation_id": null,
        "authors": [
            1813,
            1850,
            1464,
            1110,
            1410
        ],
        "articleno": "e15106",
        "date_from": "2024-06-27",
        "date_to": "2024-06-30",
        "doi": "10.1111/cgf.15106",
        "event": "EUROVIS",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1813
        ],
        "number": "3",
        "pages": "12",
        "pages_from": "1",
        "pages_to": "12",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [
            "InfoVis",
            "MedVis",
            "Perception"
        ],
        "keywords": [
            "Data Physicalization",
            "Study",
            "Cardiovascular Diseases",
            "Edutainment",
            "Human Computer Interaction (HCI)",
            "Mixed Methods"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2634,
                "image_height": 1232,
                "name": "pahr-2024-ieo-teaser.png",
                "type": "image/png",
                "size": 172079,
                "path": "Publication:pahr-2024-ieo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/",
        "__class": "Publication"
    },
    {
        "id": "marin-2024-rcf",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/202374",
        "title": "Reconstructing Curves from Sparse Samples on Riemannian Manifolds",
        "date": "2024-06",
        "abstract": "Reconstructing 2D curves from sample points has long been a critical challenge in computer graphics, finding essential applications in vector graphics. The design and editing of curves on surfaces has only recently begun to receive attention, primarily relying on human assistance, and where not, limited by very strict sampling conditions. In this work, we formally improve on the state-of-the-art requirements and introduce an innovative algorithm capable of reconstructing closed curves directly on surfaces from a given sparse set of sample points. We extend and adapt a state-of-the-art planar curve reconstruction method to the realm of surfaces while dealing with the challenges arising from working on non-Euclidean domains. We demonstrate the robustness of our method by reconstructing multiple curves on various surface meshes. We explore novel potential applications of our approach, allowing for automated reconstruction of curves on Riemannian manifolds.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "name": "marin-2024-rcf-image.png",
            "type": "image/png",
            "size": 706684,
            "path": "Publication:marin-2024-rcf",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-rcf/marin-2024-rcf-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-rcf/marin-2024-rcf-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date_from,date_to,event,lecturer,location,pages_from,pages_to",
        "repositum_presentation_id": "20.500.12708/202064",
        "authors": [
            1848,
            5388,
            5389,
            948,
            193
        ],
        "articleno": "e15136",
        "date_from": "2024-06-24",
        "date_to": "2024-06-26",
        "doi": "10.1111/cgf.15136",
        "event": "Symposium on Geometry Processing",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1848
        ],
        "location": "Boston",
        "number": "5",
        "pages": "14",
        "pages_from": "-",
        "pages_to": "-",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "CCS Concepts",
            "Graph algorithms",
            "Mesh geometry models",
            "Paths and connectivity problems"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "name": "marin-2024-rcf-image.png",
                "type": "image/png",
                "size": 706684,
                "path": "Publication:marin-2024-rcf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-rcf/marin-2024-rcf-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-rcf/marin-2024-rcf-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-rcf/",
        "__class": "Publication"
    },
    {
        "id": "ohrhallinger_stefan-2024-inv",
        "type_id": "talk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Sampling and reconstructing point clouds",
        "date": "2024-05-28",
        "abstract": "Curve and surface reconstruction from unstructured points represent a fundamental problem in computer graphics and computer vision, with many applications. The quest for better solutions for this ill-posed problem is riddled with various kinds of artifacts such as noise, outliers, and missing data.\n\nMoreover, the reconstruction problem usually implies further input requirements: how many samples do we need for a successful reconstruction, what properties should these samples satisfy and how can we obtain such sets. And once we obtain these point samples, how can we extract connectivity that best approximates the initial surface they have been sampled from?\n\nWe will discuss about various sampling strategies, corresponding reconstruction methods, with multiple applications in automating sketch coloring, adaptive meshing for faster simulations, and cultural heritage.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1310,
            "image_height": 596,
            "name": "ohrhallinger_stefan-2024-inv-image.png",
            "type": "image/png",
            "size": 762834,
            "path": "Publication:ohrhallinger_stefan-2024-inv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ohrhallinger_stefan-2024-inv/ohrhallinger_stefan-2024-inv-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ohrhallinger_stefan-2024-inv/ohrhallinger_stefan-2024-inv-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            948,
            1848
        ],
        "event": "TU Graz",
        "location": "Graz",
        "research_areas": [
            "Geometry"
        ],
        "keywords": [
            "curve reconstruction",
            "surface reconstruction",
            "sampling"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1310,
                "image_height": 596,
                "name": "ohrhallinger_stefan-2024-inv-image.png",
                "type": "image/png",
                "size": 762834,
                "path": "Publication:ohrhallinger_stefan-2024-inv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ohrhallinger_stefan-2024-inv/ohrhallinger_stefan-2024-inv-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ohrhallinger_stefan-2024-inv/ohrhallinger_stefan-2024-inv-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ohrhallinger_stefan-2024-inv/",
        "__class": "Publication"
    },
    {
        "id": "matt-2024-cvil",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/199888",
        "title": "cVIL: Class-Centric Visual Interactive Labeling",
        "date": "2024-05-27",
        "abstract": "We present cVIL, a class-centric approach to visual interactive labeling, which facilitates human annotation of large and complex image data sets. cVIL uses different property measures to support instance labeling for labeling difficult instances and batch labeling to quickly label easy instances. Simulated experiments reveal that cVIL with batch labeling can outperform traditional labeling approaches based on active learning. In a user study, cVIL led to better accuracy and higher user preference compared to a traditional instance-based visual interactive labeling approach based on 2D scatterplots.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Screenshot of cVIL as employed in the user study",
            "filetitle": "cVIL teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1104,
            "image_height": 449,
            "name": "matt-2024-cvil-cVIL teaser.png",
            "type": "image/png",
            "size": 293869,
            "path": "Publication:matt-2024-cvil",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL%20teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL%20teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "abstract,projects,open_access",
        "repositum_presentation_id": null,
        "authors": [
            5263,
            5370,
            1110
        ],
        "booktitle": "Eurographics Proceedings",
        "date_from": "2024-05-27",
        "date_to": "2024-05-27",
        "doi": "10.2312/eurova.20241113",
        "editor": "El-Assady, Mennatallah and Schulz, Hans-Jörg",
        "event": "EuroVis Workshop on Visual Analytics (EuroVA 2024)",
        "isbn": "978-3-03868-056-7",
        "lecturer": [
            5263
        ],
        "location": "Aarhus",
        "open_access": "yes",
        "pages": "6",
        "publisher": "Eurographics",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Visual Analytics",
            "Interactive Machine Learning",
            "User Interface Design"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/server/api/core/bitstreams/c18fafcc-b4b4-4e51-bd2f-cec056c6d93a/content",
                "caption": "paper",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://gitlab.tuwien.ac.at/e193-02-jde/lava",
                "caption": "GitLab",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Screenshot of cVIL as employed in the user study",
                "filetitle": "cVIL teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1104,
                "image_height": 449,
                "name": "matt-2024-cvil-cVIL teaser.png",
                "type": "image/png",
                "size": 293869,
                "path": "Publication:matt-2024-cvil",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL%20teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL%20teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/",
        "__class": "Publication"
    },
    {
        "id": "lipp-2024-val",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/203067",
        "title": "View-Independent Adjoint Light Tracing for Lighting Design Optimization",
        "date": "2024-05-22",
        "abstract": "Differentiable rendering methods promise the ability to optimize various parameters of three-dimensional (3D) scenes to achieve a desired result. However, lighting design has so far received little attention in this field. In this article, we introduce a method that enables continuous optimization of the arrangement of luminaires in a 3D scene via differentiable light tracing. Our experiments show two major issues when attempting to apply existing methods from differentiable path tracing to this problem: First, many rendering methods produce images, which restricts the ability of a designer to define lighting objectives to image space. Second, most previous methods are designed for scene geometry or material optimization and have not been extensively tested for the case of optimizing light sources. Currently available differentiable ray-tracing methods do not provide satisfactory performance, even on fairly basic test cases in our experience. In this article, we propose, to the best of our knowledge, a novel adjoint light tracing method that overcomes these challenges and enables gradient-based lighting design optimization in a view-independent (camera-free) way. Thus, we allow the user to paint illumination targets directly onto the 3D scene or use existing baked illumination data (e.g., light maps). Using modern ray-tracing hardware, we achieve interactive performance. We find light tracing advantageous over path tracing in this setting, as it naturally handles irregular geometry, resulting in less noise and improved optimization convergence. We compare our adjoint gradients to state-of-the-art image-based differentiable rendering methods. We also demonstrate that our gradient data works with various common optimization algorithms, providing good convergence behaviour. Qualitative comparisons with real-world scenes underline the practical applicability of our method.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "preview",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1469,
            "image_height": 1228,
            "name": "lipp-2024-val-preview.jpg",
            "type": "image/jpeg",
            "size": 264705,
            "path": "Publication:lipp-2024-val",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/lipp-2024-val/lipp-2024-val-preview.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/lipp-2024-val/lipp-2024-val-preview:thumb{{size}}.png"
        },
        "sync_repositum_override": "event,lecturer",
        "repositum_presentation_id": null,
        "authors": [
            1525,
            1946,
            1949,
            1063,
            193
        ],
        "articleno": "35",
        "doi": "10.1145/3662180",
        "event": "SIGGRAPH 2024",
        "issn": "1557-7368",
        "journal": "ACM Transactions on Graphics",
        "number": "3",
        "open_access": "yes",
        "pages": "16",
        "publisher": "ASSOC COMPUTING MACHINERY",
        "volume": "43",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "differentiable rendering",
            "global illumination",
            "Lighting design",
            "optimization",
            "ray tracing"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "preview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1469,
                "image_height": 1228,
                "name": "lipp-2024-val-preview.jpg",
                "type": "image/jpeg",
                "size": 264705,
                "path": "Publication:lipp-2024-val",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/lipp-2024-val/lipp-2024-val-preview.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/lipp-2024-val/lipp-2024-val-preview:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d4314"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/lipp-2024-val/",
        "__class": "Publication"
    },
    {
        "id": "eichner-2024-erv",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Effizientes Rendern von Wäldern mittels Gruppierter Detailgrade in 3D-Geoinformationssystemen",
        "date": "2024-05-15",
        "abstract": "Interactive visualization is important for many workflows. Especially so in the context\nof 3D geo-informations systems, where large quantities of data have to be processed\nand presented to the user at interactive speeds for productivity and orientation in the\ngeo-spatial context. In heavily forested countries like Austria enormous amounts of\ngeometry have to be drawn when visualizing forests. Naïve rendering approaches fail,\neven when using heavily simplified geometry for the individual trees. The region in which\ndetails are necessary is small and changes frequently. A major part of the scene is far\naway and needs little detail. These constraints are what this thesis attempts to find a\nsolution for. Thus each tree is represented by a billboard, if not close to the camera. To\ndecrease the computational complexity of selecting the appropriate level of detail for all\ntrees, they are grouped into batches, for which frustum culling and level of detail selection\nhappens. This new approach is implemented, qualitatively evaluated, and compared with\nexisting alternative approaches. Comparison of the approaches on a stress test scene\nshows that our new approach can be between 1.7 and 6 times faster than the approaches\ntested against depending on the scenario, while barely reducing visual quality.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "name": "eichner-2024-erv-image.png",
            "type": "image/png",
            "size": 984848,
            "path": "Publication:eichner-2024-erv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5448
        ],
        "date_end": "2024-05",
        "date_start": "2024-01",
        "matrikelnr": "11808244",
        "supervisor": [
            166
        ],
        "research_areas": [
            "InfoVis",
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "bachelor-thesis",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "eichner-2024-erv-bachelor-thesis.pdf",
                "type": "application/pdf",
                "size": 6457572,
                "path": "Publication:eichner-2024-erv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-bachelor-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-bachelor-thesis:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "eichner-2024-erv-image.png",
                "type": "image/png",
                "size": 984848,
                "path": "Publication:eichner-2024-erv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/eichner-2024-erv-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eichner-2024-erv/",
        "__class": "Publication"
    },
    {
        "id": "SCHUETZ-2024-SIMLOD",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/208716",
        "title": "SimLOD: Simultaneous LOD Generation and Rendering for Point Clouds",
        "date": "2024-05-13",
        "abstract": "LOD construction is typically implemented as a preprocessing step that requires users to wait before they are able to view the results in real time. We propose an incremental LOD generation approach for point clouds that allows us to simultaneously load points from disk, update an octree-based level-of-detail representation, and render the intermediate results in real time while additional points are still being loaded from disk. LOD construction and rendering are both implemented in CUDA and share the GPU’s processing power, but each incremental update is lightweight enough to leave enough time to maintain real-time frame rates. Our approach is able to stream points from an SSD and update the octree on the GPU at rates of up to 580 million points per second (~9.3GB/s) on an RTX 4090 and a PCIe 5.0 SSD. Depending on the data set, our approach spends an average of about 1 to 2 ms to incrementally insert 1 million points into the octree, allowing us to insert several million points per frame into the LOD structure and render the intermediate results within the same frame.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Rendered Point Cloud to the left and points/voxels colored by the containing octree node to the right.",
            "filetitle": "SimLOD",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1911,
            "image_height": 1072,
            "name": "SCHUETZ-2024-SIMLOD-SimLOD.jpg",
            "type": "image/jpeg",
            "size": 323598,
            "path": "Publication:SCHUETZ-2024-SIMLOD",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-SimLOD.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-SimLOD:thumb{{size}}.png"
        },
        "sync_repositum_override": "event",
        "repositum_presentation_id": null,
        "authors": [
            1116,
            1857,
            193
        ],
        "articleno": "17",
        "date_from": "2024-05",
        "date_to": "2024-05",
        "doi": "10.1145/3651287",
        "event": "ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games 2024",
        "issn": "2577-6193",
        "journal": "Proceedings of the ACM on Computer Graphics and Interactive Techniques",
        "lecturer": [
            1116
        ],
        "note": "Source Code: https://github.com/m-schuetz/SimLOD",
        "number": "1",
        "open_access": "yes",
        "pages": "20",
        "publisher": "Association for Computing Machinery (ACM)",
        "volume": "7",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "LOD",
            "real-time rendering",
            "Point Cloud Rendering",
            "rasterization",
            "Octree"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "SCHUETZ-2024-SIMLOD-paper.pdf",
                "type": "application/pdf",
                "size": 5928589,
                "path": "Publication:SCHUETZ-2024-SIMLOD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-paper:thumb{{size}}.png"
            },
            {
                "description": "Rendered Point Cloud to the left and points/voxels colored by the containing octree node to the right.",
                "filetitle": "SimLOD",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1911,
                "image_height": 1072,
                "name": "SCHUETZ-2024-SIMLOD-SimLOD.jpg",
                "type": "image/jpeg",
                "size": 323598,
                "path": "Publication:SCHUETZ-2024-SIMLOD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-SimLOD.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/SCHUETZ-2024-SIMLOD-SimLOD:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9275",
            "d4386"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/SCHUETZ-2024-SIMLOD/",
        "__class": "Publication"
    },
    {
        "id": "papantonakis-2024-rmf",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/198651",
        "title": "Reducing the Memory Footprint of 3D Gaussian Splatting",
        "date": "2024-05-13",
        "abstract": "3D Gaussian splatting provides excellent visual quality for novel view synthesis, with fast training and realtime rendering; unfortunately, the memory requirements of this method for storing and transmission are unreasonably high. We first analyze the reasons for this, identifying three main areas where storage can be reduced: the number of 3D Gaussian primitives used to represent a scene, the number of coefficients for the spherical harmonics used to represent directional radiance, and the precision required to store Gaussian primitive attributes. We present a solution to each of these issues. First, we propose an efficient, resolution-aware primitive pruning approach, reducing the primitive count by half. Second, we introduce an adaptive adjustment method to choose the number of coefficients used to represent directional radiance for each Gaussian primitive, and finally a codebook-based quantization method, together with a half-float representation for further memory reduction. Taken together, these three components result in a x27 reduction in overall size on disk on the standard datasets we tested, along with a x1.7 speedup in rendering speed. We demonstrate our method on standard datasets and show how our solution results in significantly reduced download times when using the method on a mobile device (see Fig. 1).",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5371,
            5372,
            1650,
            5373,
            803
        ],
        "articleno": "16",
        "doi": "10.1145/3651282",
        "issn": "2577-6193",
        "journal": "Proceedings of the ACM on Computer Graphics and Interactive Techniques",
        "number": "1",
        "pages": "17",
        "pages_from": "1",
        "pages_to": "17",
        "publisher": "Association for Computing Machinery (ACM)",
        "volume": "7",
        "research_areas": [],
        "keywords": [
            "3D gaussian splatting",
            "memory reduction",
            "novel view synthesis",
            "radiance fields"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "d9275",
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/papantonakis-2024-rmf/",
        "__class": "Publication"
    },
    {
        "id": "rasoulzadeh-2024-strokes2surface",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/208007",
        "title": "Strokes2Surface: Recovering Curve Networks From 4D Architectural Design Sketches",
        "date": "2024-05",
        "abstract": "We present Strokes2Surface, an offline geometry reconstruction pipeline that recovers well-connected curve networks from imprecise 4D sketches to bridge concept design and digital modeling stages in architectural design. The input to our pipeline consists of 3D strokes' polyline vertices and their timestamps as the 4th dimension, along with additional metadata recorded throughout sketching. Inspired by architectural sketching practices, our pipeline combines a classifier and two clustering models to achieve its goal. First, with a set of extracted hand-engineered features from the sketch, the classifier recognizes the type of individual strokes between those depicting boundaries (Shape strokes) and those depicting enclosed areas (Scribble strokes). Next, the two clustering models parse strokes of each type into distinct groups, each representing an individual edge or face of the intended architectural object. Curve networks are then formed through topology recovery of consolidated Shape clusters and surfaced using Scribble clusters guiding the cycle discovery. Our evaluation is threefold: We confirm the usability of the Strokes2Surface pipeline in architectural design use cases via a user study, we validate our choice of features via statistical analysis and ablation studies on our collected dataset, and we compare our outputs against a range of reconstructions computed using alternative methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5233,
            193,
            5429,
            1799
        ],
        "articleno": "e15054",
        "doi": "10.1111/cgf.15054",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "number": "2",
        "pages": "16",
        "pages_from": "1",
        "pages_to": "16",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "CCS Concepts",
            "Computer graphics",
            "Computing methodologies → Artificial intelligence",
            "Machine learning"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "d4314"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/rasoulzadeh-2024-strokes2surface/",
        "__class": "Publication"
    },
    {
        "id": "unterguggenberger-2024-fropo",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/199275",
        "title": "Fast Rendering of Parametric Objects on Modern GPUs",
        "date": "2024-05",
        "abstract": "Parametric functions are an extremely efficient representation for 3D geometry, capable of compactly modelling highly complex objects. Once specified, parametric 3D objects allow for visualization at arbitrary levels of detail, at no additional memory cost, limited only by the amount of evaluated samples. However, mapping the sample evaluation to the hardware rendering pipelines of modern graphics processing units (GPUs) is not trivial. This has given rise to several specialized solutions, each targeting interactive rendering of a constrained set of parametric functions. In this paper, we propose a general method for efficient rendering of parametrically defined 3D objects. Our solution is carefully designed around modern hardware architecture. Our method adaptively analyzes, allocates and evaluates parametric function samples to produce high-quality renderings. Geometric precision can be modulated from few pixels down to sub-pixel level, enabling real-time frame rates of several 100 frames per second (FPS) for various parametric functions. We propose a dedicated level-of-detail (LOD) stage, which outputs patches of similar geometric detail to a subsequent rendering stage that uses either a hardware tessellation-based approach or performs point-based software rasterization. Our method requires neither preprocessing nor caching, and the proposed LOD mechanism is fast enough to run each frame. Hence, our approach also lends itself to animated parametric objects. We demonstrate the benefits of our method over a state-of-the-art spherical harmonics (SH) glyph rendering method, while showing its flexibility on a range of other demanding shapes.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Teaser image, different parametric objects",
            "filetitle": "image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1440,
            "image_height": 1364,
            "name": "unterguggenberger-2024-fropo-image.png",
            "type": "image/png",
            "size": 2235242,
            "path": "Publication:unterguggenberger-2024-fropo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            848,
            1525,
            193,
            1650,
            1116
        ],
        "booktitle": "EGPGV24: Eurographics Symposium on Parallel Graphics and Visualization",
        "date_from": "2024-05-27",
        "date_to": "2024-05-27",
        "doi": "10.2312/pgv.20241129",
        "event": "Eurographics Symposium on Parallel Graphics and Visualization (2024)",
        "isbn": "978-3-03868-243-1",
        "lecturer": [
            848
        ],
        "location": "Odense",
        "open_access": "yes",
        "pages": "12",
        "publisher": "The Eurographics Association",
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "Tessellation Shaders",
            "Point-Based Rendering",
            "Parametric Objects",
            "Fast Rendering",
            "Modern GPUs"
        ],
        "weblinks": [
            {
                "href": "https://github.com/cg-tuwien/FastRenderingOfParametricObjects",
                "caption": "FastRenderingOfParametricObjects",
                "description": "Source Code on GitHub",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": "Teaser image, different parametric objects",
                "filetitle": "image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1440,
                "image_height": 1364,
                "name": "unterguggenberger-2024-fropo-image.png",
                "type": "image/png",
                "size": 2235242,
                "path": "Publication:unterguggenberger-2024-fropo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-image:thumb{{size}}.png"
            },
            {
                "description": "Published Paper",
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "unterguggenberger-2024-fropo-paper.pdf",
                "type": "application/pdf",
                "size": 17591180,
                "path": "Publication:unterguggenberger-2024-fropo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/unterguggenberger-2024-fropo-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9275",
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/unterguggenberger-2024-fropo/",
        "__class": "Publication"
    },
    {
        "id": "kovacs-2024-smt",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/200040",
        "title": "Surface-aware Mesh Texture Synthesis with Pre-trained 2D CNNs",
        "date": "2024-05",
        "abstract": "Mesh texture synthesis is a key component in the automatic generation of 3D content. Existing learning-based methods have drawbacks—either by disregarding the shape manifold during texture generation or by requiring a large number of different views to mitigate occlusion-related inconsistencies. In this paper, we present a novel surface-aware approach for mesh texture synthesis that overcomes these drawbacks by leveraging the pre-trained weights of 2D Convolutional Neural Networks (CNNs) with the same architecture, but with convolutions designed for 3D meshes. Our proposed network keeps track of the oriented patches surrounding each texel, enabling seamless texture synthesis and retaining local similarity to classical 2D convolutions with square kernels. Our approach allows us to synthesize textures that account for the geometric content of mesh surfaces, eliminating discontinuities and achieving comparable quality to 2D image synthesis algorithms. We compare our approach with state-of-the-art methods where, through qualitative and quantitative evaluations, we demonstrate that our approach is more effective for a variety of meshes and styles, while also producing visually appealing and consistent textures on meshes.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1950,
            1919,
            1410
        ],
        "articleno": "e15016",
        "doi": "10.1111/cgf.15016",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "number": "2",
        "pages": "13",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "Deep learning (DL)",
            "Computer Graphics",
            "Texture Synthesis"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kovacs-2024-smt/",
        "__class": "Publication"
    },
    {
        "id": "dworschak-2024-aml",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Alpine Maps Labels",
        "date": "2024-04-03",
        "abstract": "The goal of this project was to receive mountain peak names from a vector tile server, process this information and visualize them on the AlpineMapsOrg application. Initially we\nagreed on using basemap as the vector tile provider but after encountering some problems (mentioned below), we switched to a custom tile server approach that we developed to fill\nour needs.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2256,
            "image_height": 1577,
            "name": "dworschak-2024-aml-teaser.png",
            "type": "image/png",
            "size": 5789426,
            "path": "Publication:dworschak-2024-aml",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1353
        ],
        "date_end": "2024-04",
        "date_start": "2023-12",
        "matrikelnr": "01225883",
        "supervisor": [
            1110,
            1013
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://github.com/AlpineMapsOrg/renderer",
                "caption": "Repository",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://alpinemapsorg.github.io/renderer/wasm_singlethread/alpineapp.html",
                "caption": "Demo",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "developer documentation",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "dworschak-2024-aml-developer documentation.pdf",
                "type": "application/pdf",
                "size": 525967,
                "path": "Publication:dworschak-2024-aml",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-developer documentation.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-developer documentation:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "report",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "dworschak-2024-aml-report.pdf",
                "type": "application/pdf",
                "size": 3084811,
                "path": "Publication:dworschak-2024-aml",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-report.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-report:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2256,
                "image_height": 1577,
                "name": "dworschak-2024-aml-teaser.png",
                "type": "image/png",
                "size": 5789426,
                "path": "Publication:dworschak-2024-aml",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/dworschak-2024-aml-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/dworschak-2024-aml/",
        "__class": "Publication"
    },
    {
        "id": "marin-2024-dsr",
        "type_id": "poster",
        "tu_id": null,
        "repositum_id": "20.500.12708/201670",
        "title": "Distributed Surface Reconstruction",
        "date": "2024-04",
        "abstract": "Recent advancements in scanning technologies and their rise in availability have shifted the focus from reconstructing surfaces from point clouds of small areas to large, e.g., city-wide scenes, containing massive amounts of data. We adapt a surface reconstruction method to work in a distributed fashion on a high-performance cluster, reconstructing datasets with millions of vertices in seconds by exploiting the locality of the connectivity required by the reconstruction algorithm to efficiently divide-and-conquer the problem of creating triangulations from very large unstructured point clouds.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2017,
            "image_height": 1227,
            "name": "marin-2024-dsr-image.png",
            "type": "image/png",
            "size": 3376780,
            "path": "Publication:marin-2024-dsr",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date",
        "repositum_presentation_id": null,
        "authors": [
            1848,
            1859,
            948,
            193
        ],
        "booktitle": "EG 2024 - Posters",
        "cfp": {
            "name": "Call for Posters _ EUROGRAPHICS.pdf",
            "type": "application/pdf",
            "error": 0,
            "size": 210661,
            "orig_name": "Call for Posters _ EUROGRAPHICS.pdf",
            "ext": "pdf"
        },
        "date_from": "2024-04-22",
        "date_to": "2024-04-26",
        "doi": "10.2312/egp.20241037",
        "editor": "Liu, Lingjie and Averkiou, Melinos",
        "event": "45th Annual Conference of the European Association for Computer Graphics (Eurographics 2024)",
        "isbn": "978-3-03868-239-4",
        "lecturer": [
            1848
        ],
        "location": "Limassol",
        "pages": "2",
        "research_areas": [],
        "keywords": [
            "Surface Reconstruction",
            "Distributed Computing",
            "Point Clouds",
            "High Performance Computing",
            "delaunay triangulation"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2017,
                "image_height": 1227,
                "name": "marin-2024-dsr-image.png",
                "type": "image/png",
                "size": 3376780,
                "path": "Publication:marin-2024-dsr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "marin-2024-dsr-paper.pdf",
                "type": "application/pdf",
                "size": 5699964,
                "path": "Publication:marin-2024-dsr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/marin-2024-dsr-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-dsr/",
        "__class": "Publication"
    },
    {
        "id": "heim-2024-accustripes",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/197495",
        "title": "AccuStripes: Visual exploration and comparison of univariate data distributions using color and binning",
        "date": "2024-04",
        "abstract": "Understanding and analyzing univariate distributions of data in terms of their shapes as well as their specific characteristics, regarding gaps, spikes, or outliers, is crucial in many scientific disciplines. In this paper, we propose a design space composed of the visual channels position and color for representing accumulated distributions. The designs are a mixture of color-coded stripes with density lines. The width and coloring of the stripes is based on the applied binning technique. In a crowd-sourced experiment we explore a subspace, called the AccuStripes (i.e., “accumulated stripes”) design space, consisting of nine representations. These AccuStripes designs integrate three composition strategies (color only, overlay, filled curve) with three binning techniques, one uniform (UB) and two adaptive methods, namely Bayesian Blocks (BB) and Jenks’ Natural Breaks (NB). We evaluate the accuracy, efficiency, and confidence ratings of the nine AccuStripes designs for structural estimation and comparison tasks. Across all study tasks, the overlay composition was found to be most accurate and preferred by observers. Furthermore, the results demonstrate that while no binning method performed best in both identification and comparison, detection of structures using adaptive binning was the most accurate one. For validation we compared the best AccuStripes’ design, i.e., the overlay composition, to line charts. Our results show that the AccuStripes’ design outperformed the line charts in accuracy for all study tasks.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "AccuStripes - Graphical Abstract",
            "filetitle": "accuStripes",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1333,
            "image_height": 428,
            "name": "heim-2024-accustripes-accuStripes.png",
            "type": "image/png",
            "size": 366335,
            "path": "Publication:heim-2024-accustripes",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1354,
            1355,
            1110,
            166,
            611
        ],
        "articleno": "103906",
        "doi": "10.1016/j.cag.2024.103906",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "119",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Adaptive binning",
            "Crowd-sourced experiment",
            "Univariate data distributions",
            "Visual analysis"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849324000414",
                "caption": "paper",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "AccuStripes - Graphical Abstract",
                "filetitle": "accuStripes",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1333,
                "image_height": 428,
                "name": "heim-2024-accustripes-accuStripes.png",
                "type": "image/png",
                "size": 366335,
                "path": "Publication:heim-2024-accustripes",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/",
        "__class": "Publication"
    },
    {
        "id": "steinkellner-2024-dll",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Deep Learning for Lighting Evaluation",
        "date": "2024-03-25",
        "abstract": "Lighting has a major impact on the mood of an indoor scene. This plays an important\nrole in interior design. The way a room is lit affects the people in it. While a human\ncan understand this concept and express how a scene feels to them, a computer cannot\nprovide the same type of analysis at the moment. To provide a solution for this, this\nthesis seeks to implement a convolutional neural network to employ deep learning and\nclassify scenes by their lighting mood. To this extent I created a data set consisting\nof images representing different lighting moods. The deep learning technique I used is\ntransfer learning in which an already pre-trained network, in this case VGG16, is used.\nAt first I tried a multi-class approach in which all classes are evaluated by one classifier,\nbut later I moved to a binary classifier structure in which the classifier only learns and\npredicts if an image belongs to one given class or not. This lead to promising results and\nprovides ground to improve upon in future work.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "thesis-img",
            "main_file": false,
            "use_in_gallery": false,
            "access": "hidden",
            "image_width": 1557,
            "image_height": 775,
            "name": "steinkellner-2024-dll-thesis-img.jpg",
            "type": "image/jpeg",
            "size": 140810,
            "path": "Publication:steinkellner-2024-dll",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/steinkellner-2024-dll/steinkellner-2024-dll-thesis-img.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/steinkellner-2024-dll/steinkellner-2024-dll-thesis-img:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5255
        ],
        "date_end": "2024-03-25",
        "date_start": "2023",
        "matrikelnr": "11705362",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": "BSc Thesis",
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "steinkellner-2024-dll-thesis.pdf",
                "type": "application/pdf",
                "size": 6916558,
                "path": "Publication:steinkellner-2024-dll",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/steinkellner-2024-dll/steinkellner-2024-dll-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/steinkellner-2024-dll/steinkellner-2024-dll-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/steinkellner-2024-dll/",
        "__class": "Publication"
    },
    {
        "id": "wagner-2024-par",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Pixel Art Restoration",
        "date": "2024-03-20",
        "abstract": "Games developed in the 8 and 16-bit era of computing used low-resolution images called\nsprites for displaying game worlds and their objects. This art style is often referred to as\nPixel-Art and evolved into its own subgenre of games with new games still getting released\nto this day. While modern Pixel-Art games incorporate the visual fidelity of modern\nLiquid Crystal Display (LCD) monitors into their design, games from the Cathode Ray\nTube (CRT) era often look worse when displayed on an LCD monitor. Understandable,\nsince games at that time were designed to look good on CRT TVs and Monitors which\nfunction in very different ways compared to modern Displays.\nSince the upcoming of retro game console emulators, a lot of effort was put into reproducing\nthe effects of CRT monitors when playing back these old game files on LCD monitors. This\noften requires the use of upscaling algorithms to improve the look of the low-resolution\ngame assets on higher-resolution monitors. Upscaling in general but also tailored towards\nPixel-Art is still an unsolved problem and the focus of many recent publications.\nIn this work, we will explore what effects CRT monitors have on Pixel-Art and how to\nuse this information to guide the upscaling of 8 and 16-bit era Pixel-Art to improve\nvisual fidelity when displayed on modern LCD monitors.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1424,
            "image_height": 748,
            "name": "wagner-2024-par-teaser.jpg",
            "type": "image/jpeg",
            "size": 194244,
            "path": "Publication:wagner-2024-par",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-teaser.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5340
        ],
        "date_end": "2024-03-20",
        "date_start": "2023",
        "matrikelnr": "11907095",
        "supervisor": [
            193
        ],
        "research_areas": [
            "InfoVis",
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1424,
                "image_height": 748,
                "name": "wagner-2024-par-teaser.jpg",
                "type": "image/jpeg",
                "size": 194244,
                "path": "Publication:wagner-2024-par",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-teaser.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wagner-2024-par-thesis.pdf",
                "type": "application/pdf",
                "size": 104824143,
                "path": "Publication:wagner-2024-par",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/wagner-2024-par-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wagner-2024-par/",
        "__class": "Publication"
    },
    {
        "id": "ehlers-2024-vgs",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/196069",
        "title": "Visualizing Group Structure in Compound Graphs: The Current State, Lessons Learned, and Outstanding Opportunities",
        "date": "2024-03-11",
        "abstract": "Compound graphs are common across domains, from social science to biochemical pathway studies, and their visualization is important to both their exploration and analysis. However, effectively visualizing a compound graph's topology and group structure requires careful consideration, as evident by the many different approaches to this particular problem. To better understand the current advancements in compound graph visualization, we have consolidated and streamlined existing surveys' taxonomies. More specifically, we aim to disentangle the visual relationship between graph topology and group structure from the visual encoding used to visualize its group structure in order to identify interesting gaps in the literature. In so doing, we are able to enumerate a number of lessons learned and gain a better understanding of the outstanding research opportunities and practical implications across domains.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1850,
            1848,
            1464,
            1410
        ],
        "booktitle": "Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 1, HUCAPP and IVAPP",
        "date_from": "2024-02-27",
        "date_to": "2024-02-29",
        "doi": "10.5220/0012431200003660",
        "event": "19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications",
        "isbn": "978-989-758-679-8",
        "lecturer": [
            1850
        ],
        "location": "Rom",
        "pages": "12",
        "pages_from": "697",
        "pages_to": "708",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "compound graph visualization",
            "literature survey",
            "group structure visualization"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ehlers-2024-vgs/",
        "__class": "Publication"
    },
    {
        "id": "metzger-2024-smv",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/195907",
        "title": "Semantically meaningful vectorization of line art in drawn animation",
        "date": "2024-03",
        "abstract": "Animation consists of sequentially showing multiple single frames with small mutual differences in order to achieve the visual effect of a moving scene. In limited animation, these frames are drawn as semantically meaningful vector images which could be referred to as clean animation frames. There are limited animation workflows in which these clean animation frames are only available in raster format, requiring laborious manual vectorization. This work explores the extent to which line-art image vectorization methods can be used to automatize this process. For this purpose, a line-art image vectorization method is designed by taking into account the structural information about clean animation frames. Together with existing state-of-the-art line-art image vectorization methods, this method is evaluated on a dataset consisting of clean animation frames. The reproducible evaluation shows that the performance of the developed method is remarkably stable across different input image resolution sizes and binarized or non-binarized versions of input images, even outperforming state-of-the-art methods at input images of the default clean animation frame resolution. Furthermore, it is up to 4.5 times faster than the second-fastest deep learning-based method. However, ultimately the evaluation shows that neither the developed method nor existing state-of-the-art methods can produce vector images that achieve both visual similarity and sufficiently semantically correct vector structures.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1183,
            "image_height": 387,
            "name": "metzger-2024-smv-image.png",
            "type": "image/png",
            "size": 77582,
            "path": "Publication:metzger-2024-smv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            5342
        ],
        "co_supervisor": [
            1639
        ],
        "date_end": "2024-03-11",
        "date_start": "2022-05",
        "diploma_examina": "2024-03-11",
        "doi": "10.34726/hss.2024.102471",
        "matrikelnr": "1454634",
        "open_access": "yes",
        "pages": "198",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [
            "Animation",
            "Limited animation",
            "Line art",
            "Image vectorization",
            "Vector graphics",
            "Deep learning",
            "Machine learning"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1183,
                "image_height": 387,
                "name": "metzger-2024-smv-image.png",
                "type": "image/png",
                "size": 77582,
                "path": "Publication:metzger-2024-smv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "metzger-2024-smv-thesis.pdf",
                "type": "application/pdf",
                "size": 9139349,
                "path": "Publication:metzger-2024-smv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/metzger-2024-smv-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "rend"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/metzger-2024-smv/",
        "__class": "Publication"
    },
    {
        "id": "ulschmid-2024-reo",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/195927",
        "title": "Real-Time Editing of Path-Traced Scenes with Prioritized Re-Rendering",
        "date": "2024-02",
        "abstract": "With recent developments in GPU ray tracing performance and (AI-accelerated) noise reduction techniques, Monte Carlo Path Tracing at real-time rates becomes a viable solution for interactive 3D scene editing, with growing support in popular software. However, even for minor edits (e.g., adjusting materials or moving small objects), current solutions usually discard previous samples and the image formation process is started from scratch. In this paper, we present two adaptive, priority-based re-rendering techniques with incremental updates, prioritizing the reconstruction of regions with high importance, before gradually moving to less important regions. The suggested methods automatically identify and schedule sampling and accumulation of immediately affected regions. An extensive user study analyzes whether such prioritized renderings are beneficial to interactive scene editing, comparing them with same-time conventional re-rendering. Our evaluation shows that even with simple priority policies, there is a significant preference for such incremental rendering techniques for interactive editing of small objects over full-screen re-rendering with denoising.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 378,
            "image_height": 323,
            "name": "ulschmid-2024-reo-teaser.png",
            "type": "image/png",
            "size": 254543,
            "path": "Publication:ulschmid-2024-reo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "booktitle",
        "repositum_presentation_id": null,
        "authors": [
            1954,
            1650,
            1030,
            193
        ],
        "booktitle": "Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - GRAPP and VISIGRAPP",
        "date_from": "2024-02-27",
        "date_to": "2024-02-29",
        "doi": "10.5220/0012324600003660",
        "editor": "Bashford-Rogers, Thomas and Meneveaux, Daniel and Ziat, Mounia and Ammi, Mehdi and Jänicke, Stefan and Purchase, Helen and Bouatouch, Kadi and Sousa, Augusto A.",
        "event": "19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications",
        "isbn": "978-989-758-679-8",
        "lecturer": [
            1954
        ],
        "location": "Rom",
        "note": "Best student paper award",
        "pages": "12",
        "pages_from": "46",
        "pages_to": "57",
        "research_areas": [],
        "keywords": [
            "ray tracing",
            "user study",
            "editing"
        ],
        "weblinks": [
            {
                "href": "https://github.com/cg-tuwien/Prioritized-ReRendering",
                "caption": "GitHub",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "ulschmid-2024-reo-paper.pdf",
                "type": "application/pdf",
                "size": 14987716,
                "path": "Publication:ulschmid-2024-reo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 378,
                "image_height": 323,
                "name": "ulschmid-2024-reo-teaser.png",
                "type": "image/png",
                "size": 254543,
                "path": "Publication:ulschmid-2024-reo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "ulschmid-2024-reo-video.mp4",
                "type": "video/mp4",
                "size": 38712872,
                "path": "Publication:ulschmid-2024-reo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/ulschmid-2024-reo-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "d9275",
            "d4314"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/ulschmid-2024-reo/",
        "__class": "Publication"
    },
    {
        "id": "marin-2024-pcp",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/196496",
        "title": "Parameter-free connectivity for point clouds",
        "date": "2024-02",
        "abstract": "Determining connectivity in unstructured point clouds is a long-standing problem that has still not been addressed satisfactorily. In this paper, we analyze an alternative to the often-used k-nearest neighborhood (kNN) graph - the Spheres of Influence Graph (SIG). We show that the edges that are neighboring each vertex are spatially bounded, which allows for fast computation of SIG. Our approach shows a better encoding of the ground truth connectivity compared to the kNN for a wide range of k, and additionally, it is parameter-free. Our result for this fundamental task offers potential for many applications relying on kNN, e.g., parameter-free normal estimation, and consequently, surface reconstruction, motion planning, simulations, and many more.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 742,
            "image_height": 912,
            "name": "marin-2024-pcp-image.jpg",
            "type": "image/jpeg",
            "size": 190057,
            "path": "Publication:marin-2024-pcp",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date",
        "repositum_presentation_id": null,
        "authors": [
            1848,
            948,
            193
        ],
        "booktitle": "Proceedings of the 19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - Volume 1, HUCAPP and IVAPP",
        "date_from": "2024-02-27",
        "date_to": "2024-02-29",
        "doi": "10.5220/0012394900003660",
        "event": "19th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (2024)",
        "isbn": "978-989-758-679-8",
        "lecturer": [
            1848
        ],
        "location": "Rome",
        "open_access": "yes",
        "pages": "11",
        "pages_from": "92",
        "pages_to": "102",
        "publisher": "SciTePress, Science and Technology Publications",
        "volume": "1",
        "research_areas": [
            "Geometry"
        ],
        "keywords": [
            "Proximity Graphs",
            "Point Clouds",
            "Connectivity"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 742,
                "image_height": 912,
                "name": "marin-2024-pcp-image.jpg",
                "type": "image/jpeg",
                "size": 190057,
                "path": "Publication:marin-2024-pcp",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "marin-2024-pcp-paper.pdf",
                "type": "application/pdf",
                "size": 2193223,
                "path": "Publication:marin-2024-pcp",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/marin-2024-pcp-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "Superhumans",
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/marin-2024-pcp/",
        "__class": "Publication"
    },
    {
        "id": "erler_2024_ppsurf",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/192690",
        "title": "PPSurf: Combining Patches and Point Convolutions for Detailed Surface Reconstruction",
        "date": "2024-01-12",
        "abstract": "Abstract 3D surface reconstruction from point clouds is a key step in areas such as content creation, archaeology, digital cultural heritage and engineering. Current approaches either try to optimize a non-data-driven surface representation to fit the points, or learn a data-driven prior over the distribution of commonly occurring surfaces and how they correlate with potentially noisy point clouds. Data-driven methods enable robust handling of noise and typically either focus on a global or a local prior, which trade-off between robustness to noise on the global end and surface detail preservation on the local end. We propose PPSurf as a method that combines a global prior based on point convolutions and a local prior based on processing local point cloud patches. We show that this approach is robust to noise while recovering surface details more accurately than the current state-of-the-art. Our source code, pre-trained model and dataset are available at https://github.com/cg-tuwien/ppsurf.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "PPSurf teaser with comparison",
            "filetitle": "teaser",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2288,
            "image_height": 740,
            "name": "erler_2024_ppsurf-teaser.png",
            "type": "image/png",
            "size": 1734284,
            "path": "Publication:erler_2024_ppsurf",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "number,volume",
        "repositum_presentation_id": null,
        "authors": [
            1395,
            5317,
            1919,
            627,
            1184,
            193
        ],
        "date_from": "2020",
        "date_to": "2024-01-12",
        "doi": "10.1111/cgf.15000",
        "event": "Eurographics 2024",
        "first_published": "2024-01-12",
        "html_block": "<h2>Demo</h2>\n<iframe src=\"https://perler-ppsurf.hf.space\" frameborder=\"0\" style=\"width: 100%; height: 450px;\"></iframe>                                                                      \n",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1395
        ],
        "number": "1",
        "open_access": "yes",
        "pages": "12",
        "pages_from": "tbd",
        "pages_to": "tbd",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [
            "Geometry",
            "Modeling"
        ],
        "keywords": [
            "modeling",
            "surface reconstruction"
        ],
        "weblinks": [
            {
                "href": "https://huggingface.co/spaces/perler/ppsurf",
                "caption": "Live System",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://github.com/cg-tuwien/ppsurf",
                "caption": "Repo (Github)",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://onlinelibrary.wiley.com/doi/10.1111/cgf.15000",
                "caption": "Official Publication (Wiley Computer Graphics Forum)",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://arxiv.org/abs/2401.08518",
                "caption": "Preprint (ArXiv)",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://www.replicabilitystamp.org/#https-github-com-cg-tuwien-ppsurf",
                "caption": "Graphics Replicability Stamp Initiative",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": "Paper Reproduction Code and Models\nNote: use the repo instead of this messy code",
                "filetitle": "paper_repro",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-paper_repro.7z",
                "type": "application/octet-stream",
                "size": 1512332777,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-paper_repro.7z",
                "thumb_image_sizes": []
            },
            {
                "description": "PPSurf (ArXiv Version)",
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-paper.pdf",
                "type": "application/pdf",
                "size": 3581909,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-paper:thumb{{size}}.png"
            },
            {
                "description": "PPSurf 50NN Model Checkpoint",
                "filetitle": "ppsurf_50nn_model",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-ppsurf_50nn_model.zip",
                "type": "application/x-zip-compressed",
                "size": 153351534,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-ppsurf_50nn_model.zip",
                "thumb_image_sizes": []
            },
            {
                "description": "PPSurf 50NN Results (Meshes and Tables)",
                "filetitle": "ppsurf_50nn_results",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-ppsurf_50nn_results.7z",
                "type": "application/octet-stream",
                "size": 1609430340,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-ppsurf_50nn_results.7z",
                "thumb_image_sizes": []
            },
            {
                "description": "Eurographics 2024 Slides (PDF)",
                "filetitle": "slides_eg24_pdf",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-slides_eg24_pdf.pdf",
                "type": "application/pdf",
                "size": 2551903,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-slides_eg24_pdf.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-slides_eg24_pdf:thumb{{size}}.png"
            },
            {
                "description": "Eurographics 2024 Slides",
                "filetitle": "slides_eg24",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-slides_eg24.pptx",
                "type": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
                "size": 26027659,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-slides_eg24.pptx",
                "thumb_image_sizes": []
            },
            {
                "description": "PPSurf teaser with comparison",
                "filetitle": "teaser",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2288,
                "image_height": 740,
                "name": "erler_2024_ppsurf-teaser.png",
                "type": "image/png",
                "size": 1734284,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-teaser:thumb{{size}}.png"
            },
            {
                "description": "Testsets (ABC, Famous, Thingi10k)",
                "filetitle": "testsets",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-testsets.zip",
                "type": "application/x-zip-compressed",
                "size": 1879212476,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-testsets.zip",
                "thumb_image_sizes": []
            },
            {
                "description": "ABC Training Set",
                "filetitle": "trainset",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "erler_2024_ppsurf-trainset.zip",
                "type": "application/x-zip-compressed",
                "size": 2513802131,
                "path": "Publication:erler_2024_ppsurf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/erler_2024_ppsurf-trainset.zip",
                "thumb_image_sizes": []
            }
        ],
        "projects_workgroups": [
            "rend",
            "Superhumans",
            "ShapeAcquisition",
            "d4388"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/erler_2024_ppsurf/",
        "__class": "Publication"
    },
    {
        "id": "thesis-tekaya",
        "type_id": "runphdthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Context Analysis in Historical Media Collections",
        "date": "2024-01",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5330
        ],
        "date_start": "2024-01",
        "research_areas": [],
        "keywords": [],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/thesis-tekaya/",
        "__class": "Publication"
    },
    {
        "id": "herzberger-2024-roh",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/193209",
        "title": "Residency Octree: a hybrid approach for scalable web-based multi-volume rendering",
        "date": "2024-01",
        "abstract": "We present a hybrid multi-volume rendering approach based on a novel Residency Octree that combines the advantages of out-of-core volume rendering using page tables with those of standard octrees. Octree approaches work by performing hierarchical tree traversal. However, in octree volume rendering, tree traversal and the selection of data resolution are intrinsically coupled. This makes fine-grained empty-space skipping costly. Page tables, on the other hand, allow access to any cached brick from any resolution. However, they do not offer a clear and efficient strategy for substituting missing high-resolution data with lower-resolution data. We enable flexible mixed-resolution out-of-core multi-volume rendering by decoupling the cache residency of multi-resolution data from a resolution-independent spatial subdivision determined by the tree. Instead of one-to-one node-to-brick correspondences, each residency octree node is mapped to a set of bricks from different resolution levels. This makes it possible to efficiently and adaptively choose and mix resolutions, adapt sampling rates, and compensate for cache misses. At the same time, residency octrees support fine-grained empty-space skipping, independent of the data subdivision used for caching. Finally, to facilitate collaboration and outreach, and to eliminate local data storage, our implementation is a web-based, pure client-side renderer using WebGPU and WebAssembly. Our method is faster than prior approaches and efficient for many data channels with a flexible and adaptive choice of data resolution.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2656,
            "image_height": 912,
            "name": "herzberger-2024-roh-teaser.png",
            "type": "image/png",
            "size": 550157,
            "path": "Publication:herzberger-2024-roh",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,date_from,date_to,event,lecturer,location",
        "repositum_presentation_id": null,
        "authors": [
            1857,
            226,
            5321,
            5322,
            1933,
            166,
            818
        ],
        "date_from": "2023-10-22",
        "date_to": "2023-10-27",
        "doi": "10.1109/TVCG.2023.3327193",
        "event": "IEEE VIS 2023",
        "issn": "1941-0506",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1857
        ],
        "location": "Melbourne, Victoria, Australia",
        "number": "1",
        "open_access": "yes",
        "pages": "11",
        "pages_from": "1380",
        "pages_to": "1390",
        "publisher": "IEEE",
        "volume": "30",
        "research_areas": [],
        "keywords": [
            "Rendering (computer graphics)",
            "Octrees",
            "Spatial resolution",
            "Graphics processing units",
            "Data visualization",
            "Optimization",
            "Standards",
            "Volume rendering",
            "Ray-guided rendering",
            "Large-scale data",
            "Out-of-core rendering",
            "Multi-resolution",
            "Multi-channel",
            "Web-Based Visualization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "herzberger-2024-roh-paper.pdf",
                "type": "application/pdf",
                "size": 8426051,
                "path": "Publication:herzberger-2024-roh",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2656,
                "image_height": 912,
                "name": "herzberger-2024-roh-teaser.png",
                "type": "image/png",
                "size": 550157,
                "path": "Publication:herzberger-2024-roh",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/herzberger-2024-roh-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/herzberger-2024-roh/",
        "__class": "Publication"
    },
    {
        "id": "bayat-2024-awt",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/197494",
        "title": "A Workflow to Visually Assess Interobserver Variability in Medical Image Segmentation",
        "date": "2024-01",
        "abstract": "We introduce a workflow for the visual assessment of interobserver variability in medical image segmentation. Image segmentation is a crucial step in the diagnosis, prognosis, and treatment of many diseases. Despite the advancements in autosegmentation, clinical practice widely relies on manual delineations performed by radiologists. Our work focuses on designing a solution for understanding the radiologists' thought processes during segmentation and for unveiling reasons that lead to interobserver variability. To this end, we propose a visual analysis tool connecting multiple radiologists' delineation processes with their outcomes, and we demonstrate its potential in a case study.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1693,
            1110,
            1410
        ],
        "doi": "10.1109/MCG.2023.3333475",
        "issn": "1558-1756",
        "journal": "IEEE Computer Graphics and Applications",
        "number": "1",
        "pages": "9",
        "pages_from": "86",
        "pages_to": "94",
        "publisher": "IEEE COMPUTER SOC",
        "volume": "44",
        "research_areas": [],
        "keywords": [
            "Humans",
            "Observer Variation",
            "Workflow",
            "Algorithms"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/bayat-2024-awt/",
        "__class": "Publication"
    },
    {
        "id": "muth-2024-edt",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/202187",
        "title": "Exploring Drusen Type and Appearance using Interpretable GANs",
        "date": "2024",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5391,
            5392,
            1410,
            5393,
            5394,
            5395
        ],
        "booktitle": "VCBM 2024: Eurographics Workshop on Visual Computing for Biology and Medicine",
        "date_from": "2024-09-19",
        "date_to": "2024-09-20",
        "doi": "10.2312/vcbm.20241187",
        "event": "Eurographics Workshop on Visual Computing for Biology and Medicine (VCBM 2024)",
        "lecturer": [
            5396
        ],
        "research_areas": [],
        "keywords": [
            "Image Processing",
            "Machine Learning",
            "Ophthalmology"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/muth-2024-edt/",
        "__class": "Publication"
    },
    {
        "id": "amabili-2024-smg",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/200899",
        "title": "Show Me the GIFference! Using data-GIFs as Educational Tools",
        "date": "2024",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1861,
            166,
            1410
        ],
        "booktitle": "Computer Science Research Notes: CSRN 3401: WSCG 2024: Proceedings",
        "date_from": "2024-06-03",
        "date_to": "2024-06-06",
        "doi": "10.24132/CSRN.3401.7",
        "editor": "Skala, Vaclav",
        "event": "32. International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision (WSCG 2024)",
        "lecturer": [
            1410
        ],
        "location": "Plzen",
        "pages": "10",
        "pages_from": "57",
        "pages_to": "66",
        "research_areas": [],
        "keywords": [
            "data visualization",
            "education for visualization"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/amabili-2024-smg/",
        "__class": "Publication"
    },
    {
        "id": "irendorfer-2024-uat",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/202525",
        "title": "User Approaches to Knowledge Externalization in Visual Analytics of Unstructured Data",
        "date": "2024",
        "abstract": "Traditional machine learning approaches for analyzing large unstructured data often depend on labelled training data and well-defined target definitions. However, these may not be available or feasible when dealing with unknown and unstructured data. It requires human reasoning and domain knowledge to interpret it. Interactive systems that combine human analytical abilities with machine learning techniques can address this limitation. However, to incorporate human knowledge in such systems, we need a better understanding of the semantic information and structures that users observe and expect while exploring unstructured data, as well as how they make their tacit knowledge explicit. This thesis aims to narrow the gap between human cognition and (knowledge-assisted) visual analytics. In a qualitative and exploratory user study, this thesis investigates how individuals explore a large unstructured dataset and which strategies they apply to externalize their mental models. By analyzing users' externalized mental models, we aim to better understand how their knowledge evolves during data exploration. We evaluate the comprehensiveness, detail and evolution of users' external knowledge representations by applying quantitative and qualitative methods, including a crowdsourcing step. The results show that users' externalized structures are able to represent a given dataset comprehensively and to a high degree of detail. While these knowledge representations are highly subjective and show various individual differences, we could identify structural similarities between individuals. In addition to the insights about how users externalize their tacit knowledge during data exploration, we propose design guidelines for (knowledge-assisted) visual analytics systems.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5399
        ],
        "doi": "10.34726/hss.2024.115066",
        "open_access": "yes",
        "pages": "80",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Knowledge Externalization",
            "Knowledge-Assisted Visualization",
            "Visual Analytics",
            "Unstructured Data",
            "Concept Maps",
            "Mental Models",
            "User Study",
            "Data Exploration"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "irendorfer-2024-uat-thesis.pdf",
                "type": "application/pdf",
                "size": 1981913,
                "path": "Publication:irendorfer-2024-uat",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/irendorfer-2024-uat-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/irendorfer-2024-uat-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/",
        "__class": "Publication"
    },
    {
        "id": "wimmer-2024-adh",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/201047",
        "title": "Addressing Data Heterogeneity in Image-Based Computer-Aided Detection and Diagnosis Methods",
        "date": "2024",
        "abstract": "The acquisition of medical imaging data is inevitable for screening, diagnosis, planning of surgery or therapy, or monitoring of diseases. In clinical practice, the data is assessed by medical experts, which can be a very time-consuming task. Hence, for decades a lot of research effort has been dedicated to the automated analysis of medical imaging data and to the question of how Computer-Aided Detection and Diagnosis algorithms can assist the tasks mentioned above. However, one of the biggest challenges in this regard is the highly heterogeneous nature of medical imaging data. The acquisition of data from different imaging modalities, like X-ray or Magnetic Resonance Imaging (MRI), changes of acquisition parameters, and the use of different scanners results in diverse data. The varying spatial resolution as well as the high dimensionality of the data pose additional challenges to the development of automated solutions. In this thesis, we investigate different machine learning-based methods to address the analysis of heterogeneous medical imaging data, such as multi-parametric, multi-modal, multi-center, or multi-view data. We present three different pipeline approaches that follow generalization- and fusion- based approaches and demonstrate their applicability on diverse public datasets. Our contributions target two selected use cases in radiology: the semantic labeling of the spine in MRI data and the analysis of mammograms. In semi- and fully-automated spine labeling in MRI data, we are confronted with the problem that MRI data does not exhibit a standardized intensity scale, which results in a large variety of different image contrasts. To overcome this problem for semantic spine labeling, we propose an iterative labeling pipeline that employs Entropy-Optimized Texture Models (ETMs). The application of trained ETMs allows us to apply our models to a wide range of different MRI data. This is in contrast to various related works that develop methods for specific MRI image sequences and protocols. For the analysis of mammography screening data, not only one but four X-ray images from different fields of view are available that form a study of a patient. In addition to this multi-view data, we deal with multi-scale information at various levels, e.g., on a patient, image, or lesion level. To utilize and combine this information efficiently, we develop several deep learning-based models that aim for a specific task important in examining mammograms, such as the localization of abnormalities. For a comprehensive prediction on a patient level, we propose to fuse predictions and features from the individual models to increase performance, which is in contrast to standard ensembling techniques. The results in this thesis demonstrate that considering the different aspects of heterogeneous medical imaging data is inevitable to improve both generalization and predictive capabilities of Computer-Aided Detection and Diagnosis methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1137
        ],
        "doi": "10.34726/hss.2024.125391",
        "open_access": "yes",
        "pages": "163",
        "supervisor": [
            166
        ],
        "research_areas": [],
        "keywords": [
            "Medical Image Analysis",
            "Image Processing",
            "Data Heterogeneity",
            "Computer-Aided Detection and Diagnosis",
            "Mammography",
            "Spine Labeling",
            "Machine Learning",
            "Deep Learning"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wimmer-2024-adh-thesis.pdf",
                "type": "application/pdf",
                "size": 6694551,
                "path": "Publication:wimmer-2024-adh",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wimmer-2024-adh/wimmer-2024-adh-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wimmer-2024-adh/wimmer-2024-adh-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wimmer-2024-adh/",
        "__class": "Publication"
    },
    {
        "id": "kovacs-2024-gsg",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/205211",
        "title": "G-Style: Stylized Gaussian Splatting",
        "date": "2024",
        "abstract": "We introduce G-Style, a novel algorithm designed to transfer the style of an image onto a 3D scene represented using Gaussian Splatting. Gaussian Splatting is a powerful 3D representation for novel view synthesis, as—compared to other approaches based on Neural Radiance Fields—it provides fast scene renderings and user control over the scene. Recent pre-prints have demonstrated that the style of Gaussian Splatting scenes can be modified using an image exemplar. However, since the scene geometry remains fixed during the stylization process, current solutions fall short of producing satisfactory results. Our algorithm aims to address these limitations by following a three-step process: In a pre-processing step, we remove undesirable Gaussians with large projection areas or highly elongated shapes. Subsequently, we combine several losses carefully designed to preserve different scales of the style in the image, while maintaining as much as possible the integrity of the original scene content. During the stylization process and following the original design of Gaussian Splatting, we split Gaussians where additional detail is necessary within our scene by tracking the gradient of the stylized color. Our experiments demonstrate that G-Style generates high-quality stylizations within just a few minutes, outperforming existing methods both qualitatively and quantitatively.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1950,
            5415,
            1410
        ],
        "articleno": "e15259",
        "doi": "10.1111/cgf.15259",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "number": "7",
        "pages": "13",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "Artificial intelligence",
            "Computer graphics",
            "Neural networks"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/kovacs-2024-gsg/",
        "__class": "Publication"
    },
    {
        "id": "lucio-2024-kma",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/204917",
        "title": "Knowledge maps as a complementary tool to learn and teach surgical anatomy in virtual reality: A case study in dental implantology",
        "date": "2024",
        "abstract": "A thorough understanding of surgical anatomy is essential for preparing and training medical students to become competent and skilled surgeons. While Virtual Reality (VR) has shown to be a suitable interaction paradigm for surgical training, traditional anatomical VR models often rely on simple labels and arrows pointing to relevant landmarks. Yet, studies have indicated that such visual settings could benefit from knowledge maps as such representations explicitly illustrate the conceptual connections between anatomical landmarks. In this article, a VR educational tool is presented designed to explore the potential of knowledge maps as a complementary visual encoding for labeled 3D anatomy models. Focusing on surgical anatomy for implantology, it was investigated whether integrating knowledge maps within a VR environment could improve students' understanding and retention of complex anatomical relationships. The study involved 30 master's students in dentistry and 3 anatomy teachers, who used the tool and were subsequently assessed through surgical anatomy quizzes (measuring both completion times and scores) and subjective feedback (assessing user satisfaction, preferences, system usability, and task workload). The results showed that using knowledge maps in an immersive environment facilitates learning and teaching surgical anatomy applied to implantology, serving as a complementary tool to conventional VR educational methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5407,
            5408,
            1410,
            5409,
            5410,
            5411,
            5412,
            5413
        ],
        "booktitle": "Healthcare Technology Letters",
        "date_from": "2024-10-06",
        "date_to": "2024-10-06",
        "doi": "10.1049/htl2.12094",
        "event": "27th International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI 2024)",
        "lecturer": [
            5407
        ],
        "pages": "12",
        "research_areas": [],
        "keywords": [
            "biomedical education",
            "user interfaces",
            "virtual reality"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/lucio-2024-kma/",
        "__class": "Publication"
    },
    {
        "id": "2024-po2",
        "type_id": "proceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/209728",
        "title": "Proceedings of the 28th Central European Seminar on Computer Graphics : CESCG 2024",
        "date": "2024",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [],
        "doi": "10.34726/8401",
        "editor": "Ilčík, Martin and Bittner, Jiří and Berger Haladová, Zuzana and Wimmer, Michael",
        "isbn": "978-3-9504701-5-4",
        "open_access": "yes",
        "pages": "192",
        "research_areas": [],
        "keywords": [
            "visual computing",
            "realtime graphics",
            "computer vision",
            "visualization",
            "virtual reality",
            "medical imaging",
            "user experience",
            "rendering"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "2024-po2-paper.pdf",
                "type": "application/pdf",
                "size": 133424212,
                "path": "Publication:2024-po2",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/2024-po2/2024-po2-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/2024-po2/2024-po2-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/2024-po2/",
        "__class": "Publication"
    },
    {
        "id": "toepfer-2024-spx",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/203924",
        "title": "SPX: A Versatile Spatial Indexing Framework",
        "date": "2024",
        "abstract": "The integration of modern genetic techniques, advanced volumetric imaging methods, and single-cell extraction methods has empowered neurobiologists to create extensive digitized and coregistered specimen sample collections. These collections serve as valuable resources for studying neuronal structures, functional compartments, and neurological connections within the brain. By sampling single cells from various locations within the animal brain, scientists can investigate cell type distributions and gene expressions. However, the exploration of these vast collections, which include volumetric images, segmented structures, and gene expression data, poses a significant challenge in neuroscience. Efficient access to specific regions of interest in all images, derivative processed data, cell samples, and metadata is essential for researchers. In this thesis, I present a flexible and extensible approach to spatially index and store volumetric grid data and region-based data, enabling efficient access and providing a streamlined method for implementing new data abstractions, query types, and indexing strategies. The framework supports different datasets, the encoding of neurological structural types, and incorporates a layering mechanism to handle multiple data representations or time-dependent data within a single data structure. Standardized interfaces are defined for loading voxel and region data, preprocessing them, creating data abstractions, and implementing new query types. The data storage is managed using a storage engine approach, allowing users to leverage different storage mechanisms or introduce their own. This thesis provides an overview of conceptual ideas, implementation details, current data abstractions, and query types. The system was evaluated in terms of performance and scalability in its current use cases. A short introduction to three applications, BrainBaseWeb, BrainTrawler, and BrainTrawler Lite, exemplifies the usage of this framework.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5401
        ],
        "doi": "10.34726/hss.2024.119688",
        "open_access": "yes",
        "pages": "157",
        "supervisor": [
            166
        ],
        "research_areas": [],
        "keywords": [
            "spatial indexing",
            "volume data",
            "brain atlas"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "toepfer-2024-spx-thesis.pdf",
                "type": "application/pdf",
                "size": 3832321,
                "path": "Publication:toepfer-2024-spx",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/toepfer-2024-spx/toepfer-2024-spx-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/toepfer-2024-spx/toepfer-2024-spx-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/toepfer-2024-spx/",
        "__class": "Publication"
    },
    {
        "id": "matt-2024-cvi",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/198406",
        "title": "Class-Centric Visual Interactive Labeling using Property Measures",
        "date": "2024",
        "abstract": "Human annotation of image data is relevant for supervised machine learning, where labeled datasets are essential for training models. Traditionally, reducing the labeling effort was achieved through active learning, where the optimal next instance for labeling is selected by some heuristic to maximize utility. More recent work has focused on integrating user initiative in the labeling process through visual interactive labeling to steer the labeling process. This thesis proposes cVIL, a class-centric approach for visual interactive labeling that simplifies the human annotation process for large and complex image datasets. Previously, visual labeling approaches were typically instance-based, where the system visualizes individual instances for the user to label. cVIL utilizes diverse property measures to enable the labeling of difficult instances individually and in batches to label simpler cases rapidly. Since the property measures express the properties of an instance using a single scalar value, the visualizations are simple and scalable. cVIL combines the heuristic guidance approach of active learning with the user-centered approach of visual interactive labeling. In simulations, we could show that property measures can facilitate effective instance and batch labeling. In a user study, cVIL demonstrated superior accuracy and user satisfaction compared to the conventional instance-based visual interactive labeling approach that employs scatterplots. Participants also needed less time to complete the assigned tasks in cVIL compared to the baseline.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5263
        ],
        "co_supervisor": [
            5370
        ],
        "doi": "10.34726/hss.2024.102653",
        "open_access": "yes",
        "pages": "101",
        "supervisor": [
            1110
        ],
        "research_areas": [],
        "keywords": [
            "Human-centered computing",
            "Visual analytics",
            "User interface design"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "matt-2024-cvi-thesis.pdf",
                "type": "application/pdf",
                "size": 4771814,
                "path": "Publication:matt-2024-cvi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvi/matt-2024-cvi-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvi/matt-2024-cvi-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvi/",
        "__class": "Publication"
    },
    {
        "id": "riegelnegg-2024-aeo",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/198102",
        "title": "Automated Extraction of Complexity Measures from Engineering Drawings",
        "date": "2024",
        "abstract": "An engineering drawing is a detailed representation of an object used to communicate complex information for the purposes of design, manufacturing, and maintenance. These line drawings typically consist of multiple 2D orthographic views of a 3D object, along with dimensioning information and metadata about specific properties. Over the past decades, engineering drawings have evolved from hand-drawn sketches to highly standardized documents created with the help of CAD software. The large variety of engineering drawings makes it difficult to automatically extract abstract information in a robust way. The emergence of additive manufacturing (AM) promises companies that they can produce spare parts on demand for maintenance, potentially increasing the operational time of their infrastructure. Evaluating the AM potential of spare parts is essential from both an economic and technical perspective. This analysis of economic and technical viability requires the interpretation of complexity measures that can be derived from the engineering drawing of a spare part. The external dimensions of an object are key complexity measures to facilitate an AM potential analysis. In this thesis, we propose a processing pipeline that automates the extraction of complexity measures from engineering drawings, focusing on the external dimensions of the depicted objects. An in-depth examination of engineering drawings from different eras forms the basis of our methodology. Our pipeline is designed to be adaptable and consists of interpretable stages for specific tasks. We segment important entities in the input drawing to detect candidate dimension lines that are subsequently filtered by a sequence of processing steps. The grid structure of the orthographic views is determined, which allows us to assign axis labels to each view. We run optical character recognition (OCR) on detected dimension numbers and use the results to optimize the ratio between the OCR values and the length of dimension lines in pixels, providing us with a solution that is resilient to errors in the OCR predictions. A prototypical implementation of our pipeline demonstrates its capabilities in handling a large variety of drawings. We conduct a basic quantitative and qualitative evaluation of our methodology. The results confirm the effectiveness of our approach in automatically extracting abstract information from real-world engineering drawings.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1655
        ],
        "doi": "10.34726/hss.2024.115164",
        "open_access": "yes",
        "pages": "117",
        "supervisor": [
            166
        ],
        "research_areas": [],
        "keywords": [
            "engineering drawing",
            "information extraction",
            "computer vision",
            "pattern recognition",
            "deep learning",
            "additive manufacturing",
            "vision transformer"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "riegelnegg-2024-aeo-thesis.pdf",
                "type": "application/pdf",
                "size": 5046482,
                "path": "Publication:riegelnegg-2024-aeo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/riegelnegg-2024-aeo/riegelnegg-2024-aeo-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/riegelnegg-2024-aeo/riegelnegg-2024-aeo-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/riegelnegg-2024-aeo/",
        "__class": "Publication"
    },
    {
        "id": "tanaka-2024-vor",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/209946",
        "title": "Visualization of Relationships between Precipitation and River Water Levels",
        "date": "2024",
        "abstract": "Observation of precipitation changes is important for a variety of purposes such as predicting river levels. Previous studies for data visualization of precipitation and river water levels plotted graphs and color bars for many stations on a map. Instead of such visualizations on a map, we construct a graph to imitate a connected structure such as a tributary of a river in this study. Our method displays two pseudo-coloring sparklines at nodes of the graph as the stations. The method can visualize the time difference between the increase in precipitation upstream and the increase in river water level downstream. Users can observe precipitation and river water levels at different observation points. Our method uses a Delaunay diagram connecting gauging positions to interpolate and calculate precipitation at river level observation points. This avoids the discrepancy between observation points. In addition, we adjust the amount of visualized information by skipping the display of several observation points based on the similarity of the time-series data at each station, which is calculated by applying the dynamic time-stretching method. The visualization results show that downstream, once the water level rises, it tends to take longer for the water level to drop. In addition, the results show that a time lag occurs between the increase in precipitation and the rise in river levels in the mainstream, while tributaries have little time lag. In addition, data on rainfall and river levels at the same station over multiple periods and their relationship are plotted as scatter plots. The scatter plots make it easier to compare data from multiple periods at the same time than two-tone pseudo coloring sparklines.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5438,
            1937,
            1850,
            1410,
            166,
            1754
        ],
        "booktitle": "2024 28th International Conference Information Visualisation (IV)",
        "date_from": "2024-07-22",
        "date_to": "2024-07-26",
        "doi": "10.1109/IV64223.2024.00020",
        "event": "2024 28th International Conference Information Visualisation (IV)",
        "isbn": "979-8-3503-8016-3",
        "lecturer": [
            5438
        ],
        "location": "Coimbra",
        "pages": "6",
        "pages_from": "58",
        "pages_to": "63",
        "publisher": "IEEE",
        "research_areas": [],
        "keywords": [
            "Geographic Information",
            "Meteorological Information",
            "River Water level",
            "Interpolation",
            "Precipitation",
            "Rain",
            "Image color analysis",
            "Data visualization",
            "Rivers",
            "Space stations",
            "Bars"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/tanaka-2024-vor/",
        "__class": "Publication"
    },
    {
        "id": "boffi-2024-bagginghook",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/197699",
        "title": "BaggingHook: Selecting Moving Targets by Pruning Distractors Away for Intention-Prediction Heuristics in Dense 3D Environments",
        "date": "2024",
        "abstract": "Selecting targets in dense, dynamic 3D environments presents a significant challenge. In this study, we introduce two novel selection techniques based on distractor pruning to assist users in selecting targets moving unpredictably: BaggingHook and AutoBaggingHook. Both are built upon the Hook intention-prediction heuristic, which continuously measures the distance between the user's cursor and each object to compute per-object scores and estimate the intended target. Our techniques reduce the number of targets in the environment, making heuristic convergence potentially faster. Once pruned away, distractors are also made semi-transparent to reduce occlusion and the overall difficulty of the task. However, their motion is not altered, so that users can still perceive the dynamics of the environment. We designed two pruning approaches: BaggingHook lets users manually prune distractors away, while AutoBaggingHook uses automated, score-based pruning. We conducted a user study in a virtual reality setting inspired by molecular dynamics simulations, featuring crowded scenes of objects moving fast and unpredictably, in 3D. We compared both proposed techniques to the Hook baseline under more challenging circumstances than it had previously been tested. Our results show that AutoBaggingHook was the fastest, and did not lead to higher error rates. BaggingHook, on the other hand, was preferred by the majority of participants, due to the greater degree of control it provides to users, leading some to see entertainment value in its use. This work shows the potential benefits of varying the types of inputs used in intention-prediction heuristics, not just to improve performance, but also to reduce occlusion, overall task load, and improve user experience.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5365,
            5366,
            1110,
            5367,
            171
        ],
        "booktitle": "2024 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
        "date_from": "2024-03-16",
        "date_to": "2024-03-21",
        "doi": "10.1109/VR58804.2024.00110",
        "event": "2024 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
        "isbn": "9798350374025",
        "lecturer": [
            5368
        ],
        "location": "Orlando, FL",
        "pages": "11",
        "pages_from": "913",
        "pages_to": "923",
        "research_areas": [],
        "keywords": [
            "Algorithms",
            "AR/VR/Immersive",
            "Human-Subjects Qualitative Studies",
            "Human-Subjects Quantitative Studies",
            "Interaction Design",
            "Mobile",
            "Specialized Input/Display Hardware"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/boffi-2024-bagginghook/",
        "__class": "Publication"
    },
    {
        "id": "staats-2024-atr",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/195524",
        "title": "Alpine Terrain Relighting",
        "date": "2024",
        "abstract": "Aerial orthophotos together with digital elevation models (DEMs) allow the rendering of 3D representations of the earth, including alpine terrain. These virtual landscapes provide the opportunity to simulate light conditions at different times of the day, aiding in trip planning. However, orthophotos used as texture often contain large shadows stemming from cliffs and rocks, which significantly impact the visual quality of relighted textures. The necessary single-image shadow-removal process presents a crucial problem for the computer vision domain, which also functions as a prerequisite for many other tasks like segmentation and classification. Many promising approaches have already been developed, but unlike previous methods, this study tries to capitalize on the availability of DEMs to enhance the shadow removal process. Shadows in orthophotos are inherently linked to the underlying geospatial topology, and DEMs provide a valuable source of information for mitigating their impact. Therefore, this thesis explores the integration of DEMs into a state-of-the-art deep learning pipeline. DEMs are examined for their role in generating training sets and as supplementary input for a multi-modal network. Notably, 3D geometry derived from DEMs complemented by ray-tracing is used to generate artificial shadows with realistic shapes. Subsequently, an experiment is conducted with the created dataset to empirically test if additional elevation data is beneficial for the performance of the models. Additionally, the model’s ability to generalize from artificial to real shadows was probed. The experiment on virtual shadows showed that providing additional elevation data to the shadow-removal network does yield significantly better results with a medium to large effect size. Initially, all trained models failed to generalize to real shadow data. Downsizing the dataset to a lower level of detail mitigated this problem. Together with an analysis of the output of each network layer, it was concluded that the reason for the subpar real data performance are remaining small-scale shadows in the train set. A visual analysis of the improved models showed noticeable improvements with the generated realistic shadow shapes compared to random ones. Moreover, the utility of additional elevation data as input for the models was demonstrated.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5338
        ],
        "co_supervisor": [
            1919
        ],
        "doi": "10.34726/hss.2024.112641",
        "open_access": "yes",
        "pages": "70",
        "supervisor": [
            1110
        ],
        "research_areas": [],
        "keywords": [
            "Shadow-Removal",
            "Shadow-Detection",
            "Orthophotos",
            "Digital Elevation Models",
            "Deep-Learning",
            "Generative Adversarial Models",
            "Computer Vision"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "staats-2024-atr-thesis.pdf",
                "type": "application/pdf",
                "size": 5090122,
                "path": "Publication:staats-2024-atr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/staats-2024-atr/staats-2024-atr-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/staats-2024-atr/staats-2024-atr-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/staats-2024-atr/",
        "__class": "Publication"
    },
    {
        "id": "parakkat-2024-ballmerge",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/197864",
        "title": "BallMerge: High‐quality Fast Surface Reconstruction via Voronoi Balls",
        "date": "2024",
        "abstract": "We introduce a Delaunay-based algorithm for reconstructing the underlying surface of a given set of unstructured points in 3D. The implementation is very simple, and it is designed to work in a parameter-free manner. The solution builds upon the fact that in the continuous case, a closed surface separates the set of maximal empty balls (medial balls) into an interior and exterior. Based on discrete input samples, our reconstructed surface consists of the interface between Voronoi balls, which approximate the interior and exterior medial balls. An initial set of Voronoi balls is iteratively processed, merging Voronoi-ball pairs if they fulfil an overlapping error criterion. Our complete open-source reconstruction pipeline performs up to two quick linear-time passes on the Delaunay complex to output the surface, making it an order of magnitude faster than the state of the art while being competitive in memory usage and often superior in quality. We propose two variants (local and global), which are carefully designed to target two different reconstruction scenarios for watertight surfaces from accurate or noisy samples, as well as real-world scanned data sets, exhibiting noise, outliers, and large areas of missing data. The results of the global variant are, by definition, watertight, suitable for numerical analysis and various applications (e.g., 3D printing). Compared to classical Delaunay-based reconstruction techniques, our method is highly stable and robust to noise and outliers, evidenced via various experiments, including on real-world data with challenges such as scan shadows, outliers, and noise, even without additional preprocessing.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 813,
            "image_height": 248,
            "name": "parakkat-2024-ballmerge-image.png",
            "type": "image/png",
            "size": 308382,
            "path": "Publication:parakkat-2024-ballmerge",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1825,
            948,
            823,
            5297
        ],
        "doi": "10.1111/cgf.15019",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "number": "2",
        "open_access": "yes",
        "pages": "13",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "surface reconstruction",
            "voronoi",
            "point clouds"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 813,
                "image_height": 248,
                "name": "parakkat-2024-ballmerge-image.png",
                "type": "image/png",
                "size": 308382,
                "path": "Publication:parakkat-2024-ballmerge",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "parakkat-2024-ballmerge-paper.pdf",
                "type": "application/pdf",
                "size": 75917181,
                "path": "Publication:parakkat-2024-ballmerge",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/parakkat-2024-ballmerge-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "Superhumans",
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/parakkat-2024-ballmerge/",
        "__class": "Publication"
    },
    {
        "id": "eitler-2024-sos",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/205306",
        "title": "Spatial Organization Strategies in Exploratory Analysis of Unstructured Data",
        "date": "2024",
        "abstract": "As not only the amount but also the complexity of data increases, there is a growing need to support humans in the analysis of data that is not structured in a way that can be easily interpreted by machines. So-called “knowledge-assisted visual analytics” (KAVA) tools aim to address these challenges by integrating the knowledge of the analyst into their system to support the analysis process. In this thesis, we investigate the spatial organization strategies that users employ when exploring unstructured data. We aim to characterize the types of strategies that users employ, how they change over time, and how we can use them to infer the users’ knowledge of the data. To answer these questions, we first conduct a user study in which the participants explore an image dataset on a multitouch tabletop interface imitating an analogue setting and externalize their findings into concept maps. We observe their organization strategies and analyse their methods in a mixed-methods approach, combining qualitative analysis of the participants’ interview statements with quantitative analysis of the interaction logs. We find that the participants’ spatial organization strategies can be characterized by four features: semantic clusters, type of layout, uncovering process, and reorganization of the data. While most participants prefer layouts that give them an overview of the data, only about half create semantic clusters (i.e., grouping similar images together). The participants also mostly uncovered all images — which were initially on a stack — in the task right away before externalizing their knowledge, and only a few reorganized the images. We further find that the participants generally did not change their organization strategies over time, and that the resulting spatial arrangements do not necessarily provide valuable insights into the users’ knowledge of the data. Finally, we discuss our findings and list the limitations of our study. As this thesis is embedded in a research project that aims to develop a tool for knowledge-assisted visual analytics, we discuss potential design implications for the development of such a tool.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5419
        ],
        "doi": "10.34726/hss.2024.117186",
        "open_access": "yes",
        "pages": "95",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "visual analytics",
            "unstructured data",
            "spatial organization",
            "exploratory analysis",
            "knowledge-assisted visual analytics",
            "semantic interaction",
            "knowledge externalization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eitler-2024-sos-thesis.pdf",
                "type": "application/pdf",
                "size": 2087980,
                "path": "Publication:eitler-2024-sos",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/eitler-2024-sos-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/eitler-2024-sos-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/",
        "__class": "Publication"
    },
    {
        "id": "pichler-2024-vaf",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/204038",
        "title": "Visual Analytics für Deep Learning mit Graphen: Case Study Neuronen Clustering",
        "date": "2024",
        "abstract": "Many deep learning applications are based on graph data in order to explore relationships or to analyze structures. Labeling this data is expensive and often requires expert knowledge. For the application of graph clustering to neuron data, the SOTA method GraphDINO generates self-supervised graph embeddings combined with the downstream task of clustering these embeddings. We observe on a particularly challenging neuron dataset that this method does not lead to satisfying clustering results. Therefore we use the graph embeddings generated by GraphDINO as an initial starting point to improve the network and to guide the network training. To achieve this, we developed the visual analytics framework NetDive. The user can analyze the graph embeddings and label single neurons that are falsely clustered. This annotation information is then used to train a semi-supervised model. To this end, we developed a network architecture, named GraphPAWS, that assembles components of GraphDINO and of the semi-supervised network architecture PAWS. The model training can be started from within the visual analytics application NetDive and the resulting graph embeddings are available in NetDive as soon as the retraining is completed. We demonstrate how we iteratively train the model using NetDive and GraphPAWS and evaluate our model against the self-supervised SOTA for our dataset.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Example neurons for each spiny clusters of the BBP dataset, with apical\ndendrites in lighter color and basal dendrites in darker color.",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 811,
            "image_height": 528,
            "name": "pichler-2024-vaf-teaser.png",
            "type": "image/png",
            "size": 365626,
            "path": "Publication:pichler-2024-vaf",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1940
        ],
        "co_supervisor": [
            5404,
            231
        ],
        "doi": "10.34726/hss.2024.112190",
        "open_access": "yes",
        "pages": "134",
        "supervisor": [
            166
        ],
        "research_areas": [],
        "keywords": [
            "Graph embedding",
            "Visual Analytics"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "Example neurons for each spiny clusters of the BBP dataset, with apical\ndendrites in lighter color and basal dendrites in darker color.",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 811,
                "image_height": 528,
                "name": "pichler-2024-vaf-teaser.png",
                "type": "image/png",
                "size": 365626,
                "path": "Publication:pichler-2024-vaf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "pichler-2024-vaf-thesis.pdf",
                "type": "application/pdf",
                "size": 5460090,
                "path": "Publication:pichler-2024-vaf",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/pichler-2024-vaf-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pichler-2024-vaf/",
        "__class": "Publication"
    },
    {
        "id": "rauer-zechmeister-2024-h3p",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/206130",
        "title": "HORA 3D: Personalized Flood Risk Visualization as an Interactive Web Service",
        "date": "2024",
        "abstract": "We propose an interactive web-based application to inform the general public about personal flood risks. Flooding is the natural hazard affecting most people worldwide. Protection against flooding is not limited to mitigation measures, but also includes communicating its risks to affected individuals to raise awareness and preparedness for its adverse effects. Until now, this is mostly done with static and indiscriminate 2D maps of the water depth. These flood hazard maps can be difficult to interpret and the user has to derive a personal flood risk based on prior knowledge. In addition to the hazard, the flood risk has to consider the exposure of the own house and premises to high water depths and flow velocities as well as the vulnerability of particular parts. Our application is centered around an interactive personalized visualization to raise awareness of these risk factors for an object of interest. We carefully extract and show only the relevant information from large precomputed flood simulation and geospatial data to keep the visualization simple and comprehensible. To achieve this goal, we extend various existing approaches and combine them with new real-time visualization and interaction techniques in 3D. A new view-dependent focus+context design guides user attention and supports an intuitive interpretation of the visualization to perform predefined exploration tasks. HORA 3D enables users to individually inform themselves about their flood risks. We evaluated the user experience through a broad online survey with 87 participants of different levels of expertise, who rated the helpfulness of the application with 4.7 out of 5 on average.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5421,
            877,
            5422,
            1207,
            1205,
            1688,
            1689,
            1330,
            166,
            798
        ],
        "articleno": "e15110",
        "doi": "10.1111/cgf.15110",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "number": "3",
        "pages": "12",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [],
        "keywords": [
            "CCS Concepts",
            "Human-centered computing",
            "Information systems",
            "Visualization",
            "Spatial-temporal systems"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/rauer-zechmeister-2024-h3p/",
        "__class": "Publication"
    }
]
