[
    {
        "id": "muth-2026-clouds",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Real-Time Volumetric Rendering of Meteorological Cloud Data",
        "date": "2026-03-25",
        "abstract": "Traditional meteorological visualizations collapse the vertical structure of the atmosphere\ninto two-dimensional map overlays, losing precisely the information most relevant in\ncomplex terrain. Expert tools for three-dimensional atmospheric exploration exist, but\ntarget domain scientists on dedicated workstation infrastructure. This thesis presents a\npipeline for the real-time volumetric rendering of meteorological cloud data within the\nweBIGeo web-based geographic visualization platform, with the goal of making cloud\nstructure intuitively readable to general users in a standard web browser.\nThe pipeline transforms hourly ICON-D2 forecast output into a compressed, streamable\ntile hierarchy that a WebGPU ray-marcher samples at interactive frame rates. Preprocessing\na single forecast timestamp completes in approximately 33 s of compute time,\nproducing a tile hierarchy of roughly 86MiB on average. Rendering cost is well within\ninteractive bounds: even under high cloud coverage, the cloud pass consumes around\n2.25ms of GPU time, a small fraction of the 33ms budget for 30 fps. Qualitative evaluation\nagainst EUMETSAT satellite imagery shows that large-scale cloud patterns are\nreproduced with reasonable fidelity. Comparison against webcam imagery reveals three\nconcrete limitations: the coarse and non-uniform vertical resolution of the source data\nis insufficient to resolve sharp fog layer boundaries, the sub-grid density distribution\ndoes not preserve the character of small cloud elements such as wispy puffs, and the tile\nresolution is too coarse to encode the surface texture of individual cumulus cells. The\nsystem is best understood as a large-scale atmospheric context layer rather than a precise\nlocal forecast tool.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "clouds in weBIGeo",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1958,
            "image_height": 1159,
            "name": "muth-2026-clouds-teaser.png",
            "type": "image/png",
            "size": 4142519,
            "path": "Publication:muth-2026-clouds",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5585
        ],
        "date_end": "2026-03",
        "date_start": "2025-10",
        "matrikelnr": "12226614",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "Rendering"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://github.com/Qendolin/webigeo-clouds",
                "caption": "GitHub Cloud Renderer",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://github.com/Qendolin/webigeo-clouds-server",
                "caption": "GitHub Cloud Preprocessing Server",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": "clouds in weBIGeo",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1958,
                "image_height": 1159,
                "name": "muth-2026-clouds-teaser.png",
                "type": "image/png",
                "size": 4142519,
                "path": "Publication:muth-2026-clouds",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "muth-2026-clouds-thesis.pdf",
                "type": "application/pdf",
                "size": 20583815,
                "path": "Publication:muth-2026-clouds",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-thesis:thumb{{size}}.png"
            },
            {
                "description": "Short video sequence zooming into a cloudy scene",
                "filetitle": "video",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "muth-2026-clouds-video.mp4",
                "type": "video/mp4",
                "size": 8692035,
                "path": "Publication:muth-2026-clouds",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/muth-2026-clouds-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "d9555"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/muth-2026-clouds/",
        "__class": "Publication"
    },
    {
        "id": "Musleh_PhD",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/224040",
        "title": "Guided Visual Analytics for Decision Making under Uncertainty",
        "date": "2026-03",
        "abstract": "Visual Analytics (VA) has emerged from the need to optimize decision making by involving human reasoning in sense making. The development of VA has been facilitated by significant technological advances in modern computer graphics and data processing capabilities. Involving humans in the loop aims to address high-risk scenarios where artificial intelligence (AI) automated approaches are insufficient. One active area of research with VA is the development of methods that enable the user to make efficient and effective decisions under high uncertainty. Yet, the field of VA research has not fully understood how user attitude, namely trust and confidence, interplay in VA decision making under uncertainty. Properties of the user attitude play a crucial role in optimizing VA decision making, but they are challenging to externalize and evaluate. For instance, user confidence in their decision emerges as an important indicator of effectiveness when the correctness of the decision cannot be measured. In this dissertation, we explore the use of guidance techniques to address uncertainties in VA decision making, focusing on scenarios where the correctness of decisions cannot be definitively established. Throughout this work, we learned that a multidimensional guidance mechanism can address uncertainties more effectively when uncertainties are challenging to quantify and visualize, especially in the case of subjective uncertainty. However, evaluating the effectiveness of guidance approaches requires a more comprehensive analysis of the interplay between trust and confidence within the sense-making process. Using provenance networks and SNA metrics can provide a more reliable and comprehensive assessment of user confidence, indicating that such approaches can be employed to support co-adaptive guidance.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "title,abstract,date,keywords",
        "repositum_presentation_id": null,
        "authors": [
            1867
        ],
        "ac_number": "AC17745765",
        "date_end": "2025",
        "date_start": "2021",
        "doi": "10.34726/hss.2026.137814",
        "open_access": "yes",
        "pages": "185",
        "reviewer_1": [
            5252
        ],
        "reviewer_2": [
            5243
        ],
        "rigorosum": "2025-11",
        "supervisor": [
            1410
        ],
        "research_areas": [
            "InfoVis",
            "MedVis"
        ],
        "keywords": [
            "Visualization",
            "Visual Analytics",
            "Guidance",
            "Trust",
            "Confidence"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Musleh_PhD-thesis.pdf",
                "type": "application/pdf",
                "size": 6169556,
                "path": "Publication:Musleh_PhD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/Musleh_PhD/Musleh_PhD-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/Musleh_PhD/Musleh_PhD-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/Musleh_PhD/",
        "__class": "Publication"
    },
    {
        "id": "kovacs-2026-sbg",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/226679",
        "title": "Style Brush: Guided Style Transfer for 3D Objects",
        "date": "2026-02-16",
        "abstract": "We introduce Style Brush, a novel style transfer method for textured meshes designed to empower artists with fine-grained control over the stylization process. Our approach extends traditional 3D style transfer methods by introducing a novel loss function that captures style directionality, supports multiple style images or portions thereof and enables smooth transitions between styles in the synthesized texture. The use of easily generated guiding textures streamlines user interaction, making our approach accessible to a broad audience. Extensive evaluations with various meshes, style images and contour shapes demonstrate the flexibility of our method and showcase the visual appeal of the generated textures. Finally, the results of a user study indicate that our approach generates visually appealing mesh textures that adhere to user-defined guidance and enable users to retain creative control during stylization. Our implementation is available on: https://github.com/AronKovacs/style-brush.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1950,
            5415,
            1410
        ],
        "articleno": "e70308",
        "doi": "10.1111/cgf.70308",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "pages": "18",
        "publisher": "WILEY",
        "research_areas": [],
        "keywords": [
            "3D style transfer",
            "directional guidance",
            "mesh texture synthesis",
            "user guidance"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/kovacs-2026-sbg/",
        "__class": "Publication"
    },
    {
        "id": "chaves-de-plaza-2026-logcc",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/226667",
        "title": "LoGCC: Local-to-Global Correlation Clustering for Scalar Field Ensembles",
        "date": "2026-02",
        "abstract": "Correlation clustering (CC) offers an effective approach to analyze scalar field ensembles by detecting correlated regions and consistent structures, enabling the extraction of meaningful patterns. However, existing CC methods are computationally expensive, making them impractical for both interactive analysis and large-scale scalar fields. We introduce the Local-to-Global Correlation Clustering (LoGCC) framework, which accelerates pivot-based CC by leveraging the spatial structure of scalar fields and the weak transitivity of correlation. LoGCC operates in two stages: a local step that uses the neighborhood graph of the scalar field's spatial domain to build highly correlated local clusters, and a global step that merges them into global clusters. We implement the LoGCC framework for two well-known pivot-based CC methods, Pivot and CN-Pivot, demonstrating its generality. Our evaluation using synthetic and real-world meteorological and medical image segmentation datasets shows that LoGCC achieves speedups—up to 15 × for Pivot and 200 × for CN-Pivot—and improved scalability to larger scalar fields, while maintaining cluster quality. These contributions broaden the applicability of correlation clustering in large-scale and interactive analysis settings.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5565,
            1410,
            5566,
            5567,
            5568,
            5569,
            5570
        ],
        "doi": "10.1109/TVCG.2025.3630550",
        "issn": "1941-0506",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "number": "2",
        "pages": "12",
        "pages_from": "2260",
        "pages_to": "2271",
        "publisher": "IEEE COMPUTER SOC",
        "volume": "32",
        "research_areas": [],
        "keywords": [
            "Correlation Clustering",
            "Clustering Algorithms",
            "Uncertainty Visualization",
            "Ensemble Visualization",
            "Scalar Field Ensemble Analysis"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/chaves-de-plaza-2026-logcc/",
        "__class": "Publication"
    },
    {
        "id": "kaipel_nikolas-2025-baa",
        "type_id": "bachelorthesis",
        "tu_id": null,
        "repositum_id": null,
        "title": "Noisy Change Detection",
        "date": "2026-01-30",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image2",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 312,
            "image_height": 241,
            "name": "kaipel_nikolas-2025-baa-image2.png",
            "type": "image/png",
            "size": 24842,
            "path": "Publication:kaipel_nikolas-2025-baa",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-image2.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-image2:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5444
        ],
        "co_supervisor": [
            948
        ],
        "date_end": "2026-01-30",
        "date_start": "2025-02-13",
        "matrikelnr": "12140073",
        "supervisor": [
            193
        ],
        "research_areas": [
            "Geometry"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image2",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 312,
                "image_height": 241,
                "name": "kaipel_nikolas-2025-baa-image2.png",
                "type": "image/png",
                "size": 24842,
                "path": "Publication:kaipel_nikolas-2025-baa",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-image2.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-image2:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "kaipel_nikolas-2025-baa-thesis.pdf",
                "type": "application/pdf",
                "size": 900294,
                "path": "Publication:kaipel_nikolas-2025-baa",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/kaipel_nikolas-2025-baa-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "WorldScale"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/kaipel_nikolas-2025-baa/",
        "__class": "Publication"
    },
    {
        "id": "Kovacs_PhD",
        "type_id": "phdthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/224033",
        "title": "3D Style Transfer: Lifting 2D Methods to 3D and Enabling Interactive Guidance",
        "date": "2026",
        "abstract": "3D style transfer refers to altering the visual appearance of 3D objects and scenes to match a given (artistic) style, usually provided as an image. 3D style transfer presents significant potential in streamlining the creation of 3D assets such as game environment props, VFX elements, or large-scale virtual scenes. However, it faces challenges such as ensuring multi-view consistency, respecting computational and memory constraints, and enabling artist control. In this dissertation, we propose three methods that aim at stylizing 3D assets while addressing these challenges. We focus on optimization-based methods due to the higher quality of results compared to single-pass methods. Our contributions advance the state-of-the-art by introducing: (i) novel surface-aware CNN operators for direct mesh texturing, (ii) the first Gaussian Splatting (GS) method capable of transferring both high-frequency details and large-scale patterns, and (iii) an interactive method that allows directional and region-based control over the stylization process. Each of these methods outperforms existing baselines in visual fidelity and robustness. Across three complementary projects, we explore different facets of 3D style transfer. In the first project, we propose a method that creates textures directly on the surface of a mesh. By replacing the standard 2D convolution and pooling layers in a pre-trained 2D CNN with surface-based operations, we achieve seamless, multi-view-consistent texture synthesis without relying on proxy 2D images. In the second project, we transfer both high-frequency and large-scale patterns using GS, while addressing representation-specific artifacts such as oversized or elongated Gaussians. Furthermore, we design a style loss capable of transferring style patterns at multiple scales, resulting in visually appealing stylized scenes that preserve both intricate details and large-scale motifs.\nIn the third project, we propose an interactive method that allows users to guide stylization by drawing lines to control pattern direction, and painting regions on both the 3D surface and style image to specify where and how specific style patterns should be applied. Through our extensive qualitative and quantitative evaluations, we show that our methods surpass state-of-the-art techniques. We also demonstrate their robustness across diverse 3D objects, scenes, and styles, highlighting the flexibility of the presented methods. Future work may explore extensions such as geometry modification for style-driven shape changes, more efficient large-scale pattern synthesis, temporal coherence in dynamic or video-based scenes, and refined interactive controls informed by direct artist feedback to better integrate creative intent into the stylization pipeline.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "title,abstract,date,keywords,type_id",
        "repositum_presentation_id": null,
        "authors": [
            1950
        ],
        "ac_number": "AC17745734",
        "co_supervisor": [
            5572
        ],
        "date_end": "2024",
        "date_start": "2021",
        "doi": "10.34726/hss.2026.137815",
        "open_access": "yes",
        "pages": "104",
        "supervisor": [
            1410
        ],
        "research_areas": [
            "MedVis"
        ],
        "keywords": [
            "Style transfer",
            "Texture synthesis",
            "Neural Networks",
            "Neural rendering"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Kovacs_PhD-thesis.pdf",
                "type": "application/pdf",
                "size": 4807053,
                "path": "Publication:Kovacs_PhD",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2026/Kovacs_PhD/Kovacs_PhD-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2026/Kovacs_PhD/Kovacs_PhD-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2026/Kovacs_PhD/",
        "__class": "Publication"
    }
]
