[
    {
        "id": "komon-2025-dco",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/224640",
        "title": "Data-Driven Compute Overlays for Interactive Geographic Simulation and Visualization",
        "date": "2025-12-30",
        "abstract": "We present interactive data-driven compute overlays for native and web-based 3D geographic map applications based on WebGPU. Our data-driven overlays are generated in a multi-step compute workflow from multiple data sources on the GPU. We demonstrate their potential by showing results from snow cover and avalanche simulations, where simulation parameters can be adjusted interactively and results are visualized instantly. Benchmarks show that our approach can compute large-scale avalanche simulations in milliseconds to seconds, depending on the size of the terrain and the simulation parameters, which is multiple orders of magnitude faster than a state-of-the-art Python implementation.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Rendered 2.5D terrain with avalanche simulation output as overlay, color encodes velocity.",
            "filetitle": "Teaser image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2527,
            "image_height": 1270,
            "name": "komon-2025-dco-Teaser image.png",
            "type": "image/png",
            "size": 3968804,
            "path": "Publication:komon-2025-dco",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-dco/komon-2025-dco-Teaser image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-dco/komon-2025-dco-Teaser image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1859,
            1869,
            1013,
            1110
        ],
        "booktitle": "2025 IEEE Visualization and Visual Analytics (VIS)",
        "date_from": "2025-11-01",
        "date_to": "2025-11-07",
        "doi": "10.1109/VIS60296.2025.00043",
        "event": "IEEE VIS 2025",
        "isbn": "979-8-3315-6613-5",
        "lecturer": [
            1869
        ],
        "location": "Vienna",
        "pages": "5",
        "pages_from": "186",
        "pages_to": "190",
        "publisher": "IEEE",
        "research_areas": [],
        "keywords": [
            "3D geographic visualization",
            "geographic simulation",
            "WebGPU"
        ],
        "weblinks": [
            {
                "href": "https://arxiv.org/abs/2506.23364",
                "caption": "Paper preprint",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://webigeo.alpinemaps.org/",
                "caption": "Online demo",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://youtu.be/pZq0H_l-8Bs?t=1580",
                "caption": "Talk recording",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://github.com/weBIGeo/webigeo",
                "caption": "GitHub repository",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": "Rendered 2.5D terrain with avalanche simulation output as overlay, color encodes velocity.",
                "filetitle": "Teaser image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2527,
                "image_height": 1270,
                "name": "komon-2025-dco-Teaser image.png",
                "type": "image/png",
                "size": 3968804,
                "path": "Publication:komon-2025-dco",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-dco/komon-2025-dco-Teaser image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-dco/komon-2025-dco-Teaser image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9555"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-dco/",
        "__class": "Publication"
    },
    {
        "id": "tekaya-2025-amo",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/221607",
        "title": "A Matter of Time: Revealing the Structure of Time in Vision-Language Models",
        "date": "2025-10-27",
        "abstract": "Large-scale vision-language models (VLMs) such as CLIP have gained popularity for their generalizable and expressive multimodal representations. By leveraging large-scale training data with diverse textual metadata, VLMs acquire open-vocabulary capabilities, solving tasks beyond their training scope. This paper investigates the temporal awareness of VLMs, assessing their ability to position visual content in time. We introduce TIME10k, a benchmark dataset of over 10,000 images with temporal ground truth, and evaluate the time-awareness of 37 VLMs by a novel methodology. Our investigation reveals that temporal information is structured along a low-dimensional, non-linear manifold in the VLM embedding space. Based on this insight, we propose methods to derive an explicit ''timeline'' representation from the embedding space. These representations model time and its chronological progression and thereby facilitate temporal reasoning tasks. Our timeline approaches achieve competitive to superior accuracy compared to a prompt-based baseline while being computationally efficient. All code and data are available at https://tekayanidham.github.io/timeline-page/.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1536,
            "image_height": 458,
            "name": "tekaya-2025-amo-teaser.png",
            "type": "image/png",
            "size": 4646052,
            "path": "Publication:tekaya-2025-amo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/tekaya-2025-amo/tekaya-2025-amo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/tekaya-2025-amo/tekaya-2025-amo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,keywords,projects",
        "repositum_presentation_id": null,
        "authors": [
            5330,
            1110,
            5370
        ],
        "booktitle": "MM '25: Proceedings of the 33rd ACM International Conference on Multimedia",
        "date_from": "2025-10-27",
        "date_to": "2025-10-31",
        "doi": "10.1145/3746027.3758163",
        "event": "ACM International Conference on Multimedia 2025",
        "isbn": "979-8-4007-2035-2",
        "lecturer": [
            5330
        ],
        "location": "Dublin",
        "open_access": "yes",
        "pages": "10",
        "pages_from": "12371",
        "pages_to": "12380",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Multimodal representations",
            "Vision-language models",
            "Time modeling",
            "Time estimation",
            "Benchmark dataset"
        ],
        "weblinks": [
            {
                "href": "https://tekayanidham.github.io/timeline-page/",
                "caption": null,
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1536,
                "image_height": 458,
                "name": "tekaya-2025-amo-teaser.png",
                "type": "image/png",
                "size": 4646052,
                "path": "Publication:tekaya-2025-amo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/tekaya-2025-amo/tekaya-2025-amo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/tekaya-2025-amo/tekaya-2025-amo-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9522",
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/tekaya-2025-amo/",
        "__class": "Publication"
    },
    {
        "id": "komon-2025-webigeo",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/221523",
        "title": "weBIGeo: Interaktive Lawinensimulation im Web",
        "date": "2025-10",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2556,
            "image_height": 1303,
            "name": "komon-2025-webigeo-teaser.png",
            "type": "image/png",
            "size": 5204641,
            "path": "Publication:komon-2025-webigeo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            1859,
            1869,
            5510,
            5511,
            5512,
            5513,
            1013,
            1110
        ],
        "ac_number": "AC17717665",
        "booktitle": "Tagungsband des 6. internationalen Lawinensymposiums",
        "date_from": "2025-10-18",
        "date_to": "2025-10-18",
        "doi": "10.34726/11439",
        "event": "6. Lawinen Symposium Graz 2025",
        "lecturer": [
            1859,
            1110
        ],
        "location": "Graz",
        "open_access": "yes",
        "pages": "4",
        "pages_from": "150",
        "pages_to": "153",
        "research_areas": [
            "InfoVis",
            "Modeling"
        ],
        "keywords": [
            "Visualisierung",
            "Lawinen",
            "3D Karten"
        ],
        "weblinks": [
            {
                "href": "https://webigeo.alpinemaps.org/",
                "caption": "demo",
                "description": "weBIGeo demo: requires WebGPU-ready browser! ",
                "main_file": 1
            },
            {
                "href": "https://lawinensymposium.naturfreunde.at/",
                "caption": "Lawinensymposium",
                "description": "Lawinensymposium",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "komon-2025-webigeo-paper.pdf",
                "type": "application/pdf",
                "size": 1611394,
                "path": "Publication:komon-2025-webigeo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2556,
                "image_height": 1303,
                "name": "komon-2025-webigeo-teaser.png",
                "type": "image/png",
                "size": 5204641,
                "path": "Publication:komon-2025-webigeo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/komon-2025-webigeo-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9555",
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/komon-2025-webigeo/",
        "__class": "Publication"
    },
    {
        "id": "matt-2025-scv",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/216286",
        "title": "Scalable Class-Centric Visual Interactive Labeling",
        "date": "2025-06",
        "abstract": "Large unlabeled datasets demand efficient and scalable data labeling solutions, in particular when the number of instances and classes is large. This leads to significant visual scalability challenges and imposes a high cognitive load on the users. Traditional instance-centric labeling methods, where (single) instances are labeled in each iteration struggle to scale effectively in these scenarios. To address these challenges, we introduce cVIL, a Class-Centric Visual Interactive Labeling methodology designed for interactive visual data labeling. By shifting the paradigm from assigning-classes-to-instances to assigning-instances-to-classes, cVIL reduces labeling effort and enhances efficiency for annotators working with large, complex and class-rich datasets. We propose a novel visual analytics labeling interface built on top of the conceptual cVIL workflow, enabling improved scalability over traditional visual labeling. In a user study, we demonstrate that cVIL can improve labeling efficiency and user satisfaction over instance-centric interfaces. The effectiveness of cVIL is further demonstrated through a usage scenario, showcasing its potential to alleviate cognitive load and support experts in managing extensive labeling tasks efficiently.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "class-centric visual interactive labeling workflow",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1667,
            "image_height": 624,
            "name": "matt-2025-scv-teaser.png",
            "type": "image/png",
            "size": 254694,
            "path": "Publication:matt-2025-scv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5263,
            5487,
            1851,
            5370,
            1110
        ],
        "articleno": "104240",
        "doi": "10.1016/j.cag.2025.104240",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "pages": "14",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "129",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Class-centric labeling",
            "Interactive machine learning",
            "Property measures",
            "Visual analytics",
            "Visual-interactive data labeling"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "class-centric visual interactive labeling workflow",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1667,
                "image_height": 624,
                "name": "matt-2025-scv-teaser.png",
                "type": "image/png",
                "size": 254694,
                "path": "Publication:matt-2025-scv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/",
        "__class": "Publication"
    },
    {
        "id": "eschner-2025-ide",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/216865",
        "title": "Interactive Discovery and Exploration of Visual Bias in Generative Text‐to‐Image Models",
        "date": "2025-06",
        "abstract": "Bias in generative Text-to-Image (T2I) models is a known issue, yet systematically analyzing such models' outputs to uncover it remains challenging. We introduce the Visual Bias Explorer (ViBEx) to interactively explore the output space of T2I models to support the discovery of visual bias. ViBEx introduces a novel flexible prompting tree interface in combination with zero-shot bias probing using CLIP for quick and approximate bias exploration. It additionally supports in-depth confirmatory bias analysis through visual inspection of forward, intersectional, and inverse bias queries. ViBEx is model-agnostic and publicly available. In four case study interviews, experts in AI and ethics were able to discover visual biases that have so far not been described in literature.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2578,
            "image_height": 1292,
            "name": "eschner-2025-ide-image.png",
            "type": "image/png",
            "size": 1659146,
            "path": "Publication:eschner-2025-ide",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "title,date,projects,event,lecturer,location,number,volume",
        "repositum_presentation_id": null,
        "authors": [
            1653,
            5490,
            5370,
            1110
        ],
        "ac_number": "AC17579673",
        "articleno": "e70135",
        "doi": "10.1111/cgf.70135",
        "event": "EuroVis 2025",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1653
        ],
        "location": "Luxembourg",
        "number": "3",
        "open_access": "yes",
        "pages": "20",
        "publisher": "WILEY",
        "volume": "44",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Visualization",
            "Bias",
            "Artificial Intelligence"
        ],
        "weblinks": [
            {
                "href": "https://vibex.jde.cg.tuwien.ac.at",
                "caption": "live demo",
                "description": "Live demo of the ViBEx application",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2578,
                "image_height": 1292,
                "name": "eschner-2025-ide-image.png",
                "type": "image/png",
                "size": 1659146,
                "path": "Publication:eschner-2025-ide",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2025-ide-paper.pdf",
                "type": "application/pdf",
                "size": 46238959,
                "path": "Publication:eschner-2025-ide",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/",
        "__class": "Publication"
    },
    {
        "id": "aigner-2025-vhv",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/225207",
        "title": "Visual Heritage: Visual Analytics and Computer Vision Meet Cultural Heritage (doc.funds.connect)",
        "date": "2025-05-07",
        "abstract": null,
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5540,
            966,
            5541,
            1511,
            5542,
            1110,
            5370
        ],
        "booktitle": "Abstractband: 18. Forschungsforum der österreichischen Fachhochschulen",
        "date_from": "2025-05-07",
        "date_to": "2025-05-08",
        "event": "18. Forschungsforum Der Österreichischen Fachhochschulen",
        "lecturer": [
            5543
        ],
        "location": "Wien",
        "pages": "2",
        "pages_from": "558",
        "pages_to": "559",
        "research_areas": [],
        "keywords": [
            "Artificial Intelligence",
            "Visual Analytics",
            "Computer Vision",
            "Cultural Heritage",
            "Visualization",
            "Computer Science"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "d9522"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/aigner-2025-vhv/",
        "__class": "Publication"
    },
    {
        "id": "grammatikaki-2025-htr",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/215590",
        "title": "How to represent landmark trees in digital 3D maps? An automated workflow and user study",
        "date": "2025-05",
        "abstract": "Digital 3D maps created from digital elevation models (DEMs) cannot properly capture trees due to the 2.5D nature of the DEMs. Leveraging publicly available DEMs and orthophotos as the only input data sources, we present a fully automatic pipeline that models landmark trees. We conducted two crowdsourced user studies to evaluate visual appeal and scene recognizability using two different levels of detail of tree representations generated using our pipeline. Users found highly detailed trees much more appealing, and also easier and more trustworthy to recognize, with open-ended responses revealing key factors like realism, coherence, and tree shape influencing their preferences. However, the ability to recognize a location seems to depend more on the surrounding environment than the representation of the landmark tree in focus. We discuss the implications of these results for digital 3D outdoor maps.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2200,
            "image_height": 370,
            "name": "grammatikaki-2025-htr-image.jpg",
            "type": "image/jpeg",
            "size": 181956,
            "path": "Publication:grammatikaki-2025-htr",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/grammatikaki-2025-htr/grammatikaki-2025-htr-image.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/grammatikaki-2025-htr/grammatikaki-2025-htr-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects,pages_from,pages_to,volume",
        "repositum_presentation_id": null,
        "authors": [
            1937,
            1653,
            5463,
            5462,
            1110
        ],
        "doi": "10.1080/15230406.2025.2489543",
        "issn": "1545-0465",
        "journal": "Cartography and Geographic Information Science",
        "open_access": "yes",
        "pages": "18",
        "pages_from": "1",
        "pages_to": "18",
        "publisher": "TAYLOR & FRANCIS INC",
        "volume": "Number 0",
        "research_areas": [],
        "keywords": [
            "Digital 3D maps",
            "landmarks",
            "trees",
            "user study",
            "modeling",
            "rendering"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2200,
                "image_height": 370,
                "name": "grammatikaki-2025-htr-image.jpg",
                "type": "image/jpeg",
                "size": 181956,
                "path": "Publication:grammatikaki-2025-htr",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/grammatikaki-2025-htr/grammatikaki-2025-htr-image.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/grammatikaki-2025-htr/grammatikaki-2025-htr-image:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/grammatikaki-2025-htr/",
        "__class": "Publication"
    },
    {
        "id": "pahr-2024-ieo",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/199161",
        "title": "Investigating the Effect of Operation Mode and Manifestation on Physicalizations of Dynamic Processes",
        "date": "2024-06",
        "abstract": "We conducted a study to systematically investigate the communication of complex dynamic processes along a two-dimensional design space, where the axes represent a representation's manifestation (physical or virtual) and operation (manual or automatic). We exemplify the design space on a model embodying cardiovascular pathologies, represented by a mechanism where a liquid is pumped into a draining vessel, with complications illustrated through modifications to the model. The results of a mixed-methods lab study with 28 participants show that both physical manifestation and manual operation have a strong positive impact on the audience's engagement. The study does not show a measurable knowledge increase with respect to cardiovascular pathologies using manually operated physical representations. However, subjectively, participants report a better understanding of the process—mainly through non-visual cues like haptics, but also auditory cues. The study also indicates an increased task load when interacting with the process, which, however, seems to play a minor role for the participants. Overall, the study shows a clear potential of physicalization for the communication of complex dynamic processes, which only fully unfold if observers have to chance to interact with the process.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 2634,
            "image_height": 1232,
            "name": "pahr-2024-ieo-teaser.png",
            "type": "image/png",
            "size": 172079,
            "path": "Publication:pahr-2024-ieo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects,date_from,date_to,event,lecturer,pages_from,pages_to",
        "repositum_presentation_id": null,
        "authors": [
            1813,
            1850,
            1464,
            1110,
            1410
        ],
        "articleno": "e15106",
        "date_from": "2024-06-27",
        "date_to": "2024-06-31",
        "doi": "10.1111/cgf.15106",
        "event": "EUROVIS",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1813
        ],
        "number": "3",
        "pages": "12",
        "pages_from": "1",
        "pages_to": "12",
        "publisher": "WILEY",
        "volume": "43",
        "research_areas": [
            "InfoVis",
            "MedVis",
            "Perception"
        ],
        "keywords": [
            "Data Physicalization",
            "Study",
            "Cardiovascular Diseases",
            "Edutainment",
            "Human Computer Interaction (HCI)",
            "Mixed Methods"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 2634,
                "image_height": 1232,
                "name": "pahr-2024-ieo-teaser.png",
                "type": "image/png",
                "size": 172079,
                "path": "Publication:pahr-2024-ieo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/pahr-2024-ieo-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/pahr-2024-ieo/",
        "__class": "Publication"
    },
    {
        "id": "matt-2024-cvil",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/199888",
        "title": "cVIL: Class-Centric Visual Interactive Labeling",
        "date": "2024-05-27",
        "abstract": "We present cVIL, a class-centric approach to visual interactive labeling, which facilitates human annotation of large and complex image data sets. cVIL uses different property measures to support instance labeling for labeling difficult instances and batch labeling to quickly label easy instances. Simulated experiments reveal that cVIL with batch labeling can outperform traditional labeling approaches based on active learning. In a user study, cVIL led to better accuracy and higher user preference compared to a traditional instance-based visual interactive labeling approach based on 2D scatterplots.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Screenshot of cVIL as employed in the user study",
            "filetitle": "cVIL teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1104,
            "image_height": 449,
            "name": "matt-2024-cvil-cVIL teaser.png",
            "type": "image/png",
            "size": 293869,
            "path": "Publication:matt-2024-cvil",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "abstract,projects,open_access",
        "repositum_presentation_id": null,
        "authors": [
            5263,
            5370,
            1110
        ],
        "booktitle": "Eurographics Proceedings",
        "date_from": "2024-05-27",
        "date_to": "2024-05-27",
        "doi": "10.2312/eurova.20241113",
        "editor": "El-Assady, Mennatallah and Schulz, Hans-Jorg",
        "event": "EuroVis Workshop on Visual Analytics (EuroVA 2024)",
        "isbn": "978-3-03868-056-7",
        "lecturer": [
            5263
        ],
        "location": "Aarhus",
        "open_access": "yes",
        "pages": "6",
        "publisher": "Eurographics",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Visual Analytics",
            "Interactive Machine Learning",
            "User Interface Design"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/server/api/core/bitstreams/c18fafcc-b4b4-4e51-bd2f-cec056c6d93a/content",
                "caption": "paper",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://gitlab.tuwien.ac.at/e193-02-jde/lava",
                "caption": "GitLab",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Screenshot of cVIL as employed in the user study",
                "filetitle": "cVIL teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1104,
                "image_height": 449,
                "name": "matt-2024-cvil-cVIL teaser.png",
                "type": "image/png",
                "size": 293869,
                "path": "Publication:matt-2024-cvil",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/",
        "__class": "Publication"
    },
    {
        "id": "heim-2024-accustripes",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/197495",
        "title": "AccuStripes: Visual exploration and comparison of univariate data distributions using color and binning",
        "date": "2024-04",
        "abstract": "Understanding and analyzing univariate distributions of data in terms of their shapes as well as their specific characteristics, regarding gaps, spikes, or outliers, is crucial in many scientific disciplines. In this paper, we propose a design space composed of the visual channels position and color for representing accumulated distributions. The designs are a mixture of color-coded stripes with density lines. The width and coloring of the stripes is based on the applied binning technique. In a crowd-sourced experiment we explore a subspace, called the AccuStripes (i.e., “accumulated stripes”) design space, consisting of nine representations. These AccuStripes designs integrate three composition strategies (color only, overlay, filled curve) with three binning techniques, one uniform (UB) and two adaptive methods, namely Bayesian Blocks (BB) and Jenks’ Natural Breaks (NB). We evaluate the accuracy, efficiency, and confidence ratings of the nine AccuStripes designs for structural estimation and comparison tasks. Across all study tasks, the overlay composition was found to be most accurate and preferred by observers. Furthermore, the results demonstrate that while no binning method performed best in both identification and comparison, detection of structures using adaptive binning was the most accurate one. For validation we compared the best AccuStripes’ design, i.e., the overlay composition, to line charts. Our results show that the AccuStripes’ design outperformed the line charts in accuracy for all study tasks.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "AccuStripes - Graphical Abstract",
            "filetitle": "accuStripes",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1333,
            "image_height": 428,
            "name": "heim-2024-accustripes-accuStripes.png",
            "type": "image/png",
            "size": 366335,
            "path": "Publication:heim-2024-accustripes",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes:thumb{{size}}.png"
        },
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1354,
            1355,
            1110,
            166,
            611
        ],
        "articleno": "103906",
        "doi": "10.1016/j.cag.2024.103906",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "119",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Adaptive binning",
            "Crowd-sourced experiment",
            "Univariate data distributions",
            "Visual analysis"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849324000414",
                "caption": "paper",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "AccuStripes - Graphical Abstract",
                "filetitle": "accuStripes",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1333,
                "image_height": 428,
                "name": "heim-2024-accustripes-accuStripes.png",
                "type": "image/png",
                "size": 366335,
                "path": "Publication:heim-2024-accustripes",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/heim-2024-accustripes-accuStripes:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/heim-2024-accustripes/",
        "__class": "Publication"
    },
    {
        "id": "bayat-2024-awt",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/197494",
        "title": "A Workflow to Visually Assess Interobserver Variability in Medical Image Segmentation",
        "date": "2024-01",
        "abstract": "We introduce a workflow for the visual assessment of interobserver variability in medical image segmentation. Image segmentation is a crucial step in the diagnosis, prognosis, and treatment of many diseases. Despite the advancements in autosegmentation, clinical practice widely relies on manual delineations performed by radiologists. Our work focuses on designing a solution for understanding the radiologists' thought processes during segmentation and for unveiling reasons that lead to interobserver variability. To this end, we propose a visual analysis tool connecting multiple radiologists' delineation processes with their outcomes, and we demonstrate its potential in a case study.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1693,
            1110,
            1410
        ],
        "doi": "10.1109/MCG.2023.3333475",
        "issn": "1558-1756",
        "journal": "IEEE Computer Graphics and Applications",
        "number": "1",
        "pages": "9",
        "pages_from": "86",
        "pages_to": "94",
        "publisher": "IEEE COMPUTER SOC",
        "volume": "44",
        "research_areas": [],
        "keywords": [
            "Humans",
            "Observer Variation",
            "Workflow",
            "Algorithms"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/bayat-2024-awt/",
        "__class": "Publication"
    },
    {
        "id": "boffi-2024-bagginghook",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/197699",
        "title": "BaggingHook: Selecting Moving Targets by Pruning Distractors Away for Intention-Prediction Heuristics in Dense 3D Environments",
        "date": "2024",
        "abstract": "Selecting targets in dense, dynamic 3D environments presents a significant challenge. In this study, we introduce two novel selection techniques based on distractor pruning to assist users in selecting targets moving unpredictably: BaggingHook and AutoBaggingHook. Both are built upon the Hook intention-prediction heuristic, which continuously measures the distance between the user's cursor and each object to compute per-object scores and estimate the intended target. Our techniques reduce the number of targets in the environment, making heuristic convergence potentially faster. Once pruned away, distractors are also made semi-transparent to reduce occlusion and the overall difficulty of the task. However, their motion is not altered, so that users can still perceive the dynamics of the environment. We designed two pruning approaches: BaggingHook lets users manually prune distractors away, while AutoBaggingHook uses automated, score-based pruning. We conducted a user study in a virtual reality setting inspired by molecular dynamics simulations, featuring crowded scenes of objects moving fast and unpredictably, in 3D. We compared both proposed techniques to the Hook baseline under more challenging circumstances than it had previously been tested. Our results show that AutoBaggingHook was the fastest, and did not lead to higher error rates. BaggingHook, on the other hand, was preferred by the majority of participants, due to the greater degree of control it provides to users, leading some to see entertainment value in its use. This work shows the potential benefits of varying the types of inputs used in intention-prediction heuristics, not just to improve performance, but also to reduce occlusion, overall task load, and improve user experience.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5365,
            5366,
            1110,
            5367,
            171
        ],
        "booktitle": "2024 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
        "date_from": "2024-03-16",
        "date_to": "2024-03-21",
        "doi": "10.1109/VR58804.2024.00110",
        "event": "2024 IEEE Conference Virtual Reality and 3D User Interfaces (VR)",
        "isbn": "9798350374025",
        "lecturer": [
            5368
        ],
        "location": "Orlando, FL",
        "pages": "11",
        "pages_from": "913",
        "pages_to": "923",
        "research_areas": [],
        "keywords": [
            "Algorithms",
            "AR/VR/Immersive",
            "Human-Subjects Qualitative Studies",
            "Human-Subjects Quantitative Studies",
            "Interaction Design",
            "Mobile",
            "Specialized Input/Display Hardware"
        ],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/boffi-2024-bagginghook/",
        "__class": "Publication"
    },
    {
        "id": "eschner-2023-evl",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Echtzeitvisualisierung von Lawinenrisiko basierend auf hochauflösenden Geodaten",
        "date": "2023-11-18",
        "abstract": "Um das Lawinenrisiko auf Touren abzuschätzen, konsultieren Tourengeher·innen typischerweise vorab den aktuellen Lawinenlagebericht (LLB) sowie die Geländeeigenschaften, wie Hangneigung, Höhe und Exposition der geplanten Tour auf einer Karte. Reduktionsmethoden wie Stop-or-Go oder die SnowCard können sowohl bei der Planung als auch vor Ort angewandt werden, um das Risiko abzuschätzen. Bei korrekter Anwendung dieser Methoden könnte ein Großteil der Todesfälle vermieden werden. Die Anwendung umfasst jedoch mehrere kognitiv aufwändige Schritte: Im ersten Schritt müssen Tourengeher·innen die Informationen aus LLB und Karte korrekt verknüpfen und anhand der gewählten Methode interpretieren, um potenziell kritische Regionen entlang der Route vorab identifizieren zu können. Im zweiten Schritt müssen potenziell kritische Regionen auch während der Tour als solche wiedererkannt und vor Ort beurteilt werden. \nUm die Anwendung von Reduktionsmethoden für Wintersportler·innen zu vereinfachen, können die Informationen aus LLB computergestützt mit den Geländeeigenschaften ausgewertet und direkt in einer Karte dargestellt werden. Skitourenguru, beispielsweise, berechnet das Lawinenrisiko entlang vorgegebener Routen und stellt diese in einer 2D Karte dar. Im Vergleich zu 2D Karten erleichtert eine dreidimensionale Darstellung jedoch die Interpretation des Geländes und das Finden von Routen. Unsere Hypothese ist daher, dass eine direkte Visualisierung des Lawinenrisikos auf einer detaillierten 3D Karte die Identifikation von potenziell kritischen Regionen einer Route in der Planungsphase, sowie deren Wiedererkennung während der Tour, erleichtert.\nWir stellen eine integrierte 3D Risikovisualisierung vor, welche Daten aus dem aktuellen LLB mit einem hochauflösenden Geländemodell kombiniert und existierende Reduktionsmethoden in Echtzeit auswertet, um das Ergebnis auf einer interaktiven Webseite zu visualisieren.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2076,
            "image_height": 1192,
            "name": "eschner-2023-evl-.png",
            "type": "image/png",
            "size": 4716902,
            "path": "Publication:eschner-2023-evl",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1653,
            1013,
            1110
        ],
        "booktitle": "Lawinensymposium 2023",
        "cfp": {
            "name": "lawinensymposium_cfp.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "3319607",
            "orig_name": "lawinensymposium_cfp.pdf",
            "ext": "pdf"
        },
        "event": "Lawinensymposium 2023",
        "lecturer": [
            1653
        ],
        "location": "Graz",
        "pages_from": "38",
        "pages_to": "43",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://alpinemaps.cg.tuwien.ac.at/ ",
                "caption": "online demo",
                "description": " Demo version of the avalanche risk visualization ",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2076,
                "image_height": 1192,
                "name": "eschner-2023-evl-.png",
                "type": "image/png",
                "size": 4716902,
                "path": "Publication:eschner-2023-evl",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper preprint",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2023-evl-paper preprint.pdf",
                "type": "application/pdf",
                "size": 21355472,
                "path": "Publication:eschner-2023-evl",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-paper preprint.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/eschner-2023-evl-paper preprint:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-evl/",
        "__class": "Publication"
    },
    {
        "id": "eschner-2023-ims",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/187705",
        "title": "Illustrative Motion Smoothing for Attention Guidance in Dynamic Visualizations",
        "date": "2023-06",
        "abstract": "3D animations are an effective method to learn about complex dynamic phenomena, such as mesoscale biological processes. The animators’ goals are to convey a sense of the scene’s overall complexity while, at the same time, visually guiding the user through a story of subsequent events embedded in the chaotic environment. Animators use a variety of visual emphasis techniques to guide the observers’ attention through the story, such as highlighting, halos – or by manipulating motion parameters of the scene. In this paper, we investigate the effect of smoothing the motion of contextual scene elements to attract attention to focus elements of the story exhibiting high-frequency motion. We conducted a crowdsourced study with 108 participants observing short animations with two illustrative motion smoothing strategies: geometric smoothing through noise reduction of contextual motion trajectories and visual smoothing through motion blur of context items. We investigated the observers’ ability to follow the story as well as the effect of the techniques on speed perception in a molecular scene. Our results show that moderate motion blur significantly improves users’ ability to follow the story. Geometric motion smoothing is less effective but increases the visual appeal of the animation. However, both techniques also slow down the perceived speed of the animation. We discuss the implications of these results and derive design guidelines for animators of complex dynamic visualizations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1400,
            "image_height": 1080,
            "name": "eschner-2023-ims-teaser.png",
            "type": "image/png",
            "size": 1527403,
            "path": "Publication:eschner-2023-ims",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "event,lecturer,location,projects",
        "repositum_presentation_id": null,
        "authors": [
            1653,
            935,
            1110
        ],
        "doi": "10.1111/cgf.14836",
        "event": "EuroVis 2023",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1653
        ],
        "location": "Leipzig, Germany",
        "number": "3",
        "open_access": "yes",
        "pages": "12",
        "pages_from": "361",
        "pages_to": "372",
        "publisher": "WILEY",
        "volume": "42",
        "research_areas": [
            "BioVis",
            "IllVis",
            "Perception"
        ],
        "keywords": [
            "Empirical studies in visualization",
            "Animation"
        ],
        "weblinks": [
            {
                "href": "https://onlinelibrary.wiley.com/doi/10.1111/cgf.14836",
                "caption": "Computer Graphics Forum",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2023-ims-paper.pdf",
                "type": "application/pdf",
                "size": 9072344,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1400,
                "image_height": 1080,
                "name": "eschner-2023-ims-teaser.png",
                "type": "image/png",
                "size": 1527403,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2023-ims-video.mp4",
                "type": "video/mp4",
                "size": 16270968,
                "path": "Publication:eschner-2023-ims",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/eschner-2023-ims-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/eschner-2023-ims/",
        "__class": "Publication"
    },
    {
        "id": "indirectBiasLanguageModels-2023",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/187890",
        "title": "Visual Exploration of Indirect Bias in Language Models",
        "date": "2023-06",
        "abstract": "Language models are trained on large text corpora that often include stereotypes. This can lead to direct or indirect bias in downstream applications. In this work, we present a method for interactive visual exploration of indirect multiclass bias learned by contextual word embeddings. We introduce a new indirect bias quantification score and present two interactive visualizations to explore interactions between multiple non-sensitive concepts (such as sports, occupations, and beverages) and sensitive attributes (such as gender or year of birth) based on this score.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1904,
            "image_height": 619,
            "name": "indirectBiasLanguageModels-2023-teaser.png",
            "type": "image/png",
            "size": 81826,
            "path": "Publication:indirectBiasLanguageModels-2023",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,location,keywords",
        "repositum_presentation_id": null,
        "authors": [
            5211,
            1110
        ],
        "booktitle": "EuroVis 2023 - Short Papers",
        "date_from": "2023-06-12",
        "date_to": "2023-06-16",
        "doi": "10.2312/evs.20231034",
        "event": "25th EG Conference on Visualization (EuroVis 2023)",
        "isbn": "978-3-03868-219-6",
        "lecturer": [
            1110
        ],
        "location": "Leipzig, Germany",
        "open_access": "yes",
        "pages": "5",
        "publisher": "The Eurographics Association",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "visual analytics",
            "language models",
            "bias"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/handle/10.2312/evs20231034",
                "caption": "Eurographics Digital Library",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://www.cg.tuwien.ac.at/IndirectBiasVis",
                "caption": "online demo",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-paper.pdf",
                "type": "application/pdf",
                "size": 465161,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-supplement.pdf",
                "type": "application/pdf",
                "size": 124948,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-supplement:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1904,
                "image_height": 619,
                "name": "indirectBiasLanguageModels-2023-teaser.png",
                "type": "image/png",
                "size": 81826,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-video.mp4",
                "type": "video/mp4",
                "size": 51440954,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/",
        "__class": "Publication"
    },
    {
        "id": "webGPU_aggregateVis-2023",
        "type_id": "poster",
        "tu_id": null,
        "repositum_id": "20.500.12708/187891",
        "title": "WebGPU for Scalable Client-Side Aggregate Visualization",
        "date": "2023-06",
        "abstract": "WebGPU is a new graphics API, which now provides compute shaders for general purpose GPU operations in web browsers. We demonstrate the potential of this new technology for scalable information visualization by showing how to filter and aggregate a spatio-temporal dataset with millions of temperature measurements for real-time interactive exploration of climate change.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1460,
            "image_height": 820,
            "name": "webGPU_aggregateVis-2023-teaser.png",
            "type": "image/png",
            "size": 1143735,
            "path": "Publication:webGPU_aggregateVis-2023",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "abstract,date",
        "repositum_presentation_id": null,
        "authors": [
            1869,
            5238,
            1110
        ],
        "booktitle": "EuroVis 2023 - Posters",
        "date_from": "2023-06-12",
        "date_to": "2023-06-16",
        "doi": "10.2312/evp.20231079",
        "event": "25th EG Conference on Visualization (EuroVis 2023)",
        "isbn": "978-3-03868-220-2",
        "lecturer": [
            1110
        ],
        "location": "Leipzig",
        "pages": "3",
        "pages_from": "105",
        "pages_to": "107",
        "publisher": "Eurographics",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Information visualization",
            "Aggregate visualization",
            "Scalable visualization",
            "WebGPU"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/xmlui/handle/10.2312/evp20231079",
                "caption": "Eurographics Digital Library",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://ccexplorer.github.io/",
                "caption": "Climate Change Explorer",
                "description": "online demo",
                "main_file": 1
            },
            {
                "href": "https://cde.gkdev.at/cde/?db=cdata_interp_2bit_0.lzma",
                "caption": "large demo",
                "description": "Demo with more data points",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "extended abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "webGPU_aggregateVis-2023-extended abstract.pdf",
                "type": "application/pdf",
                "size": 1466660,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-extended abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-extended abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "webGPU_aggregateVis-2023-poster.pdf",
                "type": "application/pdf",
                "size": 1114759,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1460,
                "image_height": 820,
                "name": "webGPU_aggregateVis-2023-teaser.png",
                "type": "image/png",
                "size": 1143735,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/",
        "__class": "Publication"
    },
    {
        "id": "sMolBoxes_2022",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/81180",
        "title": "sMolBoxes: Dataflow Model for Molecular Dynamics Exploration",
        "date": "2022-10",
        "abstract": "We present sMolBoxes, a dataflow representation for the exploration and analysis of long molecular dynamics (MD) simulations. When MD simulations reach millions of snapshots, a frame-by-frame observation is not feasible anymore. Thus, biochemists rely to a large extent only on quantitative analysis of geometric and physico-chemical properties. However, the usage of abstract methods to study inherently spatial data hinders the exploration and poses a considerable workload. sMolBoxes link quantitative analysis of a user-defined set of properties with interactive 3D visualizations. They enable visual explanations of molecular behaviors, which lead to an efficient discovery of biochemically significant parts of the MD simulation. sMolBoxes follow a node-based model for flexible definition, combination, and immediate evaluation of properties to be investigated. Progressive analytics enable fluid switching between multiple properties, which facilitates hypothesis generation. Each sMolBox provides quick insight to an observed property or function, available in more detail in the bigBox View. The case studies illustrate that even with relatively few sMolBoxes, it is possible to express complex analytical tasks, and their use in exploratory analysis is perceived as more efficient than traditional scripting-based methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1604,
            "image_height": 942,
            "name": "sMolBoxes_2022-teaser.png",
            "type": "image/png",
            "size": 417094,
            "path": "Publication:sMolBoxes_2022",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2022/sMolBoxes_2022/sMolBoxes_2022-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/sMolBoxes_2022/sMolBoxes_2022-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5182,
            1110,
            1733,
            1498,
            5183,
            1248,
            1254
        ],
        "date_from": "2022-10",
        "date_to": "2022-10",
        "doi": "10.1109/TVCG.2022.3209411",
        "event": "IEEE VIS 2022",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            5182
        ],
        "pages": "10",
        "pages_from": "1",
        "pages_to": "10",
        "publisher": "Institute of Electrical and Electronics Engineers (IEEE)",
        "research_areas": [
            "BioVis",
            "InfoVis"
        ],
        "keywords": [
            "Molecular Dynamics",
            "structure",
            "node-based visualization",
            "progressive analytics",
            "proteins",
            "Analytical models",
            "Biological system modeling",
            "Three-dimensional displays",
            "Computational modeling",
            "Task analysis",
            "Animation"
        ],
        "weblinks": [
            {
                "href": "https://arxiv.org/abs/2209.11771",
                "caption": "arxiv",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1604,
                "image_height": 942,
                "name": "sMolBoxes_2022-teaser.png",
                "type": "image/png",
                "size": 417094,
                "path": "Publication:sMolBoxes_2022",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/sMolBoxes_2022/sMolBoxes_2022-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/sMolBoxes_2022/sMolBoxes_2022-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2022/sMolBoxes_2022/",
        "__class": "Publication"
    },
    {
        "id": "grossmann-2022-conceptSplatters",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/152292",
        "title": "Concept splatters: Exploration of latent spaces based on human interpretable concepts",
        "date": "2022-04",
        "abstract": "Similarity maps show dimensionality-reduced activation vectors of a high number of data points and thereby can help to understand which features a neural network has learned from the data. However, similarity maps have severely limited expressiveness for large datasets with hundreds of thousands of data instances and thousands of labels, such as ImageNet or word2vec. In this work, we present “concept splatters” as a scalable method to interactively explore similarities between data instances as learned by the machine through the lens of human-understandable semantics. Our approach enables interactive exploration of large latent spaces on multiple levels of abstraction. We present a web-based implementation that supports interactive exploration of tens of thousands of word vectors of word2vec and CNN feature vectors of ImageNet. In a qualitative study, users could effectively discover spurious learning strategies of the network, ambiguous labels, and could characterize reasons for potential confusion.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1055,
            "image_height": 794,
            "name": "grossmann-2022-conceptSplatters-teaser.png",
            "type": "image/png",
            "size": 1013977,
            "path": "Publication:grossmann-2022-conceptSplatters",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,projects",
        "repositum_presentation_id": null,
        "authors": [
            1366,
            166,
            1110
        ],
        "doi": "10.1016/j.cag.2022.04.013",
        "issn": "1873-7684",
        "journal": "Computers and Graphics",
        "open_access": "yes",
        "pages": "12",
        "pages_from": "73",
        "pages_to": "84",
        "publisher": "Elsevier",
        "volume": "105",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Concept spaces",
            "Latent spaces",
            "Similarity maps",
            "Visual exploratory analysis"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849322000656",
                "caption": "paper",
                "description": "Link to open access paper",
                "main_file": 1
            },
            {
                "href": "https://kontor.cg.tuwien.ac.at/ConceptSplatters/",
                "caption": "online demo",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Detailed findings",
                "filetitle": "supplementary document",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2022-conceptSplatters-supplementary document.pdf",
                "type": "application/pdf",
                "size": 25193252,
                "path": "Publication:grossmann-2022-conceptSplatters",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-supplementary document.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-supplementary document:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1055,
                "image_height": 794,
                "name": "grossmann-2022-conceptSplatters-teaser.png",
                "type": "image/png",
                "size": 1013977,
                "path": "Publication:grossmann-2022-conceptSplatters",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2022-conceptSplatters-video.mp4",
                "type": "video/mp4",
                "size": 49841833,
                "path": "Publication:grossmann-2022-conceptSplatters",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/grossmann-2022-conceptSplatters-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2022/grossmann-2022-conceptSplatters/",
        "__class": "Publication"
    },
    {
        "id": "Alharbi_2021",
        "type_id": "journalpaper_notalk",
        "tu_id": 300116,
        "repositum_id": "20.500.12708/138522",
        "title": "Nanotilus: Generator of Immersive Guided-Tours in Crowded 3D Environments",
        "date": "2021-12-09",
        "abstract": "Immersive virtual reality environments are gaining popularity for studying and exploring crowded three-dimensional structures. When reaching very high structural densities, the natural depiction of the scene produces impenetrable clutter and requires visibility and occlusion management strategies for exploration and orientation. Strategies developed to address the crowdedness in desktop applications, however, inhibit the feeling of immersion. They result in nonimmersive, desktop-style outside-in viewing in virtual reality. This paper proposesNanotilus---a new visibility and guidance approach for very dense environments that generates an endoscopic inside-out experience instead of outside-in viewing, preserving the immersive aspect of virtual reality. The approach consists of two novel, tightly coupled mechanisms that control scene sparsification simultaneously with camera path planning. The sparsification strategy is localized around the camera and is realized as a multiscale, multishell, variety-preserving technique. When Nanotilus dives into the structures to capture internal details residing on multiple scales, it guides the camera using depth-based path planning. In addition to sparsification and path planning, we complete the tour generation with an animation controller, textual annotation, and text-to-visualization conversion. We demonstrate the generated guided tours on mesoscopic biological models -- SARS-CoV-2 and HIV viruses. We evaluate the Nanotilus experience with a baseline outside-in sparsification and navigational technique in a formal user study with 29 participants. While users can maintain a better overview using the outside-in sparsification, the study confirms our hypothesis that Nanotilus leads to stronger engagement and immersion.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "Image",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 540,
            "image_height": 273,
            "name": "Alharbi_2021-Image.JPG",
            "type": "image/jpeg",
            "size": 46269,
            "path": "Publication:Alharbi_2021",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Image.JPG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1791,
            1789,
            1577,
            1110,
            1383,
            1891,
            1285,
            166,
            171
        ],
        "doi": "10.1109/TVCG.2021.3133592",
        "first_published": "2021-12-09",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "open_access": "yes",
        "pages_from": "1",
        "pages_to": "16",
        "research_areas": [
            "BioVis",
            "IllVis",
            "VR"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "Image",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 540,
                "image_height": 273,
                "name": "Alharbi_2021-Image.JPG",
                "type": "image/jpeg",
                "size": 46269,
                "path": "Publication:Alharbi_2021",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Image.JPG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "name": "Alharbi_2021-Paper.pdf",
                "type": "application/pdf",
                "size": 14837965,
                "path": "Publication:Alharbi_2021",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/Alharbi_2021-Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/Alharbi_2021/",
        "__class": "Publication"
    },
    {
        "id": "sorger-2021-egonet",
        "type_id": "journalpaper",
        "tu_id": 300416,
        "repositum_id": "20.500.12708/58630",
        "title": "Egocentric Network Exploration for Immersive Analytics",
        "date": "2021-10",
        "abstract": "To exploit the potential of immersive network analytics for engaging and effective exploration, we promote the metaphor of ``egocentrism'', where data depiction and interaction are adapted to the perspective of the user within a 3D network. Egocentrism has the potential to overcome some of the inherent downsides of virtual environments, e.g., visual clutter and cyber-sickness. To investigate the effect of this metaphor on immersive network exploration, we designed and evaluated interfaces of varying degrees of egocentrism. In a user study, we evaluated the effect of these interfaces on visual search tasks, efficiency of network traversal, spatial orientation, as well as cyber-sickness. Results show that a simple egocentric interface considerably improves visual search efficiency and navigation performance, yet does not decrease spatial orientation or increase cyber-sickness. A distorted occlusion-free view of the neighborhood only marginally improves the user's performance. We tie our findings together in an open online tool for egocentric network exploration, providing actionable insights on the benefits of the egocentric network exploration metaphor.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2921,
            "image_height": 735,
            "name": "sorger-2021-egonet-teaser.png",
            "type": "image/png",
            "size": 2079618,
            "path": "Publication:sorger-2021-egonet",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1072,
            1705,
            1720,
            853,
            1110
        ],
        "cfp": {
            "name": "Call for Papers _ Pacific Graphics 2021.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "246798",
            "orig_name": "Call for Papers _ Pacific Graphics 2021.pdf",
            "ext": "pdf"
        },
        "date_from": "2021-10-18",
        "date_to": "2021-10-21",
        "doi": "10.1111/cgf.14417",
        "event": "Pacific Graphics 2021",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1072
        ],
        "location": "Wellington, NZ",
        "open_access": "no",
        "pages": "12",
        "pages_from": "241",
        "pages_to": "252",
        "publisher": "John Wiley and Sons",
        "volume": "40",
        "research_areas": [
            "InfoVis",
            "NetVis",
            "VR"
        ],
        "keywords": [
            "Computer Graphics and Computer-Aided Design"
        ],
        "weblinks": [
            {
                "href": "https://vis.csh.ac.at/egocentricvr/",
                "caption": "online egocentric network",
                "description": "Online tool for egocentric network exploration leveraging insights gathered at the user study",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2921,
                "image_height": 735,
                "name": "sorger-2021-egonet-teaser.png",
                "type": "image/png",
                "size": 2079618,
                "path": "Publication:sorger-2021-egonet",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "the paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sorger-2021-egonet-the paper.pdf",
                "type": "application/pdf",
                "size": 4211458,
                "path": "Publication:sorger-2021-egonet",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-the paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-the paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sorger-2021-egonet-video.mp4",
                "type": "video/mp4",
                "size": 295211748,
                "path": "Publication:sorger-2021-egonet",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/sorger-2021-egonet-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sorger-2021-egonet/",
        "__class": "Publication"
    },
    {
        "id": "grossmann-2021-layout",
        "type_id": "inproceedings",
        "tu_id": 300287,
        "repositum_id": "20.500.12708/58620",
        "title": "Does the Layout Really Matter? A Study on Visual Model Accuracy Estimation",
        "date": "2021-10",
        "abstract": "In visual interactive labeling, users iteratively assign labels to data items until the machine model reaches an acceptable accuracy. A crucial step of this process is to inspect the model's accuracy and decide whether it is necessary to label additional elements. In scenarios with no or very little labeled data, visual inspection of the predictions is required. Similarity-preserving scatterplots created through a dimensionality reduction algorithm are a common visualization that is used in these cases. Previous studies investigated the effects of layout and image complexity on tasks like labeling. However, model evaluation has not been studied systematically. We present the results of an experiment studying the influence of image complexity and visual grouping of images on model accuracy estimation. We found that users outperform traditional automated approaches when estimating a model's accuracy. Furthermore, while the complexity of images impacts the overall performance, the layout of the items in the plot has little to no effect on estimations.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "In both scatterplots shown here, the percentage of images with correctly predicted class labels (visualized as border color) is over 90%. We found that users can estimate these accuracies fairly well. Image complexity impacts overall performance, but the layout has very little effect on users’ estimations. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 3287,
            "image_height": 1817,
            "name": "grossmann-2021-layout-teaser.png",
            "type": "image/png",
            "size": 879037,
            "path": "Publication:grossmann-2021-layout",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1366,
            1851,
            1690,
            1110
        ],
        "booktitle": "IEEE Visualization Conference (VIS)",
        "cfp": {
            "name": "Short Paper Call for Participation.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "180486",
            "orig_name": "Short Paper Call for Participation.pdf",
            "ext": "pdf"
        },
        "doi": "10.1109/VIS49827.2021.9623326",
        "event": "IEEE Visualization Conference (VIS)",
        "lecturer": [
            1110
        ],
        "open_access": "yes",
        "pages": "5",
        "pages_from": "61",
        "pages_to": "65",
        "publisher": "IEEE Computer Society Press",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://arxiv.org/pdf/2110.07188.pdf",
                "caption": "arxiv",
                "description": "Link to arxiv version of the paper",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-paper.pdf",
                "type": "application/pdf",
                "size": 1951149,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-paper:thumb{{size}}.png"
            },
            {
                "description": "Pre-recorded presentation for VIS 2021",
                "filetitle": "presentation",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-presentation.mp4",
                "type": "video/mp4",
                "size": 28816734,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-presentation:video.mp4"
            },
            {
                "description": "Fast-forward preview video",
                "filetitle": "preview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-preview.mp4",
                "type": "video/mp4",
                "size": 5736268,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-preview:video.mp4"
            },
            {
                "description": "Supplementary document showing study conditions and interface",
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "grossmann-2021-layout-supplement.pdf",
                "type": "application/pdf",
                "size": 44940660,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-supplement:thumb{{size}}.png"
            },
            {
                "description": "In both scatterplots shown here, the percentage of images with correctly predicted class labels (visualized as border color) is over 90%. We found that users can estimate these accuracies fairly well. Image complexity impacts overall performance, but the layout has very little effect on users’ estimations. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 3287,
                "image_height": 1817,
                "name": "grossmann-2021-layout-teaser.png",
                "type": "image/png",
                "size": 879037,
                "path": "Publication:grossmann-2021-layout",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/grossmann-2021-layout-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/grossmann-2021-layout/",
        "__class": "Publication"
    },
    {
        "id": "sietzen-2021-perturber",
        "type_id": "journalpaper",
        "tu_id": 300245,
        "repositum_id": "20.500.12708/55628",
        "title": "Interactive Analysis of CNN Robustness",
        "date": "2021-10",
        "abstract": "While convolutional neural networks (CNNs) have found wide adoption as state-of-the-art models for image-related tasks, their predictions are often highly sensitive to small input perturbations, which the human vision is robust against.\nThis paper presents Perturber, a web-based application that allows users to instantaneously explore how CNN activations and predictions evolve when a 3D input scene is interactively perturbed. Perturber offers a large variety of scene modifications, such as camera controls, lighting and shading effects, background modifications, object morphing, as well as adversarial attacks, to facilitate the discovery of potential vulnerabilities. Fine-tuned model versions can be directly compared for qualitative evaluation of their robustness. Case studies with machine learning experts have shown that Perturber helps users to quickly generate hypotheses about model vulnerabilities and to qualitatively compare model behavior. Using quantitative analyses, we could replicate users' insights with other CNN architectures and input images, yielding new insights about the vulnerability of adversarially trained models. ",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Activations and feature visualizations for neurons associated with complex shapes and curvatures in layer mixed4a in the standard model. Note how rotating the input model causes activation changes for oriented shape detectors. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1423,
            "image_height": 736,
            "name": "sietzen-2021-perturber-teaser.png",
            "type": "image/png",
            "size": 1235572,
            "path": "Publication:sietzen-2021-perturber",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1373,
            1852,
            1853,
            1854,
            1110
        ],
        "cfp": {
            "name": "Call for Papers _ Pacific Graphics 2021.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "246798",
            "orig_name": "Call for Papers _ Pacific Graphics 2021.pdf",
            "ext": "pdf"
        },
        "date_from": "2021-10-18",
        "date_to": "2021-10-21",
        "doi": "10.1111/cgf.14418",
        "event": "Pacific Graphics 2021",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1373
        ],
        "open_access": "yes",
        "pages": "12",
        "pages_from": "253",
        "pages_to": "264",
        "publisher": "John Wiley and Sons",
        "volume": "40",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Computer Graphics and Computer-Aided Design"
        ],
        "weblinks": [
            {
                "href": "http://perturber.stefansietzen.at/",
                "caption": "online tool",
                "description": "Perturber online tool for interactive analysis of CNN robustness",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sietzen-2021-perturber-paper.pdf",
                "type": "application/pdf",
                "size": 10858795,
                "path": "Publication:sietzen-2021-perturber",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-paper:thumb{{size}}.png"
            },
            {
                "description": "Additional use cases and detailed reports from the case study",
                "filetitle": "supplementary document ",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "sietzen-2021-perturber-supplementary document .pdf",
                "type": "application/pdf",
                "size": 24700068,
                "path": "Publication:sietzen-2021-perturber",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-supplementary document .pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-supplementary document :thumb{{size}}.png"
            },
            {
                "description": "Activations and feature visualizations for neurons associated with complex shapes and curvatures in layer mixed4a in the standard model. Note how rotating the input model causes activation changes for oriented shape detectors. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1423,
                "image_height": 736,
                "name": "sietzen-2021-perturber-teaser.png",
                "type": "image/png",
                "size": 1235572,
                "path": "Publication:sietzen-2021-perturber",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sietzen-2021-perturber-video.mp4",
                "type": "video/mp4",
                "size": 215752131,
                "path": "Publication:sietzen-2021-perturber",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/sietzen-2021-perturber-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/sietzen-2021-perturber/",
        "__class": "Publication"
    },
    {
        "id": "wu-2021-vi",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/138953",
        "title": "Visualization working group at TU Wien: Visibile Facimus Quod Ceteri Non Possunt",
        "date": "2021-03",
        "abstract": "Building-up and running a university-based research group is a multi-faceted undertaking. The visualization working group at TU Wien (vis-group) has been internationally active over more than 25 years. The group has been acting in a competitive scientific setting where sometimes contradicting multiple objectives require trade-offs and optimizations. Research-wise the group has been performing basic and applied research in visualization and visual computing. Teaching-wise the group has been involved in undergraduate and graduate lecturing in (medical) visualization and computer graphics. To be scientifically competitive requires to constantly expose the group and its members to a strong international competition at the highest level. This necessitates to shield the members against the ensuing pressures and demands and provide (emotional) support and encouragement. Internally, the vis-group has developed a unique professional and social interaction culture: work and celebrate, hard and together. This has crystallized into a nested, recursive, and triangular organization model, which concretizes what it takes to make a research group successful. The key elements are the creative and competent vis-group members who collaboratively strive for (scientific) excellence in a socially enjoyable environment.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 256,
            "image_height": 192,
            "name": "wu-2021-vi-image.png",
            "type": "image/png",
            "size": 37909,
            "path": "Publication:wu-2021-vi",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1464,
            1170,
            1366,
            1285,
            1383,
            1263,
            1577,
            935,
            1410,
            171,
            1110,
            166
        ],
        "doi": "10.1016/j.visinf.2021.02.003",
        "journal": "Visual Informatics",
        "open_access": "yes",
        "pages_from": "76",
        "pages_to": "84",
        "volume": "5",
        "research_areas": [],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 256,
                "image_height": 192,
                "name": "wu-2021-vi-image.png",
                "type": "image/png",
                "size": 37909,
                "path": "Publication:wu-2021-vi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wu-2021-vi-paper.pdf",
                "type": "application/pdf",
                "size": 4057176,
                "path": "Publication:wu-2021-vi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/wu-2021-vi-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/wu-2021-vi/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2021-leo",
        "type_id": "journalpaper_notalk",
        "tu_id": 301684,
        "repositum_id": "20.500.12708/138840",
        "title": "Linking unstructured evidence to structured observations",
        "date": "2021-01-14",
        "abstract": "Many professionals, like journalists, writers, or consultants, need to acquire information from various sources, make sense of this unstructured evidence, structure their observations, and finally create and deliver their product, such as a report or a presentation. In formative interviews, we found that tools allowing structuring of observations are often disconnected from the corresponding evidence. Therefore, we designed a sensemaking environment with a flexible observation graph that visually ties together evidence in unstructured documents with the user’s structured knowledge. This is achieved through bi-directional deep links between highlighted document portions and nodes in the observation graph. In a controlled study, we compared users’ sensemaking strategies using either the observation graph or a simple text editor on a large display. Results show that the observation graph represents a holistic, compact representation of users’ observations, which can be linked to unstructured evidence on demand. In contrast, users taking textual notes required much more display space to spatially organize source documents containing unstructured evidence. This implies that spatial organization is a powerful strategy to structure observations even if the available space is limited.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "observation graph",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1920,
            "image_height": 700,
            "name": "waldner-2021-leo-observation graph.png",
            "type": "image/png",
            "size": 523772,
            "path": "Publication:waldner-2021-leo",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-observation graph.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-observation graph:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1477,
            202,
            1690
        ],
        "doi": "10.1177/1473871620986249",
        "journal": "Information Visualization",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "mind map",
            "concept map",
            "observation graph",
            "visual links",
            "sensemaking"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "observation graph",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1920,
                "image_height": 700,
                "name": "waldner-2021-leo-observation graph.png",
                "type": "image/png",
                "size": 523772,
                "path": "Publication:waldner-2021-leo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-observation graph.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-observation graph:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Paper",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "name": "waldner-2021-leo-Paper.pdf",
                "type": "application/pdf",
                "size": 4071473,
                "path": "Publication:waldner-2021-leo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-Paper:thumb{{size}}.png"
            },
            {
                "description": "accepted version",
                "filetitle": "preprint",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2021-leo-preprint.pdf",
                "type": "application/pdf",
                "size": 6410358,
                "path": "Publication:waldner-2021-leo",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-preprint.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/waldner-2021-leo-preprint:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2021/waldner-2021-leo/",
        "__class": "Publication"
    },
    {
        "id": "reina-2020-mtv",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/141135",
        "title": "The moving target of visualization software for an increasingly complex world",
        "date": "2020-04",
        "abstract": "Visualization has evolved into a mature scientific field and it has also become widely accepted as a standard approach in diverse fields, including physics, life sciences, and business intelligence. However, despite its successful development, there are still many open research questions that require customized implementations in order to explore and establish concepts, and to perform experiments and take measurements. Many methods and tools have been developed and published but most are stand-alone prototypes and have not reached a mature state that can be used in a reliable manner by collaborating domain scientists or a wider audience. In this study, we discuss the challenges, solutions, and open research questions that affect the development of sophisticated, relevant, and novel scientific visualization solutions with minimum overheads. We summarize and discuss the results of a recent National Institute of Informatics Shonan seminar on these topics.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 301,
            "image_height": 121,
            "name": "reina-2020-mtv-teaser.jpg",
            "type": "image/jpeg",
            "size": 15741,
            "path": "Publication:reina-2020-mtv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/reina-2020-mtv-teaser.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/reina-2020-mtv-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1750,
            1751,
            235,
            231,
            1110,
            1752,
            1248,
            951,
            1753,
            1754,
            166,
            1249
        ],
        "doi": "https://doi.org/10.1016/j.cag.2020.01.005",
        "journal": "Computers & Graphics",
        "pages_from": "12",
        "pages_to": "29",
        "volume": "87",
        "research_areas": [],
        "keywords": [
            "Software engineering",
            "Visualization",
            "Visualization community",
            "Visualization research",
            "Visualization software"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849320300078",
                "caption": "paper",
                "description": "Online paper on ScienceDirect",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 301,
                "image_height": 121,
                "name": "reina-2020-mtv-teaser.jpg",
                "type": "image/jpeg",
                "size": 15741,
                "path": "Publication:reina-2020-mtv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/reina-2020-mtv-teaser.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/reina-2020-mtv-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/reina-2020-mtv/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2020-tbg",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/141146",
        "title": "Interactive exploration of large time-dependent bipartite graphs",
        "date": "2020-04",
        "abstract": "Bipartite graphs are typically visualized using linked lists or matrices, but these visualizations neither scale well nor do they convey temporal development. We present a new interactive exploration interface for large, time-dependent bipartite graphs. We use two clustering techniques to build a hierarchical aggregation supporting different exploration strategies. Aggregated nodes and edges are visualized as linked lists with nested time series. We demonstrate two use cases: finding advertising expenses of public authorities following similar temporal patterns and comparing author-keyword co-occurrences across time. Through a user study, we show that linked lists with hierarchical aggregation lead to more insights than without.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Dynamic BicFlows with nested time series visualization per cluster per set.",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1121,
            "image_height": 936,
            "name": "waldner-2020-tbg-teaser.png",
            "type": "image/png",
            "size": 250456,
            "path": "Publication:waldner-2020-tbg",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/waldner-2020-tbg-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/waldner-2020-tbg-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1378,
            166
        ],
        "doi": "https://doi.org/10.1016/j.cola.2020.100959",
        "journal": "Journal of Computer Languages",
        "volume": "57",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "Information visualization",
            "Bipartite graphs",
            "Clustering",
            "Time series data",
            "Insight-based evaluation"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S2590118420300198",
                "caption": "paper",
                "description": "Open Access article at ScienceDirect",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Dynamic BicFlows with nested time series visualization per cluster per set.",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1121,
                "image_height": 936,
                "name": "waldner-2020-tbg-teaser.png",
                "type": "image/png",
                "size": 250456,
                "path": "Publication:waldner-2020-tbg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/waldner-2020-tbg-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/waldner-2020-tbg-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2020/waldner-2020-tbg/",
        "__class": "Publication"
    },
    {
        "id": "sorger-2019-odn",
        "type_id": "inproceedings",
        "tu_id": 284199,
        "repositum_id": null,
        "title": "Immersive Analytics of Large Dynamic Networks via Overview and Detail Navigation",
        "date": "2019-12",
        "abstract": "Analysis of large dynamic networks is a thriving research field, typically relying on 2D graph representations. The advent of affordable head mounted displays sparked new interest in the potential of 3D visualization for immersive network analytics. Nevertheless, most solutions do not scale well with the number of nodes and edges and rely on conventional fly- or walk-through navigation. In this paper, we present a novel approach for the exploration of large dynamic graphs in virtual reality that interweaves two navigation metaphors: overview exploration and immersive detail analysis. We thereby use the potential of state-of-the-art VR headsets, coupled with a web-based 3D rendering engine that supports heterogeneous input modalities to enable ad-hoc immersive network analytics. We validate our approach through a performance evaluation and a case study with experts analyzing medical data.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "detail view",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1600,
            "image_height": 900,
            "name": "sorger-2019-odn-detail view.jpg",
            "type": "image/jpeg",
            "size": 203956,
            "path": "Publication:sorger-2019-odn",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-detail view.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-detail view:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1072,
            1110,
            853,
            1705
        ],
        "booktitle": "2nd International Conference on Artificial Intelligence & Virtual Reality",
        "cfp": {
            "name": "IEEE AIVR 2019.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "203068",
            "orig_name": "IEEE AIVR 2019.pdf",
            "ext": "pdf"
        },
        "event": "AIVR 2019",
        "lecturer": [
            1072
        ],
        "location": "San Diego, California, USA",
        "open_access": "yes",
        "organization": "IEEE",
        "pages_from": "144",
        "pages_to": "151",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "Immersive Network Analytics",
            "Web-Based Visualization",
            "Dynamic Graph Visualization"
        ],
        "weblinks": [
            {
                "href": "https://arxiv.org/pdf/1910.06825.pdf",
                "caption": "arxiv preprint",
                "description": null,
                "main_file": 1
            },
            {
                "href": "http://aivr.science.uu.nl/",
                "caption": "IEEE AIVR 2019",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "detail view",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1600,
                "image_height": 900,
                "name": "sorger-2019-odn-detail view.jpg",
                "type": "image/jpeg",
                "size": 203956,
                "path": "Publication:sorger-2019-odn",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-detail view.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-detail view:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sorger-2019-odn-video.mp4",
                "type": "video/mp4",
                "size": 97540271,
                "path": "Publication:sorger-2019-odn",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/sorger-2019-odn-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sorger-2019-odn/",
        "__class": "Publication"
    },
    {
        "id": "sietzen-ifv-2019",
        "type_id": "otherreviewed",
        "tu_id": null,
        "repositum_id": null,
        "title": "Interactive Feature Visualization in the Browser",
        "date": "2019-10",
        "abstract": "Excellent explanations of feature visualization already exist in the form of interactive articles, e.g. DeepDream, Feature Visualization, The Building Blocks of Interpretability, Activation Atlas, Visualizing GoogLeNet Classes. They mostly rely on curated prerendered visualizations, additionally providing colab notebooks or public repositories allowing the reader to reproduce those results. While precalculated visualizations have many advantages (directability, more processing budget), they are always discretized samples of a continuous parameter space. In the spirit of Tensorflow Playground, this project aims at providing a fully interactive interface to some basic functionality of the originally Python-based Lucid library, roughly corresponding to the concepts presented in the \"Feature Visualization\" article. The user is invited to explore the effect of parameter changes in a playful way and without requiring any knowledge of programming, enabled by an implementation on top of TensorFlow.js. Live updates of the generated input image as well as feature map activations should give the user a visual intuition to the otherwise abstract optimization process. Further, this interface opens the domain of feature visualization to non-experts, as no scripting is required.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "screenshot",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 759,
            "image_height": 614,
            "name": "sietzen-ifv-2019-screenshot.png",
            "type": "image/png",
            "size": 293509,
            "path": "Publication:sietzen-ifv-2019",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-screenshot.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-screenshot:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1373,
            1110
        ],
        "booktitle": "Proceedings of the Workshop on Visualization for AI explainability (VISxAI)",
        "cfp": {
            "name": "2nd VISxAI Workshop at IEEE VIS 2019.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "409701",
            "orig_name": "2nd VISxAI Workshop at IEEE VIS 2019.pdf",
            "ext": "pdf"
        },
        "editor": "El-Assady, Mennatallah and Chau, Duen Horng (Polo) and Hohman, Fred and Perer, Adam and Strobelt, Hendrik and Viégas, Fernanda",
        "event": "Workshop on Visualization for AI explainability (VISxAI) at IEEE VIS 2019",
        "lecturer": [
            1373
        ],
        "location": "Vancouver",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "http://visxai.stefansietzen.at/",
                "caption": "Explainable URL",
                "description": "Peer-reviewed submission for the workshop. ",
                "main_file": 1
            },
            {
                "href": "https://visxai.io/",
                "caption": "VISxAI Workshop 2019",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "screenshot",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 759,
                "image_height": 614,
                "name": "sietzen-ifv-2019-screenshot.png",
                "type": "image/png",
                "size": 293509,
                "path": "Publication:sietzen-ifv-2019",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-screenshot.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-screenshot:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "sietzen-ifv-2019-video.mp4",
                "type": "video/mp4",
                "size": 51123687,
                "path": "Publication:sietzen-ifv-2019",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/sietzen-ifv-2019-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/sietzen-ifv-2019/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2019-rld",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "A Comparison of Radial and Linear Charts for Visualizing Daily Patterns",
        "date": "2019-10",
        "abstract": "Radial charts are generally considered less effective than linear charts. Perhaps the only exception is in visualizing periodical time-dependent data, which is believed to be naturally supported by the radial layout. It has been demonstrated that the drawbacks of radial charts outweigh the benefits of this natural mapping. Visualization of daily patterns, as a special case, has not been systematically evaluated using radial charts. In contrast to yearly or weekly recurrent trends, the analysis of daily patterns on a radial chart may benefit from our trained skill on reading radial clocks that are ubiquitous in our culture. In a crowd-sourced experiment with 92 non-expert users, we evaluated the accuracy, efficiency, and subjective ratings of radial and linear charts for visualizing daily traffic accident patterns. We systematically compared juxtaposed 12-hours variants and single 24-hours variants for both layouts in four low-level tasks and one high-level interpretation task. Our results show that over all tasks, the most elementary 24-hours linear bar chart is most accurate and efficient and is also preferred by the users. This provides strong evidence for the use of linear layouts – even for visualizing periodical daily patterns.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Daily patterns visualized in a 24-hours radial chart. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 497,
            "image_height": 474,
            "name": "waldner-2019-rld-teaser.png",
            "type": "image/png",
            "size": 53565,
            "path": "Publication:waldner-2019-rld",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1121,
            581,
            1204,
            1122,
            235
        ],
        "cfp": {
            "name": "IEEE VIS 2019 Call For Participation.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "545751",
            "orig_name": "IEEE VIS 2019 Call For Participation.pdf",
            "ext": "pdf"
        },
        "date_from": "2019-10-20",
        "date_to": "2019-10-25",
        "doi": "https://doi.org/10.1109/TVCG.2019.2934784",
        "event": "IEEE VIS InfoVis",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1110
        ],
        "location": "Vancouver, Canada",
        "pages_from": "1033",
        "pages_to": "1042",
        "volume": "26",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "radial charts",
            "time series data",
            "daily patterns",
            "crowd-sourced experiment"
        ],
        "weblinks": [
            {
                "href": "https://vimeo.com/371939694",
                "caption": "Presentation recording from IEEE VIS 2019 (VGTC Vimeo)",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-paper.pdf",
                "type": "application/pdf",
                "size": 892625,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-paper:thumb{{size}}.png"
            },
            {
                "description": "Powerpoint slides of presentation at IEEE VIS 2019 (do not contain animations and videos). ",
                "filetitle": "slides ",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-slides .pdf",
                "type": "application/pdf",
                "size": 12853249,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-slides .pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-slides :thumb{{size}}.png"
            },
            {
                "description": "Supplemental information about the user study",
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-supplement.pdf",
                "type": "application/pdf",
                "size": 2511402,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-supplement:thumb{{size}}.png"
            },
            {
                "description": "Daily patterns visualized in a 24-hours radial chart. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 497,
                "image_height": 474,
                "name": "waldner-2019-rld-teaser.png",
                "type": "image/png",
                "size": 53565,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video preview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2019-rld-video preview.mp4",
                "type": "video/mp4",
                "size": 3082628,
                "path": "Publication:waldner-2019-rld",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/waldner-2019-rld-video preview:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldner-2019-rld/",
        "__class": "Publication"
    },
    {
        "id": "2019-ic",
        "type_id": "techreport",
        "tu_id": 282845,
        "repositum_id": null,
        "title": "Collecting and Structuring Information in the Information Collage",
        "date": "2019-08",
        "abstract": "Knowledge workers, such as scientists, journalists, or consultants, adaptively seek, gather, and consume information. These processes are often inefficient as existing user interfaces provide limited possibilities to combine information from various sources and different formats into a common knowledge representation. In this paper, we present the concept of an information collage (IC) -- a web browser extension combining manual spatial organization of gathered information fragments and automatic text analysis for interactive content exploration and expressive visual summaries. We used IC for case studies with knowledge workers from different domains and longer-term field studies over a period of one month. We identified three different ways in which users collect and structure information and provide design recommendations on how to support these observed usage strategies.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 1243,
            "image_height": 832,
            "name": "2019-ic-teaser.png",
            "type": "image/png",
            "size": 279699,
            "path": "Publication:2019-ic",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1211,
            1690,
            1110
        ],
        "number": "TR-193-02-2019-2",
        "research_areas": [],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://arxiv.org/abs/1909.00608",
                "caption": "arXiv report",
                "description": "Technical Report at arXiv",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 1243,
                "image_height": 832,
                "name": "2019-ic-teaser.png",
                "type": "image/png",
                "size": 279699,
                "path": "Publication:2019-ic",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "2019-ic-video.mp4",
                "type": "video/mp4",
                "size": 13549543,
                "path": "Publication:2019-ic",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/2019-ic-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/2019-ic/",
        "__class": "Publication"
    },
    {
        "id": "byska-2019-mdfc",
        "type_id": "journalpaper",
        "tu_id": 283263,
        "repositum_id": null,
        "title": "Analysis of Long Molecular Dynamics Simulations Using Interactive Focus+Context Visualization",
        "date": "2019-06",
        "abstract": "Analyzing molecular dynamics (MD) simulations is a key aspect to understand protein dynamics and function. With increasing computational power, it is now possible to generate very long and complex simulations, which are cumbersome to explore using traditional 3D animations of protein movements. Guided by requirements derived from multiple focus groups with protein engineering experts, we designed and developed a novel interactive visual analysis approach for long and crowded MD simulations. In this approach, we link a dynamic 3D focus+context visualization with a 2D chart of time series data to guide the detection and navigation towards important spatio-temporal events. The 3D visualization renders elements of interest in more detail and increases the temporal resolution dependent on the time series data or the spatial region of interest. In case studies with different MD simulation data sets and research questions, we found that the proposed visual analysis approach facilitates exploratory analysis to generate, confirm, or reject hypotheses about causalities. Finally, we derived design guidelines for interactive visual analysis of complex MD simulation data.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 783,
            "image_height": 717,
            "name": "byska-2019-mdfc-teaser.jpg",
            "type": "image/jpeg",
            "size": 95308,
            "path": "Publication:byska-2019-mdfc",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-teaser.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1254,
            1196,
            1669,
            1499,
            1248,
            1110
        ],
        "cfp": {
            "name": "Eurovis 2019 CFP.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "91823",
            "orig_name": "Eurovis 2019 CFP.pdf",
            "ext": "pdf"
        },
        "date_from": "2019-06-03",
        "date_to": "2019-06-07",
        "doi": "10.1111/cgf.13701",
        "event": "EuroVis 2019",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1254
        ],
        "location": "Porto, Portugal",
        "number": "3",
        "pages_from": "441",
        "pages_to": "453",
        "volume": "38",
        "research_areas": [
            "BioVis",
            "InfoVis"
        ],
        "keywords": [
            "scientific visualization",
            "user centered design"
        ],
        "weblinks": [
            {
                "href": "https://onlinelibrary.wiley.com/doi/abs/10.1111/cgf.13701",
                "caption": "Wiley Online ",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "byska-2019-mdfc-paper.pdf",
                "type": "application/pdf",
                "size": 5805942,
                "path": "Publication:byska-2019-mdfc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-paper:thumb{{size}}.png"
            },
            {
                "description": "Domain expert report of findings",
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "byska-2019-mdfc-supplement.pdf",
                "type": "application/pdf",
                "size": 2846584,
                "path": "Publication:byska-2019-mdfc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-supplement:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 783,
                "image_height": 717,
                "name": "byska-2019-mdfc-teaser.jpg",
                "type": "image/jpeg",
                "size": 95308,
                "path": "Publication:byska-2019-mdfc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-teaser.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "byska-2019-mdfc-video.mp4",
                "type": "video/mp4",
                "size": 222623560,
                "path": "Publication:byska-2019-mdfc",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/byska-2019-mdfc-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/byska-2019-mdfc/",
        "__class": "Publication"
    },
    {
        "id": "waldin-2019-ccm",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Cuttlefish: Color Mapping for Dynamic Multi‐Scale Visualizations",
        "date": "2019-03",
        "abstract": "Visualizations of hierarchical data can often be explored interactively. For example, in geographic visualization, there are continents, which can be subdivided into countries, states, counties and cities. Similarly, in models of viruses or bacteria at the highest level are the compartments, and below that are macromolecules, secondary structures (such as α‐helices), amino‐acids, and on the finest level atoms. Distinguishing between items can be assisted through the use of color at all levels. However, currently, there are no hierarchical and adaptive color mapping techniques for very large multi‐scale visualizations that can be explored interactively. We present a novel, multi‐scale, color‐mapping technique for adaptively adjusting the color scheme to the current view and scale. Color is treated as a resource and is smoothly redistributed. The distribution adjusts to the scale of the currently observed detail and maximizes the color range utilization given current viewing requirements. Thus, we ensure that the user is able to distinguish items on any level, even if the color is not constant for a particular feature. The coloring technique is demonstrated for a political map and a mesoscale structural model of HIV. The technique has been tested by users with expertise in structural biology and was overall well received.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Multiple color zoom levels. ",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 509,
            "image_height": 447,
            "name": "waldin-2019-ccm-teaser.png",
            "type": "image/png",
            "size": 690229,
            "path": "Publication:waldin-2019-ccm",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/waldin-2019-ccm-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/waldin-2019-ccm-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1339,
            1110,
            1189,
            166,
            1365,
            1260,
            1475,
            171
        ],
        "doi": "10.1111/cgf.13611",
        "journal": "Computer Graphics Forum",
        "number": "6",
        "pages_from": "150",
        "pages_to": "164",
        "volume": "38",
        "research_areas": [
            "BioVis",
            "IllVis",
            "InfoVis"
        ],
        "keywords": [
            "multiscale visualization",
            "illustrative visualization",
            "molecular visualization"
        ],
        "weblinks": [
            {
                "href": "https://onlinelibrary.wiley.com/doi/10.1111/cgf.13611",
                "caption": "Open Access Article in Wiley Online Library",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Multiple color zoom levels. ",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 509,
                "image_height": 447,
                "name": "waldin-2019-ccm-teaser.png",
                "type": "image/png",
                "size": 690229,
                "path": "Publication:waldin-2019-ccm",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/waldin-2019-ccm-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/waldin-2019-ccm-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2019/waldin-2019-ccm/",
        "__class": "Publication"
    },
    {
        "id": "steinboeck-2018-lbg",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Casual Visual Exploration of Large Bipartite Graphs Using Hierarchical Aggregation and Filtering",
        "date": "2018-10",
        "abstract": "Bipartite graphs are typically visualized using linked\nlists or matrices. However, these classic visualization techniques\ndo not scale well with the number of nodes. Biclustering has\nbeen used to aggregate edges, but not to create linked lists\nwith thousands of nodes. In this paper, we present a new\ncasual exploration interface for large, weighted bipartite graphs,\nwhich allows for multi-scale exploration through hierarchical\naggregation of nodes and edges using biclustering in linked\nlists. We demonstrate the usefulness of the technique using two\ndata sets: a database of media advertising expenses of public\nauthorities and author-keyword co-occurrences from the IEEE\nVisualization Publication collection. Through an insight-based\nstudy with lay users, we show that the biclustering interface leads\nto longer exploration times, more insights, and more unexpected\nfindings than a baseline interface using only filtering. However,\nusers also perceive the biclustering interface as more complex.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "BiCFlows showing visualization authors and their key words",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 708,
            "image_height": 702,
            "name": "steinboeck-2018-lbg-teaser.png",
            "type": "image/png",
            "size": 137906,
            "path": "Publication:steinboeck-2018-lbg",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1378,
            166,
            1110
        ],
        "booktitle": "International Symposium on Big Data Visual and Immersive Analytics",
        "event": "4th International Symposium on Big Data Visual and Immersive Analytics",
        "lecturer": [
            1110
        ],
        "location": "Konstanz, Germany",
        "organization": "IEEE",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "information visualization",
            "bipartite graphs",
            "biclustering",
            "insight-based evaluation"
        ],
        "weblinks": [
            {
                "href": "https://users.cg.tuwien.ac.at/~waldner/bicflows/",
                "caption": "BiCFlows online",
                "description": "BiCFlows online for exploring Austria's media transparency database and the IEEE Visualization paper authors and their key words. ",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "steinboeck-2018-lbg-paper.pdf",
                "type": "application/pdf",
                "size": 1871535,
                "path": "Publication:steinboeck-2018-lbg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-paper:thumb{{size}}.png"
            },
            {
                "description": "BiCFlows showing visualization authors and their key words",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 708,
                "image_height": 702,
                "name": "steinboeck-2018-lbg-teaser.png",
                "type": "image/png",
                "size": 137906,
                "path": "Publication:steinboeck-2018-lbg",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/steinboeck-2018-lbg-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/steinboeck-2018-lbg/",
        "__class": "Publication"
    },
    {
        "id": "mazurek-2018-veq",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Visualizing Expanded Query Results",
        "date": "2018-06",
        "abstract": "When performing queries in web search engines, users often face difficulties choosing appropriate query terms. Search engines\ntherefore usually suggest a list of expanded versions of the user query to disambiguate it or to resolve potential term mismatches.\nHowever, it has been shown that users find it difficult to choose an expanded query from such a list. In this paper, we describe\nthe adoption of set-based text visualization techniques to visualize how query expansions enrich the result space of a given\nuser query and how the result sets relate to each other. Our system uses a linguistic approach to expand queries and topic\nmodeling to extract the most informative terms from the results of these queries. In a user study, we compare a common text list\nof query expansion suggestions to three set-based text visualization techniques adopted for visualizing expanded query results\n– namely, Compact Euler Diagrams, Parallel Tag Clouds, and a List View – to resolve ambiguous queries using interactive\nquery expansion. Our results show that text visualization techniques do not increase retrieval efficiency, precision, or recall.\nOverall, users rate Parallel Tag Clouds visualizing key terms of the expanded query space lowest. Based on the results, we derive\nrecommendations for visualizations of query expansion results, text visualization techniques in general, and discuss alternative\nuse cases of set-based text visualization techniques in the context of web search.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Density-based compact Euler Diagram",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 919,
            "image_height": 656,
            "name": "mazurek-2018-veq-teaser.png",
            "type": "image/png",
            "size": 34882,
            "path": "Publication:mazurek-2018-veq",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1364,
            1110
        ],
        "cfp": {
            "name": "EuroVis 2018 CfP.pdf",
            "type": "application/pdf",
            "error": "0",
            "size": "55844",
            "orig_name": "EuroVis 2018 CfP.pdf",
            "ext": "pdf"
        },
        "date_from": "2018-06-04",
        "date_to": "2018-06-08",
        "event": "EG / VGTC Conference on Visualization (EuroVis)",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1364
        ],
        "location": "Brno, Czech Republic",
        "pages_from": "87",
        "pages_to": "98",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "Information visualization",
            "search interfaces",
            "empirical studies in visualization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "mazurek-2018-veq-paper.pdf",
                "type": "application/pdf",
                "size": 1891501,
                "path": "Publication:mazurek-2018-veq",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-paper:thumb{{size}}.png"
            },
            {
                "description": "Density-based compact Euler Diagram",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 919,
                "image_height": 656,
                "name": "mazurek-2018-veq-teaser.png",
                "type": "image/png",
                "size": 34882,
                "path": "Publication:mazurek-2018-veq",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "mazurek-2018-veq-video.mp4",
                "type": "video/mp4",
                "size": 30231176,
                "path": "Publication:mazurek-2018-veq",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/mazurek-2018-veq-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/mazurek-2018-veq/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2018-ved",
        "type_id": "talk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Visual Data Exploration and Analysis in Emerging Display Environments ",
        "date": "2018-04-05",
        "abstract": "Increasingly powerful computing and display hardware open up entirely new ways for visual data exploration and analysis. Powerful machines and emerging display environments facilitate novel visual exploration techniques, collaborative data analysis, and even immersion into the scientific data. This talk will address the challenges we faced when bringing biomolecular visual analysis tools and complex molecular visualizations into such large, multi-user environments. A special focus lies on interfaces and attention guidance techniques we designed and evaluated to keep the user oriented and reduce visual clutter. ",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110
        ],
        "event": "Emerging Technologies in Scientific Data Visualisation - CECAM",
        "location": "Pisa, Italy",
        "research_areas": [],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://www.cecam.org/workshop-1586.html",
                "caption": "CECAM workshop",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/waldner-2018-ved/",
        "__class": "Publication"
    },
    {
        "id": "polatsek-2018-stv",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Exploring visual attention and saliency modeling for task-based visual analysis",
        "date": "2018-02",
        "abstract": "Memory, visual attention and perception play a critical role in the design of visualizations. The way users observe a visualization is affected by salient stimuli in a scene as well as by domain knowledge, interest, and the task. While recent saliency models manage to predict the users’ visual attention in visualizations during exploratory analysis, there is little evidence how much influence bottom-up saliency has on task-based visual analysis. Therefore, we performed an eye-tracking study with 47 users to determine the users’ path of attention when solving three low-level analytical tasks using 30 different charts from the MASSVIS database [1]. We also compared our task-based eye tracking data to the data from the original memorability experiment by Borkin et al. [2]. We found that solving a task leads to more consistent viewing patterns compared to exploratory visual analysis. However, bottom-up saliency of a visualization has negligible influence on users’ fixations and task efficiency when performing a low-level analytical task. Also, the efficiency of visual search for an extreme target data point is barely influenced by the target’s bottom-up saliency. Therefore, we conclude that bottom-up saliency models tailored towards information visualization are not suitable for predicting visual attention when performing task-based visual analysis. We discuss potential reasons and suggest extensions to visual attention models to better account for task-based visual analysis.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "graphical abstract",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 591,
            "image_height": 584,
            "name": "polatsek-2018-stv-graphical abstract.png",
            "type": "image/png",
            "size": 193042,
            "path": "Publication:polatsek-2018-stv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1545,
            1110,
            171,
            1546,
            1547
        ],
        "doi": "https://doi.org/10.1016/j.cag.2018.01.010",
        "journal": "Computers & Graphics",
        "number": "2",
        "open_access": "no",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "Information visualization",
            "Eye-tracking experiment",
            "Saliency",
            "Visual attention",
            "Low-level analytical tasks"
        ],
        "weblinks": [
            {
                "href": "https://www.sciencedirect.com/science/article/pii/S0097849318300104",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "graphical abstract",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 591,
                "image_height": 584,
                "name": "polatsek-2018-stv-graphical abstract.png",
                "type": "image/png",
                "size": 193042,
                "path": "Publication:polatsek-2018-stv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-graphical abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "polatsek-2018-stv-paper.pdf",
                "type": "application/pdf",
                "size": 3228380,
                "path": "Publication:polatsek-2018-stv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/polatsek-2018-stv-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2018/polatsek-2018-stv/",
        "__class": "Publication"
    },
    {
        "id": "Waldner_2017_11",
        "type_id": "talk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Guiding Attention in Complex Visualizations using Flicker",
        "date": "2017-11-17",
        "abstract": "Drawing the user’s gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker is also a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. While it is very salient, it is often perceived as annoying. In this talk, I will present our research on how flicker can be used as attention guidance technique in cluttered visualizations while lowering its negative side-effects. In particular, I will first present results of studies examining a two-stage flicker technique for dynamic visualizations on large displays. Then, I will present our explorations of high frequency flicker (60 to 72 Hz) to guide the user’s attention in images. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. We show that high frequency flicker, using personalized attributes like patch size and luminance, can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user’s attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110
        ],
        "event": "S&T Cooperation Austria-Czech Republic",
        "location": "Czech Technical University",
        "research_areas": [
            "IllVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldner_2017_11/",
        "__class": "Publication"
    },
    {
        "id": "geymayer-2017-std",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "How Sensemaking Tools Influence Display Space Usage",
        "date": "2017-06",
        "abstract": "We explore how the availability of a sensemaking tool influences users’ knowledge externalization strategies. On a large display,\nusers were asked to solve an intelligence analysis task with or without a bidirectionally linked concept-graph (BLC) to organize\ninsights into concepts (nodes) and relations (edges). In BLC, both nodes and edges maintain “deep links” to the exact source\nphrases and sections in associated documents. In our control condition, we were able to reproduce previously described spatial\norganization behaviors using document windows on the large display. When using BLC, however, we found that analysts apply\nspatial organization to BLC nodes instead, use significantly less display space and have significantly fewer open windows.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 805,
            "image_height": 344,
            "name": "geymayer-2017-std-image.PNG",
            "type": "image/png",
            "size": 604999,
            "path": "Publication:geymayer-2017-std",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-image.PNG",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1477,
            1110,
            1476,
            202
        ],
        "booktitle": "EuroVis Workshop on Visual Analytics",
        "event": "EuroVis 2017",
        "lecturer": [
            1477
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "sensemaking",
            "large displays",
            "evaluation"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 805,
                "image_height": 344,
                "name": "geymayer-2017-std-image.PNG",
                "type": "image/png",
                "size": 604999,
                "path": "Publication:geymayer-2017-std",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-image.PNG",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "geymayer-2017-std-paper.pdf",
                "type": "application/pdf",
                "size": 1117995,
                "path": "Publication:geymayer-2017-std",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/geymayer-2017-std-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/geymayer-2017-std/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2017-vph",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Exploring Visual Prominence of Multi-Channel Highlighting in Visualizations",
        "date": "2017-05",
        "abstract": "Visualizations make rich use of multiple visual channels so that there are few resources left to make selected focus elements visually\ndistinct from their surrounding context. A large variety of highlighting techniques for visualizations has been presented in the past,\nbut there has been little systematic evaluation of the design space of highlighting. We explore highlighting from the perspective\nof visual marks and channels – the basic building blocks of visualizations that are directly controlled by visualization designers.\nWe present the results from two experiments, exploring the visual prominence of highlighted marks in scatterplots: First, using\nluminance as a single highlight channel, we found that visual prominence is mainly determined by the luminance difference between\nthe focus mark and the brightest context mark. The brightness differences between context marks and the overall brightness level\nhave negligible influence. Second, multi-channel highlighting using luminance and blur leads to a good trade-off between highlight\neffectiveness and aesthetics. From the results, we derive a simple highlight model to balance highlighting across multiple visual\nchannels and focus and context marks, respectively.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 160,
            "image_height": 124,
            "name": "waldner-2017-vph-.png",
            "type": "image/png",
            "size": 20447,
            "path": "Publication:waldner-2017-vph",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            925,
            166
        ],
        "booktitle": "Spring Conference on Computer Graphics 2017",
        "lecturer": [
            1110
        ],
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [
            "information visualization",
            "highlighting",
            "focus+context",
            "visual prominence"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 160,
                "image_height": 124,
                "name": "waldner-2017-vph-.png",
                "type": "image/png",
                "size": 20447,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2017-vph-paper.pdf",
                "type": "application/pdf",
                "size": 1851221,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-paper:thumb{{size}}.png"
            },
            {
                "description": "Details about experiment design and results. ",
                "filetitle": "supplemental material",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2017-vph-supplemental material.pdf",
                "type": "application/pdf",
                "size": 1396255,
                "path": "Publication:waldner-2017-vph",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-supplemental material.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/waldner-2017-vph-supplemental material:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/waldner-2017-vph/",
        "__class": "Publication"
    },
    {
        "id": "Waldin_Nicholas_2017_FlickerObserver",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Flicker Observer Effect: Guiding Attention Through High Frequency Flicker in Images",
        "date": "2017-05",
        "abstract": "Drawing the user's gaze to an important item in an image or a graphical user interface is a common challenge. Usually, some form of highlighting is used, such as a clearly distinct color or a border around the item. Flicker can also be very salient, but is often perceived as annoying. In this paper, we explore high frequency flicker (60 to 72 Hz) to guide the user's attention in an image. At such high frequencies, the critical flicker frequency (CFF) threshold is reached, which makes the flicker appear to fuse into a stable signal. However, the CFF is not uniform across the visual field, but is higher in the peripheral vision at normal lighting conditions. Through experiments, we show that high frequency flicker can be easily detected by observers in the peripheral vision, but the signal is hardly visible in the foveal vision when users directly look at the flickering patch. We demonstrate that this property can be used to draw the user's attention to important image regions using a standard high refresh-rate computer monitor with minimal visible modifications to the image. In an uncalibrated visual search task, users could in a crowded image easily spot the specified search targets flickering with very high frequency. They also reported that high frequency flicker was distracting when they had to attend to another region, while it was hardly noticeable when looking at the flickering region itself.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461,
            1110,
            171
        ],
        "date_from": "2014",
        "date_to": "2017",
        "event": "Eurographics 2017",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1339
        ],
        "number": "2",
        "pages_from": "467",
        "pages_to": "476",
        "volume": "36",
        "research_areas": [
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Waldin_Nicholas_2017_FlickerObserver-paper.pdf",
                "type": "application/pdf",
                "size": 6348247,
                "path": "Publication:Waldin_Nicholas_2017_FlickerObserver",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/Waldin_Nicholas_2017_FlickerObserver-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/Waldin_Nicholas_2017_FlickerObserver-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage",
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2017/Waldin_Nicholas_2017_FlickerObserver/",
        "__class": "Publication"
    },
    {
        "id": "bernhard-2016-gft",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "The Accuracy of Gauge-Figure Tasks in Monoscopic and Stereo Displays",
        "date": "2016-07",
        "abstract": "The gauge-figure task (GFT) is a widespread method used to study surface perception for evaluating rendering and visualization techniques. The authors investigate how accurately slant angles probed on well-defined objects align with the ground truth (GT) in monoscopic and stereoscopic displays. Their results show that the GFT probes taken with well-defined objects align well with the GT in the all-monoscopic and all-stereoscopic conditions. However, they found that a GF rendered in stereo over a monoscopic stimulus results in a strong slant underestimation and that an overestimation occurred in the inverse case (monoscopic GF and stereoscopic stimulus). They discuss how their findings affect the interpretation of absolute GFT measures, compared to the GT normal.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 216,
            "image_height": 205,
            "name": "bernhard-2016-gft-.jpg",
            "type": "image/jpeg",
            "size": 38475,
            "path": "Publication:bernhard-2016-gft",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            660,
            1110,
            1293,
            896,
            171
        ],
        "journal": "IEEE Computer Graphics and Applications",
        "number": "4",
        "pages_from": "56",
        "pages_to": "66",
        "volume": "36",
        "research_areas": [
            "Perception"
        ],
        "keywords": [
            "computer graphics",
            "gauge-figure task",
            "perceptual visualization",
            "shape perception"
        ],
        "weblinks": [
            {
                "href": "http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7478440",
                "caption": "IEEE Xplore",
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 216,
                "image_height": 205,
                "name": "bernhard-2016-gft-.jpg",
                "type": "image/jpeg",
                "size": 38475,
                "path": "Publication:bernhard-2016-gft",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/bernhard-2016-gft-:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/bernhard-2016-gft/",
        "__class": "Publication"
    },
    {
        "id": "Waldin_Nicholas_2016_Chameleon",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Chameleon: Dynamic Color Mapping for Multi-Scale Structural Biology Models",
        "date": "2016",
        "abstract": "Visualization of structural biology data uses color to categorize or separate dense structures into particular semantic units. In\nmultiscale models of viruses or bacteria, there are atoms on the finest level of detail, then amino-acids, secondary structures,\nmacromolecules, up to the compartment level and, in all these levels, elements can be visually distinguished by color. However,\ncurrently only single scale coloring schemes are utilized that show information for one particular scale only. We present a novel\ntechnology which adaptively, based on the current scale level, adjusts the color scheme to depict or distinguish the currently\nbest visible structural information. We treat the color as a visual resource that is distributed given a particular demand. The\nchanges of the color scheme are seamlessly interpolated between the color scheme from the previous views into a given new one.\nWith such dynamic multi-scale color mapping we ensure that the viewer is able to distinguish structural detail that is shown\non any given scale. This technique has been tested by users with an expertise in structural biology and has been overall well\nreceived.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "Overview",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 378,
            "image_height": 472,
            "name": "Waldin_Nicholas_2016_Chameleon-Overview.png",
            "type": "image/png",
            "size": 488060,
            "path": "Publication:Waldin_Nicholas_2016_Chameleon",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-Overview.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-Overview:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1461,
            1189,
            1110,
            166,
            1365,
            1260,
            171
        ],
        "booktitle": "Eurographics Workshop on Visual Computing for Biology and Medicine",
        "event": "VCBM",
        "lecturer": [
            1339
        ],
        "research_areas": [],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/handle/10.2312/vcbm20161266",
                "caption": null,
                "description": null,
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "Overview",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 378,
                "image_height": 472,
                "name": "Waldin_Nicholas_2016_Chameleon-Overview.png",
                "type": "image/png",
                "size": 488060,
                "path": "Publication:Waldin_Nicholas_2016_Chameleon",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-Overview.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-Overview:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "Waldin_Nicholas_2016_Chameleon-paper.pdf",
                "type": "application/pdf",
                "size": 25172411,
                "path": "Publication:Waldin_Nicholas_2016_Chameleon",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "zoomed A",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 378,
                "image_height": 472,
                "name": "Waldin_Nicholas_2016_Chameleon-zoomed A.png",
                "type": "image/png",
                "size": 462676,
                "path": "Publication:Waldin_Nicholas_2016_Chameleon",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-zoomed A.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-zoomed A:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "zoomed B",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 380,
                "image_height": 474,
                "name": "Waldin_Nicholas_2016_Chameleon-zoomed B.png",
                "type": "image/png",
                "size": 360850,
                "path": "Publication:Waldin_Nicholas_2016_Chameleon",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-zoomed B.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/Waldin_Nicholas_2016_Chameleon-zoomed B:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis",
            "deskollage"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2016/Waldin_Nicholas_2016_Chameleon/",
        "__class": "Publication"
    },
    {
        "id": "lemuzic_2015_timelapse",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Illustrative Timelapse: A Technique for Illustrative Visualization of Particle Simulations on the Mesoscale Level",
        "date": "2015-04",
        "abstract": "Animated movies are a popular way to communicate complex phenomena\nin cell biology to the broad audience. Animation artists\napply sophisticated illustration techniques to communicate a story,\nwhile trying to maintain a realistic representation of a complex dynamic\nenvironment. Since such hand-crafted animations are timeconsuming\nand cost-intensive to create, our goal is to formalize\nillustration techniques used by artists to facilitate the automatic\ncreation of visualizations generated from mesoscale particle-based\nmolecular simulations. Our technique Illustrative Timelapse supports\nvisual exploration of complex biochemical processes in dynamic\nenvironments by (1) seamless temporal zooming to observe\nphenomena in different temporal resolutions, (2) visual abstraction\nof molecular trajectories to ensure that observers are able to visually\nfollow the main actors, (3) increased visual focus on events of interest,\nand (4) lens effects to preserve a realistic representation of the\nenvironment in the context. Results from a first user study indicate\nthat visual abstraction of trajectories improves the ability to follow\na story and is also appreciated by users. Lens effects increased the\nperceived amount of molecular motion in the environment while\ntrading off traceability of individual molecules.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "screenshot",
            "main_file": true,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1804,
            "image_height": 810,
            "name": "lemuzic_2015_timelapse-screenshot.jpg",
            "type": "image/jpeg",
            "size": 238322,
            "path": "Publication:lemuzic_2015_timelapse",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-screenshot.jpg",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-screenshot:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1189,
            1110,
            1143,
            171
        ],
        "booktitle": "Visualization Symposium (PacificVis), 2015 IEEE Pacific",
        "date_from": "2015-04-14",
        "date_to": "2015-04-17",
        "lecturer": [
            1189
        ],
        "location": "Zijingang Campus, Zhejiang University, Hangzhou, China",
        "organization": "8th IEEE Pacific Visualization Symposium (PacificVis 2015)",
        "pages_from": "247",
        "pages_to": "254",
        "publisher": "IEEE",
        "research_areas": [
            "BioVis",
            "IllVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": "the paper",
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "lemuzic_2015_timelapse-paper.pdf",
                "type": "application/pdf",
                "size": 1414681,
                "path": "Publication:lemuzic_2015_timelapse",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "screenshot",
                "main_file": true,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1804,
                "image_height": 810,
                "name": "lemuzic_2015_timelapse-screenshot.jpg",
                "type": "image/jpeg",
                "size": 238322,
                "path": "Publication:lemuzic_2015_timelapse",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-screenshot.jpg",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/lemuzic_2015_timelapse-screenshot:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2015/lemuzic_2015_timelapse/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2014-af",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": null,
        "title": "Attractive Flicker: Guiding Attention in Dynamic Narrative Visualizations",
        "date": "2014-12",
        "abstract": "Focus+context techniques provide visual guidance in visualizations by giving strong visual prominence to elements of interest while the context is suppressed. However, finding a visual feature to enhance for the focus to pop out from its context in a large dynamic scene, while leading to minimal visual deformation and subjective disturbance, is challenging. This paper proposes Attractive Flicker, a novel technique for visual guidance in dynamic narrative visualizations. We first show that flicker is a strong visual attractor in the entire visual field, without distorting, suppressing, or adding any scene elements. The novel aspect of our Attractive Flicker technique is that it consists of two signal stages: The first “orientation stage” is a short but intensive flicker stimulus to attract the attention to elements of interest. Subsequently, the intensive flicker is reduced to a minimally disturbing luminance oscillation (“engagement stage”) as visual support to keep track of the focus elements. To find a good trade-off between attraction effectiveness and subjective annoyance caused by flicker, we conducted two perceptual studies to find suitable signal parameters. We showcase Attractive Flicker with the parameters obtained from the perceptual statistics in a study of molecular interactions. With Attractive Flicker, users were able to easily follow the narrative of the visualization on a large display, while the flickering of focus elements was not disturbing when observing the context.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 600,
            "image_height": 605,
            "name": "waldner-2014-af-.png",
            "type": "image/png",
            "size": 232715,
            "path": "Publication:waldner-2014-af",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1189,
            660,
            190,
            171
        ],
        "date_from": "2014-11-09",
        "date_to": "2014-11-14",
        "event": "IEEE VIS 2014",
        "journal": "IEEE Transactions on Visualization and Computer Graphics",
        "lecturer": [
            1110
        ],
        "location": "Paris, France",
        "number": "12",
        "pages_from": "2456",
        "pages_to": "2465",
        "volume": "20",
        "research_areas": [
            "BioVis",
            "Perception"
        ],
        "keywords": [
            "Narrative Visualization",
            "Flicker",
            "Visual Attention"
        ],
        "weblinks": [
            {
                "href": "http://dx.doi.org/10.1109/TVCG.2014.2346352",
                "caption": null,
                "description": null,
                "main_file": false
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 600,
                "image_height": 605,
                "name": "waldner-2014-af-.png",
                "type": "image/png",
                "size": 232715,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-:thumb{{size}}.png"
            },
            {
                "description": "Screenshot of the large molecular scene used in the final experiment",
                "filetitle": "molecularScene",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 7597,
                "image_height": 1327,
                "name": "waldner-2014-af-molecularScene.png",
                "type": "image/png",
                "size": 3805356,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-molecularScene.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-molecularScene:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2014-af-paper.pdf",
                "type": "application/pdf",
                "size": 6413298,
                "path": "Publication:waldner-2014-af",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Preview video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 1280,
                "preview_image_height": 720,
                "name": "waldner-2014-af-Preview video.mp4",
                "type": "video/mp4",
                "size": 16497882,
                "path": "Publication:waldner-2014-af",
                "preview_name": "waldner-2014-af-Preview video:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 14964,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Preview video:video.mp4"
            },
            {
                "description": null,
                "filetitle": "Submission video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 1280,
                "preview_image_height": 720,
                "name": "waldner-2014-af-Submission video.mp4",
                "type": "video/mp4",
                "size": 23904054,
                "path": "Publication:waldner-2014-af",
                "preview_name": "waldner-2014-af-Submission video:preview.png",
                "preview_type": "image/jpeg",
                "preview_size": 48929,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/waldner-2014-af-Submission video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-af/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2014-ghi",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Graphical Histories of Information Foraging",
        "date": "2014-10",
        "abstract": "During information foraging, knowledge workers iteratively seek, filter, read, and extract information. When using multiple information sources and different applications for information processing, re-examination of activities for validation of previous decisions or re-discovery of previously used information sources is challenging. In this paper, we present a novel representation of cross-application histories to support recall of past operations and re-discovery of information resources. Our graphical history consists of a cross-scale visualization combining an overview node-link diagram of used desktop resources with nested (animated) snapshot sequences, based on a recording of the visual screen output during the users’ desktop work. This representation makes key elements of the users’ tasks visually stand out, while exploiting the power of visual memory to recover subtle details of their activities. In a preliminary study, users found our graphical history helpful to recall details of an information foraging task and commented positively on the ability to expand overview nodes into snapshot and video sequences.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 748,
            "image_height": 668,
            "name": "waldner-2014-ghi-.png",
            "type": "image/png",
            "size": 118153,
            "path": "Publication:waldner-2014-ghi",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            161,
            171
        ],
        "booktitle": "Proceedings of the 8th Nordic Conference on Human-Computer Interaction: Fun, Fast, Foundational",
        "date_from": "2014-10-26",
        "date_to": "2014-10-30",
        "isbn": "978-1-4503-2542-4",
        "lecturer": [
            1110
        ],
        "location": "Helsinki, Finland",
        "organization": "NordiCHI’14 - Nordic Conference on Human-Computer Interaction",
        "pages_from": "295",
        "pages_to": "304",
        "publisher": "ACM",
        "research_areas": [
            "InfoVis",
            "NetVis"
        ],
        "keywords": [
            "Graph visualization",
            "Interaction history",
            "Provenance"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 748,
                "image_height": 668,
                "name": "waldner-2014-ghi-.png",
                "type": "image/png",
                "size": 118153,
                "path": "Publication:waldner-2014-ghi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2014-ghi-paper.pdf",
                "type": "application/pdf",
                "size": 3116802,
                "path": "Publication:waldner-2014-ghi",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/waldner-2014-ghi-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/waldner-2014-ghi/",
        "__class": "Publication"
    },
    {
        "id": "lemuzic_2014_ipv",
        "type_id": "poster",
        "tu_id": null,
        "repositum_id": null,
        "title": "Illustrative Visualization of Biochemical Processes Featuring Multiple Temporal Scales",
        "date": "2014",
        "abstract": "Scientific illustrators are commonly using structural description of molecular compounds when depicting complex biochemical processes. However, computational biology also provides procedural models describing the function of biological processes which are not currently used in the production pipeline. Instead, animators utilize scientific knowledge to manually animate and reproduce the functioning of cellular biology. We would like to explore the use of such models in order to generate explanatory illustrations that would show how molecular machinery works. Particle-based simulations provide the means for spatially representing the dynamics of biochemical processes. They compute the positions of each single particle and are supposed to \nmimic a realistic behaviour of the metabolites. Current mesoscale visualization also allows to directly show the results of such simulations by mapping the positions of particles in a virtual 3D environment. Nevertheless, some biochemical processes, like the DNA repair for instance, exhibit temporal multiscale aspects because they comprise diffusion rates which are much greater in comparison with reaction rates. As a result, it is challenging to produce a clear and coherent visualization out of this type of simulation. Indeed, when viewing the process at the pace which would let us see the reactions, it becomes impossible for the human eye to keep track of individual elements because of the very large diffusion displacements. On the other hand, if one would playback the simulation slow enough to be see a steady motion of individual elements, then only a very few number of reactions would occur in a reasonable amount of time. In this work we propose to solve the problem associated with multiple temporal scales by providing means for spatial. With this approach we aim at showing the two different temporal scale at the same time by using advanced trajectory smoothing mechanism. \nThis would allow us to see individual elements while showing a world full of reactions, hence enabling us to communicate complex biological processes and molecular machineries in a comprehensive way.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "name": "lemuzic_2014_ipv-.pdf",
            "type": "application/pdf",
            "size": 823052,
            "path": "Publication:lemuzic_2014_ipv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/lemuzic_2014_ipv-.pdf",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/lemuzic_2014_ipv-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1189,
            1143,
            1110,
            171
        ],
        "date_from": "2014-09-04",
        "date_to": "2014-09-05",
        "event": "Eurographics Workshop on Visual Computing for Biology",
        "research_areas": [
            "IllVis"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "lemuzic_2014_ipv-.pdf",
                "type": "application/pdf",
                "size": 823052,
                "path": "Publication:lemuzic_2014_ipv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/lemuzic_2014_ipv-.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/lemuzic_2014_ipv-:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2014/lemuzic_2014_ipv/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2013-facetCloudsGI",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "FacetClouds: Exploring Tag Clouds for Multi-Dimensional Data",
        "date": "2013-05",
        "abstract": "Tag clouds are simple yet very widespread representations of how often certain words appear in a collection. In conventional tag clouds, only a single visual text variable is actively controlled: the tags’ font size. Previous work has demonstrated that font size is indeed the most influential visual text variable. However, there are other variables, such as text color, font style and tag orientation, that could be manipulated to encode additional data dimensions.\n\nFacetClouds manipulate intrinsic visual text variables to encode multiple data dimensions within a single tag cloud. We conducted a series of experiments to detect the most appropriate visual text variables for encoding nominal and ordinal values in a cloud with tags of varying font size. Results show that color is the most expressive variable for both data types, and that a combination of tag rotation and background color range leads to the best overall performance when showing multiple data dimensions in a single tag cloud.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 362,
            "image_height": 360,
            "name": "waldner-2013-facetCloudsGI-image.png",
            "type": "image/png",
            "size": 49904,
            "path": "Publication:waldner-2013-facetCloudsGI",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            1111,
            1112,
            1113,
            1114,
            1115
        ],
        "address": "Regina, Saskatchewan, Canada",
        "booktitle": "Proceedings of the 2013 Graphics Interface Conference",
        "date_from": "2013-05-29",
        "date_to": "2013-05-31",
        "isbn": "978-1-4822-1680-6",
        "lecturer": [
            1111
        ],
        "location": "Regina, Saskatchewan, Canada",
        "organization": "ACM Siggraph",
        "pages_from": "17",
        "pages_to": "24",
        "publisher": "ACM Publishing House",
        "research_areas": [
            "InfoVis",
            "Perception"
        ],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 362,
                "image_height": 360,
                "name": "waldner-2013-facetCloudsGI-image.png",
                "type": "image/png",
                "size": 49904,
                "path": "Publication:waldner-2013-facetCloudsGI",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "preview_image_width": 636,
                "preview_image_height": 708,
                "name": "waldner-2013-facetCloudsGI-paper.pdf",
                "type": "application/pdf",
                "size": 2184510,
                "path": "Publication:waldner-2013-facetCloudsGI",
                "preview_name": "waldner-2013-facetCloudsGI-paper:preview.png",
                "preview_type": "image/png",
                "preview_size": 189512,
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/waldner-2013-facetCloudsGI-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-facetCloudsGI/",
        "__class": "Publication"
    },
    {
        "id": "waldner-2013-ubiWM",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": null,
        "title": "Towards Ubiquitous Information Space Management",
        "date": "2013-05",
        "abstract": "Large, high-resolution display spaces are usually created by carefully aligning multiple monitors or projectors to obtain a perfectly flat, rectangular display. In this paper, we suggest the usage of imperfect surfaces as extension of personal workspaces to create ubiquitous, personalized information spaces. We identify five environmental factors ubiquitous information spaces need to consider: 1) user location and display visibility, 2) display gaps and holes, 3) corners and non-planarity of the display surface, 4) physical objects within and around the display surface, and 5) non-rectangular display shapes. Instead of compensating for fragmentations and non-planarity of the information space, we propose a ubiquitous information space manager, adapting interaction and window rendering techniques to the above mentioned factors. We hypothesize that knowledge workers will benefit from such ubiquitous information spaces due to increased exploitation of spatial cognition.\n",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "",
            "main_file": false,
            "use_in_gallery": false,
            "access": "public",
            "image_width": 300,
            "image_height": 238,
            "name": "waldner-2013-ubiWM-.png",
            "type": "image/png",
            "size": 39630,
            "path": "Publication:waldner-2013-ubiWM",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1110,
            202
        ],
        "booktitle": "POWERWALL: International Workshop on Interactive, Ultra-High-Resolution Displays, part of the SIGCHI Conference on Human Factors in Computing Systems (2013)",
        "date_from": "2013-04-27",
        "date_to": "2013-05-02",
        "isbn": "978-1-4503-1952-2",
        "lecturer": [
            1110
        ],
        "location": "Paris, France",
        "pages_from": "1",
        "pages_to": "6",
        "publisher": "ACM",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "information management",
            "ubiquitous displays"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "image_width": 300,
                "image_height": 238,
                "name": "waldner-2013-ubiWM-.png",
                "type": "image/png",
                "size": 39630,
                "path": "Publication:waldner-2013-ubiWM",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "Position Paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "waldner-2013-ubiWM-Position Paper.pdf",
                "type": "application/pdf",
                "size": 846295,
                "path": "Publication:waldner-2013-ubiWM",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-Position Paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/waldner-2013-ubiWM-Position Paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "illvisation"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2013/waldner-2013-ubiWM/",
        "__class": "Publication"
    },
    {
        "id": "wu-2021",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": null,
        "title": "Visualization working group at TU Wien: Visible Facimus Quod Ceteri Non Possunt",
        "date": null,
        "abstract": "Building-up and running a university-based research group is a multi-faceted undertaking. The visualization working group at TU Wien (vis-group) has been internationally active over more than 25 years. The group has been acting in a competitive scientific setting where sometimes contradicting multiple objectives require trade-offs and optimizations. Research-wise the group has been performing basic and applied research in visualization and visual computing. Teaching-wise the group has been involved in undergraduate and graduate lecturing in (medical) visualization and computer graphics. To be scientifically competitive requires to constantly expose the group and its members to a strong international competition at the highest level. This necessitates to shield the members against the ensuing pressures and demands and provide (emotional) support and encouragement. Internally, the vis-group has developed a unique professional and social interaction culture: work and celebrate, hard and together. This has crystallized into a nested, recursive, and triangular organization model, which concretizes what it takes to make a research group successful. The key elements are the creative and competent vis-group members who collaboratively strive for (scientific) excellence in a socially enjoyable environment.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "name": "wu-2021-image.png",
            "type": "image/png",
            "size": null,
            "path": "Publication:wu-2021",
            "url": "https://www.cg.tuwien.ac.at/research/publications/ongoing/wu-2021/wu-2021-image.png",
            "thumb_image_sizes": []
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            1464,
            1170,
            1366,
            1285,
            1383,
            1263,
            1577,
            935,
            1410,
            171,
            1110,
            166
        ],
        "doi": "https://doi.org/10.1016/j.visinf.2021.02.003",
        "journal": "Visual Informatics",
        "open_access": "yes",
        "pages_from": "76",
        "pages_to": "84",
        "volume": "5",
        "research_areas": [],
        "keywords": [],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "name": "wu-2021-image.png",
                "type": "image/png",
                "size": null,
                "path": "Publication:wu-2021",
                "url": "https://www.cg.tuwien.ac.at/research/publications/ongoing/wu-2021/wu-2021-image.png",
                "thumb_image_sizes": []
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wu-2021-paper.pdf",
                "type": "application/pdf",
                "size": null,
                "path": "Publication:wu-2021",
                "url": "https://www.cg.tuwien.ac.at/research/publications/ongoing/wu-2021/wu-2021-paper.pdf",
                "thumb_image_sizes": []
            }
        ],
        "projects_workgroups": [
            "vis"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/ongoing/wu-2021/",
        "__class": "Publication"
    }
]
