[
    {
        "id": "matt-2025-scv",
        "type_id": "journalpaper_notalk",
        "tu_id": null,
        "repositum_id": "20.500.12708/216286",
        "title": "Scalable Class-Centric Visual Interactive Labeling",
        "date": "2025-06",
        "abstract": "Large unlabeled datasets demand efficient and scalable data labeling solutions, in particular when the number of instances and classes is large. This leads to significant visual scalability challenges and imposes a high cognitive load on the users. Traditional instance-centric labeling methods, where (single) instances are labeled in each iteration struggle to scale effectively in these scenarios. To address these challenges, we introduce cVIL, a Class-Centric Visual Interactive Labeling methodology designed for interactive visual data labeling. By shifting the paradigm from assigning-classes-to-instances to assigning-instances-to-classes, cVIL reduces labeling effort and enhances efficiency for annotators working with large, complex and class-rich datasets. We propose a novel visual analytics labeling interface built on top of the conceptual cVIL workflow, enabling improved scalability over traditional visual labeling. In a user study, we demonstrate that cVIL can improve labeling efficiency and user satisfaction over instance-centric interfaces. The effectiveness of cVIL is further demonstrated through a usage scenario, showcasing its potential to alleviate cognitive load and support experts in managing extensive labeling tasks efficiently.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "class-centric visual interactive labeling workflow",
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1667,
            "image_height": 624,
            "name": "matt-2025-scv-teaser.png",
            "type": "image/png",
            "size": 254694,
            "path": "Publication:matt-2025-scv",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5263,
            5487,
            1851,
            5370,
            1110
        ],
        "articleno": "104240",
        "doi": "10.1016/j.cag.2025.104240",
        "issn": "1873-7684",
        "journal": "COMPUTERS & GRAPHICS-UK",
        "pages": "14",
        "publisher": "PERGAMON-ELSEVIER SCIENCE LTD",
        "volume": "129",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Class-centric labeling",
            "Interactive machine learning",
            "Property measures",
            "Visual analytics",
            "Visual-interactive data labeling"
        ],
        "weblinks": [],
        "files": [
            {
                "description": "class-centric visual interactive labeling workflow",
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1667,
                "image_height": 624,
                "name": "matt-2025-scv-teaser.png",
                "type": "image/png",
                "size": 254694,
                "path": "Publication:matt-2025-scv",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/matt-2025-scv-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/matt-2025-scv/",
        "__class": "Publication"
    },
    {
        "id": "eschner-2025-ide",
        "type_id": "journalpaper",
        "tu_id": null,
        "repositum_id": "20.500.12708/216865",
        "title": "Interactive Discovery and Exploration of Visual Bias in Generative Text-to-Image Models",
        "date": "2025-06",
        "abstract": "Bias in generative Text-to-Image (T2I) models is a known issue, yet systematically analyzing such models' outputs to uncover it remains challenging. We introduce the Visual Bias Explorer (ViBEx) to interactively explore the output space of T2I models to support the discovery of visual bias. ViBEx introduces a novel flexible prompting tree interface in combination with zero-shot bias probing using CLIP for quick and approximate bias exploration. It additionally supports in-depth confirmatory bias analysis through visual inspection of forward, intersectional, and inverse bias queries. ViBEx is model-agnostic and publicly available. In four case study interviews, experts in AI and ethics were able to discover visual biases that have so far not been described in literature.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "image",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 2578,
            "image_height": 1292,
            "name": "eschner-2025-ide-image.png",
            "type": "image/png",
            "size": 1659146,
            "path": "Publication:eschner-2025-ide",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image:thumb{{size}}.png"
        },
        "sync_repositum_override": "title,date,projects,event,lecturer,location,number,volume",
        "repositum_presentation_id": null,
        "authors": [
            1653,
            5490,
            5370,
            1110
        ],
        "ac_number": "AC17579673",
        "articleno": "e70135",
        "doi": "10.1111/cgf.70135",
        "event": "EuroVis 2025",
        "issn": "1467-8659",
        "journal": "Computer Graphics Forum",
        "lecturer": [
            1653
        ],
        "location": "Luxembourg",
        "number": "3",
        "open_access": "yes",
        "pages": "20",
        "publisher": "WILEY",
        "volume": "44",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Visualization",
            "Bias",
            "Artificial Intelligence"
        ],
        "weblinks": [
            {
                "href": "https://vibex.jde.cg.tuwien.ac.at",
                "caption": "live demo",
                "description": "Live demo of the ViBEx application",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "image",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 2578,
                "image_height": 1292,
                "name": "eschner-2025-ide-image.png",
                "type": "image/png",
                "size": 1659146,
                "path": "Publication:eschner-2025-ide",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-image:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eschner-2025-ide-paper.pdf",
                "type": "application/pdf",
                "size": 46238959,
                "path": "Publication:eschner-2025-ide",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/eschner-2025-ide-paper:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/eschner-2025-ide/",
        "__class": "Publication"
    },
    {
        "id": "stoff-2025-pvu",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/209541",
        "title": "Prototypical Visualization: Using Prototypical Networks for Visualizing Large Unstructured Data",
        "date": "2025",
        "abstract": "Making sense of data is something that many professionals are required to do on a daily basis. This can be a difficult task if the amount of data is so large that it cannot be easily examined. One effective method of quickly getting an overview of data structure is visualization, but this is not always a feasible solution with large data due to the sheer amount of data and also the potentially high dimensionality. Machine learning models can help with the organization and classification of data, but they often require large quantities of labeled training data, which is frequently not readily available. This is why models that can reliably classify data based on only a few examples for each class are an interesting topic of research. One such kind of model is prototypical networks. They utilize few samples to create an embedding space in fewer dimensions, in which similar data points cluster around a single class prototype. In this thesis, we investigate if the embedding space of a prototypical network makes for a good approach for the purpose of visualizing high-dimensional, unstructured data. The goal is to reduce the dimensionality of the data in such a way that the high-dimensional relations and structures between data points are preserved, resulting in 2D representations of the data that form coherent class clusters in a scatter plot visualization. This approach is compared with, and evaluated against, other well known supervised and unsupervised dimensionality reduction techniques. Through quantitative experiments relying on statistical measures, as well as a qualitative evaluation of our results, we find that our ProtoNet is capable of producing point embeddings in which the spatial separation of classes is as good or better than the other methods.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            1865
        ],
        "ac_number": "AC17416388",
        "doi": "10.34726/hss.2025.119321",
        "open_access": "yes",
        "pages": "62",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Dimensionality Reduction",
            "Data Visualization",
            "Prototypical Network",
            "ProtoNet",
            "Class Separation",
            "Scatter Plot"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "stoff-2025-pvu-thesis.pdf",
                "type": "application/pdf",
                "size": 1529952,
                "path": "Publication:stoff-2025-pvu",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2025/stoff-2025-pvu/stoff-2025-pvu-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2025/stoff-2025-pvu/stoff-2025-pvu-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2025/stoff-2025-pvu/",
        "__class": "Publication"
    },
    {
        "id": "wolf-2024-jhd",
        "type_id": "studentproject",
        "tu_id": null,
        "repositum_id": null,
        "title": "Joint Human-Machine Data Exploration Sandbox",
        "date": "2024-09",
        "abstract": "Data analysis exploration is becoming increasingly challenging as datasets grow in scale and complexity. The Joint Human-Machine Data Exploration (JDE) framework offers a novel solution for analyzing large, unstructured datasets by integrating human insight with machine learning. The framework facilitates dynamic user interaction and visual exploration through three interconnected views: the data view, frame view, and knowledge view. These views enable users to align data exploration with evolving knowledge models. Implemented as a flexible, modular system using a client-server architecture, the JDE framework supports interactive data manipulation and real-time feedback. This project developed a functional prototype, the JDE sandbox, showcasing the system's potential for enhancing exploratory data analysis. Future work will focus on expanding the framework's capabilities and improving accessibility for a broader audience.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 7680,
            "image_height": 3915,
            "name": "wolf-2024-jhd-teaser.png",
            "type": "image/png",
            "size": 2648227,
            "path": "Publication:wolf-2024-jhd",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": null,
        "repositum_presentation_id": null,
        "authors": [
            5238
        ],
        "date_end": "2024-09",
        "date_start": "2023-09",
        "matrikelnr": "00925239",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [],
        "weblinks": [
            {
                "href": "https://gitlab.tuwien.ac.at/e193-02-jde/jde-sandbox",
                "caption": "GitLab",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://sandbox.jde.cg.tuwien.ac.at/sandbox",
                "caption": "demo",
                "description": "Online demo of the sandbox",
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "report",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "wolf-2024-jhd-report.pdf",
                "type": "application/pdf",
                "size": 8752213,
                "path": "Publication:wolf-2024-jhd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-report.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-report:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 7680,
                "image_height": 3915,
                "name": "wolf-2024-jhd-teaser.png",
                "type": "image/png",
                "size": 2648227,
                "path": "Publication:wolf-2024-jhd",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/wolf-2024-jhd-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/wolf-2024-jhd/",
        "__class": "Publication"
    },
    {
        "id": "matt-2024-cvil",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/199888",
        "title": "cVIL: Class-Centric Visual Interactive Labeling",
        "date": "2024-05-27",
        "abstract": "We present cVIL, a class-centric approach to visual interactive labeling, which facilitates human annotation of large and complex image data sets. cVIL uses different property measures to support instance labeling for labeling difficult instances and batch labeling to quickly label easy instances. Simulated experiments reveal that cVIL with batch labeling can outperform traditional labeling approaches based on active learning. In a user study, cVIL led to better accuracy and higher user preference compared to a traditional instance-based visual interactive labeling approach based on 2D scatterplots.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": "Screenshot of cVIL as employed in the user study",
            "filetitle": "cVIL teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1104,
            "image_height": 449,
            "name": "matt-2024-cvil-cVIL teaser.png",
            "type": "image/png",
            "size": 293869,
            "path": "Publication:matt-2024-cvil",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "abstract,projects,open_access",
        "repositum_presentation_id": null,
        "authors": [
            5263,
            5370,
            1110
        ],
        "booktitle": "Eurographics Proceedings",
        "date_from": "2024-05-27",
        "date_to": "2024-05-27",
        "doi": "10.2312/eurova.20241113",
        "editor": "El-Assady, Mennatallah and Schulz, Hans-Jörg",
        "event": "EuroVis Workshop on Visual Analytics (EuroVA 2024)",
        "isbn": "978-3-03868-056-7",
        "lecturer": [
            5263
        ],
        "location": "Aarhus",
        "open_access": "yes",
        "pages": "6",
        "publisher": "Eurographics",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Visual Analytics",
            "Interactive Machine Learning",
            "User Interface Design"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/server/api/core/bitstreams/c18fafcc-b4b4-4e51-bd2f-cec056c6d93a/content",
                "caption": "paper",
                "description": null,
                "main_file": 1
            },
            {
                "href": "https://gitlab.tuwien.ac.at/e193-02-jde/lava",
                "caption": "GitLab",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": "Screenshot of cVIL as employed in the user study",
                "filetitle": "cVIL teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1104,
                "image_height": 449,
                "name": "matt-2024-cvil-cVIL teaser.png",
                "type": "image/png",
                "size": 293869,
                "path": "Publication:matt-2024-cvil",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/matt-2024-cvil-cVIL teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/matt-2024-cvil/",
        "__class": "Publication"
    },
    {
        "id": "irendorfer-2024-uat",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/202525",
        "title": "User Approaches to Knowledge Externalization in Visual Analytics of Unstructured Data",
        "date": "2024",
        "abstract": "Traditional machine learning approaches for analyzing large unstructured data often depend on labelled training data and well-defined target definitions. However, these may not be available or feasible when dealing with unknown and unstructured data. It requires human reasoning and domain knowledge to interpret it. Interactive systems that combine human analytical abilities with machine learning techniques can address this limitation. However, to incorporate human knowledge in such systems, we need a better understanding of the semantic information and structures that users observe and expect while exploring unstructured data, as well as how they make their tacit knowledge explicit. This thesis aims to narrow the gap between human cognition and (knowledge-assisted) visual analytics. In a qualitative and exploratory user study, this thesis investigates how individuals explore a large unstructured dataset and which strategies they apply to externalize their mental models. By analyzing users' externalized mental models, we aim to better understand how their knowledge evolves during data exploration. We evaluate the comprehensiveness, detail and evolution of users' external knowledge representations by applying quantitative and qualitative methods, including a crowdsourcing step. The results show that users' externalized structures are able to represent a given dataset comprehensively and to a high degree of detail. While these knowledge representations are highly subjective and show various individual differences, we could identify structural similarities between individuals. In addition to the insights about how users externalize their tacit knowledge during data exploration, we propose design guidelines for (knowledge-assisted) visual analytics systems.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5399
        ],
        "doi": "10.34726/hss.2024.115066",
        "open_access": "yes",
        "pages": "80",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Knowledge Externalization",
            "Knowledge-Assisted Visualization",
            "Visual Analytics",
            "Unstructured Data",
            "Concept Maps",
            "Mental Models",
            "User Study",
            "Data Exploration"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "irendorfer-2024-uat-thesis.pdf",
                "type": "application/pdf",
                "size": 1981913,
                "path": "Publication:irendorfer-2024-uat",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/irendorfer-2024-uat-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/irendorfer-2024-uat-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/irendorfer-2024-uat/",
        "__class": "Publication"
    },
    {
        "id": "eitler-2024-sos",
        "type_id": "masterthesis",
        "tu_id": null,
        "repositum_id": "20.500.12708/205306",
        "title": "Spatial Organization Strategies in Exploratory Analysis of Unstructured Data",
        "date": "2024",
        "abstract": "As not only the amount but also the complexity of data increases, there is a growing need to support humans in the analysis of data that is not structured in a way that can be easily interpreted by machines. So-called “knowledge-assisted visual analytics” (KAVA) tools aim to address these challenges by integrating the knowledge of the analyst into their system to support the analysis process. In this thesis, we investigate the spatial organization strategies that users employ when exploring unstructured data. We aim to characterize the types of strategies that users employ, how they change over time, and how we can use them to infer the users’ knowledge of the data. To answer these questions, we first conduct a user study in which the participants explore an image dataset on a multitouch tabletop interface imitating an analogue setting and externalize their findings into concept maps. We observe their organization strategies and analyse their methods in a mixed-methods approach, combining qualitative analysis of the participants’ interview statements with quantitative analysis of the interaction logs. We find that the participants’ spatial organization strategies can be characterized by four features: semantic clusters, type of layout, uncovering process, and reorganization of the data. While most participants prefer layouts that give them an overview of the data, only about half create semantic clusters (i.e., grouping similar images together). The participants also mostly uncovered all images — which were initially on a stack — in the task right away before externalizing their knowledge, and only a few reorganized the images. We further find that the participants generally did not change their organization strategies over time, and that the resulting spatial arrangements do not necessarily provide valuable insights into the users’ knowledge of the data. Finally, we discuss our findings and list the limitations of our study.\nAs this thesis is embedded in a research project that aims to develop a tool for knowledge-assisted visual analytics, we discuss potential design implications for the development of such a tool.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": null,
        "sync_repositum_override": "projects",
        "repositum_presentation_id": null,
        "authors": [
            5419
        ],
        "doi": "10.34726/hss.2024.117186",
        "open_access": "yes",
        "pages": "95",
        "supervisor": [
            1110
        ],
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "visual analytics",
            "unstructured data",
            "spatial organization",
            "exploratory analysis",
            "knowledge-assisted visual analytics",
            "semantic interaction",
            "knowledge externalization"
        ],
        "weblinks": [],
        "files": [
            {
                "description": null,
                "filetitle": "thesis",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "eitler-2024-sos-thesis.pdf",
                "type": "application/pdf",
                "size": 2087980,
                "path": "Publication:eitler-2024-sos",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/eitler-2024-sos-thesis.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/eitler-2024-sos-thesis:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2024/eitler-2024-sos/",
        "__class": "Publication"
    },
    {
        "id": "indirectBiasLanguageModels-2023",
        "type_id": "inproceedings",
        "tu_id": null,
        "repositum_id": "20.500.12708/187890",
        "title": "Visual Exploration of Indirect Bias in Language Models",
        "date": "2023-06",
        "abstract": "Language models are trained on large text corpora that often include stereotypes. This can lead to direct or indirect bias in downstream applications. In this work, we present a method for interactive visual exploration of indirect multiclass bias learned by contextual word embeddings. We introduce a new indirect bias quantification score and present two interactive visualizations to explore interactions between multiple non-sensitive concepts (such as sports, occupations, and beverages) and sensitive attributes (such as gender or year of birth) based on this score.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1904,
            "image_height": 619,
            "name": "indirectBiasLanguageModels-2023-teaser.png",
            "type": "image/png",
            "size": 81826,
            "path": "Publication:indirectBiasLanguageModels-2023",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "date,location,keywords",
        "repositum_presentation_id": null,
        "authors": [
            5211,
            1110
        ],
        "booktitle": "EuroVis 2023 - Short Papers",
        "date_from": "2023-06-12",
        "date_to": "2023-06-16",
        "doi": "10.2312/evs.20231034",
        "event": "25th EG Conference on Visualization (EuroVis 2023)",
        "isbn": "978-3-03868-219-6",
        "lecturer": [
            1110
        ],
        "location": "Leipzig, Germany",
        "open_access": "yes",
        "pages": "5",
        "publisher": "The Eurographics Association",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "visual analytics",
            "language models",
            "bias"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/handle/10.2312/evs20231034",
                "caption": "Eurographics Digital Library",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://www.cg.tuwien.ac.at/IndirectBiasVis",
                "caption": "online demo",
                "description": null,
                "main_file": 1
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "paper",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-paper.pdf",
                "type": "application/pdf",
                "size": 465161,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-paper.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-paper:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "supplement",
                "main_file": false,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-supplement.pdf",
                "type": "application/pdf",
                "size": 124948,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-supplement.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-supplement:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1904,
                "image_height": 619,
                "name": "indirectBiasLanguageModels-2023-teaser.png",
                "type": "image/png",
                "size": 81826,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-teaser:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "video",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "indirectBiasLanguageModels-2023-video.mp4",
                "type": "video/mp4",
                "size": 51440954,
                "path": "Publication:indirectBiasLanguageModels-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video.mp4",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video:thumb{{size}}.png",
                "video_mp4": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/indirectBiasLanguageModels-2023-video:video.mp4"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/indirectBiasLanguageModels-2023/",
        "__class": "Publication"
    },
    {
        "id": "webGPU_aggregateVis-2023",
        "type_id": "poster",
        "tu_id": null,
        "repositum_id": "20.500.12708/187891",
        "title": "WebGPU for Scalable Client-Side Aggregate Visualization",
        "date": "2023-06",
        "abstract": "WebGPU is a new graphics API, which now provides compute shaders for general purpose GPU operations in web browsers. We demonstrate the potential of this new technology for scalable information visualization by showing how to filter and aggregate a spatio-temporal dataset with millions of temperature measurements for real-time interactive exploration of climate change.",
        "authors_et_al": false,
        "substitute": null,
        "main_image": {
            "description": null,
            "filetitle": "teaser",
            "main_file": false,
            "use_in_gallery": true,
            "access": "public",
            "image_width": 1460,
            "image_height": 820,
            "name": "webGPU_aggregateVis-2023-teaser.png",
            "type": "image/png",
            "size": 1143735,
            "path": "Publication:webGPU_aggregateVis-2023",
            "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser.png",
            "thumb_image_sizes": [
                16,
                64,
                100,
                175,
                300,
                600
            ],
            "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser:thumb{{size}}.png"
        },
        "sync_repositum_override": "abstract,date",
        "repositum_presentation_id": null,
        "authors": [
            1869,
            5238,
            1110
        ],
        "booktitle": "EuroVis 2023 - Posters",
        "date_from": "2023-06-12",
        "date_to": "2023-06-16",
        "doi": "10.2312/evp.20231079",
        "event": "25th EG Conference on Visualization (EuroVis 2023)",
        "isbn": "978-3-03868-220-2",
        "lecturer": [
            1110
        ],
        "location": "Leipzig",
        "pages": "3",
        "pages_from": "105",
        "pages_to": "107",
        "publisher": "Eurographics",
        "research_areas": [
            "InfoVis"
        ],
        "keywords": [
            "Information visualization",
            "Aggregate visualization",
            "Scalable visualization",
            "WebGPU"
        ],
        "weblinks": [
            {
                "href": "https://diglib.eg.org/xmlui/handle/10.2312/evp20231079",
                "caption": "Eurographics Digital Library",
                "description": null,
                "main_file": 0
            },
            {
                "href": "https://ccexplorer.github.io/",
                "caption": "Climate Change Explorer",
                "description": "online demo",
                "main_file": 1
            },
            {
                "href": "https://cde.gkdev.at/cde/?db=cdata_interp_2bit_0.lzma",
                "caption": "large demo",
                "description": "Demo with more data points",
                "main_file": 0
            }
        ],
        "files": [
            {
                "description": null,
                "filetitle": "extended abstract",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "webGPU_aggregateVis-2023-extended abstract.pdf",
                "type": "application/pdf",
                "size": 1466660,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-extended abstract.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-extended abstract:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "poster",
                "main_file": true,
                "use_in_gallery": false,
                "access": "public",
                "name": "webGPU_aggregateVis-2023-poster.pdf",
                "type": "application/pdf",
                "size": 1114759,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-poster.pdf",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-poster:thumb{{size}}.png"
            },
            {
                "description": null,
                "filetitle": "teaser",
                "main_file": false,
                "use_in_gallery": true,
                "access": "public",
                "image_width": 1460,
                "image_height": 820,
                "name": "webGPU_aggregateVis-2023-teaser.png",
                "type": "image/png",
                "size": 1143735,
                "path": "Publication:webGPU_aggregateVis-2023",
                "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser.png",
                "thumb_image_sizes": [
                    16,
                    64,
                    100,
                    175,
                    300,
                    600
                ],
                "thumb_url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/webGPU_aggregateVis-2023-teaser:thumb{{size}}.png"
            }
        ],
        "projects_workgroups": [
            "d9282"
        ],
        "url": "https://www.cg.tuwien.ac.at/research/publications/2023/webGPU_aggregateVis-2023/",
        "__class": "Publication"
    }
]
