@article{Das2024, title = {Shifting Focus with HCEye: Exploring the Dynamics of Visual Highlighting and Cognitive Load on User Attention and Saliency Prediction}, author = {Anwesha Das and Zekun Wu and Iza Škrjanec and Anna Maria Feit}, url = {https://arxiv.org/abs/2404.14232 https://osf.io/x8p9b/}, doi = {10.1145/3655610}, year = {2024}, date = {2024-06-03}, urldate = {2024-06-03}, journal = {Proceedings of the ACM on Human-Computer Interaction}, volume = {8}, number = {ETRA}, articleno = {236}, abstract = {Visual highlighting can guide user attention in complex interfaces. However, its effectiveness under limited attentional capacities is underexplored. This paper examines the joint impact of visual highlighting (permanent and dynamic) and dual-task-induced cognitive load on gaze behaviour. Our analysis, using eye-movement data from 27 participants viewing 150 unique webpages, reveals that while participants' ability to attend to UI elements decreases with increasing cognitive load, dynamic adaptations (i.e., highlighting) remain attention-grabbing. The presence of these factors significantly alters what people attend to and thus what is salient. Accordingly, we show that state-of-the-art saliency models increase their performance when accounting for different cognitive loads. Our empirical insights, along with our openly available dataset, enhance our understanding of attentional processes in UIs under varying cognitive (and perceptual) loads and open the door for new models that can predict user attention while multitasking.}, keywords = {}, pubstate = {published}, tppubtype = {article} } @article{10.1145/3604276, title = {Typing Behavior is About More than Speed: Users' Strategies for Choosing Word Suggestions Despite Slower Typing Rates}, author = {Florian Lehmann and Itto Kornecki and Daniel Buschek and Anna Maria Feit}, url = {https://dl.acm.org/doi/abs/10.1145/3604276 https://osf.io/u9aej/}, doi = {10.1145/3604276}, year = {2023}, date = {2023-09-01}, urldate = {2023-09-01}, journal = {Proc. ACM Hum.-Comput. Interact.}, volume = {7}, number = {MHCI}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Mobile word suggestions can slow down typing, yet are still widely used. To investigate the apparent benefits beyond speed, we analyzed typing behavior of 15,162 users of mobile devices. Controlling for natural typing speed (a confounding factor not considered by prior work), we statistically show that slower typists use suggestions more often but are slowed down by doing so. To better understand how these typists leverage suggestions – if not to improve their speed – we extract eight usage strategies, including completion, correction, and next-word prediction. We find that word characteristics, such as length or frequency, along with the strategy, are predictive of whether a user will select a suggestion. We show how to operationalize our findings by building and evaluating a predictive model of suggestion selection.
Such a model could be used to augment existing suggestion algorithms to consider people's strategic use of word predictions beyond speed and keystroke savings.}, keywords = {intelligent text entry methods, mobile text entry, text entry, typing, word prediction, word suggestion}, pubstate = {published}, tppubtype = {article} } @inproceedings{johns2024, title = {Towards Flexible and Robust User Interface Adaptations With Multiple Objectives}, author = {Christoph Albert Johns and João Marcelo Evangelista Belo and Anna Maria Feit and Clemens Nylandsted Klokmose and Ken Pfeuffer}, url = {https://dl.acm.org/doi/abs/10.1145/3586183.3606799}, doi = {10.1145/3586183.3606799}, isbn = {9798400701320}, year = {2023}, date = {2023-01-01}, urldate = {2023-01-01}, booktitle = {Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology}, publisher = {Association for Computing Machinery}, address = {San Francisco, CA, USA}, series = {UIST '23}, abstract = {This paper proposes a new approach for online UI adaptation that aims to overcome the limitations of the most commonly used UI optimization method involving multiple objectives: weighted sum optimization. Weighted sums are highly sensitive to objective formulation, limiting the effectiveness of UI adaptations. We propose ParetoAdapt, an adaptation approach that uses online multi-objective optimization with a posteriori articulated preferences—that is, articulation of preferences after the optimization has concluded—to make UI adaptation robust to incomplete and inaccurate objective formulations. It offers users a flexible way to control adaptations by selecting from a set of Pareto optimal adaptation proposals and adjusting them to fit their needs. We showcase the feasibility and flexibility of ParetoAdapt by implementing an online layout adaptation system in a state-of-the-art 3D UI adaptation framework. We further evaluate its robustness and run-time in simulation-based experiments that allow us to systematically change the accuracy of the estimated user preferences. We conclude by discussing how our approach may impact the usability and practicality of online UI adaptations.}, keywords = {multi-objective optimization, online UI adaptation, Pareto frontier}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{belo22, title = {AUIT – the Adaptive User Interfaces Toolkit for Designing XR Applications}, author = {João Marcelo Evangelista Belo and Mathias N Lystbæk and Anna Maria Feit and Ken Pfeuffer and Peter Kán and Antti Oulasvirta and Kaj Grønbæk}, url = {https://dl.acm.org/doi/fullHtml/10.1145/3526113.3545651 https://github.com/joaobelo92/auit}, doi = {10.1145/3526113.3545651}, year = {2022}, date = {2022-10-29}, urldate = {2022-10-29}, booktitle = {Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology, UIST'22}, abstract = {Adaptive user interfaces can improve experiences in Extended Reality (XR) applications by adapting interface elements according to the user's context. Although extensive work explores different adaptation policies, XR creators often struggle with their implementation, which involves laborious manual scripting. The few available tools are underdeveloped for realistic XR settings where it is often necessary to consider conflicting aspects that affect an adaptation. We fill this gap by presenting AUIT, a toolkit that facilitates the design of optimization-based adaptation policies.
AUIT allows creators to flexibly combine policies that address common objectives in XR applications, such as element reachability, visibility, and consistency. Instead of using rules or scripts, specifying adaptation policies via adaptation objectives simplifies the design process and enables creative exploration of adaptations. After creators decide which adaptation objectives to use, a multi-objective solver finds appropriate adaptations in real-time. A study showed that AUIT allowed creators of XR applications to quickly and easily create high-quality adaptations.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{zhang2021, title = {Eye Gaze Estimation and Its Applications}, author = {Xucong Zhang and Seonwook Park and Anna Maria Feit}, editor = {Yang Li and Otmar Hilliges}, url = {https://cix.cs.uni-saarland.de/wp-content/uploads/2022/05/Zhang2021_Chapter_EyeGazeEstimationAndItsApplica.pdf}, doi = {10.1007/978-3-030-82681-9_4}, year = {2021}, date = {2021-11-04}, publisher = {Springer}, series = {Artificial Intelligence for Human Computer Interaction: A Modern Approach}, abstract = {The human eye gaze is an important non-verbal cue that can unobtrusively provide information about the intention and attention of a user to enable intelligent interactive systems. Eye gaze can also be taken as input to systems as a replacement of the conventional mouse and keyboard, and can also be indicative of the cognitive state of the user. However, estimating and applying gaze in real-world applications poses significant challenges. In this chapter, we first review the development of gaze estimation methods in recent years. We especially focus on learning-based gaze estimation methods which benefit from large-scale data and deep learning methods that recently became available. Second, we discuss the challenges of using gaze estimation for real-world applications and our efforts toward making these methods easily usable for the Human-Computer Interaction community. Lastly, we provide two application examples, demonstrating the use of eye gaze to enable attentive and adaptive interfaces.}, keywords = {}, pubstate = {published}, tppubtype = {inbook} } @conference{hetzel22, title = {Complex Interaction as Emergent Behaviour: Simulating Mid-Air Virtual Keyboard Typing using Reinforcement Learning}, author = {Lorenz Hetzel and John Dudley and Anna Maria Feit and Per Ola Kristensson}, url = {http://pokristensson.com/pubs/HetzelEtAlTVCG2021.pdf}, doi = {10.1109/TVCG.2021.3106494}, year = {2021}, date = {2021-08-27}, urldate = {2021-08-27}, booktitle = {IEEE Transactions on Visualization and Computer Graphics}, publisher = {IEEE}, abstract = {Accurately modelling user behaviour has the potential to significantly improve the quality of human-computer interaction. Traditionally, these models are carefully hand-crafted to approximate specific aspects of well-documented user behaviour. This limits their availability in virtual and augmented reality where user behaviour is often not yet well understood. Recent efforts have demonstrated that reinforcement learning can approximate human behaviour during simple goal-oriented reaching tasks. We build on these efforts and demonstrate that reinforcement learning can also approximate user behaviour in a complex mid-air interaction task: typing on a virtual keyboard. We present the first reinforcement learning-based user model for mid-air and surface-aligned typing on a virtual keyboard.
Our model is shown to replicate high-level human typing behaviour. We demonstrate that this approach may be used to augment or replace human testing during the validation and development of virtual keyboards.}, keywords = {}, pubstate = {published}, tppubtype = {conference} } @inproceedings{xrgonomics21, title = {XRgonomics: Facilitating the Creation of Ergonomic 3D Interfaces}, author = {João Belo and Anna Maria Feit and Tiare Feuchtner and Kaj Grønbæk }, url = {https://www.researchgate.net/publication/349110658_XRgonomics_Facilitating_the_Creation_of_Ergonomic_3D_Interfaces#fullTextFileContent https://joaomebelo.com/#/project/xrgonomics}, doi = {10.1145/3411764.3445349}, year = {2021}, date = {2021-05-08}, booktitle = {Proceedings of the CHI Conference on Human Factors in Computing Systems (CHI'21)}, publisher = {ACM}, abstract = {Arm discomfort is a common issue in Cross Reality applications involving prolonged mid-air interaction. Solving this problem is difficult because of the lack of tools and guidelines for 3D user interface design. Therefore, we propose a method to make existing ergonomic metrics available to creators during design by estimating the interaction cost at each reachable position in the user's environment. We present XRgonomics, a toolkit to visualize the interaction cost and make it available at runtime, allowing creators to identify UI positions that optimize users' comfort. Two scenarios show how the toolkit can support 3D UI design and dynamic adaptation of UIs based on spatial constraints. We present results from a walkthrough demonstration, which highlight the potential of XRgonomics to make ergonomics metrics accessible during the design and development of 3D UIs. Finally, we discuss how the toolkit may address design goals beyond ergonomics.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @article{10.1145/3382035, title = {AZERTY Amélioré: Computational Design on a National Scale}, author = {Anna Maria Feit and Mathieu Nancel and Maximilian John and Andreas Karrenbauer and Daryl Weir and Antti Oulasvirta}, url = {https://cacm.acm.org/magazines/2021/2/250082-azerty-amlior http://norme-azerty.fr/en }, doi = {10.1145/3382035}, issn = {0001-0782}, year = {2021}, date = {2021-01-01}, journal = {Communications of the ACM}, volume = {64}, number = {2}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {France is the first country in the world to adopt a keyboard standard informed by computational methods, improving the performance, ergonomics, and intuitiveness of the keyboard while enabling input of many more characters. We describe a human-centric approach developed jointly with stakeholders to utilize computational methods in the decision process not only to solve a well-defined problem but also to understand the design requirements, to inform subjective views, or to communicate the outcomes. To be more broadly useful, research must develop computational methods that can be used in a participatory and inclusive fashion respecting the different needs and roles of stakeholders. 
}, keywords = {}, pubstate = {published}, tppubtype = {article} } @inproceedings{feit20, title = {Detecting Relevance during Decision-Making from Eye Movements for UI Adaptation}, author = {Anna Maria Feit and Lukas Vordemann and Seonwook Park and Caterina Berube and Otmar Hilliges}, url = {https://ait.ethz.ch/projects/2020/relevance-detection/}, doi = {10.1145/3379155.3391321}, isbn = {9781450371339}, year = {2020}, date = {2020-06-01}, booktitle = {Symposium on Eye Tracking Research and Applications}, publisher = {Association for Computing Machinery}, series = {ETRA '20}, abstract = {This paper proposes an approach to detect information relevance during decision-making from eye movements in order to enable user interface adaptation. This is a challenging task because gaze behavior varies greatly across individual users and tasks and ground-truth data is difficult to obtain. Thus, prior work has mostly focused on simpler target-search tasks or on establishing general interest, where gaze behavior is less complex. From the literature, we identify six metrics that capture different aspects of the gaze behavior during decision-making and combine them in a voting scheme. We empirically show that this accounts for the large variations in gaze behavior and outperforms standalone metrics. Importantly, it offers an intuitive way to control the amount of detected information, which is crucial for different UI adaptation schemes to succeed. We show the applicability of our approach by developing a room-search application that changes the visual saliency of content detected as relevant. In an empirical study, we show that it detects up to 97% of relevant elements with respect to user self-reporting, which allows us to meaningfully adapt the interface, as confirmed by participants. Our approach is fast, does not need any explicit user input and can be applied independent of task and user.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{lindlbauer19, title = {Context-Aware Online Adaptation of Mixed Reality Interfaces}, author = {David Lindlbauer and Anna Maria Feit and Otmar Hilliges}, url = {https://ait.ethz.ch/projects/2019/computationalMR/}, doi = {10.1145/3332165.3347945}, isbn = {9781450368162}, year = {2019}, date = {2019-01-01}, urldate = {2019-01-01}, booktitle = {Symposium on User Interface Software and Technology}, publisher = {ACM}, series = {UIST '19}, abstract = {We present an optimization-based approach for Mixed Reality (MR) systems to automatically control when and where applications are shown, and how much information they display. Currently, content creators design applications, and users then manually adjust which applications are visible and how much information they show. This choice has to be adjusted every time users switch context, i.e., whenever they switch their task or environment. Since context switches happen many times a day, we believe that MR interfaces require automation to alleviate this problem. We propose a real-time approach to automate this process based on users' current cognitive load and knowledge about their task and environment. Our system adapts which applications are displayed, how much information they show, and where they are placed. We formulate this problem as a mix of rule-based decision making and combinatorial optimization which can be solved efficiently in real-time. We present a set of proof-of-concept applications showing that our approach is applicable in a wide range of scenarios.
Finally, we show in a dual-task evaluation that our approach decreased secondary-task interactions by 36%.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{palin19, title = {How Do People Type on Mobile Devices? Observations from a Study with 37,000 Volunteers}, author = {Kseniia Palin and Anna Maria Feit and Sunjun Kim and Per Ola Kristensson and Antti Oulasvirta}, url = {https://userinterfaces.aalto.fi/typing37k/ https://www.slideshare.net/kimsunjun5/how-do-people-type-on-mobile-devices-observations-from-a-study-with-37000-volunteers-mobilehci-2019}, doi = {10.1145/3338286.3340120}, isbn = {9781450368254}, year = {2019}, date = {2019-01-01}, booktitle = {International Conference on Human-Computer Interaction with Mobile Devices and Services}, publisher = {ACM}, address = {New York, NY, USA}, series = {MobileHCI '19}, abstract = {This paper presents a large-scale dataset on mobile text entry collected via a web-based transcription task performed by 37,370 volunteers. The average typing speed was 36.2 WPM with 2.3% uncorrected errors. The scale of the data enables powerful statistical analyses on the correlation between typing performance and various factors, such as demographics, finger usage, and use of intelligent text entry techniques. We report effects of age and finger usage on performance that correspond to previous studies. We also find evidence of relationships between performance and use of intelligent text entry techniques: auto-correct usage correlates positively with entry rates, whereas word prediction usage has a negative correlation. To aid further work on modeling, machine learning and design improvements in mobile text entry, we make the code and dataset openly available.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{10.1145/3290607.3313773, title = {SIGCHI Outstanding Dissertation Award: Assignment Problems for Optimizing Text Input}, author = {Anna Maria Feit}, url = {https://doi.org/10.1145/3290607.3313773}, doi = {10.1145/3290607.3313773}, isbn = {9781450359719}, year = {2019}, date = {2019-01-01}, booktitle = {Extended Abstracts of the SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, series = {CHI EA '19}, abstract = {Text input methods are an integral part of our daily interaction with digital devices. However, their design poses a complex problem: for any method, we must decide which input action (a button press, a hand gesture, etc.) produces which symbol (e.g., a character or word). With only 26 symbols and input actions, there are already more than 10^26 distinct solutions, making it impossible to find the best one through manual design. Prior work has shown that we can use optimization methods to search such large design spaces efficiently and automatically find a good user interface with respect to the given objectives [6]. However, work in the text entry domain has been limited mostly to the performance optimization of (soft-)keyboards (see [2] for an overview). The Ph.D. thesis [2] advances the field of text-entry optimization by enlarging the space of optimizable text-input methods and proposing new criteria for assessing their optimality. Firstly, the design problem is formulated as an assignment problem for integer programming. This enables the use of standard mathematical solvers and algorithms for efficiently finding good solutions.
Then, objective functions are developed for assessing their optimality with respect to motor performance, ergonomics, and learnability. The corresponding models extend beyond interaction with soft keyboards, to consider multi-finger input, novel sensors, and alternative form factors. In addition, the thesis illustrates how to formulate models from prior work in terms of an assignment problem, providing a coherent theoretical basis for text entry optimization. The proposed objectives are applied in the optimization of three assignment problems: text input with multi-finger gestures in mid-air [8], text input on a long piano keyboard [4], and - for a contribution to the official French keyboard standard - input of special characters via a physical keyboard [3]. Combining the proposed models offers a multi-objective optimization approach able to capture the complex cognitive and motor processes during typing...}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{park18, title = {AdaM: Adapting Multi-User Interfaces for Collaborative Environments in Real-Time}, author = {Seonwook Park and Christoph Gebhardt and Roman Rädle and Anna Maria Feit and Hana Vrzakova and Niraj Dayama and Hui-Shyong Yeo and Clemens Klokmose and Aaron Quigley and Antti Oulasvirta and Otmar Hilliges}, url = {https://ait.ethz.ch/projects/2018/adam/}, doi = {10.1145/3173574.3173758}, year = {2018}, date = {2018-01-01}, urldate = {2018-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, series = {CHI '18}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @phdthesis{Feit2018, title = {Assignment Problems for Optimizing Text Input}, author = {Anna Maria Feit}, url = {http://urn.fi/URN:ISBN:978-952-60-8016-1}, year = {2018}, date = {2018-01-01}, pages = {182 + app. 56}, publisher = {Aalto University}, school = {Aalto University}, abstract = {Text input methods are an integral part of our daily interaction with digital devices. However, their design poses a complex problem: for any method, we must decide which input action (a button press, a hand gesture, etc.) produces which symbol (e.g., a character or word). With only 26 symbols and input actions, there are already more than 10^26 distinct solutions, making it impossible to find the best one through manual design. Prior work has shown that we can use optimization methods to search such large design spaces efficiently and automatically find the best solution for a given task and objective. However, work in this domain has been limited mostly to the performance optimization of keyboards. The Ph.D. thesis advances the field of text-entry optimization by enlarging the space of optimizable text-input methods and proposing new criteria for assessing their optimality. Firstly, the design problem is formulated as an assignment problem for integer programming. This enables the use of standard mathematical solvers and algorithms for efficiently finding good solutions. Then, objective functions are developed for assessing their optimality with respect to motor performance, ergonomics, and learnability. The corresponding models extend beyond interaction with soft keyboards, to consider multi-finger input, novel sensors, and alternative form factors. In addition, the thesis illustrates how to formulate models from prior work in terms of an assignment problem, providing a coherent theoretical basis for text-entry optimization.
The proposed objectives are applied in the optimization of three assignment problems: text input with multi-finger gestures in mid-air, text input on a long piano keyboard, and -- for a contribution to the official French keyboard standard -- input of special characters via a physical keyboard. Combining the proposed models offers a multi-objective optimization approach able to capture the complex cognitive and motor processes during typing. Finally, the dissertation discusses future work that is needed to solve the long-standing problem of finding the optimal layout for physical keyboards, in light of empirical evidence that prior models are insufficient to respond to the diverse typing strategies people employ with modern keyboards. The thesis advances the state of the art in text-entry optimization by proposing novel objective functions that quantify the performance, ergonomics and learnability of a text input method. The objectives presented are formulated as assignment problems, which can be solved with integer programming via standard mathematical solvers or heuristic algorithms. While the work focused on text input, the assignment problem can be used to model other design problems in HCI (e.g., how best to assign commands to UI controls or distribute UI elements across several devices), for which the same problem formulations, optimization techniques, and even models could be applied.}, keywords = {}, pubstate = {published}, tppubtype = {phdthesis} } @inproceedings{Dhakal2018, title = {Observations on Typing from 136 Million Keystrokes}, author = {Vivek Dhakal and Anna Maria Feit and Per Ola Kristensson and Antti Oulasvirta}, url = {https://userinterfaces.aalto.fi/136Mkeystrokes/ https://www.slideshare.net/oulasvir/observations-on-typing-from-136-million-keystrokes-presentation-by-antti-oulasvirta-at-chi2018-april-2018-montreal}, doi = {10.1145/3173574.3174220}, isbn = {9781450356206}, year = {2018}, date = {2018-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, US}, series = {CHI'18}, abstract = {We report on typing behaviour and performance of 168,000 volunteers in an online study. The large dataset allows detailed statistical analyses of keystroking patterns, linking them to typing performance. Besides reporting distributions and confirming some earlier findings, we report two new findings. First, letter pairs typed by different hands or fingers are more predictive of typing speed than, for example, letter repetitions. Second, rollover-typing, wherein the next key is pressed before the previous one is released, is surprisingly prevalent. Notwithstanding considerable variation in typing patterns, unsupervised clustering using normalised inter-key intervals reveals that most users can be divided into eight groups of typists that differ in performance, accuracy, hand and finger usage, and rollover.
The code and dataset are released for scientific use.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{10.1145/3173574.3173919, title = {Physical Keyboards in Virtual Reality: Analysis of Typing Performance and Effects of Avatar Hands}, author = {Pascal Knierim and Valentin Schwind and Anna Maria Feit and Florian Nieuwenhuizen and Niels Henze}, url = {https://doi.org/10.1145/3173574.3173919}, doi = {10.1145/3173574.3173919}, isbn = {9781450356206}, year = {2018}, date = {2018-01-01}, urldate = {2018-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, series = {CHI '18}, abstract = {Entering text is one of the most common tasks when interacting with computing systems. Virtual Reality (VR) presents a challenge as neither the user's hands nor the physical input devices are directly visible. Hence, conventional desktop peripherals are very slow, imprecise, and cumbersome. We developed an apparatus that tracks the user's hands and a physical keyboard, and visualizes them in VR. In a text input study with 32 participants, we investigated the achievable text entry speed and the effect of hand representations and transparency on typing performance, workload, and presence. With our apparatus, experienced typists benefited from seeing their hands and reached almost outside-VR performance. Inexperienced typists profited from semi-transparent hands, which enabled them to type just 5.6 WPM slower than with a regular desktop setup. We conclude that optimizing the visualization of hands in VR is important, especially for inexperienced typists, to enable a high typing performance.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{speicher18, title = {Selection-Based Text Entry in Virtual Reality}, author = {Marco Speicher and Anna Maria Feit and Pascal Ziegler and Antonio Krüger}, url = {https://umtl.cs.uni-saarland.de/research/projects/selection-based-text-entry-in-virtual-reality.html}, doi = {10.1145/3173574.3174221}, isbn = {9781450356206}, year = {2018}, date = {2018-01-01}, urldate = {2018-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, series = {CHI '18}, abstract = {In recent years, Virtual Reality (VR) and 3D User Interfaces (3DUI) have seen a drastic increase in popularity, especially in terms of consumer-ready hardware and software. While the technology for input as well as output devices is market ready, only a few solutions for text input exist, and empirical knowledge about performance and user preferences is lacking. In this paper, we study text entry in VR by selecting characters on a virtual keyboard. We discuss the design space for assessing selection-based text entry in VR. Then, we implement six methods that span different parts of the design space and evaluate their performance and user preferences. Our results show that pointing using tracked hand-held controllers outperforms all other methods. Other methods such as head pointing can be viable alternatives depending on available resources.
We summarize our findings by formulating guidelines for choosing optimal virtual keyboard text entry methods in VR.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Feit2017, title = {Toward everyday gaze input: Accuracy and precision of eye tracking and implications for design}, author = {Anna Maria Feit and Shane Williams and Arturo Toledo and Ann Paradiso and Harish Kulkarni and Shaun Kane and Meredith Ringel Morris}, url = {https://www.slideshare.net/AnnaMariaFeit/toward-everyday-gaze-input-accuracy-and-precision-of-eye-tracking-and-implications-for-design}, doi = {10.1145/3025453.3025599}, isbn = {9781450346559}, year = {2017}, date = {2017-05-01}, urldate = {2017-05-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, USA}, abstract = {For eye tracking to become a ubiquitous part of our everyday interaction with computers, we first need to understand its limitations outside rigorously controlled labs, and develop robust applications that can be used by a broad range of users and in various environments. Toward this end, we collected eye tracking data from 80 people in a calibration-style task, using two different trackers in two lighting conditions. We found that accuracy and precision can vary between users and targets more than six-fold, and report on differences between lighting, trackers, and screen regions. We show how such data can be used to determine appropriate target sizes and to optimize the parameters of commonly used filters. We conclude with design recommendations and examples of how our findings and methodology can inform the design of error-aware adaptive applications.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @article{10.1145/3131608, title = {Computational Support for Functionality Selection in Interaction Design}, author = {Antti Oulasvirta and Anna Maria Feit and Perttu Lähteenlahti and Andreas Karrenbauer}, url = {https://doi.org/10.1145/3131608}, doi = {10.1145/3131608}, issn = {1073-0516}, year = {2017}, date = {2017-01-01}, urldate = {2017-01-01}, journal = {ACM Transactions on Computer-Human Interaction}, volume = {24}, number = {5}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Designing interactive technology entails several objectives, one of which is identifying and selecting appropriate functionality. Given candidate functionalities such as “print,” “bookmark,” and “share,” a designer has to choose which functionalities to include and which to leave out. Such choices critically affect the acceptability, productivity, usability, and experience of the design. However, designers may overlook reasonable designs because there is an exponential number of functionality sets and multiple factors to consider. This article is the first to formally define this problem and propose an algorithmic method to support designers to explore alternative functionality sets in early stage design. Based on interviews of professional designers, we mathematically define the task of identifying functionality sets that strike the best balance among four objectives: usefulness, satisfaction, ease of use, and profitability. We develop an integer linear programming solution that can efficiently solve very large instances (set size over 1,300) on a regular computer. Further, we build on techniques of robust optimization to search for diverse and surprising functionality designs.
Empirical results from a controlled study and field deployment are encouraging. Most designers rated computationally created sets as being of comparable or superior quality to their own. Designers reported gaining a better understanding of available functionalities and the design space.}, keywords = {}, pubstate = {published}, tppubtype = {article} } @inproceedings{Feit2016, title = {How We Type: Movement Strategies and Performance in Everyday Typing}, author = {Anna Maria Feit and Daryl Weir and Antti Oulasvirta}, url = {https://userinterfaces.aalto.fi/how-we-type/}, doi = {10.1145/2858036.2858233}, isbn = {978-1-4503-3362-7}, year = {2016}, date = {2016-01-01}, urldate = {2016-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, NY, US}, series = {CHI '16}, abstract = {This paper revisits the present understanding of typing, which originates mostly from studies of trained typists using the ten-finger touch typing system. Our goal is to characterise the majority of present-day users who are untrained and employ diverse, self-taught techniques. In a transcription task, we compare self-taught typists and those that took a touch typing course. We report several differences in performance, gaze deployment and movement strategies. The most surprising finding is that self-taught typists can achieve performance levels comparable with touch typists, even when using fewer fingers. Motion capture data exposes 3 predictors of high performance: 1) unambiguous mapping (a letter is consistently pressed by the same finger), 2) active preparation of upcoming keystrokes, and 3) minimal global hand motion. We release an extensive dataset on everyday typing behavior.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @workshop{Feit2015_multiobjective, title = {Towards Multi-Objective Optimization for UI Design}, author = {Anna Maria Feit and Myroslav Bachynskyi and Srinath Sridhar}, url = {http://annafeit.de/resources/papers/Multiobjective_Optimization2015.pdf}, year = {2015}, date = {2015-04-01}, booktitle = {Workshop on Principles, Techniques and Perspectives on Optimization and HCI, CHI'15}, address = {Seoul, Korea}, abstract = {In recent years computational optimization has been applied to the problem of finding good designs for user interfaces with huge design spaces. There, designers are struggling to integrate many different objectives into the design process, such as ergonomics, learnability or performance. However, most computationally designed interfaces are optimized with respect to only one objective. In this paper we argue that multi-objective optimization is needed to improve over manual designs. We identify 8 categories that cover design principles from UI design and usability engineering.
We propose a multi-objective function in the form of a linear combination of these factors and discuss benefits and pitfalls of multi-objective optimization.}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } @inproceedings{Sridhar2015, title = {Investigating the Dexterity of Multi-Finger Input for Mid-Air Text Entry}, author = {Srinath Sridhar and Anna Maria Feit and Christian Theobalt and Antti Oulasvirta}, url = {http://handtracker.mpi-inf.mpg.de/projects/HandDexterity/}, doi = {10.1145/2702123.2702136}, isbn = {9781450331456}, year = {2015}, date = {2015-01-01}, urldate = {2015-01-01}, booktitle = {SIGCHI Conference on Human Factors in Computing Systems}, publisher = {ACM}, address = {New York, New York, USA}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Feit2014, title = {PianoText: Redesigning the Piano Keyboard for Text Entry}, author = {Anna Maria Feit and Antti Oulasvirta}, url = {http://annafeit.de/pianotext}, doi = {10.1145/2598510.2598547}, isbn = {978-1-4503-2902-6}, year = {2014}, date = {2014-01-01}, urldate = {2014-01-01}, booktitle = {Conference on Designing Interactive Systems}, publisher = {ACM}, series = {DIS '14}, abstract = {Inspired by the high keying rates of skilled pianists, we study the design of piano keyboards for rapid text entry. We review the qualities of the piano as an input device, observing four design opportunities: 1) chords, 2) redundancy (more keys than letters in English), 3) the transfer of musical skill and 4) optional sound feedback. Although some have been utilized in previous text entry methods, our goal is to exploit all four in a single design. We present PianoText, a computationally designed mapping that assigns letter sequences of English to frequent note transitions of music. It allows fast text entry on any MIDI-enabled keyboard and was evaluated in two transcription typing studies. Both show an achievable rate of over 80 words per minute. This parallels the rates of expert Qwerty typists and doubles that of a previous piano-based design from the 19th century. We also design PianoText-Mini, which allows for comparable performance in a portable form factor. Informed by the studies, we estimate the upper bound of typing performance, draw implications to other text entry methods, and critically discuss outstanding design challenges.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} }