@article{202791,
  keywords = {Motor learning, context learning, meta-learning, reinforcement learning, navigation},
  author = {Carlos Vel{\'a}zquez-Vargas and Isaac Christian and Jordan A. Taylor and Sreejan Kumar},
  title = {Learning to abstract visuomotor mappings using meta-reinforcement learning},
  abstract = {
We investigated the human capacity to acquire multiple visuomotor mappings for
de novo skills. Using a grid navigation paradigm, we tested whether contextual
cues, implemented as different {\textquotedblleft}grid worlds{\textquotedblright}, allow participants to
learn two distinct key-mappings more efficiently. Our results indicate that
task performance is significantly better when contextual information is
provided. The same held true for meta-reinforcement learning agents that
differed in whether or not they received contextual information while
performing the task. We evaluated the agents' accuracy in predicting human
performance on the task and analyzed their internal representations. The
results indicate that contextual cues enable the formation of separate
representations in space and time when different visuomotor mappings are used,
whereas their absence favors sharing a single representation. While both
strategies allow multiple visuomotor mappings to be learned, we showed that
contextual cues provide a computational advantage in terms of how many
mappings can be learned.
  },
  year = {2024},
  journal = {ICLR 2024 Workshop on Representational Alignment (Re-Align)},
  volume = {46},
  pages = {1--11},
  month = jul,
  url = {https://escholarship.org/uc/item/4jj4q4df},
}