-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathindex.json
1 lines (1 loc) · 179 KB
/
index.json
1
[{"categories":null,"content":"Raiyah Ahmed is an undergraduate student at UC Santa Barbara pursuing a dual degree in Psychological \u0026amp; Brain Sciences (BS) and Statistics and Data Science (BA). She is interested in using computational neuroscience to bridge the gap between technology and brain sciences. Raiyah would like to apply data analysis techniques to advance brain mapping research.\nIn her free time, Raiyah enjoys cooking, reading, and going to the beach with her friends.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"7c8f811a899d30381078b0eb56b0f297","people":["ahmed_raiyah"],"permalink":"https://bionicvisionlab.org/people/ahmed_raiyah/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/ahmed_raiyah/","section":"people","summary":"Raiyah Ahmed is an undergraduate student at UC Santa Barbara pursuing a dual degree in Psychological \u0026amp; Brain Sciences (BS) and Statistics and Data Science (BA). She is interested in using computational neuroscience to bridge the gap between technology and brain sciences. Raiyah would like to apply data analysis techniques to advance brain mapping research.\nIn her free time, Raiyah enjoys cooking, reading, and going to the beach with her friends.","title":"Raiyah Ahmed","type":"people"},{"categories":null,"content":"Alyssa Alanis is an undergraduate student pursuing a degree in Psychological \u0026amp; Brain Sciences at UC Santa Barbara.\nAlyssa is interested in the neurological and cognitive aspects of perception, pharmacological and psychological altered states of consciousness, and neuroimaging. She is passionate about studying the connection between biological vision and perceived experiences, distorted realities, hallucinations, and drug induced altered states of minds in the hopes to help those with hallucination related-disorders.\nIn her free time Alyssa loves to hike, go on roadtrips, and have movie nights!\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"f81ab2476ac33359b7b3409e8bb40f82","people":["alanis_alyssa"],"permalink":"https://bionicvisionlab.org/people/alanis_alyssa/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/alanis_alyssa/","section":"people","summary":"Alyssa Alanis is an undergraduate student pursuing a degree in Psychological \u0026amp; Brain Sciences at UC Santa Barbara.\nAlyssa is interested in the neurological and cognitive aspects of perception, pharmacological and psychological altered states of consciousness, and neuroimaging. She is passionate about studying the connection between biological vision and perceived experiences, distorted realities, hallucinations, and drug induced altered states of minds in the hopes to help those with hallucination related-disorders.","title":"Alyssa Alanis","type":"people"},{"categories":null,"content":"Kanav Arora is currently an undergraduate student pursuing a Computer Science degree at UC Santa Barbara.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"b61f6fdda3b9c85e341b99ddb5010469","people":["arora_kanav"],"permalink":"https://bionicvisionlab.org/people/arora_kanav/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/arora_kanav/","section":"people","summary":"Kanav Arora is currently an undergraduate student pursuing a Computer Science degree at UC Santa Barbara.","title":"Kanav Arora","type":"people"},{"categories":null,"content":"Hasith Basnayake is currently a fourth-year undergraduate student pursuing a degree in Psychological and Brain Sciences at UC Santa Barbara. He’s interested in the intersection of cognitive science and computer science in areas of human-computer interaction, computer vision, and machine learning.\nIn his free time, he likes reading science fiction, taking care of his plants, designing in Figma, and learning how to surf.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"578cd0707a892ab4bd3d25cdf18d336c","people":["basnayake_hasith"],"permalink":"https://bionicvisionlab.org/people/basnayake_hasith/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/basnayake_hasith/","section":"people","summary":"Hasith Basnayake is currently a fourth-year undergraduate student pursuing a degree in Psychological and Brain Sciences at UC Santa Barbara. He’s interested in the intersection of cognitive science and computer science in areas of human-computer interaction, computer vision, and machine learning.\nIn his free time, he likes reading science fiction, taking care of his plants, designing in Figma, and learning how to surf.","title":"Hasith Basnayake","type":"people"},{"categories":null,"content":"Dr. Michael Beyeler directs the Bionic Vision Lab at UC Santa Barbara. He earned his Ph.D. in Computer Science from UC Irvine and holds a B.S. in Electrical Engineering and an M.S. in Biomedical Engineering from ETH Zurich, Switzerland. Before joining UCSB in 2019, he completed a postdoctoral fellowship at the University of Washington, where his work on computational models of bionic vision laid the foundation for the research now pursued in his lab.\nDr. Beyeler also serves as Associate Director of the UCSB Center for Virtual Environments and Behavior (ReCVEB) and has received several prestigious awards, including the National Institutes of Health (NIH) K99/R00 Pathway to Independence Award and the DP2 New Innovator Award. Most recently, he was honored with the Harold J. Plous Memorial Award 2024-25 in recognition of his outstanding contributions to UC Santa Barbara\u0026rsquo;s intellectual life through research, teaching, and service.\n","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"9b65f6e0cdcdd6af799cdb43ac4127dc","people":["beyeler_michael"],"permalink":"https://bionicvisionlab.org/people/beyeler_michael/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/beyeler_michael/","section":"people","summary":"Dr. Michael Beyeler directs the Bionic Vision Lab at UC Santa Barbara. He earned his Ph.D. in Computer Science from UC Irvine and holds a B.S. in Electrical Engineering and an M.S. in Biomedical Engineering from ETH Zurich, Switzerland. Before joining UCSB in 2019, he completed a postdoctoral fellowship at the University of Washington, where his work on computational models of bionic vision laid the foundation for the research now pursued in his lab.","title":"Michael Beyeler","type":"people"},{"categories":null,"content":"Ruyi Cao is an undergraduate student pursuing a dual BS degree in Psychological \u0026amp; Brain Sciences as well as Statistics \u0026amp; Data Science at UC Santa Barbara, with a Minor in Applied Psychology.\nRuyi is passionate about computational neuroscience and is dedicated to exploring the brain\u0026rsquo;s complex functions through advanced computational methods. She aims to contribute to understanding neurological processes and enhance mental health treatments.\nRuyi enjoys dancing to jazz in her free time and taking short walks along the beach.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"02682e46c21d97328a63549d58adeb94","people":["cao_ruyi"],"permalink":"https://bionicvisionlab.org/people/cao_ruyi/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/cao_ruyi/","section":"people","summary":"Ruyi Cao is an undergraduate student pursuing a dual BS degree in Psychological \u0026amp; Brain Sciences as well as Statistics \u0026amp; Data Science at UC Santa Barbara, with a Minor in Applied Psychology.\nRuyi is passionate about computational neuroscience and is dedicated to exploring the brain\u0026rsquo;s complex functions through advanced computational methods. She aims to contribute to understanding neurological processes and enhance mental health treatments.\nRuyi enjoys dancing to jazz in her free time and taking short walks along the beach.","title":"Ruyi Cao","type":"people"},{"categories":null,"content":"Anissa Carter is a Promise Scholar and second-year student in the Department of Computer Science. She is interested in applying computer science to the furtherment of the medical field.\nIn her spare time, she loves to swim, bike, and hike.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"4f189bb2ce5ea013e0566250eb940bcd","people":["carter_anissa"],"permalink":"https://bionicvisionlab.org/people/carter_anissa/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/carter_anissa/","section":"people","summary":"Anissa Carter is a Promise Scholar and second-year student in the Department of Computer Science. She is interested in applying computer science to the furtherment of the medical field.\nIn her spare time, she loves to swim, bike, and hike.","title":"Anissa Carter","type":"people"},{"categories":null,"content":"Krishna Dhulipala is a Masters student in Computer Science at UC Santa Barbara.\nHe serves as Accessibility Assistant in the lab. He is also a Research Assistant in the ArchLab.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"3523c5d7547b9ca58c00908fdfcc0951","people":["dhulipala_krishna"],"permalink":"https://bionicvisionlab.org/people/dhulipala_krishna/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/dhulipala_krishna/","section":"people","summary":"Krishna Dhulipala is a Masters student in Computer Science at UC Santa Barbara.\nHe serves as Accessibility Assistant in the lab. He is also a Research Assistant in the ArchLab.","title":"Krishna Dhulipala","type":"people"},{"categories":null,"content":"Jacob Granley is a PhD student in the Department of Computer Science.\nPrior to joining UCSB, he received his Masters and Bachelors in Computer Science from Colorado School of Mines. He is pursuing his PhD under Dr. Beyeler as part of the Bionic Vision lab, where he hopes to use Computer Science and Machine Learning methods to help improve artificial vision technologies with the ultimate goal of restoring sight to the blind.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"2b70b8d79cd7bf947cd51020510152a3","people":["granley_jacob"],"permalink":"https://bionicvisionlab.org/people/granley_jacob/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/granley_jacob/","section":"people","summary":"Jacob Granley is a PhD student in the Department of Computer Science.\nPrior to joining UCSB, he received his Masters and Bachelors in Computer Science from Colorado School of Mines. He is pursuing his PhD under Dr. Beyeler as part of the Bionic Vision lab, where he hopes to use Computer Science and Machine Learning methods to help improve artificial vision technologies with the ultimate goal of restoring sight to the blind.","title":"Jacob Granley","type":"people"},{"categories":null,"content":"Yuchen Hou is a PhD student in Computer Science at UC Santa Barbara. She is interested in computational neuroscience and machine learning. Her research goal is to model the dynamics of brain functions by integrating knowledge from computer, cognitive, and neural science.\nPrior to pursuing her PhD studies, she was an undergraduate research assistant in the Bionic Vision Lab with a BS degree in Psychological \u0026amp; Brain Sciences.\nIn her free time, she likes reading fiction books and watching action movies.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"e3745716f7ad68c766108f2e2cbeefef","people":["hou_yuchen"],"permalink":"https://bionicvisionlab.org/people/hou_yuchen/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/hou_yuchen/","section":"people","summary":"Yuchen Hou is a PhD student in Computer Science at UC Santa Barbara. She is interested in computational neuroscience and machine learning. Her research goal is to model the dynamics of brain functions by integrating knowledge from computer, cognitive, and neural science.\nPrior to pursuing her PhD studies, she was an undergraduate research assistant in the Bionic Vision Lab with a BS degree in Psychological \u0026amp; Brain Sciences.\nIn her free time, she likes reading fiction books and watching action movies.","title":"Yuchen Hou","type":"people"},{"categories":null,"content":"Byron is a PhD student in the Department of Psychological \u0026amp; Brain Sciences. He was born and raised in St. Louis, Missouri. He moved to New York City to study psychology, where he received a BA in Psychology from St. John’s University (2015) and then a MA in Behavioral Neuroscience from Queens College (2017). Byron worked as a Research Operations Manager at a start-up company that develops assistive products for blind and low vision individuals (2017 - 2020).\nByron\u0026rsquo;s main research interest is studying how image processing and psychophysics can be used to understand how low vision conditions affect visual tasks. Specifically, how combining computational models with simulated low vision impairment conditions can help inform and enhance individualized vision capabilities.\nByron is supervised by Dr. Michael Beyeler in the Bionic Vision Lab and Dr. Miguel Eckstein in the Vision and Image Understanding Lab.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"226dccd548b210ce20689586bc7b8aba","people":["johnson_byron"],"permalink":"https://bionicvisionlab.org/people/johnson_byron/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/johnson_byron/","section":"people","summary":"Byron is a PhD student in the Department of Psychological \u0026amp; Brain Sciences. He was born and raised in St. Louis, Missouri. He moved to New York City to study psychology, where he received a BA in Psychology from St. John’s University (2015) and then a MA in Behavioral Neuroscience from Queens College (2017). Byron worked as a Research Operations Manager at a start-up company that develops assistive products for blind and low vision individuals (2017 - 2020).","title":"Byron A. Johnson","type":"people"},{"categories":null,"content":"Emily is a PhD student in the Dynamical Neuroscience (DYNS) program at UC Santa Barbara and is currently rotating in the Bionic Vision Lab. She previously worked with EM connectomics datasets at Janelia and at the Allen Institute, where she conducted research in cell typing and neural circuitry. During this time, she also developed open-source tools to analyze neuronal morphology and connectivity.\nEmily is interested in the intricate circuitry of visual systems and how its pieces are organized to create the sense of vision. She is also eager to apply insights from animal models to help recreate vision in humans when parts of the circuitry are no longer functioning.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"8b0891ee59a674dd2335953c155cbf37","people":["joyce_emily"],"permalink":"https://bionicvisionlab.org/people/joyce_emily/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/joyce_emily/","section":"people","summary":"Emily is a PhD student in the Dynamical Neuroscience (DYNS) program at UC Santa Barbara and is currently rotating in the Bionic Vision Lab. She previously worked with EM connectomics datasets at Janelia and at the Allen Institute, where she conducted research in cell typing and neural circuitry. During this time, she also developed open-source tools to analyze neuronal morphology and connectivity.\nEmily is interested in the intricate circuitry of visual systems and how its pieces are organized to create the sense of vision.","title":"Emily Joyce","type":"people"},{"categories":null,"content":"Lucas Gil Nadolskis is currently a Graduate Student Researcher in the Bionic Vision Lab.\nLucas got his BS in Computer Science with a minor in Neuroscience from the University of Minnesota in 2021, where he performed research related to autonomous navigation, computer vision and brain-computer interfaces. Later, he got his MS in Biomedical Engineering from Carnegie Mellon University, where his primary work focused on analyzing top-down pathways of the visual system and how this could be integrated into cortical implants for the blind. In addition, he worked with the Human-Computer Interaction department on issues related to accessibility in data visualization, an area that he still seeks to explore in the future.\nStarting Fall \u0026lsquo;23, Lucas will be a PhD student in the Interdepartmental Graduate Program in Dynamical Neuroscience (DYNS) at UC Santa Barbara, where he will investigate novel ways to approach cortical implants for the blind. Being blind himself since the age of five, Lucas\u0026rsquo; interests are broad, ranging from neuroscience to accessibility, but can be summarized as efforts to improve the lives of blind people around the world.\nOutside of the lab, most of his free time is occupied by music, traveling and searching for audio-described content.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"5e85e1d123b1d01498dd8360b48ea1aa","people":["nadolskis_lucas"],"permalink":"https://bionicvisionlab.org/people/nadolskis_lucas/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/nadolskis_lucas/","section":"people","summary":"Lucas Gil Nadolskis is currently a Graduate Student Researcher in the Bionic Vision Lab.\nLucas got his BS in Computer Science with a minor in Neuroscience from the University of Minnesota in 2021, where he performed research related to autonomous navigation, computer vision and brain-computer interfaces. Later, he got his MS in Biomedical Engineering from Carnegie Mellon University, where his primary work focused on analyzing top-down pathways of the visual system and how this could be integrated into cortical implants for the blind.","title":"Lucas Nadolskis","type":"people"},{"categories":null,"content":"Jing Peng is a second-year student pursuing a MS in Computer Science at UCSB.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"18cd679606cb0b17e1ce9e39639c0516","people":["peng_jing"],"permalink":"https://bionicvisionlab.org/people/peng_jing/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/peng_jing/","section":"people","summary":"Jing Peng is a second-year student pursuing a MS in Computer Science at UCSB.","title":"Jing Peng","type":"people"},{"categories":null,"content":"Galen Pogoncheff joined the Bionic Vision Lab in 2022 as a PhD student in the Computer Science department. Driven to improve the processing of visual stimuli for bionic vision devices, he aims to advance techniques in computer vision using mechanisms inspired by neural processing.\nPrior to joining the lab, Galen obtained his B.S. and M.S. in Computer Science from the University of Colorado and subsequently led the research and development of machine learning models for wearable electrophysiology devices at a local startup.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"4438a2bc07066689cc572fd851af41ff","people":["pogoncheff_galen"],"permalink":"https://bionicvisionlab.org/people/pogoncheff_galen/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/pogoncheff_galen/","section":"people","summary":"Galen Pogoncheff joined the Bionic Vision Lab in 2022 as a PhD student in the Computer Science department. Driven to improve the processing of visual stimuli for bionic vision devices, he aims to advance techniques in computer vision using mechanisms inspired by neural processing.\nPrior to joining the lab, Galen obtained his B.S. and M.S. in Computer Science from the University of Colorado and subsequently led the research and development of machine learning models for wearable electrophysiology devices at a local startup.","title":"Galen Pogoncheff","type":"people"},{"categories":null,"content":"Adyah Rastogi is currently a third-year undergraduate student pursuing a Computer Science degree at UC Santa Barbara. She is working on the HILO project in the lab and is interested in machine learning and NLP, human computer interaction, and data analysis.\nAdyah grew up in the Bay Area and enjoys exploring nature and walks, going to the gym, and going on fun drives in her free time.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"a4146d28380dbaa96eb907bedfbe6f10","people":["rastogi_adyah"],"permalink":"https://bionicvisionlab.org/people/rastogi_adyah/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/rastogi_adyah/","section":"people","summary":"Adyah Rastogi is currently a third-year undergraduate student pursuing a Computer Science degree at UC Santa Barbara. She is working on the HILO project in the lab and is interested in machine learning and NLP, human computer interaction, and data analysis.\nAdyah grew up in the Bay Area and enjoys exploring nature and walks, going to the gym, and going on fun drives in her free time.","title":"Adyah Rastogi","type":"people"},{"categories":null,"content":"Roksana Sadeghi is an incoming postdoc in the lab. She previously worked with Gislin Dagnelie at Johns Hopkins University and Jorge Otero-Millan at UC Berkeley.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"851b6be0c61f006b55db82660d22cb91","people":["sadeghi_roksana"],"permalink":"https://bionicvisionlab.org/people/sadeghi_roksana/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/sadeghi_roksana/","section":"people","summary":"Roksana Sadeghi is an incoming postdoc in the lab. She previously worked with Gislin Dagnelie at Johns Hopkins University and Jorge Otero-Millan at UC Berkeley.","title":"Roksana Sadeghi","type":"people"},{"categories":null,"content":"Magnolia Saur is a third year undergraduate student pursuing a BS in Electrical Engineering at UC Santa Barbara. She is interested in machine learning, computational neuroscience, and deep learning. She is passionate about incorporating electrical engineering within the medical industry, particularly to support disabled and underrepresented communities.\nIn her free time, Magnolia enjoys outdoor activities, crocheting, and caring for her plants\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"1f2376f3c36174de814ed94040a6bb93","people":["saur_magnolia"],"permalink":"https://bionicvisionlab.org/people/saur_magnolia/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/saur_magnolia/","section":"people","summary":"Magnolia Saur is a third year undergraduate student pursuing a BS in Electrical Engineering at UC Santa Barbara. She is interested in machine learning, computational neuroscience, and deep learning. She is passionate about incorporating electrical engineering within the medical industry, particularly to support disabled and underrepresented communities.\nIn her free time, Magnolia enjoys outdoor activities, crocheting, and caring for her plants","title":"Magnolia Saur","type":"people"},{"categories":null,"content":"Marius Schneider is a Postdoctoral Researcher at the Institute for Collaborative Biotechnologies. He completed his doctoral research in Systems Neuroscience at the Ernst Strüngmann Institute in Frankfurt, in affiliation with the International Max Planck Research School (IMPRS) for Neural Circuits at the Max Planck Institute for Brain Research. Marius defended his PhD with highest honors at Radboud University Nijmegen in May 2024.\nMarius’s research aims to uncover how the brain achieves flexible information processing. He focuses on understanding how different cell types and brain regions integrate sensory information to drive behavior. To address these questions, he combines detailed biophysical modeling and state-of-the-art machine learning techniques with large-scale, multi-areal electrophysiological recordings.\nOutside the lab, Marius enjoys running, working out at the gym, spending time at the beach, traveling, and listening to music.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"d14182b496e00ac9edaabd97e4ca3663","people":["schneider_marius"],"permalink":"https://bionicvisionlab.org/people/schneider_marius/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/schneider_marius/","section":"people","summary":"Marius Schneider is a Postdoctoral Researcher at the Institute for Collaborative Biotechnologies. He completed his doctoral research in Systems Neuroscience at the Ernst Strüngmann Institute in Frankfurt, in affiliation with the International Max Planck Research School (IMPRS) for Neural Circuits at the Max Planck Institute for Brain Research. Marius defended his PhD with highest honors at Radboud University Nijmegen in May 2024.\nMarius’s research aims to uncover how the brain achieves flexible information processing.","title":"Marius Schneider","type":"people"},{"categories":null,"content":"Eirini Schoinas is currently a first-year undergraduate student pursuing a Computing degree in the College of Creative Studies at UC Santa Barbara. She is interested in human-computer interaction and new ways for computers to interface with the brain.\nIn her free time, she enjoys reading science fiction and fantasy novels and making art.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"fabfd5792b657e19c558420ac2ff4ce5","people":["schoinas_eirini"],"permalink":"https://bionicvisionlab.org/people/schoinas_eirini/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/schoinas_eirini/","section":"people","summary":"Eirini Schoinas is currently a first-year undergraduate student pursuing a Computing degree in the College of Creative Studies at UC Santa Barbara. She is interested in human-computer interaction and new ways for computers to interface with the brain.\nIn her free time, she enjoys reading science fiction and fantasy novels and making art.","title":"Eirini Schoinas","type":"people"},{"categories":null,"content":"Hannah Stone is a PhD student in the Bionic Vision Lab at UC Santa Barbara. She previously worked at the Brain Development and Education Lab at Stanford University, where she analyzed the visual processing of text and the flexible effects of task in the human frontal cortex. Prior to that, she worked at the University of Rochester\u0026rsquo;s Marmolab, examining the top-down effects of attention on the oculomotor system and early visual processing.\nHannah is interested in how biological visual systems respond to artificial inputs, specifically how the visual system processes and consciously interprets these signals.\nOutside of the lab, Hannah can be found painting, surfing, and exploring Santa Barbara on her bike.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"c9f8bea220769465f88f22eb8ddb5a9d","people":["stone_hannah"],"permalink":"https://bionicvisionlab.org/people/stone_hannah/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/stone_hannah/","section":"people","summary":"Hannah Stone is a PhD student in the Bionic Vision Lab at UC Santa Barbara. She previously worked at the Brain Development and Education Lab at Stanford University, where she analyzed the visual processing of text and the flexible effects of task in the human frontal cortex. Prior to that, she worked at the University of Rochester\u0026rsquo;s Marmolab, examining the top-down effects of attention on the oculomotor system and early visual processing.","title":"Hannah Stone","type":"people"},{"categories":null,"content":"Jiaxin Su is an undergraduate student pursuing degrees in Psychological \u0026amp; Brain Sciences as well as in Data Science and Statistics at UC Santa Barbara.\nShe is interested in working at the intersection of biostatistics and psychology. In her free time, she enjoys running and playing frisbee.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"2104459f97364834d46b2358ae484380","people":["su_jiaxin"],"permalink":"https://bionicvisionlab.org/people/su_jiaxin/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/su_jiaxin/","section":"people","summary":"Jiaxin Su is an undergraduate student pursuing degrees in Psychological \u0026amp; Brain Sciences as well as in Data Science and Statistics at UC Santa Barbara.\nShe is interested in working at the intersection of biostatistics and psychology. In her free time, she enjoys running and playing frisbee.","title":"Jiaxin Su","type":"people"},{"categories":null,"content":"Eyob Teshome is a senior undergraduate student pursuing a Bachelor of Science in Computer Science at the University of California, Santa Barbara. As a SEEDS Undergraduate Research Fellow, Eyob focuses on the application of computer vision, machine learning, and database systems. He recently expanded his academic horizons through a study abroad program at the University of Melbourne, Australia, where he focused on database systems and artificial intelligence.\nOutside the classroom, Eyob maintains an active lifestyle by working out and playing intramural soccer. He also plays the piano and enjoys taking care of his plants.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"c0df3d356bd8facfcee0b6383efe5e95","people":["teshome_eyob"],"permalink":"https://bionicvisionlab.org/people/teshome_eyob/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/teshome_eyob/","section":"people","summary":"Eyob Teshome is a senior undergraduate student pursuing a Bachelor of Science in Computer Science at the University of California, Santa Barbara. As a SEEDS Undergraduate Research Fellow, Eyob focuses on the application of computer vision, machine learning, and database systems. He recently expanded his academic horizons through a study abroad program at the University of Melbourne, Australia, where he focused on database systems and artificial intelligence.\nOutside the classroom, Eyob maintains an active lifestyle by working out and playing intramural soccer.","title":"Eyob Teshome","type":"people"},{"categories":null,"content":"Lily Turkstra is a PhD student in the Bionic Vision Lab at UC Santa Barbara.\nShe has extensive research experience with human psychophysics, has worked with clinical populations, and is well versed in statistical software analysis and programming.\nBefore joining the PBS department as a graduate student, Lily served as lab manager from Fall \u0026lsquo;22 to Summer \u0026lsquo;23. Before that, she was a Behavioral Health and Performance Intern at NASA and a Software Quality Assurance specialist at Tapestry Solutions. She also worked as a member of the Multisensory Perception Lab at Cal Poly and as a children’s behavioral therapist with California PsychCare.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"7c45c0abd1782c170bfc40cc395e222e","people":["turkstra_lily"],"permalink":"https://bionicvisionlab.org/people/turkstra_lily/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/turkstra_lily/","section":"people","summary":"Lily Turkstra is a PhD student in the Bionic Vision Lab at UC Santa Barbara.\nShe has extensive research experience with human psychophysics, has worked with clinical populations, and is well versed in statistical software analysis and programming.\nBefore joining the PBS department as a graduate student, Lily served as lab manager from Fall \u0026lsquo;22 to Summer \u0026lsquo;23. Before that, she was a Behavioral Health and Performance Intern at NASA and a Software Quality Assurance specialist at Tapestry Solutions.","title":"Lily M. Turkstra","type":"people"},{"categories":null,"content":"Apurv Varshney is currently a first-year PhD student pursuing a PhD in Computer Science at UC Santa Barbara. He is interested in improving Bionic Vision using Computer vision and human computer interaction (HCI) techniques.\nIn his free time he enjoys hiking and playing tennis.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"283fe3910514662d9abab07376970c6f","people":["varshney_apurv"],"permalink":"https://bionicvisionlab.org/people/varshney_apurv/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/varshney_apurv/","section":"people","summary":"Apurv Varshney is currently a first-year PhD student pursuing a PhD in Computer Science at UC Santa Barbara. He is interested in improving Bionic Vision using Computer vision and human computer interaction (HCI) techniques.\nIn his free time he enjoys hiking and playing tennis.","title":"Apurv Varshney","type":"people"},{"categories":null,"content":"Alvin Wang is a BS/MS student pursuing a Computer Science degree at UC Santa Barbara. He is especially interested in the fields of machine learning, computer vision, and artificial intelligence regulation. From Fall 2023 until Spring 2024, he was an ERSP Scholar in the lab, where he worked on deep learning-based computer vision algorithms for shadow removal.\nIn his free time, he enjoys cooking and playing basketball as well as hiking the numerous breathtaking trails Santa Barbara has to offer.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"f0732b46f88e8ceefeecaaa7f5b6c460","people":["wang_alvin"],"permalink":"https://bionicvisionlab.org/people/wang_alvin/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/wang_alvin/","section":"people","summary":"Alvin Wang is a BS/MS student pursuing a Computer Science degree at UC Santa Barbara. He is especially interested in the fields of machine learning, computer vision, and artificial intelligence regulation. From Fall 2023 until Spring 2024, he was an ERSP Scholar in the lab, where he worked on deep learning-based computer vision algorithms for shadow removal.\nIn his free time, he enjoys cooking and playing basketball as well as hiking the numerous breathtaking trails Santa Barbara has to offer.","title":"Alvin Wang","type":"people"},{"categories":null,"content":"Jianna Wong is a fourth-year undergraduate student pursuing degrees in Psychological and Brain Sciences and Statistics and Data Science at UC Santa Barbara. She is interested in neural prosthetics, machine learning, and computer vision.\nIn her free time, she enjoys cooking new recipes, playing piano, and watching movies.\n","date":-62135596800,"expirydate":-62135596800,"kind":"term","lang":"en","lastmod":-62135596800,"objectID":"bce75f19ed61ea989b839ac1342de65f","people":["wong_jianna"],"permalink":"https://bionicvisionlab.org/people/wong_jianna/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/people/wong_jianna/","section":"people","summary":"Jianna Wong is a fourth-year undergraduate student pursuing degrees in Psychological and Brain Sciences and Statistics and Data Science at UC Santa Barbara. She is interested in neural prosthetics, machine learning, and computer vision.\nIn her free time, she enjoys cooking new recipes, playing piano, and watching movies.","title":"Jianna Wong","type":"people"},{"categories":null,"content":"Prerequisites: PSY 111; open to Psychological \u0026amp; Brain Sciences, Biopsychology, and Interdisciplinary Studies majors only.\nThis course will focus on the biological basis of vision, including both the theory and recent research in visual neuroscience, with a focus on the retina and the early visual cortex. In specific, we will cover how the retinal image is represented by the neural response within the visual pathways, considering evidence from behavioral and biological approaches.\nUnderstanding how the visual system encodes light has implications for everything else the visual pathways do. Our understanding of the neural representation is based on work in several different disciplines. Throughout this course we will see that:\n there are many anatomically distinct types of neurons with various distinct functions, the different anatomical types of neurons respond to light stimulation in different ways and their signals are communicated to different destinations, the microcircuitry of the local neural connections is very precise, and not at all random. ","date":1740787200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1740787200,"objectID":"dc831e4a9d49c15326f50c5022580cab","people":null,"permalink":"https://bionicvisionlab.org/teaching/2025-spring-psych132/","publishdate":"2025-03-01T00:00:00Z","relpermalink":"/teaching/2025-spring-psych132/","section":"teaching","summary":"An overview of theory and research in visual neuroscience. Topics may include: a focus on mammalian vision considering evidence from behavioral and biological approaches.","title":"PSY-132: Visual Neuroscience","type":"teaching"},{"categories":null,"content":"","date":1739213340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1739213340,"objectID":"61bc325334bc0101928552826613c0a6","people":["Roksana Sadeghi","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2025-02-gaussian-process-thresholds/","publishdate":"2025-02-10T18:49:00Z","relpermalink":"/publications/2025-02-gaussian-process-thresholds/","section":"publications","summary":"We propose a Gaussian Process Regression (GPR) framework to predict perceptual thresholds at unsampled locations while leveraging uncertainty estimates to guide adaptive sampling.","title":"Efficient spatial estimation of perceptual thresholds for retinal implants via Gaussian process regression","type":"publications"},{"categories":null,"content":"","date":1738313340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1738313340,"objectID":"5e99c4f1db84a16cc98e5e846f1c4e60","people":["Eirini Schoinas","Adyah Rastogi","Anissa Carter","Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2025-01-hilo-personalization-sighted/","publishdate":"2025-01-31T08:49:00Z","relpermalink":"/publications/2025-01-hilo-personalization-sighted/","section":"publications","summary":"We evaluate HILO using sighted participants viewing simulated prosthetic vision to assess its ability to optimize stimulation strategies under realistic conditions.","title":"Evaluating deep human-in-the-loop optimization for retinal implants using sighted participants","type":"publications"},{"categories":null,"content":"","date":1735894140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1735894140,"objectID":"8bfdbd7343b2686bfb204218b405c158","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2025-01-raster-patterns/","publishdate":"2025-01-03T08:49:00Z","relpermalink":"/publications/2025-01-raster-patterns/","section":"publications","summary":"Using an immersive VR system, we systematically evaluated two behavioral tasks under four raster patterns (horizontal, vertical, checkerboard, and random) and found checkerboard raster to be the most effective.","title":"Simulated prosthetic vision identifies checkerboard as an effective raster pattern for retinal implants","type":"publications"},{"categories":null,"content":"What would the world look like with a bionic eye?\nThis graduate course will introduce students to the multidisciplinary field of bionic vision viewed through the lens of computer science, neuroscience, and human-computer interaction.\nThe course will conclude with a programming project (teams of ≤ 3, any language/environment ok) in lieu of a final exam, giving students an opportunity to gain hands-on experience of working on open research problems using methods and tools best suited to their scientific background.\nCourse Objectives The course will give an overview of current bionic eye technology designed to restore vision to people living with incurable blindness. By the end of the course, you should be able to:\n identify various types of bionic eye technologies, their differences and similarities explain how the retina and visual cortex support our sense of seeing apply common computer vision \u0026amp; machine learning techniques for stimulus encoding give a nuanced review of the HCI \u0026amp; ethics issues associated with implantable neurotechnology demonstrate your hands-on experience of working on open problems in the field The course is targeted to a diverse audience spanning computer science (computer vision, human factors, deep learning) to psychology (vision, psychophysics) and brain sciences (computational neuroscience, neuroengineering).\nPrerequisites There are no official prerequisites for this course. The instructor will do his best to make the course content self-contained, including a crash course in neuroscience \u0026amp; computational vision. However, homeworks and final projects will require programming. Homeworks will be based around pulse2percept, a Python-based simulation framework for bionic vision. Any suitable programming language/framework is ok for the final project. ","date":1735689600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1735689600,"objectID":"f4385e07c2329acbdaf0193e83151f47","people":null,"permalink":"https://bionicvisionlab.org/teaching/2025-winter-cs291a/","publishdate":"2025-01-01T00:00:00Z","relpermalink":"/teaching/2025-winter-cs291a/","section":"teaching","summary":"This graduate course will introduce students to the multidisciplinary field of bionic vision viewed through the lens of computer science, neuroscience, and human-computer interaction.","title":"CS-291A: Bionic Vision","type":"teaching"},{"categories":null,"content":"","date":1732178940,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1732178940,"objectID":"ef5c4e2bd4354017fb6ce42039d4c905","people":["Lucas Nadolskis","Lily M. Turkstra","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-11-implant-expectations/","publishdate":"2024-11-21T08:49:00Z","relpermalink":"/publications/2024-11-implant-expectations/","section":"publications","summary":"Our interview study found a significant gap between researcher expectations and implantee experiences with visual prostheses, underscoring the importance of focusing future research on usability and real-world application.","title":"Aligning visual prosthetic development with implantee needs","type":"publications"},{"categories":[],"content":"Dr. Michael Beyeler of the departments of Computer Science and Psychlogical \u0026amp; Brain Sciences is the recipient of the Harold J. Plous Memorial Award 2024-25. The honor recognizes Dr. Beyeler\u0026rsquo;s oustanding contributions in research, teaching, and service, embodying the spirit of excellence and innovation that defines the UCSB academic community.\nDr. Beyeler has established himself as a leading interdisciplinary researcher, making groundbreaking advances in both theoretical understanding of vision and the development of technologies that improve the quality of life for individuals with visual impairments. His research focuses on bridging neuroscience, computer science, engineering, and psychology to enhance sight recovery techniques, specifically retinal implants — often referred to as bionic eyes. His work has significantly advanced our understanding of how the brain processes visual information, offering insights that can be applied to developing artificial vision systems.\nOne of Dr. Beyeler\u0026rsquo;s most notable contributions is his development of computational models that predict how users of visual prostheses perceive their surroundings. His work has been pivotal in enhancing the spatial resolution of current retinal implants, providing cruical insights into improving these technologies. This line of research has had a profound impact on the field of neuroscience, and Dr. Beyeler\u0026rsquo;s innovations have earned international recognition, including being featued in the NIH documentary \u0026ldquo;Toward a Smart Bionic Eye.\u0026rdquo; With 38 publications in top-tier journals and conference proceedings, over $2 million in active research funding, and numerous awards — including the highly prestigious NIH New Innovator Award — Dr. Beyeler\u0026rsquo;s accomplishments in research and service are truly remarkable.\nhttps://cs.ucsb.edu/happenings/awards/michael-beyeler-receives-harold-j-plous-memorial-award-2024-25\n","date":1730455140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1730455140,"objectID":"c0f29558437113ea1697b6830cb037a4","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2024-11-beyeler-plous-award/","publishdate":"2024-11-01T09:59:00Z","relpermalink":"/news/2024-11-beyeler-plous-award/","section":"news","summary":"The Harold J. Plous award recognizes an assistant professor within the College of Letters \u0026 Science who has demonstrated outstanding performance or promise as measured by creative action or contribution to the intellectual life of the college community.","title":"Michael Beyeler receives Harold J. Plous Memorial Award 2024-25","type":"news"},{"categories":null,"content":"","date":1729500540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1729500540,"objectID":"0d32c1d9d4e01cd795b2914e7f008990","people":["Lucas Nadolskis","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-10-ismar-vision-ai/","publishdate":"2024-10-21T08:49:00Z","relpermalink":"/publications/2024-10-ismar-vision-ai/","section":"publications","summary":"We introduce VisionAI, a mobile application designed to enhance the in-store shopping experience for individuals with vision impairments.","title":"VisionAI - Shopping Assistance for People with Vision Impairments","type":"publications"},{"categories":[],"content":"Our latest research on simulating prosthetic vision was highlighted at Unite 2024 during a presentation on Unity Sentis, Unity\u0026rsquo;s AI neural engine. The presentation showcased how Unity Sentis enables real-time execution of computationally expensive AI models within Unity Runtime.\nOur project focuses on using neurophysiologically inspired and psychophysically validated models to simulate the visual experiences that could be generated by future bionic eye implants. These models are integrated into immersive virtual reality (VR) environments, updating in real time based on user head and eye movements. By leveraging Unity Sentis, we can run these models efficiently, allowing us to create realistic simulations of what individuals with prosthetic vision may experience.\n\rBig thanks to Unity\u0026rsquo;s Bill Cullen and Alexandre Ribard for making this happen!\n","date":1729159140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1729159140,"objectID":"de1c076d9b8a9521dd8517163a8a64a1","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2024-10-unity-sentis/","publishdate":"2024-10-17T09:59:00Z","relpermalink":"/news/2024-10-unity-sentis/","section":"news","summary":"Unity Sentis highlights our real-time VR simulation of bionic eye technology, showcasing AI-driven models for visual stimulus encoding.","title":"BionicVisionXR featured at Unite 2024","type":"news"},{"categories":null,"content":"Same course as ECE 181.\nWaitlist: CS students, please add your name to the waitlist on GOLD. ECE students, please contact the ugrad advisor, since ECE is keeping an old-school (paper) waitlist.\nPrerequisites: Upper-division standing in Electrical Engineering, Computer Engineering, Computer Science, Chemical Engineering or Mechanical Engineering.\nOverview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, alignment and warping; stereo vision; feature extraction, matching, and tracking; deep learning-based object detection, recognition, and segmentation; and case studies of practical vision systems.\nAfter taking CS 181, students should be able to:\n explain \u0026amp; apply the fundamentals of image formation, alignment, and warping explain \u0026amp; apply the fundamentals of projective and epipolar geometry process image features using traditional \u0026amp; modern approaches summarize current approaches to object recognition, detection, and segmentation demonstrate expertise towards the implementation and programming practices of effective computer vision applications ","date":1725148800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1725148800,"objectID":"cde78b05a1beb52123445ed2acbbd476","people":null,"permalink":"https://bionicvisionlab.org/teaching/2024-fall-cs181/","publishdate":"2024-09-01T00:00:00Z","relpermalink":"/teaching/2024-fall-cs181/","section":"teaching","summary":"Overview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, alignment and warping; stereo vision; feature extraction, matching, and tracking; deep learning-based object detection, recognition, and segmentation; and case studies of practical vision systems.","title":"CS/ECE-181: Introduction to Computer Vision","type":"teaching"},{"categories":null,"content":"","date":1721033340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1721033340,"objectID":"4ac8e60c47768d328503159d600137b5","people":["Yuchen Hou","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-07-predicting-temporal-dynamics/","publishdate":"2024-07-15T08:49:00Z","relpermalink":"/publications/2024-07-predicting-temporal-dynamics/","section":"publications","summary":"We introduce two computational models designed to accurately predict phosphene fading and persistence under varying stimulus conditions, cross-validated on behavioral data reported by nine users of the Argus II Retinal Prosthesis System.","title":"Predicting the temporal dynamics of prosthetic vision","type":"publications"},{"categories":null,"content":"","date":1715417340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1715417340,"objectID":"1c6a9cc4ee4df2621ac00e4bbc6a4d6e","people":["Jacob Granley","Galen Pogoncheff","Lily M. Turkstra","Lucas Nadolskis","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-05-beyond-sight-blind-v1-alignment/","publishdate":"2024-05-11T08:49:00Z","relpermalink":"/publications/2024-05-beyond-sight-blind-v1-alignment/","section":"publications","summary":"We present a series of analyses on the shared representations between evoked neural activity in the primary visual cortex of a blind human with an intracortical visual prosthesis, and latent visual representations computed in deep neural networks.","title":"Beyond sight: Probing alignment between image models and blind V1","type":"publications"},{"categories":null,"content":"","date":1712566140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1712566140,"objectID":"6a2f6c272c5143824b28a36f9179367d","people":["Yuchen Hou","Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-04-argus-pairs/","publishdate":"2024-04-08T08:49:00Z","relpermalink":"/publications/2024-04-argus-pairs/","section":"publications","summary":"We retrospectively analyzed phosphene shape data collected form three Argus II patients to investigate which neuroanatomical and stimulus parameters predict paired-phosphene appearance and whether phospehenes add up linearly.","title":"Axonal stimulation affects the linear summation of single-point perception in three Argus II users","type":"publications"},{"categories":null,"content":"","date":1710842400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1710842400,"objectID":"e45071471b146509c8995a53eb20e2f7","people":["Galen Pogoncheff","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-03-explainable-thresholds/","publishdate":"2024-03-19T10:00:00Z","relpermalink":"/publications/2024-03-explainable-thresholds/","section":"publications","summary":"We present explainable artificial intelligence (XAI) models fit on a large longitudinal dataset that can predict perceptual thresholds on individual Argus II electrodes over time.","title":"Explainable machine learning predictions of perceptual sensitivity for retinal prostheses","type":"publications"},{"categories":null,"content":"","date":1710612000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1710612000,"objectID":"0696707596748dd9c73dffbc1641c9b9","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-03-eye-tracking-mixed-reality/","publishdate":"2024-03-16T18:00:00Z","relpermalink":"/publications/2024-03-eye-tracking-mixed-reality/","section":"publications","summary":"We conducted user studies evaluating eye tracking on the Magic Leap One, the HoloLens 2, and the Meta Quest Pro to show how locomotion influences eye tracking performance in these headsets.","title":"Eye tracking performance in mobile mixed reality","type":"publications"},{"categories":null,"content":"","date":1710146940,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1710146940,"objectID":"2155f82f2f7fa6c2bd70c59afff4e9f7","people":["Apurv Varshney","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2024-03-stress-affects-navigation/","publishdate":"2024-03-11T08:49:00Z","relpermalink":"/publications/2024-03-stress-affects-navigation/","section":"publications","summary":"We used immersive virtual reality to develop a novel behavioral paradigm to examine navigation under dynamically changing, high-stress situations.","title":"Stress affects navigation strategies in immersive virtual reality","type":"publications"},{"categories":null,"content":"PSY-221F is the new course number for PSY-265 formerly taught by Greg Ashby\nCourse Description This is (primarily) a lecture course that surveys computational neuroscience, which is a branch of neuroscience that employs mathematical models, theoretical analysis, and abstractions of the brain to understand the principles that govern development, structure, physiology, and cognitive abilities of the nervous system.\nIn this new iteration of the course, we will visit different brain areas to learn about the computational principles that may underlie their function. This may include:\n Visual cortex: classical/extraclassical receptive fields, tuning curves, population codes, object recognition Auditory cortex: spiking models, tonotopic maps, coincidence detection, predictive coding Hippocampus (memory \u0026amp; learning): spike-timing dependent plasticity, pattern completion/separation Hippocampal-entorhinal complex (spatial navigation): place cells, grid cells, head direction cells Basal ganglia: Reward signaling and reinforcement learning, actor-critic model Prefrontal cortex: (Bayesian) decision making, attractor networks, rule-based learning At the beginning of the quarter, students will need to subscribe to one of two tracks:\nTrack I (Concept-Focused):\n suitable for students from a broad range of backgrounds, including biology, psychology, and cognitive science focuses on the conceptual understanding of computational models and principles in neuroscience offers a ligher homework load, with assignments designed to reinforce understanding of key concepts and theories without extensive mathematical analysis offers a math refresher and intro to Python programming Track II (Comprehensive):\n suitable for students eager to dive deep into the mathematical and computational foundations of neuroscience is ideal for those with a strong background in math, physics, engineering, computer science, or related field (mandatory for DYNS students) offers a comprehensive coverage of mathematical concepts and computational models underlying brain function includes hands-on programming and data analysis assignments Students will gain experience both conceptually and practically, by homework assignments that involve solving problems and implementing computational models. Yes, there will be math. Yes, there will be programming. However, this is not primarily a programming course - the goal is to get experience with the computational models. Students with little programming experience are encouraged to take advantage of the math refresher/programming intro.\nBy the end of this course, students should be able to:\n describe how the brain \u0026ldquo;computes\u0026rdquo;, describe different methods that computational neuroscientists use to model neural coding, computationally model the biophysics of single neurons and the dynamics of neural networks, fit a computational model to experimental data. The course will feature a few homework assignments, in-class presentations, and a final (group) project.\nPrerequisites The formal prerequisites are PSY-221A and PSY-221B, but exceptions will be considered on a case-by-case basis (send me your transcripts).\nThe actual necessary background includes:\n calculus (differential, integral) and statistics, some prior exposure to matrix algebra, some prior exposure to Python. Desirable, but not strictly necessary:\n prior exposure to probability theory, basic knowledge of neuroscience. ","date":1709251200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1709251200,"objectID":"18493551895cd1c8b557952374f639d8","people":null,"permalink":"https://bionicvisionlab.org/teaching/2024-spring-psy221f/","publishdate":"2024-03-01T00:00:00Z","relpermalink":"/teaching/2024-spring-psy221f/","section":"teaching","summary":"A lecture course that surveys computational neuroscience, which is a branch of neuroscience that employs mathematical models, theoretical analysis, and abstractions of the brain to understand the principles that govern development, structure, physiology, and cognitive abilities of the nervous system. We will cover both classical (e.g., GLM, LIF, Hodgkin-Huxley model) and state-of-the-art methods (i.e., deep learning).","title":"PSY-221F: Computational Neuroscience","type":"teaching"},{"categories":[],"content":"“There’s up to six million people worldwide who live with profound blindness,” Prof. Beyeler explains in the newly released video, “and the idea of a visual prosthesis is to replace lost functionality with an implant. Even though these devices are already out there, the vision they provide is rather limited.”\nWith current visual prostheses, blind users are able to see something but not know what it is that they’re looking at. Beyeler’s work intends to fill these gaps, integrating AI and object recognition technology into the devices to let the user know, for example, whether the object they’re looking at is another human or a car or a trash can or something else.\n\r“If a smart bionic eye gets developed through this research, It’s going to change the lives of millions of people around the world, not just myself,” says Jason Esterhuizen, a bionic eye user. “Blindness will not be an issue any more.”\n“It’s my life’s work,” adds UCSB PhD student Lucas Gil Nadolskis. “It’s more than research. For a lot of people working with this, it’s a cool little project. For me it’s deeply personal. It’s the goal of my life.”\nAn audio-described version of the video is available below:\n\rAn extended version is available as a NNLM Discovery Podcast episode below (transcript):\n\rRelated coverage:\n NNLM Podcast: https://www.nnlm.gov/podcast/towards-a-smart-bionic-eye CS@UCSB: https://cs.ucsb.edu/happenings/news/towards-smart-bionic-eye Daily Nexus: https://dailynexus.com/2023-07-07/ucsb-professor-receives-nih-directors-new-innovator-award/ UCSB Current: https://news.ucsb.edu/2022/020732/clear-vision ","date":1709114340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1709114340,"objectID":"798bf4227f01f74ca3676508009eff7f","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2024-02-nih-smart-bionic-eye/","publishdate":"2024-02-28T09:59:00Z","relpermalink":"/news/2024-02-nih-smart-bionic-eye/","section":"news","summary":"The National Library of Medicine at the National Institutes of Health (NIH) sent a film crew to UCSB to document the work we do as part of our NIH DP2 New Innovator Award.","title":"Towards a Smart Bionic Eye","type":"news"},{"categories":null,"content":"Prerequisite: CS-130A\nArtificial Intelligence is about building and understanding intelligent systems. AI means different things to different people. People have approached the study of AI from various perspectives: philosophy, neurophysiology, mathematics, linguistics, psychology, control systems, and computer science, to name a few. Part of what makes it so interesting is the range of activities, from trying to understand minds in the abstract, to building creatures that run around and do things. AI has made steady progress and contributed to much of what is now standard computer science and cognitive science. It has spawned several sub-areas (e.g., natural language processing, speech recognition, computer vision, robotics, expert systems, and machine learning).\nThis is an introductory course in artificial intelligence offered as part of a computer science curriculum, so we are not primarily concerned with the approaches of philosophy, cognitive science, etc., although we will come back to them from time to time. We will cover the following technical topics:\n Problem solving and intelligent agents Introduction to machine learning (classification) Probabilistic modeling and inference (Bayesian networks) Problem solving and search (blind, informed, adversarial) Introduction to reinforcement learning (Bandits, MDPs and Reinforcement Learning) Knowledge Representation and Reasoning (logic) Responsible AI (fairness, privacy, AI for social good) Differences from CS165B: Note that CS165B is about machine learning (ML). In CS165A, we take a broader view of AI and cover a complementary set of topics (each one of them may have a learning component). There isn’t a right order in taking these two courses. You can go either way. You will have more ideas on applications of ML for decision making if you have completed CS165A when taking CS165B; meanwhile you will gain deeper understanding in CS165A topics if you have taken CS165B.\n","date":1704067200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1704067200,"objectID":"b7cdc7057cc439a2fb66846d1ce4e0dd","people":null,"permalink":"https://bionicvisionlab.org/teaching/2024-winter-cs165a/","publishdate":"2024-01-01T00:00:00Z","relpermalink":"/teaching/2024-winter-cs165a/","section":"teaching","summary":"Introduction to the field of artificial intelligence, which seeks to understand and build intelligent computational systems. Topics include intelligent agents, problem solving and heuristic search, knowledge representation and reasoning, uncertainty and probabilistic reasoning, machine learning, reinforcement learning, and responsible AI.","title":"CS-165A: Introduction to Artificial Intelligence","type":"teaching"},{"categories":null,"content":"","date":1702237680,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1702237680,"objectID":"e268b30ccd6e03c94d0b77a006d9e4be","people":["Galen Pogoncheff","Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-12-v1-monkey-neuroai/","publishdate":"2023-12-10T19:48:00Z","relpermalink":"/publications/2023-12-v1-monkey-neuroai/","section":"publications","summary":"We systematically incorporated neuroscience-derived architectural components into CNNs to identify a set of mechanisms and architectures that comprehensively explain neural activity in V1.","title":"Explaining V1 properties with a biologically constrained deep learning architecture","type":"publications"},{"categories":null,"content":"","date":1702234140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1702234140,"objectID":"22ed88e37d64c0539737a121b619b730","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-12-hilo-stimulus-encoding/","publishdate":"2023-12-10T18:49:00Z","relpermalink":"/publications/2023-12-hilo-stimulus-encoding/","section":"publications","summary":"We propose a personalized stimulus encoding strategy that combines state-of-the-art deep stimulus encoding with preferential Bayesian optimization.","title":"Human-in-the-loop optimization for deep stimulus encoding in visual prostheses","type":"publications"},{"categories":null,"content":"","date":1702231200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1702231200,"objectID":"7b806ede64638425f8f76f5776ff8b40","people":["Yuchen Hou","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-12-v1-mouse/","publishdate":"2023-12-10T18:00:00Z","relpermalink":"/publications/2023-12-v1-mouse/","section":"publications","summary":"We introduce a multimodal recurrent neural network that integrates gaze-contingent visual input with behavioral and temporal dynamics to explain V1 activity in freely moving mice.","title":"Multimodal deep learning model unveils behavioral dynamics of V1 activity in freely moving mice","type":"publications"},{"categories":[],"content":"The lab had 3 papers accepted at NeurIPS \u0026lsquo;23:\n PhD students Aiwen Xu and Yuchen Hou developed a multimodal recurrent neural net that well describes V1 activity in freely moving mice, revealing how some neurons lack pronounced visual RFs and that most neurons exhibit mixed selectivity:\nA Xu, Y Hou, CM Niell, M Beyeler (2023). Multimodal deep learning model unveils behavioral dynamics of V1 activity in freely moving mice. 37th Conference on Neural Information Processing Systems (NeurIPS) \u0026lsquo;23\n The latest work by PhD students Galen Pogoncheff and Jacob Granley enriches ResNet50 (the previously best V1-aligned deep net) with layers that simulate the processing hallmarks of the early visual system and assesses how they affect model-brain alignment:\nG Pogoncheff, J Granley, M Beyeler (2023). Explaining V1 properties with a biologically constrained deep learning architecture. 37th Conference on Neural Information Processing Systems (NeurIPS) \u0026lsquo;23\n And last but not least, Jacob Granley (in collab w/ Tristan Fauvel \u0026amp; Matthew Chalk from Sorbonne University) combined deep stimulus encoding with preferential Bayesian optimization to develop personalized stimulation strategies for neural prostheses:\nJ Granley, T Fauvel, M Chalk, M Beyeler (2023). Human-in-the-loop optimization for deep stimulus encoding in visual prostheses. 37th Conference on Neural Information Processing Systems (NeurIPS) \u0026lsquo;23\n ","date":1695315540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1695315540,"objectID":"e5c65e8e59dae10e21bf064026c1f195","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2023-09-neurips/","publishdate":"2023-09-21T09:59:00-07:00","relpermalink":"/news/2023-09-neurips/","section":"news","summary":"Our latest work yields state-of-the-art predictions of primary visual cortex (V1) activity in mouse and monkey, plus a new way to optimize stimulation protocols for visual prostheses.","title":"3 papers accepted at NeurIPS '23","type":"news"},{"categories":null,"content":"","date":1692953340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1692953340,"objectID":"ccb535fc7cefa9331b5907f6f0f4483b","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-08-eye-tracking-suite/","publishdate":"2023-08-25T08:49:00Z","relpermalink":"/publications/2023-08-eye-tracking-suite/","section":"publications","summary":"We developed EyeTTS, an eye tracking test suite to evaluate and compare different eye tracking devices on various augmented reality tasks and metrics, specifically for scenarios involving head movement and locomotion.","title":"EyeTTS: Evaluating and Calibrating Eye Tracking for Mixed-Reality Locomotion","type":"publications"},{"categories":null,"content":"","date":1684368000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1684368000,"objectID":"337ae34472877e25b0b1837b8fae1de0","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-05-retinal-degeneration/","publishdate":"2023-05-18T00:00:00Z","relpermalink":"/publications/2023-05-retinal-degeneration/","section":"publications","summary":"We present a biophysically detailed *in silico* model of retinal degeneration that simulates the network-level response to both light and electrical stimulation as a function of disease progression.","title":"Retinal ganglion cells undergo cell type--specific functional changes in a computational model of cone-mediated retinal degeneration","type":"publications"},{"categories":null,"content":"","date":1683223200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1683223200,"objectID":"a316ee61f21a99e784d263cd258f59f8","people":["Lily M. Turkstra","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-05-information-needs-blind/","publishdate":"2023-05-04T18:00:00Z","relpermalink":"/publications/2023-05-information-needs-blind/","section":"publications","summary":"We present a mixed-methods approach that combines semi-structured interviews with a follow-up behavioral study to understand current and potential future use of technologies for daily activities around the home, especially for cooking.","title":"Information needs and technology use for daily living activities at home by people who are blind","type":"publications"},{"categories":null,"content":"","date":1683158400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1683158400,"objectID":"1fb89e3d563faf51751733186a868f20","people":["Byron A. Johnson","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-05-xr-systematic-review/","publishdate":"2023-05-04T00:00:00Z","relpermalink":"/publications/2023-05-xr-systematic-review/","section":"publications","summary":"We present a systematic literature review of 227 publications from 106 different venues assessing the potential of XR technology to further visual accessibility.","title":"A systematic review of extended reality (XR) for understanding and augmenting vision loss","type":"publications"},{"categories":null,"content":"","date":1682380800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1682380800,"objectID":"1d10c9afb2961d48e669e73445e22101","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-04-neuromorphic-lstm/","publishdate":"2023-04-25T00:00:00Z","relpermalink":"/publications/2023-04-neuromorphic-lstm/","section":"publications","summary":"We present a way to implement long short-term memory (LSTM) cells on spiking neuromorphic hardware.","title":"Long-short term memory (LSTM) cells on spiking neuromorphic hardware","type":"publications"},{"categories":null,"content":"","date":1680307200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1680307200,"objectID":"4efa58ff71127cb2aa61636362a9a035","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2023-04-snn-stdp-wta/","publishdate":"2023-04-01T00:00:00Z","relpermalink":"/publications/2023-04-snn-stdp-wta/","section":"publications","summary":"We present a SNN model that uses spike-latency coding and winner-take-all inhibition to efficiently represent visual objects with as little as 15 spikes per neuron.","title":"Efficient multi-scale representation of visual objects using a biologically plausible spike-latency code and winner-take-all inhibition","type":"publications"},{"categories":null,"content":"Prerequisites: PSY 111; open to Psychological \u0026amp; Brain Sciences, Biopsychology, and Interdisciplinary Studies majors only.\nThis course will focus on the biological basis of vision, including both the theory and recent research in visual neuroscience, with a focus on the retina and the early visual cortex. In specific, we will cover how the retinal image is represented by the neural response within the visual pathways, considering evidence from behavioral and biological approaches.\nUnderstanding how the visual system encodes light has implications for everything else the visual pathways do. Our understanding of the neural representation is based on work in several different disciplines. Throughout this course we will see that:\n there are many anatomically distinct types of neurons with various distinct functions, the different anatomical types of neurons respond to light stimulation in different ways and their signals are communicated to different destinations, the microcircuitry of the local neural connections is very precise, and not at all random. All course materials can be found on GauchoSpace.\n","date":1677628800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1677628800,"objectID":"013accb3ec28b26abbfa1224eab5bebb","people":null,"permalink":"https://bionicvisionlab.org/teaching/2023-spring-psych132/","publishdate":"2023-03-01T00:00:00Z","relpermalink":"/teaching/2023-spring-psych132/","section":"teaching","summary":"An overview of theory and research in visual neuroscience. Topics may include: a focus on mammalian vision considering evidence from behavioral and biological approaches.","title":"PSY-132: Visual Neuroscience","type":"teaching"},{"categories":null,"content":"Overview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, edge detection, image segmentation, pattern recognition, texture analysis, optical flow, stereo vision, shape representation and recovery techniques, issues in object recognition, and case studies of practical vision systems.\nMore info coming soon.\n","date":1672531200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1672531200,"objectID":"a3783704b1256bf2f3b35b18c927cff4","people":null,"permalink":"https://bionicvisionlab.org/teaching/2023-winter-cs281b/","publishdate":"2023-01-01T00:00:00Z","relpermalink":"/teaching/2023-winter-cs281b/","section":"teaching","summary":"Overview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, edge detection, image segmentation, pattern recognition, texture analysis, optical flow, stereo vision, shape representation and recovery techniques, issues in object recognition, and case studies of practical vision systems.","title":"CS/ECE-281B: Advanced Topics in Computer Vision","type":"teaching"},{"categories":null,"content":"","date":1669939200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669939200,"objectID":"2ee90801938f9e1297598ca453d525a7","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-12-brain-like-phosphene-model/","publishdate":"2022-12-02T00:00:00Z","relpermalink":"/publications/2022-12-brain-like-phosphene-model/","section":"publications","summary":"We show that a neurologically-inspired decoding of CNN activations produces qualitatively accurate phosphenes, comparable to phosphenes reported by real patients.","title":"Adapting brain-like neural networks for modeling cortical visual prostheses","type":"publications"},{"categories":null,"content":"","date":1669680000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669680000,"objectID":"2b89d352e0fc3f5e4b7701f2b59fbbc8","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-11-indoor-mobility/","publishdate":"2022-11-29T00:00:00Z","relpermalink":"/publications/2022-11-indoor-mobility/","section":"publications","summary":"We used a neurobiologically inspired model of simulated prosthetic vision in an immersive virtual reality environment to test the relative importance of semantic edges and relative depth cues to support the ability to avoid obstacles and identify objects.","title":"The relative importance of depth cues and semantic edges for indoor mobility using simulated prosthetic vision in immersive virtual reality","type":"publications"},{"categories":null,"content":"","date":1669593600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669593600,"objectID":"ef4d91b2b9249770aa00fdf35cfad0ce","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-11-hybrid-neural-autoencoder/","publishdate":"2022-11-28T00:00:00Z","relpermalink":"/publications/2022-11-hybrid-neural-autoencoder/","section":"publications","summary":"What is the required stimulus to produce a desired percept? Here we frame this as an end-to-end optimization problem, where a deep neural network encoder is trained to invert a known, fixed forward model that approximates the underlying biological system.","title":"Hybrid neural autoencoders for stimulus encoding in visual and other sensory neuroprostheses","type":"publications"},{"categories":null,"content":"","date":1669593600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1669593600,"objectID":"fba714fa570547587274baef42438a77","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-11-smart-bionic-eye/","publishdate":"2022-11-28T00:00:00Z","relpermalink":"/publications/2022-11-smart-bionic-eye/","section":"publications","summary":"Rather than aiming to represent the visual scene as naturally as possible, a *Smart Bionic Eye* could provide visual augmentations through the means of artificial intelligence–based scene understanding, tailored to specific real-world tasks that are known to affect the quality of life of people who are blind.","title":"Towards a *Smart Bionic Eye*: AI-powered artificial vision for the treatment of incurable blindness","type":"publications"},{"categories":[],"content":"Prof. Beyeler aims to bring to the mainstream an artificial intelligence (AI)-powered bionic eye that can generate artificial vision, in an effort to increase the quality of life for patients who are blind or visually impaired.\n“I envision a smart bionic eye that could find misplaced keys on a counter, read out medication labels, inform a user about people’s gestures and facial expressions during social interactions, and warn a user of nearby obstacles and outline safe paths,” he said.\nFor his project, “Towards a Smart Bionic Eye: AI-Powered Artificial Vision for the Treatment of Incurable Blindness,” Beyeler has been selected for a National Institutes of Health (NIH) Director’s New Innovator Award. The five-year, $1.5 million grant was one of 72 awarded this week by the NIH to enable exceptionally creative early-career scientists to push the boundaries of biomedical science and pursue high-impact projects that aim to advance knowledge and enhance health.\n“I offer my sincerest congratulations to Professor Beyeler for having his innovative research recognized with the prestigious NIH Director\u0026rsquo;s New Innovator Award,” said Tresa Pollock, the interim dean of the College of Engineering and Alcoa Distinguished Professor of Materials. “His novel approach of using recent advances in computer vision, AI and neuroscience has tremendous potential to uncover new knowledge and provide millions of people with useful vision through a smart bionic eye.”\nRead the full article here.\n","date":1664877540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1664877540,"objectID":"bfaa490bae8c16679721d3964316f122","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2022-10-new-innovator/","publishdate":"2022-10-04T09:59:00Z","relpermalink":"/news/2022-10-new-innovator/","section":"news","summary":"Prof. Beyeler receives $1.5 million NIH Director's New Innovator award to enable a Smart Bionic Eye","title":"Towards a \u003ci\u003eSmart Bionic Eye\u003c/i\u003e","type":"news"},{"categories":[],"content":"Current epiretinal implants arrange their electrodes on a rectangular grid. \u0026ldquo;Some people have looked at where to place the whole implant on the retina\u0026rdquo;, says Prof. Beyeler. \u0026ldquo;Ashley was the first to ask, what if we moved every individual electrode around based on what we know about how these electrodes produce artificial vision?\u0026quot;\nHowever, moving every electrode presents the problem of combinatorial explosion. Even in current devices with only 60 electrodes, there are many possibilities for arranging them. It is not usually technically feasible to find a solution.\n\u0026ldquo;Ashley approached this as a greedy optimization problem, where one electrode is placed after another\u0026rdquo;, explains Prof. Beyeler. \u0026ldquo;We used a computational model of bionic vision to help predict what the vision would look like for a given electrode placement. By iterating over that, Ashley found a mathematically proven optimal solution.\u0026quot;\nRead the full article at rsipvision.com.\nThe paper has been accepted at MICCAI \u0026lsquo;22.\n","date":1663550940,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1663550940,"objectID":"f081acf70642846dae6eda5f84056cf0","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2022-09-miccai-daily/","publishdate":"2022-09-19T01:29:00Z","relpermalink":"/news/2022-09-miccai-daily/","section":"news","summary":"The work of Ashley Bruce, CS's Outstanding MS Student of the Year awardee, was highlighted in MICCAI Daily magazine.","title":"Greedy optimization of electrode arrangement for epiretinal prostheses","type":"news"},{"categories":null,"content":"","date":1663545600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1663545600,"objectID":"943d856a76877a1c1a66dc87c930a112","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-09-epiretinal-design/","publishdate":"2022-06-03T00:00:00Z","relpermalink":"/publications/2022-09-epiretinal-design/","section":"publications","summary":"We optimize electrode arrangement of epiretinal implants to maximize visual subfield coverage.","title":"Greedy optimization of electrode arrangement for epiretinal prostheses","type":"publications"},{"categories":null,"content":"Same course as ECE 181. This year, CS/ECE 181 will be offered twice: in Fall 2022 and Winter 2023.\nPrerequisites: Upper-division standing in Electrical Engineering, Computer Engineering, Computer Science, Chemical Engineering or Mechanical Engineering.\nOverview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, alignment and warping; stereo vision; feature extraction, matching, and tracking; deep learning-based object detection, recognition, and segmentation; and case studies of practical vision systems.\nAfter taking CS 181, students should be able to:\n explain \u0026amp; apply the fundamentals of image formation, alignment, and warping explain \u0026amp; apply the fundamentals of projective and epipolar geometry process image features using traditional \u0026amp; modern approaches summarize current approaches to object recognition, detection, and segmentation demonstrate expertise towards the implementation and programming practices of effective computer vision applications ","date":1661990400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1661990400,"objectID":"7981ea9f46ce9dd391990a2500d6bc97","people":null,"permalink":"https://bionicvisionlab.org/teaching/2022-fall-cs181/","publishdate":"2022-09-01T00:00:00Z","relpermalink":"/teaching/2022-fall-cs181/","section":"teaching","summary":"Overview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, alignment and warping; stereo vision; feature extraction, matching, and tracking; deep learning-based object detection, recognition, and segmentation; and case studies of practical vision systems.","title":"CS/ECE-181: Introduction to Computer Vision","type":"teaching"},{"categories":null,"content":"","date":1661299200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1661299200,"objectID":"05694a8455cf62a720fad6af5db9d731","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-08-factors-two-point-argus/","publishdate":"2022-08-24T00:00:00Z","relpermalink":"/publications/2022-08-factors-two-point-argus/","section":"publications","summary":"We explored the causes of high thresholds and poor spatial resolution within the Argus II epiretinal implant.","title":"Factors affecting two-point discrimination in Argus II patients","type":"publications"},{"categories":[],"content":"Ph.D. student Byron Johnson won not just one, but three travel awards for his exciting work using eye tracking to simulate artificial scotomas (damaged regions of the visual field) and their effects on visual processing and eye movements.\rIn late May, Byron's poster \"The Effect of a Simulated Scotoma on Rapid Scene Understanding\" was selected for a travel fellowship to present at the Center for Visual Science's 32nd Symposium on Active Vision, hosted at the University of Rochester/Memorial Art Gallery (see photo).\rIn July, Byron received a travel award from the Helmsley Charitable Trust to attend the prestigious three-week course for Computational Neuroscience: Vision at the Cold Spring Harbor Laboratory.\rAnd in August, Byron received a travel award to attend and give a talk about his research at the 2022 Biennial Perceptual Learning Workshop, held in Alyeska, Alaska. Congratulations, Byron!\nOver in Computer Science, M.S. student Ashley Bruce was recognized with the MS Student of the Year award, presented annually in recognition of a student who has excelled in both research and either department service or teaching.\rAshley's research involved the optimization of epiretinal implant designs, which was accepted to the main track at MICCAI '22.\rFrom faculty: “Looking at her success and productivity, it is sometimes easy to forget that Ashley did not come from a traditional CS background. This means that a lot of the computing skills at which she now excels were self-taught. Not only did she make it into our prestigious Masters program, but she has thrived in it.”\nMeanwhile Yuchen Hou earned one of the most prestigious awards of the Psychological \u0026 Brain Sciences (PBS) department: the Abdullah \u0026 Marjorie R. Nasser Memorial Scholarship Fund Award.\rThis award recognizes Yuchen's outstanding scholarship (she finished her BS with a 3.98 overall GPA) and dedication to research.\rWe are fortunate to have Yuchen continue as Ph.D. student in the lab.\nAnd the list goes on, with Tanya Bhatia winning the PBS Chairperson's Award - as well as the Exceptional Academic Performance Award alongside Yuchen Hou, Anvitha Akkaraju, and Ananth Mahes. In addition, Tanya and Anvitha completed innovative Honors Theses in the lab, which hopefully will see the light of the day as a peer-reviewed publication in the next couple of months.\nWhat a fantastic array of achievements. Congratulations everyone!\n","date":1659342540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1659342540,"objectID":"21efcc51af401b482f6e5762695aa913","people":null,"permalink":"https://bionicvisionlab.org/news/2022-08-awards/","publishdate":"2022-08-01T01:29:00-07:00","relpermalink":"/news/2022-08-awards/","section":"news","summary":"With the academic year coming to a close, the work of multiple Bionic Vision Lab members was recognized by UCSB campus-wide as well as national awards.","title":"Bionic Vision Lab Member Awards 2022","type":"news"},{"categories":null,"content":"","date":1655856000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1655856000,"objectID":"1217f1e403d91b8b044fb7696f6e8ebf","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-06-motion-perception-stdp-h/","publishdate":"2022-06-22T00:00:00Z","relpermalink":"/publications/2022-06-motion-perception-stdp-h/","section":"publications","summary":"We developed a spiking neural network model that showed MSTd-like response properties can emerge from evolving spike-timing dependent plasticity with homeostatic synaptic scaling (STDP-H) parameters of the connections between area MT and MSTd.","title":"Cortical motion perception emerges from dimensionality reduction with evolved spike-timing dependent plasticity rules","type":"publications"},{"categories":[],"content":"\u0026ldquo;We started working on this project in an attempt to solve the long-standing problem of stimulus optimization in visual prostheses,\u0026rdquo; Jacob Granley, one of the researchers who carried out the study, told TechXplore. \u0026ldquo;One of the likely causes for the poor results achieved by visual prostheses is the naive stimulus encoding strategy that devices conventionally use. Previous works have suggested encoding strategies, but many are unrealistic, and none have given a general solution that could work across implants and patients.\u0026rdquo;\nRead the full article at techxplore.com.\nThe paper has been accepted at NeurIPS \u0026lsquo;22.\n","date":1655803740,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1655803740,"objectID":"6db51169b111943186ebe3b8b46fa953","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2022-06-techxplore/","publishdate":"2022-06-21T09:29:00Z","relpermalink":"/news/2022-06-techxplore/","section":"news","summary":"What is the required stimulus to produce a desired percept? Our latest work on deep learning-based stimulus optimization was featured in a news article by TechXplore.","title":"A neural autoencoder to enhance sensory neuroprostheses","type":"news"},{"categories":null,"content":"","date":1655596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1655596800,"objectID":"faa9a03d09362bb4c8dd2abc464f6693","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-06-spike-latency-winner-take-all/","publishdate":"2022-05-23T00:00:00Z","relpermalink":"/publications/2022-06-spike-latency-winner-take-all/","section":"publications","summary":"We present a SNN model that uses spike-latency coding and winner-take-all inhibition to efficiently represent visual stimuli from the Fashion MNIST dataset.","title":"Efficient visual object representation using a biologically plausible spike-latency code and winner-take-all inhibition","type":"publications"},{"categories":null,"content":"Extended reality (XR) is a powerful tool for human behavioral research. The ability to create 3D visual scenes and measure responses to arbitrary visual stimuli enables the behavioral researcher to test hypotheses in a well-controlled environment. However, software packages such as SteamVR, OpenXR, and ARKit have been developed for game designers rather than behavioral researchers. While Unity is considered the most beginner-friendly platform, barriers still exist for inexperienced programmers. Toolboxes such as VREX and USE have focused on simplifying experimental design and remote data collection, but no tools currently exist to help with all aspects of an experiment.\nTo address this challenge, Justin M. Kasowski developed SimpleXR during his PhD studies. SimpleXR is an open-source Unity package that allows for creating complex experiments with relatively little code. The toolbox contains a plethora of tools that are particularly useful for the visual sciences, such as creating dynamic scenes, randomizing object locations, accessing eye-tracker data, and applying full-screen shader effects (e.g., blurring, gaze-contingent scotomas, edge detection) either in virtual reality (VR) or to the pass-through camera for augmented reality (AR) tasks. SimpleXR also provides one-line commands for interacting with virtual objects, displaying stimuli and instructions, using timers, and much more. Additionally, it automatically switches between desktop and immersive VR modes. sXR creates separate user interfaces for the experimenter and participant, allowing the experimenter to track performance and monitor for anomalies. By using Unity’s Universal Rendering Pipeline, SimpleXR allows researchers to develop across platforms, including VR headsets, AR glasses, and smartphones.\n","date":1653091200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1653091200,"objectID":"30a9394e245123ee7bfd3f0c8403a586","people":null,"permalink":"https://bionicvisionlab.org/code/simplexr/","publishdate":"2022-05-21T00:00:00Z","relpermalink":"/code/simplexr/","section":"code","summary":"SimpleXR is a Unity package designed to facilitate rapid development of visual and behavioral experiments using virtual and augmented reality.","title":"SimpleXR: An open-source Unity toolbox for simplified XR development","type":"code"},{"categories":null,"content":"\r\r","date":1646611200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646611200,"objectID":"8efc4550929d5e5f7e0beac1472d6641","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-03-deep-stimulus-encoder/","publishdate":"2022-03-07T00:00:00Z","relpermalink":"/publications/2022-03-deep-stimulus-encoder/","section":"publications","summary":"We propose a perceptual stimulus encoder based on convolutional neural networks that is trained in an end-to-end fashion to predict the electrode activation patterns required to produce a desired visual percept. ","title":"Deep learning-based perceptual stimulus encoder for bionic vision","type":"publications"},{"categories":null,"content":"\r\r\r\r","date":1646611200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646611200,"objectID":"b2024030dd8e104999ceff361e83f99e","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2022-03-immersive-vr/","publishdate":"2022-03-07T00:00:00Z","relpermalink":"/publications/2022-03-immersive-vr/","section":"publications","summary":"We present VR-SPV, an open-source virtual reality toolbox for simulated prosthetic vision that uses a psychophysically validated computational model to allow sighted participants to 'see through the eyes' of a bionic eye user.","title":"Immersive virtual reality simulations of bionic vision","type":"publications"},{"categories":null,"content":"A major outstanding challenge in the field of bionic vision is predicting what people “see” when they use their devices. The limited field of view of current devices necessitates head movements to scan the scene, which is difficult to simulate on a computer screen. In addition, many computational models of bionic vision lack biological realism.\nTo address these challenges, we present BionicVisionXR, an open-source virtual reality toolbox for simulated prosthetic vision that uses a psychophysically validated computational model to allow sighted participants to “see through the eyes” of a bionic eye user.\n","date":1646092800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646092800,"objectID":"aaac1b4b99b12c8538f8882d903955fc","people":null,"permalink":"https://bionicvisionlab.org/code/bionicvisionxr/","publishdate":"2022-03-01T00:00:00Z","relpermalink":"/code/bionicvisionxr/","section":"code","summary":"BionicVisionXR is an open-source virtual reality toolbox for simulated prosthetic vision that uses a psychophysically validated computational model to allow sighted participants to \"see through the eyes\" of a bionic eye recipient.","title":"BionicVisionXR: An Open-Source Virtual Reality Toolbox for Bionic Vision","type":"code"},{"categories":null,"content":"PSY-221F is the new course number for PSY-265 formerly taught by Greg Ashby\nCourse Description This is a lecture course that surveys computational neuroscience, which is a branch of neuroscience that employs mathematical models, theoretical analysis, and abstractions of the brain to understand the principles that govern development, structure, physiology, and cognitive abilities of the nervous system. We will cover both classical (e.g., GLM, LIF, Hodgkin-Huxley model) and state-of-the-art methods (i.e., deep learning).\nBy the end of this course, you should be able to:\n describe how the brain \u0026ldquo;computes\u0026rdquo;, describe different methods that computational neuroscientists use to model neural coding, computationally model the biophysics of single neurons and the dynamics of neural networks, fit a computational model to experimental data. You will gain experience both conceptually and practically, by homework assignments that involve solving problems and implementing computational models. However, this is not primarily a programming course - that is, the main goal is to learn the concepts, not to learn a programming language or particular programming techniques. However, coding examples of the concepts is the best way to demonstrate (and facilitate) your knowledge of them. Lab sections will feature Python \u0026amp; math tutorials, hands-on examples, and guided programming sessions.\nPrerequisites The formal prerequisite is PSY-221B, but the only part of that course that is necessary is the introduction to matrix algebra.\nThe actual necessary background includes:\n calculus, some prior exposure to matrix algebra, some prior exposure to Python. Desirable, but not strictly necessary:\n prior exposure to differential equations, basic knowledge of neuroscience. Content Textbook: Dayan \u0026amp; Abbott (2001)\nTopics to be covered:\n Intro to CompNeuro: concepts, properties of neurons, cell types Neural encoding: spike trains and firing rates, early visual system Neuroelectronics: Electrical properties of neurons, Nernst equation Point neuron models: LIF, Izhikevich neurons, Hodgkin-Huxley neurons Morphological neuron models: synaptic conducances, cable equation, multi-compartment models Network models: firing rate models, feedforward/recurrent models, stochastic networks Plasticity \u0026amp; learning: short \u0026amp; long-term plasticity, reinforcement learning Machine and deep learning: model fitting, GLM, CNN, RNN Applications: sensory systems, language, decision-making, \u0026hellip; Your grade will be determined by biweekly quizzes, homework assignments (drop the lowest) and a take-home final exam.\nMore information at: https://gauchospace.ucsb.edu/courses/course/view.php?id=10610.\n","date":1646092800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1646092800,"objectID":"690d235d8e7f4e8a8e3ea4e2083a195b","people":null,"permalink":"https://bionicvisionlab.org/teaching/2022-spring-psy221f/","publishdate":"2022-03-01T00:00:00Z","relpermalink":"/teaching/2022-spring-psy221f/","section":"teaching","summary":"A lecture course that surveys computational neuroscience, which is a branch of neuroscience that employs mathematical models, theoretical analysis, and abstractions of the brain to understand the principles that govern development, structure, physiology, and cognitive abilities of the nervous system. We will cover both classical (e.g., GLM, LIF, Hodgkin-Huxley model) and state-of-the-art methods (i.e., deep learning).","title":"PSY-221F: Computational Neuroscience","type":"teaching"},{"categories":[],"content":"Prof. Beyeler was mentioned in a recent article by The Guardian that discusses recent advances in machine learning and what data-driven discovery means for the classic methodology of hypothesizing, predicting, and testing.\n\rA tougher obstacle to the new science may be our human need to explain the world – to talk in terms of cause and effect. In 2019, neuroscientists Bingni Brunton and Michael Beyeler of the University of Washington, Seattle, wrote that this need for interpretability may have prevented scientists from making novel insights about the brain, of the kind that only emerges from large datasets. But they also sympathised. If those insights are to be translated into useful things such as drugs and devices, they wrote, “it is imperative that computational models yield insights that are explainable to, and trusted by, clinicians, end-users and industry”.\rLaura Spinny, The Guardian\r\rThe full article can be read here.\n","date":1641745740,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1641745740,"objectID":"c375d3cf9a926141c26a3665dcf1d2bd","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2022-01-guardian/","publishdate":"2022-01-09T09:29:00-07:00","relpermalink":"/news/2022-01-guardian/","section":"news","summary":"Prof. Beyeler was mentioned in a recent article by The Guardian.","title":"Are we witnessing the dawn of post-theory science?","type":"news"},{"categories":null,"content":"","date":1640131200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1640131200,"objectID":"10e800338dcbf02f3fd25a256ed0cc7d","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-learning-to-see-again/","publishdate":"2021-12-22T00:00:00Z","relpermalink":"/publications/2021-learning-to-see-again/","section":"publications","summary":"We show that sighted individuals can learn to adapt to the unnatural on- and off-cell population responses produced by electronic and optogenetic sight recovery technologies.","title":"Learning to see again: Perceptual learning of simulated abnormal on- off-cell population responses in sighted individuals","type":"publications"},{"categories":null,"content":"Rather than aiming to one day restore natural vision (which may remain elusive until we fully understand the neural code of vision), we might be better off thinking about how to create practical and useful artificial vision now. Specifically, a visual prosthesis has the potential to provide visual augmentations through the means of artificial intelligence (AI) based scene understanding (e.g., by highlighting important objects), tailored to specific real-world tasks that are known to affect the quality of life of people who are blind (e.g., face recognition, outdoor navigation, self-care).\nIn the future, these visual augmentations could be combined with GPS to give directions, warn users of impending dangers in their immediate surroundings, or even extend the range of visible light with the use of an infrared sensor (think bionic night-time vision). Once the quality of the generated artificial vision reaches a certain threshold, there are a lot of exciting avenues to pursue.\n","date":1639267200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1639267200,"objectID":"483f7439d7ffd8ed32df84fef3409470","people":null,"permalink":"https://bionicvisionlab.org/research/smart-bionic-eye/","publishdate":"2021-12-12T00:00:00Z","relpermalink":"/research/smart-bionic-eye/","section":"research","summary":"Rather than aiming to one day restore *natural* vision, we might be better off thinking about how to create practical and useful *artificial* vision now.","title":"Towards a Smart Bionic Eye","type":"research"},{"categories":null,"content":"Our lack of understanding of multi-electrode interactions severely limits current stimulation protocols. For example, current Argus II protocols simply attempt to minimize electric field interactions by maximizing phase delays across electrodes using ‘time-multiplexing’. The assumption is that single-electrode percepts act as atomic ‘building blocks’ of patterned vision. However, these building blocks often fail to assemble into more complex percepts.\nThe goal of this project is therefore to develop new stimulation strategies that minimize perceptual distortions. One potential avenue is to view this as an end-to-end optimization problem, where a deep neural network (encoder) is trained to predict the electrical stimulus needed to produce a desired percept (target).\nImportantly, this model would have to be trained with the phosphene model in the loop, such that the overall network would minimize a perceptual error between the predicted and target output. This is technically challenging, because a phosphene model must be:\n simple enough to be differentiable such that it can be included in the backward pass of a deep neural network, complex enough to be able to explain the spatiotemporal perceptual distortions observed in real prosthesis patients, and amenable to an efficient implementation such that the training of the network is feasible. ","date":1638316800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1638316800,"objectID":"2c6fc964c5b613455122b2baa2413267","people":null,"permalink":"https://bionicvisionlab.org/research/end-to-end-optimization/","publishdate":"2021-12-01T00:00:00Z","relpermalink":"/research/end-to-end-optimization/","section":"research","summary":"Rather than predicting perceptual distortions, one needs to solve the inverse problem: What is the best stimulus to generate a desired visual percept?","title":"End-to-End Optimization of Bionic Vision","type":"research"},{"categories":null,"content":"A major outstanding challenge is predicting what people \u0026ldquo;see\u0026rdquo; when they use their devices.\nInstead of seeing focal spots of light, current visual implant users perceive highly distorted percepts, which vary in shape not just across subjects but also across electrodes and often fail to assemble into more complex percepts. Furthermore, phosphenes appear fundamentally different depending on whether they are generated with retinal or cortical implants.\nThe goal of this project is thus to combine psychophysical and neuroanatomical data that can inform phosphene models capable of linking electrical stimulation directly to perception.\n","date":1635811200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1635811200,"objectID":"b4577bf7255e69c478cf022f745b64e1","people":null,"permalink":"https://bionicvisionlab.org/research/predicting-visual-outcomes-visual-prostheses/","publishdate":"2021-11-02T00:00:00Z","relpermalink":"/research/predicting-visual-outcomes-visual-prostheses/","section":"research","summary":"What do visual prosthesis users see, and why? Clinical studies have shown that the vision provided by current devices differs substantially from normal sight.","title":"Predicting Visual Outcomes for Visual Prostheses","type":"research"},{"categories":null,"content":"","date":1635724800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1635724800,"objectID":"9a060bf8d239d3032bdf3b9c520fe632","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-biphasic-axon-map/","publishdate":"2021-11-01T00:00:00Z","relpermalink":"/publications/2021-biphasic-axon-map/","section":"publications","summary":"We present a phenomenological model that predicts phosphene appearance as a function of stimulus amplitude, frequency, and pulse duration.","title":"A computational model of phosphene appearance for epiretinal prostheses","type":"publications"},{"categories":null,"content":"The goal of this project is to obtain a nuanced understanding of the strategies that people who are blind or visually impaired (BVI) employ to perform different instrumental activities of daily living (iADLs).\nIdentifying useful and relevant visual cues that could support these iADLs, especially when the task involves some level of scene understanding, orientation, and mobility, will be essential to the success of near-future visual accessibility aids.\n","date":1635724800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1635724800,"objectID":"0436c06999d02c1a39549c55f7aa4543","people":null,"permalink":"https://bionicvisionlab.org/research/information-needs-blind-low-vision/","publishdate":"2021-11-01T00:00:00Z","relpermalink":"/research/information-needs-blind-low-vision/","section":"research","summary":"A nuanced understanding of the strategies that people who are blind or visually impaired employ to perform different instrumental activities of daily living (iADLs) is essential to the success of future visual accessibility aids.","title":"Understanding the Information Needs of People Who Are Blind or Visually Impaired","type":"research"},{"categories":null,"content":"Globally, millions of individuals with visual impairments face significant challenges in navigation and independence. Traditional white canes, while helpful, offer limited assistance in complex environments. Accessible tech, once effective, often turns commercial with hefty prices. This hits harder due to the high unemployment rate among the visually impaired.\nOur goal is to leverage computer vision and AI to co-design the next-generation of accessible technologies with people who are blind, focusing on affordable design and open-source software, thereby enabling community-driven development and widespread impact.\n","date":1633219200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1633219200,"objectID":"6fda4c91724fcd127422de347bd18680","people":null,"permalink":"https://bionicvisionlab.org/research/assistive-technologies-blind/","publishdate":"2021-10-03T00:00:00Z","relpermalink":"/research/assistive-technologies-blind/","section":"research","summary":"This research explores the integration of computer vision into various assistive devices, aiming to enhance urban navigation and environmental interaction for individuals who are blind or visually impaired.","title":"Assistive Technologies for People Who Are Blind","type":"research"},{"categories":null,"content":"","date":1632700800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1632700800,"objectID":"1ea7bfff6a164cada4127025bca057b9","people":["Jacob Granley","Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-hba-u-net/","publishdate":"2021-09-27T00:00:00Z","relpermalink":"/publications/2021-hba-u-net/","section":"publications","summary":"We propose HBA-U-Net: a U-Net backbone with hierarchical bottleneck attention to highlight retinal abnormalities that may be important for fovea and optic disc segmentation in the degenerated retina.","title":"U-Net with hierarchical bottleneck attention for landmark detection in fundus images of the degenerated retina","type":"publications"},{"categories":[],"content":"Prof. Beyeler was part of the Giz Asks series, where the focused turned to the prospect of using brain-machine interface technology to directly write in information to the brain.\nRead the full interview here.\n","date":1632130140,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1632130140,"objectID":"d4a0e49065c203f630cac27fbfa59336","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2021-09-giz-asks/","publishdate":"2021-09-20T09:29:00Z","relpermalink":"/news/2021-09-giz-asks/","section":"news","summary":"Prof. Beyeler was featured in a Giz Asks article about the prospect of using brain-machine interfaces to directly write in information to the brain.","title":"Will it ever be possible to upload information to my brain?","type":"news"},{"categories":null,"content":"How are visual acuity and daily activities affected by visual impairment?\nPrevious studies with people who have retinal degeneration have shown that vision is altered and impaired in the presence of a scotoma. This is also the case when a sighted person is tested under simulated low vision (SLV) conditions. However, the extent to which patient-specific factors affect vision and quality of life is not well understood.\nTesting sighted participants with SLV allows us to compare performance to real patients, design simulations to be as naturalistic as possible, and assess changes in vision for real life tasks instead of relying on acuity alone.\n","date":1630454400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1630454400,"objectID":"8f35d635d63daf950223aa36a8f607ce","people":null,"permalink":"https://bionicvisionlab.org/research/simulated-visual-impairment/","publishdate":"2021-09-01T00:00:00Z","relpermalink":"/research/simulated-visual-impairment/","section":"research","summary":"How are visual acuity and daily activities affected by visual impairment? Previous studies have shown that vision is altered and impaired in the presence of a scotoma, but the extent to which patient-specific factors affect vision and quality of life is not well understood.","title":"Simulating Visual Impairment","type":"research"},{"categories":null,"content":"Understanding the early visual system in health and disease is a key issue for neuroscience and neuroengineering applications such as visual prostheses.\nAlthough the processing of visual information in the healthy retina and early visual cortex (EVC) has been studied in detail, no comprehensive computational model exists that captures the many cell-level and network-level biophysical changes common to retinal degenerative diseases and other sources of visual impairment.\nTo address this challenge, we are developing computational models of the retina and EVC to elucidate the neural code of vision.\n","date":1630281600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1630281600,"objectID":"ae96ee0678ca69a1e2207d06cd74a9ac","people":null,"permalink":"https://bionicvisionlab.org/research/neuroai-models-visual-system/","publishdate":"2021-08-30T00:00:00Z","relpermalink":"/research/neuroai-models-visual-system/","section":"research","summary":"Understanding the visual system in health and disease is a key issue for neuroscience and neuroengineering applications such as visual prostheses.","title":"NeuroAI Models of the Visual System","type":"research"},{"categories":null,"content":"What would the world look like with a bionic eye?\nThis graduate course will introduce students to the multidisciplinary field of bionic vision viewed through the lens of computer science, neuroscience, and human-computer interaction.\nThe course will conclude with a programming project (teams of ≤ 4, any language/environment ok) in lieu of a final exam, giving students an opportunity to gain hands-on experience of working on open research problems using methods and tools best suited to their scientific background.\n Instructor Michael Beyeler (first initial last name at ucsb dot edu) Class F21, Tue/Thu 9:00 \u0026ndash; 10:50 am, Phelps 3526 Office Hours Tue 2\u0026ndash;4 pm Zoom (schedule a meeting) \rChangelog:\r 2021-10-09: Switched order of mid-quarter content 2021-09-27: Updated office hours and assigned reading for Week 1 2021-09-15: Posted initial schedule \rCourse Objectives The course will give an overview of current bionic eye technology designed to restore vision to people living with incurable blindness. By the end of the course, you should be able to:\n identify various types of bionic eye technologies, their differences and similarities explain how the retina and visual cortex support our sense of seeing apply common computer vision \u0026amp; machine learning techniques for stimulus encoding give a nuanced review of the HCI \u0026amp; ethics issues associated with implantable neurotechnology demonstrate your hands-on experience of working on open problems in the field The course is targeted to a diverse audience spanning computer science (computer vision, human factors, deep learning) to psychology (vision, psychophysics) and brain sciences (computational neuroscience, neuroengineering).\nPrerequisites There are no official prerequisites for this course. The instructor will do his best to make the course content self-contained, including a crash course in neuroscience \u0026amp; computational vision. However, homeworks and final projects will require programming. Homeworks will be based around pulse2percept, a Python-based simulation framework for bionic vision. Any suitable programming language/framework is ok for the final project. FAQ Will classes be in person? Yes, and you are strongly encouraged to attend. What about office hours? I will offer both in-person and virtual office hours. Do instructors and students need to wear a mask in class? Yes. There will be no exceptions to this policy. Sure, it\u0026rsquo;ll be a little weird at first, but I\u0026rsquo;m sure we\u0026rsquo;ll all adjust pretty quickly. What if I can\u0026rsquo;t make a lecture? Send me a quick email before the lecture. You don\u0026rsquo;t need a reason for your first 3 absences. I will do my best to record the lectures and upload them to GauchoCast for those who cannot make a lecture, but I am unable to make any remote accommodations beyond that. What if I\u0026rsquo;m sick or need to quarantine or isolate? Now more important than ever, do not come in if you feel under the weather. Email me, then follow UCSB testing \u0026amp; quarantine protocol. What if the instructor needs to quarantine? In this case we will temporarily shift to remote instruction. What if my question isn\u0026rsquo;t answered here? I\u0026rsquo;m happy to answer your question via email. Schedule Note: This schedule is subject to slight change over the course of the quarter.\n\r\rWk\rDate\rReading\rTopics\rAction\rHWout\rHWdue\rQuiz\r\r\r\r\r0\rThu\rSep 23\r\rR1\r\r\r\rL1: Introduction\rL2: Bionic Vision - Then \u0026 Now\r\r\r\r\r\r\r\r\r\r1\rTue\rSep 28\rR2, R3\r\r\rL3: Sight Recovery Technologies\rL4: Foundations of Vision\r \r\rHW1\r\r\r\r\rThu\rSep 30\r\r\r\rL5: Computational Neuroscience\rA1: Introduction to Google Colab \u0026 Python\r\r Quiz 1 (Q1) due by Sun, Oct 3, 11:59 pm.\r\r\r\rA1\r\r\rQ1\r\r\r2\rTue\rOct 5\rR4\r\rGuest lecture by Aiwen Xu\r\rL6: Retina in Health \u0026 Disease\r\r\r\r\r\r\r\r\rThu\rOct 7\rR5\r\r\rL7: Retinal Prostheses\r\r Homework 1 (HW1) due by Sun, Oct 10, 11:59 pm.\r\r\r\r\r\rHW1\r\r\r\r3\rTue\rOct 12\rR6\r\r\rL8: Computational Models of Bionic Vision\r\r\r\r\r\r\r\r\rThu\rOct 14\rR7\r\r\rA2: Introduction to pulse2percept in Python\rA3: Group Project Discussion\r\r Quiz 2 (Q2) due by Sun, Oct 17, 11:59 pm.\r\r\r\rA2, A3\rHW2\r\rQ2\r\r\r4\rTue\rOct 19\rR8\r\r\rL9: The Role of Machine Learning in Bionic Vision\r\r\r\r\r\r\r\r\rThu\rOct 21\rR9\r\r\rL10: Optimizing Electrical Stimulation in an Artificial Retina\r\r Homework 2 (HW2) due by Sun, Oct 24, 11:59 pm.\r\r\r\r\r\rHW2\r\r\r\r5\rTue\rOct 26\rR10, R11\r\r\rL11: The Role of Mixed Reality in Bionic Vision\r\r\rA4\r\r\r\r\r\rThu\rOct 28\r\r\rA4: Project Progress Presentations \rTeam \u0026 project description (TPD) due by Sun, Oct 31, 11:59 pm.\r\r A5\r\rTPD\r\r\r\r6\rTue\rNov 2\rR12\r\r\rL12: The Role of Image Processing in Bionic Vision\rA5: BionicVisionVR Demo by Justin Kasowski\r\r\r\r\r\r\r\r\rThu\rNov 4\rR13\r\r\rL13: The Role of Computer Vision in Bionic Vision\rA6: Smart Bionic Eye\r\r Quiz 3 (Q3) due by Sun, Nov 7, 11:59 pm.\r\r\r\rA6\r\r\rQ3\r\r\r7\rTue\rNov 9\rR14, R15\r\r\rL14: Cortical Prostheses - Approaches \u0026 Challenges\rL15: Phosphene Models for Cortical Prostheses\r\r\r\r\r\r\r\rThu\rNov 11\r\r\rVeterans' Day\r\r\r\r\r\r\r\r8\rTue\rNov 16\rR16, R17\r\r\rL16: Learning to See Again with a Bionic Eye\r\r\r\r\r\r\r\r\rThu\rNov 18\r\r\rA7: Guest Visit by Jason Esterhuizen, ORION implantee \r Quiz 4 (Q4) due by Sun, Nov 21, 11:59 pm.\r\r\r\r\rA7\r\r\r\r\rQ4\r\r\r9\rTue\rNov 23\r\r\rA8: Project Progress Presentations\r\rA8\r\r\r\r\rThu\rNov 25\r\r\r\rThanksgiving Day\r\r\r\r\r\r\r\r10\rTue\rNov 30\r\r\r\r\rL17: The Future of Bionic Vision\r\r\r\r\r\r\r\r\rThu\rDec 2\r\r\r\r\rA9: Quarter Review\r\r\rProject report (PR) \u0026amp; source code (SC) due Sun, Dec 5, 11:59 pm.\r\r\r\r\rA9\r\r\rPR\u0026amp;SC\r\r\r11\rTue\rDec 7\r\rA10: Final Project Presentations\rA10\r\r\r\r\r\r\rCourse Requirements \u0026amp; Grading Your final grade will be determined as follows:\n 20% Homework assignments: 10% Homework 1 10% Homework 2 30% \u0026ldquo;Check Your Knowledge\u0026rdquo; quizzes 10% per quiz Lowest-scoring quiz will be dropped 50% Final project implementation, documentation, and presentation 5% Project proposal presentation (1 slide) 5% Project progress presentation (2 slides: what have you done, what\u0026rsquo;s left to do) 20% Project final presentation 20% Project final report (+5% extra credit if project shows promise of turning into a publication) Lateness Policy All assignments are due at 11:59:59 pm on the scheduled due date, typically a Sunday (timestamp of the online submission system).\n Each student will be allowed 3 \u0026ldquo;late days\u0026rdquo; over the course of the quarter for which lateness will not be penalized. Late days cannot be applied to project deadlines. Late days may be applied to the quizzes and homework assignments: Anything turned in between 12:00:00 am and 11:59:59 pm the next day is one day late; every day thereafter that an assignment is late, including weekends and holidays, counts as an additional late day. No late work will be accepted after the deadline if you have used up all your late days. If you\u0026rsquo;re not done on time, you should turn in what you have to receive partial credit. No exceptions will be made for the final project report. Please make sure you understand this policy.\n\u0026ldquo;Check Your Knowledge\u0026rdquo; Quizzes We will have 4 GauchoSpace quizzes over the quarter that test your theoretical/conceptual knowledge of the course content (this includes lectures and assigned reading materials).\nThe following rules apply:\n Quizzes must be completed by Sunday 11:59 pm of the respective week (lateness policy applies) You have 30 mins per attempt A quiz can be taken twice. If you decide to take the quiz again, only your second attempt will be counted (that is, the score from your first attempt will be dropped, this is called \u0026ldquo;grading method: last attempt\u0026rdquo; on GauchoSpace) At the end of the quarter, the lowest-scoring quiz will be dropped. (Each of the 3 highest-scoring quizzes will thus account for 10% of your grade) Final Project In lieu of a final exam, students will conduct a programming project (team size ≤ 4). The goal of the project is to gain hands-on experience working on open research questions in bionic vision using tools and methods best suited to their scientific background.\nAll projects must address a research question and have a programming component. Students are free to use any programming language and development environment they choose. Building a project based on pulse2percept is encouraged (especially for students with relatively little programming experience) but is by no means required. Reproducing key research findings in the literature is allowed. No pure literature reviews, please.\nProjects that show promise of turning into a publication will receive extra credit.\nStudents will present their project to the rest of the class during finals week. In addition, students will submit a write-up of their project and hand in their source code (see Milestones).\nThe project will be evaluated based on the:\n originality/novelty of the idea technical strength of the work (emphasis on the research, not the programming expertise) organization, clarity, and style of the project report effort and completeness of the work (normalized by the number of team members) Project Milestones Date Time Deliverable due Thu, Oct 14 9:00 am Students start forming teams and discussing project ideas in class. Thu, Oct 28 9:00 am Teams present their project ideas in class. Sun, Oct 31 11:59 pm Teams submit a project title and 2-3 sentence project description. Tue, Nov 23 9:00 am Teams present their project progress in class. Sun, Dec 5 11:59 pm Teams hand in their final project report and all source code. Tue, Dec 7 9:00 am Teams make their final project presentations in class. Students are encouraged to discuss ideas with the instructors, so that feedback can be incorporated early in the process.\nLate days cannot be used on these project deadlines.\nAcademic Integrity The University of California has formal policies related to academic integrity.\nAny act of academic dishonesty, such as cheating or plagiarism, will result in a University disciplinary action and an \u0026ldquo;F\u0026rdquo; in this course. In addition to academic integrity, I also expect everyone in this class to treat their fellow students and course staff with respect.\nBasic Needs If you are facing any challenges securing food or housing and believe this may affect your performance in the class, you are urged to meet with a Food Security and Calfresh Advocate who is aware of the broad variety of resources that UCSB has to offer (see their drop-in hours at food.ucsb.edu). You are also urged to contact the professor if you are comfortable doing so.\nPlease visit food.ucsb.edu for additional resources including Calfresh, the AS Food Bank, and more.\n","date":1628985600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1628985600,"objectID":"2dad7ab16b905e022d4e43580bb840eb","people":null,"permalink":"https://bionicvisionlab.org/teaching/2021-fall-cs291a/","publishdate":"2021-08-15T00:00:00Z","relpermalink":"/teaching/2021-fall-cs291a/","section":"teaching","summary":"This graduate course will introduce students to the multidisciplinary field of bionic vision viewed through the lens of computer science, neuroscience, and human-computer interaction.","title":"CS-291A: Bionic Vision","type":"teaching"},{"categories":null,"content":"","date":1620086400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1620086400,"objectID":"ce7e87355e58295ded0c12942f667a7c","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-explainable-ai/","publishdate":"2021-05-04T00:00:00Z","relpermalink":"/publications/2021-explainable-ai/","section":"publications","summary":"We present an explainable artificial intelligence (XAI) model fit on a large longitudinal dataset that can predict electrode deactivation in Argus II.","title":"Explainable AI for retinal prostheses: Predicting electrode deactivation from routine clinical measures","type":"publications"},{"categories":null,"content":"pulse2percept is a BSD-licensed, open-source Python package for simulated prosthetic vision (SPV).\nBuilt on the NumPy and SciPy stacks, as well as contributions from the broader Python community, pulse2percept provides an open-source implementation of several phosphene models for a wide range of state-of-the-art retinal prostheses, to provide insight into the visual experience provided by these devices.\nThe project started at the University of Washington under the guidance of Ione Fine, Geoff Boynton, and Ariel Rokem. It has since been repackaged to run on both CPU and GPU backends, extended to support both retinal and cortical prostheses, and upgraded to be compatible with the Open Neural Network Exchange (ONNX) standard.\nAs pulse2percept continues to be adopted by several research labs around the globe, we continue to improve its functionality and performance as well as add new implants, models, and datasets.\nDocumentation is available at https://pulse2percept.readthedocs.io.\nContribute at https://github.com/pulse2percept/pulse2percept.\n","date":1619827200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1619827200,"objectID":"378142164076a4135229e041fb38c801","people":null,"permalink":"https://bionicvisionlab.org/code/pulse2percept/","publishdate":"2021-05-01T00:00:00Z","relpermalink":"/code/pulse2percept/","section":"code","summary":"pulse2percept is an open-source Python simulation framework used to predict the perceptual experience of retinal prosthesis patients across a wide range of implant configurations.","title":"pulse2percept: A Python-Based Simulation Framework for Bionic Vision","type":"code"},{"categories":[],"content":"Research by the Bionic Vision Lab was featured in NVIDIA\u0026rsquo;s I AM AI trailer, which was premiered at NVIDIA GTC 2021:\n","date":1618997340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1618997340,"objectID":"156026ccc247500095ad40f0c3070210","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2021-04-nvidia-i-am-ai/","publishdate":"2021-04-21T09:29:00Z","relpermalink":"/news/2021-04-nvidia-i-am-ai/","section":"news","summary":"Our recent research was featured in NVIDIA's I AM AI trailer, premiered at NVIDIA GTC 2021","title":"Bionic Vision Lab featured in NVIDIA's I AM AI trailer","type":"news"},{"categories":[],"content":"Over the years, cyberpunk tales and sci-fi series have featured characters with cybernetic vision—most recently Star Trek Discovery\u0026rsquo;s Lieutenant Keyla Detmer and her ocular implants. In the real world, restoring “natural” vision is still a complex puzzle, though researchers at UC Santa Barbara are developing a smart prosthesis that provides cues to the visually impaired, much like a computer vision system talks to a self-driving car.\nToday, over 10 million people worldwide are living with profound visual impairment, many due to retinal degeneration diseases. Ahead of this week\u0026rsquo;s Augmented Humans International Conference, we spoke with Dr. Michael Beyeler, Assistant Professor in Computer Science and Psychological \u0026amp; Brain Sciences at UCSB, who is forging ahead with synthetic sight trials at his Bionic Vision Lab and will be presenting a paper at the conference.\nRead the full interview here.\n","date":1614097740,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1614097740,"objectID":"9ef4e2280812af8ef569089d069a0146","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2021-02-pcmag/","publishdate":"2021-02-23T09:29:00-07:00","relpermalink":"/news/2021-02-pcmag/","section":"news","summary":"Instead of focusing on one day restoring ‘natural’ vision, we may be better off thinking about how to create ‘practical’ and ‘useful’ artificial vision now.","title":"PCMag: Building the bionic eye... with car tech?","type":"news"},{"categories":null,"content":"\r","date":1614038400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1614038400,"objectID":"44c941643b53b9b8fe1d9d80e20ef966","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-towards-immersive-vr/","publishdate":"2021-02-23T00:00:00Z","relpermalink":"/publications/2021-towards-immersive-vr/","section":"publications","summary":"We propose to embed biologically realistic models of simulated prosthetic vision in immersive virtual reality so that sighted subjects can act as 'virtual patients' in real-world tasks.","title":"Towards immersive virtual reality simulations of bionic vision","type":"publications"},{"categories":null,"content":"\r","date":1613952000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1613952000,"objectID":"f4d31dde088561df71570dfd3af2cf11","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2021-scene-simplification/","publishdate":"2021-02-01T00:00:00Z","relpermalink":"/publications/2021-scene-simplification/","section":"publications","summary":"We combined deep learning-based scene simplification strategies with a psychophysically validated computational model of the retina to generate realistic predictions of simulated prosthetic vision.","title":"Deep learning-based scene simplification for bionic vision","type":"publications"},{"categories":null,"content":"Same course as ECE 181. Not open for credit to students who have completed ECE/CMPSC 181B with a grade of C or better. ECE/CMPSC 181 is a legal repeat of ECE/CMPSC 181B.\nPrerequisites: Upper-division standing in Electrical Engineering, Computer Engineering, Computer Science, Chemical Engineering or Mechanical Engineering.\n","date":1609459200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1609459200,"objectID":"ae5ee224e19cf700fc8876dd2c5037bf","people":null,"permalink":"https://bionicvisionlab.org/teaching/2021-winter-cs181/","publishdate":"2021-01-01T00:00:00Z","relpermalink":"/teaching/2021-winter-cs181/","section":"teaching","summary":"Overview of computer vision problems and techniques for analyzing the content of images and video. Topics include image formation, edge detection, image segmentation, pattern recognition, texture analysis, optical flow, stereo vision, shape representation and recovery techniques, issues in object recognition, and case studies of practical vision systems.","title":"CS/ECE-181: Introduction to Computer Vision","type":"teaching"},{"categories":[],"content":"Dr. Beyeler sat down with Luming Cao from SciSection\u0026rsquo;s Human and Science platform to talk about how bionic vision, as sci-fi as it sounds, is already helping to restore vision to the blind.\nThe transcript of this informal interview is now available, and a podcast will follow soon.\nRead the full interview here.\n","date":1601717340,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1601717340,"objectID":"4a7cc3efbf54119e45e4cf35151fa11c","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2020-10-scisection/","publishdate":"2020-10-03T09:29:00Z","relpermalink":"/news/2020-10-scisection/","section":"news","summary":"Prof. Beyeler talks about how bionic vision, as sci-fi as it sounds, is already helping to restore vision to the blind.","title":"SciSection: Interview with Michael Beyeler","type":"news"},{"categories":null,"content":"PSY 110A is the former number of PSY 130. Students who have completed PSY110A with a C- or below may take PSY 130 as a legal repeat.\nPrerequisites: Open to Psychological \u0026amp; Brain Sciences, Biopsychology, and Interdisciplinary Studies majors only.\nWe take our ability to see for granted. It is for the most part automatic and effortless and thus might seem relatively simple. But behind the scenes our brains dedicate over a 1/4 of their machinery to analyze and interpret the light falling on our eyes. How does the brain do it?\nIn this course we will learn how the brain gives rise to our visual experience from seeing depth, color, and motion, to recognizing faces and objects. Importantly, the course illustrates an approach to study psychology and the brain combining behavioral research, neurophysiology and computational theory.\n","date":1601424000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1601424000,"objectID":"42140d1938d80b337f7aa7055292a764","people":null,"permalink":"https://bionicvisionlab.org/teaching/2020-fall-psych130/","publishdate":"2020-09-30T00:00:00Z","relpermalink":"/teaching/2020-fall-psych130/","section":"teaching","summary":"An overview of theory and research into the human performance and biological processes of visual perception. Typical topics may range from the detection of simple stimuli to the identification of objects and events.","title":"PSY-130: Perception - Vision","type":"teaching"},{"categories":[],"content":"Michael Beyeler and the Bionic Vision Lab are featured heavily in the Spring Edition of UCSB\u0026rsquo;s College of Engineering Convergence Magazine:\n\rThere is research to try to understand the brain—how it works on a mechanistic and algorithmic level—and then there's applying that to an engineered system that can interface with the brain. Brain-computer interfaces can be used both for treating neurological and mental disorders as well as for understanding brain function.\r\rRead the full article here.\n","date":1588350540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1588350540,"objectID":"bfbbcf294ec41d730fd11d17e0061182","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2020-05-convergence/","publishdate":"2020-05-01T09:29:00-07:00","relpermalink":"/news/2020-05-convergence/","section":"news","summary":"In UCSB's College of Engineering, the phrase 'reverse engineering the brain' tends to relate to emerging technologies in neural networks and new machine-learning models that function more like the human brain.","title":"UCSB Convergence: Reverse engineering the brain","type":"news"},{"categories":null,"content":"How does cortical circuitry perform the visual scene analysis needed to support navigation through the environment?\nMost studies of central visual processing are focused on detection or discrimination of specific features of simple artificial stimuli (e.g., orientation, direction of motion, object identity). However, navigation through the environment involves a very different set of computational goals, such as identifying landmarks and using optic flow to avoid obstacles. Furthermore, these computations occur under a very different stimulus regime, with the animal actively sampling a complex and continually moving sensory scene.\nOur goal is to determine how the brain extracts relevant visual features from the rich, dynamic visual input that typifies active exploration, and develop (deep) predictive models of brain activity based on visual input and several behavioral variables. The data includes one-of-a-kind measures of neural activity in mice navigating through real-world and virtual environments, collected using 2-photon imaging and electrophysiology by our collaborators Spencer Smith, Michael Goard, and Cris Niell.\nThe results of this project will provide knowledge about normal visual function and insights for treating impaired vision via prosthetic or assistive devices.\n","date":1577923200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1577923200,"objectID":"e1165442b451c1b4848a6243964ca32d","people":null,"permalink":"https://bionicvisionlab.org/research/mouse-visual-navigation/","publishdate":"2020-01-02T00:00:00Z","relpermalink":"/research/mouse-visual-navigation/","section":"research","summary":"How does the brain extract relevant visual features from the rich, dynamic visual input that typifies active exploration, and how does the neural representation of these features support visual navigation?","title":"Cortical Visual Processing for Navigation","type":"research"},{"categories":null,"content":"What would the world look like with a bionic eye?\nThis graduate course will introduce students to the multidisciplinary field of bionic vision, with an emphasis on both the computer science and neuroscience of the field.\nThe course will conclude with a programming project (teams of ≤ 3, any language/environment ok) in lieu of a final exam, giving students an opportunity to gain hands-on experience of working on open research problems using methods and tools best suited to their scientific background.\n Instructor Michael Beyeler (first initial last name at ucsb dot edu) Class WQ 2020, Tue/Thu 9:00 \u0026ndash; 10:50 am, Phelps 3526 Office Hours Tue 4:00 \u0026ndash; 5:00 pm or by appointment, Psych East 3822 This website and the Piazza Forum will be used as centers for communication. Homework submission will occur through GauchoSpace. Make sure you are enrolled! (Come to class to get an add code.)\nTable of Contents\r Course Objectives Prerequisites Schedule Course Requirements \u0026amp; Grading Lateness Policy Final Project Project Milestones Project Presentation Project Report Academic Integrity Basic Needs \rCourse Objectives The course will give an overview of current bionic eye technology designed to restore vision to people living with incurable blindness. By the end of the course, you should be able to:\n Identify various types of bionic eye technologies, their differences and similarities Have a basic understanding of the neuroscience of the human visual system Be familiar with common preprocessing, encoding, and electrical stimulation methods Understand the limitations of current bionic eye technologies Have hands-on experience of working on open problems in the field The course is targeted to a diverse audience spanning from computer science (human factors, neural networks, computer vision) to psychology (vision, psychophysics) and brain sciences (computational neuroscience, neuroengineering).\nPrerequisites There are no official prerequisites for this course. The instructor will do his best to make the course content self-contained. However, prior programming experience (e.g., Python, Matlab, C++) will be highly beneficial as Homework 2 (HW2) and the final project require programming. Students will be introduced to pulse2percept, a Python-based simulation framework for bionic vision, which will form the basis for HW2 and (optionally) the final project. Schedule Import calendar: Note: This schedule is subject to change over the course of the quarter.\n\r\rWk\rDate\rReading\rTopics\rAction\rHW out\rHW due\r\r\r\r\r1\rTue\rJan 7\r\r\r\rIntroduction: Class requirements, policies\rBionic vision: then \u0026amp; now\r\r\r\r\r\r\r\rThu\rJan 9\r\rR1,\rR2\r\r\r\rBlinding eye diseases\rSight restoration approaches \u0026amp; challenges\r\r\r\r\rHW1\r\r\r\r\r2\rTue\rJan 14\r\rR3\r\r\r\rFundamentals of neuroscience\rThe visual system\r\r\r\r\r\r\r\rThu\rJan 16\r\rR4\r\r\r\rComputational neuroscience\rIntroduction to Python\r\r\rHomework 1 (HW1) due by Sun, Jan 19, 11:59 pm.\r\r\r\r\rA1\t\r\rHW1\r\r\r3\rTue\rJan 21\r\rR5,\rR6\r\r\r\rRetina in health \u0026amp; disease\rRetinal prostheses\r\r\r\r\r\r\r\rThu\rJan 23\r\rR7\r\r\r\rIntroduction to pulse2percept in Python\rProject ideas: Discussion \u0026amp; brainstorming\r\r\r\rA2\r\r\r\r\r\r4\rTue\rJan 28\r\rR8\r\r\r\rVisual psychophysics for retinal prostheses\rPhosphene models: Scoreboard, axon map\r\r\r\r\r\r\r\rThu\rJan 30\r\r\rTeams present their project ideas \rTeam \u0026 project description (TPD) due by Sun, Feb 2, 11:59 pm.\r\r\r\r\r\rTPD\r\r\r5\rTue\rFeb 4\r\rR9\r\r\r\rCortical prostheses: approaches, challenges\rPhosphene models for cortical prostheses\r\r\r\r\rHW2\r\r\r\r\rThu\rFeb 6\r\r\r\rMid-quarter review\r\r\r\rA3\r\r\r\r\r\r6\rTue\rFeb 11\r\rR10,\rR11\r\r\r\rTraining \u0026amp; rehabilitation\rCortical plasticity \u0026amp; perceptual learning\r\r\r\r\r\r\r\rThu\rFeb 13\r\r\rGuest Visit: Jason Esterhuizen, ORION implantee\r \rA4\r\r\r\r\r\r7\rTue\rFeb 18\r\rR12, \rR13\r\r\r\rImproving visual outcomes in bionic eye technologies\rAdvanced stimulation strategies\r\r\r\r\r\r\r\rThu\rFeb 20\r\rR14\r\r\r\rOptimizing electrical stimulation in an artificial retina\r\r\rHomework 2 (HW2) due by Sun, Feb 23, 11:59 pm.\r\r\r\r\r\rHW2\r\r\r8\rTue\rFeb 25\r\rR15, \rR16\r\r\r\rScene representation for future bionic eye technologies\rAdvanced encoding methods\r\r\r\r\r\r\r\rThu\rFeb 27\r\r\rTeams present project progress\r\r\r\r\r\r9\rTue\rMar 3\r\rTeams work on projects -- Instructor out of the country\r\r\r\r\r\rThu\rMar 5\r\rTeams work on projects -- Instructor out of the country\r\r\r\r\r\r\r10\rTue\rMar 10\r\rR17\r\r\rGuest Lecture: Dr. Noelle Stiles, USC/Caltech\r\r\r\r\r\r\rThu\rMar 12\r\rR18\r\r\r\rOutlook: Future of bionic vision\rAlternatives to brain-computer interfaces\r\r\rProject report (PR) \u0026amp; source code (SC) due Sun, Mar 15, 11:59 pm.\r\r\r\r\r\rPR\u0026amp;SC\r\r11\rTue\rMar 17\r\rTeams make their final project presentations\r\r\r\r\r\r\rCourse Requirements \u0026amp; Grading Your final grade will be determined as follows:\n 15% Class participation and attendance: Students are expected to attend all class sessions and actively participate in class discussions and activities. If a student must miss a session, they should email the instructor beforehand. Each student will be allowed 3 excused absences (no detailed explanation required) before their absence will start to negatively affect their participation grade. However, late arrivals and unexcused absences will most definitely have a negative effect on a student\u0026rsquo;s participation grade. 30% Homework assignments: 10% Homework 1 20% Homework 2 55% Final project implementation, documentation, and presentation 5% Project idea presentation (1 slide) 10% Project progress presentation (2 slides: what have you done, what\u0026rsquo;s left to do) 20% Project final presentation 20% Project final report (+5% extra credit if project shows promise of turning into a publication) Lateness Policy All assignments are due at 11:59:59 pm on the scheduled due date, typically a Sunday (timestamp of the online submission system).\n Each student will be allowed 3 \u0026ldquo;late days\u0026rdquo; over the course of the quarter for which lateness will not be penalized. Late days cannot be applied to project deadlines. Late days may be applied to one or both homework assignments: Anything turned in between 12:00:00 am and 11:59:59 pm the next day is one day late; every day thereafter that an assignment is late, including weekends and holidays, counts as an additional late day. Absolutely no late work will be accepted after the deadline if you have used up all your late days. If you\u0026rsquo;re not done on time, you must turn in what you have to receive partial credit. There will be no exceptions from this rule. No exceptions will be made for the final project report. Please make sure you understand this policy.\nFinal Project In lieu of a final exam, students will conduct a programming project (team size ≤ 3). The goal of the project is to gain hands-on experience working on open research questions in bionic vision using tools and methods best suited to their scientific background.\nAll projects must address a research question and have a programming component. Students are free to use any programming language and development environment they choose. Building a project based on pulse2percept is encouraged (especially for students with relatively little programming experience) but is by no means required. Reproducing key research findings in the literature is allowed. No pure literature reviews, please.\nProjects that show promise of turning into a publication will receive extra credit.\nStudents will present their project to the rest of the class during finals week. In addition, students will submit a write-up of their project and hand in their source code (see Milestones).\nThe project will be evaluated based on the:\n originality/novelty of the idea technical strength of the work (emphasis on the research, not the programming expertise) organization, clarity, and style of the project report effort and completeness of the work (normalized by the number of team members) Project Milestones Date Time Deliverable due Thu, Jan 23 9:00 am Students start forming teams and discussing project ideas in class. Thu, Jan 30 9:00 am Teams present their project ideas in class. Sun, Feb 2 11:59 pm Teams submit a project title and 2-3 sentence project description. Thu, Feb 27 9:00 am Teams present their project progress in class. Sun, Mar 15 11:59 pm Teams hand in their final project report and all source code. Tue, Mar 17 Teams make their final project presentations in class. Students are encouraged to discuss ideas with the instructors, so that feedback can be incorporated early in the process.\nLate days cannot be used on these project deadlines.\nProject Presentation Teams will present their project via Zoom on Tue, Mar 17.\nEach team will have 20 mins to present (+5 mins for Questions \u0026amp; Answers). Sign up for a time slot here.\nBefore the meeting, decide who will host the slides/demo. This person will share their screen during the meeting. Other team members can choose to be physically present with the person sharing the screen or simply log in from their own computer.\nThere are at least two strategies to present your work:\n Strategy A: Follow the outline of your report Introduction, Methods, Results, Discussion Strategy B: Top-down Give an overview of the project\u0026rsquo;s end result Follow with a detailed discussion of the various features/techniques Make sure to address the challenges you faced and how you overcame them! What have you learned?\nEvery student in the team must say something.\nProject Report Each team will also submit a write-up of their project:\n Use the CHI Extended Abstracts template Structure your report like a short research paper (~4 pages): Abstract: ~150 words Introduction (1-2 paragraphs) What did you study and why? Related work (1/2 page) Brief summary of the relevant literature. Make sure to point out gaps in the literature that your project is trying to address. Methods (1-2 pages) First paragraph: Describe the big-picture idea behind your system/model/approach. Subsections: Walk the reader through all the steps/features (with pictures/schematics). Results (1-2 pages) Structure based on research question(s) and/or experiments. Have 2-3 figures to support your claims. Explain each figure and summarize the findings. Discussion (1/2 page) First sentence: Summarize your findings. Discuss: What does it all mean? What have you learned? Future work? When you\u0026rsquo;re done, zip up the PDF/DOC together with all your source code and upload the zip file to GauchoSpace.\nDon\u0026rsquo;t forget to submit your source code.\nAcademic Integrity The University of California has formal policies related to academic integrity.\nAny act of academic dishonesty, such as cheating or plagiarism, will result in a University disciplinary action and an \u0026ldquo;F\u0026rdquo; in this course. In addition to academic integrity, I also expect everyone in this class to treat their fellow students and course staff with respect.\nBasic Needs If you are facing any challenges securing food or housing and believe this may affect your performance in the class, you are urged to meet with a Food Security and Calfresh Advocate who is aware of the broad variety of resources that UCSB has to offer (see their drop-in hours at food.ucsb.edu). You are also urged to contact the professor if you are comfortable doing so.\nPlease visit food.ucsb.edu for additional resources including Calfresh, the AS Food Bank, and more.\n","date":1577836800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1577836800,"objectID":"e575d5ab26bf85a2b9d88aa8cefe5cd2","people":null,"permalink":"https://bionicvisionlab.org/teaching/2020-winter-cs291i/","publishdate":"2020-01-01T00:00:00Z","relpermalink":"/teaching/2020-winter-cs291i/","section":"teaching","summary":"This graduate course will introduce students to the multidisciplinary field of bionic vision, with an emphasis on both the computer science and neuroscience of the field.","title":"CS-291I: Bionic Vision","type":"teaching"},{"categories":null,"content":"Neuromorphic event‐based vision sensors are poised to dramatically improve the latency, robustness and power in applications ranging from smart sensing to autonomous driving and assistive technologies for people who are blind.\nSoon these sensors may power low vision aids and retinal implants, where the visual scene has to be processed quickly and efficiently before it is displayed. However, novel methods are needed to process the unconventional output of these sensors in order to unlock their potential.\n","date":1577836800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1577836800,"objectID":"fd8775517ff4ebf614b1c8406a8ad65c","people":null,"permalink":"https://bionicvisionlab.org/research/event-based-vision/","publishdate":"2020-01-01T00:00:00Z","relpermalink":"/research/event-based-vision/","section":"research","summary":"Neuromorphic event-based vision sensors may soon power low vision aids and retinal implants, where the visual scene has to be processed quickly and efficiently before it is displayed.","title":"Event-Based Vision at the Edge","type":"research"},{"categories":null,"content":"There are known individual differences in both ability to learn the layout of novel environments and flexibility of strategies for navigating known environments. It is unclear, however, how navigational abilities and situational awareness are impacted by high-stress scenarios and whether augmented reality (AR) could be employed to enhance performance and situational awareness.\nThis project will investigate three core questions:\n How does a person\u0026rsquo;s navigational abilities change in extreme situations? How can we best train them for these situations? How can vision augmentation be employed to improve situational awareness? ","date":1577836800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1577836800,"objectID":"ef5f79b69efa3ee442b825e065390d21","people":null,"permalink":"https://bionicvisionlab.org/research/high-stress-navigation/","publishdate":"2020-01-01T00:00:00Z","relpermalink":"/research/high-stress-navigation/","section":"research","summary":"How do people's navigatioal abilities change in stressful conditions? How can we best train them for these situations? And how can vision augmentation be employed to improve situational awareness?","title":"Visual Navigation Under High-Stress Conditions","type":"research"},{"categories":[],"content":"","date":1570717303,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1570717303,"objectID":"01d43ae4db26724f7e05489e59bab7d8","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-optimal-surgical-placement/","publishdate":"2019-10-10T14:21:43Z","relpermalink":"/publications/2019-optimal-surgical-placement/","section":"publications","summary":"We systematically explored the space of possible implant configurations to make recommendations for optimal intraocular positioning of Argus II.","title":"Model-based recommendations for optimal surgical placement of epiretinal implants","type":"publications"},{"categories":null,"content":"","date":1563321600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1563321600,"objectID":"a3097d298d06cb569e35f87e6e81a170","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-data-driven-models-human-neuroscience-neuroengineering/","publishdate":"2017-01-01T00:00:00Z","relpermalink":"/publications/2019-data-driven-models-human-neuroscience-neuroengineering/","section":"publications","summary":"In this review, we provide an accessible primer to modern modeling approaches and highlight recent data-driven discoveries in the domains of neuroimaging, single-neuron and neuronal population responses, and device neuroengineering.","title":"Data-driven models in human neuroscience and neuroengineering","type":"publications"},{"categories":[],"content":"A new article appeared in PCMag to celebrate the inauguration of the Bionic Vision Lab at UCSB:\n Bionic vision might sound like science fiction, but Dr. Michael Beyeler is working on just that.\n Originally from Switzerland, Dr. Beyeler is wrapping up his postdoctoral fellow at the University of Washington before moving to the University of California Santa Barbara this fall to head up the newly formed Bionic Vision Lab in the Departments of Computer Science and Psychological \u0026amp; Brain Sciences.\n We spoke with him about this \u0026ldquo;deep fascination with the brain\u0026rdquo; and how he hopes his work will eventually be able to restore vision to the blind. Here are edited and condensed excerpts from our conversation.\n Read the full article here.\n","date":1562664540,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1562664540,"objectID":"cc65e45a3acf72adeb393cedd3b10561","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/news/2019-07-pcmag/","publishdate":"2019-07-09T09:29:00Z","relpermalink":"/news/2019-07-pcmag/","section":"news","summary":"Michael Beyeler recently sat down with PCMag to talk about bionic vision and his move to UC Santa Barbara.","title":"PCMag: Restoring vision with bionic eyes - no longer science fiction","type":"news"},{"categories":null,"content":"","date":1561593600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1561593600,"objectID":"386c91297fe76d33fc00b9225d6ec9dd","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-neural-correlates-sparse-coding-dimensionality-reduction/","publishdate":"2019-06-27T00:00:00Z","relpermalink":"/publications/2019-neural-correlates-sparse-coding-dimensionality-reduction/","section":"publications","summary":"Brains face the fundamental challenge of extracting relevant information from high-dimensional external stimuli in order to form the neural basis that can guide an organism's behavior and its interaction with the world. One potential approach to addressing this challenge is to reduce the number of variables required to represent a particular input space (i.e., dimensionality reduction). We review compelling evidence that a range of neuronal responses can be understood as an emergent property of nonnegative sparse coding (NSC)—a form of efficient population coding due to dimensionality reduction and sparsity constraints.","title":"Neural correlates of sparse coding and dimensionality reduction","type":"publications"},{"categories":null,"content":"","date":1561334400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1561334400,"objectID":"9aade570f250a8bdf7d19bf2115101b8","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-axon-map-model/","publishdate":"2019-06-24T00:00:00Z","relpermalink":"/publications/2019-axon-map-model/","section":"publications","summary":"We show that the perceptual experience of retinal implant users can be accurately predicted using a computational model that simulates each individual patient’s retinal ganglion axon pathways.","title":"A model of ganglion axon pathways accounts for percepts elicited by retinal implants","type":"publications"},{"categories":null,"content":"","date":1558310400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1558310400,"objectID":"c444c103408be4d6a1dbd752616ec318","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-biophysical-model-axonal-stimulation/","publishdate":"2019-05-20T00:00:00Z","relpermalink":"/publications/2019-biophysical-model-axonal-stimulation/","section":"publications","summary":"To investigate the effect of axonal stimulation on the retinal response, we developed a computational model of a small population of morphologically and biophysically detailed retinal ganglion cells, and simulated their response to epiretinal electrical stimulation. We found that activation thresholds of ganglion cell somas and axons varied systematically with both stimulus pulse duration and electrode-retina distance. These findings have important implications for the improvement of stimulus encoding methods for epiretinal prostheses.","title":"Biophysical model of axonal stimulation in epiretinal visual prostheses","type":"publications"},{"categories":null,"content":"","date":1557446400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1557446400,"objectID":"7364a8adf91ea84cc0fbbd0c04424708","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2019-retinal-sheet-transplants/","publishdate":"2019-05-10T00:00:00Z","relpermalink":"/publications/2019-retinal-sheet-transplants/","section":"publications","summary":"A Commentary on: Detailed Visual Cortical Responses Generated by Retinal Sheet Transplants in Rats with Severe Retinal Degeneration by AT Foik et al. (2018).","title":"Commentary: Detailed visual cortical responses generated by retinal sheet transplants in rats with severe retinal degeneration","type":"publications"},{"categories":null,"content":"","date":1531440000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1531440000,"objectID":"096961db4e57e2fb4d3906e8230e49cf","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2018-carlsim4/","publishdate":"2018-07-13T00:00:00Z","relpermalink":"/publications/2018-carlsim4/","section":"publications","summary":"We have developed CARLsim 4, a user-friendly SNN library written in C++ that can simulate large biologically detailed neural networks. Improving on the efficiency and scalability of earlier releases, the present release allows for the simulation using multiple GPUs and multiple CPU cores concurrently in a heterogeneous computing cluster. Benchmarking results demonstrate simulation of 8.6 million neurons and 0.48 billion synapses using 4 GPUs and up to 60x speedup for multi-GPU implementations over a single-threaded CPU implementation, making CARLsim 4 well-suited for large-scale SNN models in the presence of real-time constraints.","title":"CARLsim 4: An open source library for large scale, biologically detailed spiking neural network simulation using heterogeneous clusters","type":"publications"},{"categories":null,"content":"","date":1503360000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1503360000,"objectID":"b5e80da46501c93df7df8c6fb3c5fc7f","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2017-learning-to-see-again/","publishdate":"2017-08-22T00:00:00Z","relpermalink":"/publications/2017-learning-to-see-again/","section":"publications","summary":"The goal of this review is to summarize the vast basic science literature on developmental and adult cortical plasticity with an emphasis on how this literature might relate to the field of prosthetic vision.","title":"Learning to see again: Biological constraints on cortical plasticity and the implications for sight restoration technologies","type":"publications"},{"categories":null,"content":"\r","date":1498867200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1498867200,"objectID":"0f9bbe69fd665667e284936ab7bda6e7","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2017-pulse2percept/","publishdate":"2017-07-01T00:00:00Z","relpermalink":"/publications/2017-pulse2percept/","section":"publications","summary":"*pulse2percept* is an open-source Python simulation framework used to predict the perceptual experience of retinal prosthesis patients across a wide range of implant configurations.","title":"pulse2percept: A Python-based simulation framework for bionic vision","type":"publications"},{"categories":null,"content":"","date":1470009600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1470009600,"objectID":"6010a0be9a85e86c6ad356a4d230a9e2","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2016-sparse-decomposition-model/","publishdate":"2016-08-01T00:00:00Z","relpermalink":"/publications/2016-sparse-decomposition-model/","section":"publications","summary":"Using a dimensionality reduction technique known as non-negative matrix factorization, we found that a variety of medial superior temporal (MSTd) neural response properties could be derived from MT-like input features. The responses that emerge from this technique, such as 3D translation and rotation selectivity, spiral tuning, and heading selectivity, can account for a number of empirical results. These findings (1) provide a further step toward a scientific understanding of the often nonintuitive response properties of MSTd neurons; (2) suggest that response properties, such as complex motion tuning and heading selectivity, might simply be a byproduct of MSTd neurons performing dimensionality reduction on their inputs; and (3) imply that motion perception in the cortex is consistent with ideas from the efficient-coding and free-energy principles.","title":"3D visual response properties of MSTd emerge from an efficient, sparse population code","type":"publications"},{"categories":null,"content":"","date":1448928000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1448928000,"objectID":"de49b660f9fc238f3af998f4dfc28f79","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2015-gpu-visually-guided-robot-navigation/","publishdate":"2015-12-01T00:00:00Z","relpermalink":"/publications/2015-gpu-visually-guided-robot-navigation/","section":"publications","summary":"We present a cortical neural network model for visually guided navigation that has been embodied on a physical robot exploring a real-world environment. The model includes a rate based motion energy model for area V1, and a spiking neural network model for cortical area MT. The model generates a cortical representation of optic flow, determines the position of objects based on motion discontinuities, and combines these signals with the representation of a goal location to produce motor commands that successfully steer the robot around obstacles toward the goal. This study demonstrates how neural signals in a model of cortical area MT might provide sufficient motion information to steer a physical robot on human-like paths around obstacles in a real-world environment.","title":"A GPU-accelerated cortical neural network model for visually guided robot navigation","type":"publications"},{"categories":null,"content":"","date":1436659200,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1436659200,"objectID":"4c7bedf1cb0d806fe2aece390417d28c","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2015-carlsim3/","publishdate":"2015-07-12T00:00:00Z","relpermalink":"/publications/2015-carlsim3/","section":"publications","summary":"We have developed CARLsim 3, a user-friendly, GPU-accelerated SNN library written in C/C++ that is capable of simulating biologically detailed neural models. The present release of CARLsim provides a number of improvements over our prior SNN library to allow the user to easily analyze simulation data, explore synaptic plasticity rules, and automate parameter tuning. In the present paper, we provide examples and performance benchmarks highlighting the library's features.","title":"CARLsim 3: A user-friendly and highly optimized library for the creation of neurobiologically detailed spiking neural networks","type":"publications"},{"categories":null,"content":"","date":1401580800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1401580800,"objectID":"9dfaef9074cb247761c6511079679203","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2014-vision-road-lane-detection/","publishdate":"2014-06-01T00:00:00Z","relpermalink":"/publications/2014-vision-road-lane-detection/","section":"publications","summary":"This paper presents an integrative approach to ego-lane detection that aims to be as simple as possible to enable real-time computation while being able to adapt to a variety of urban and rural traffic scenarios. The approach at hand combines and extends a road segmentation method in an illumination-invariant color image, lane markings detection using a ridge operator, and road geometry estimation using RANdom SAmple Consensus (RANSAC). The power and robustness of this algorithm has been demonstrated in a car simulation system as well as in the challenging KITTI data base of real-world urban traffic scenarios.","title":"Vision-based robust road lane detection in urban environments","type":"publications"},{"categories":null,"content":"","date":1391558400,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1391558400,"objectID":"a53663b6b62012e138a0743d6e6dfef2","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2014-snn-pattern-motion/","publishdate":"2014-01-20T00:00:00Z","relpermalink":"/publications/2014-snn-pattern-motion/","section":"publications","summary":"We present a two-stage model of visual area MT that we believe to be the first large-scale spiking network to demonstrate pattern direction selectivity. In this model, component-direction-selective (CDS) cells in MT linearly combine inputs from V1 cells that have spatiotemporal receptive fields according to the motion energy model of Simoncelli and Heeger. Pattern-direction-selective (PDS) cells in MT are constructed by pooling over MT CDS cells with a wide range of preferred directions. Responses of our model neurons are comparable to electrophysiological results for grating and plaid stimuli as well as speed tuning.","title":"Efficient spiking neural network model of pattern motion selectivity in visual cortex","type":"publications"},{"categories":null,"content":"","date":1390176000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1390176000,"objectID":"da2f3225cbcc97db91c2933db3c0c0ee","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2014-gpgpu-accelerated-simulation-parameter-tuning/","publishdate":"2014-01-20T00:00:00Z","relpermalink":"/publications/2014-gpgpu-accelerated-simulation-parameter-tuning/","section":"publications","summary":"We describe a simulation environment that can be used to design, construct, and run spiking neural networks (SNNs) quickly and efficiently using graphics processing units (GPUs). We then explain how the design of the simulation environment utilizes the parallel processing power of GPUs to simulate large-scale SNNs and describe recent modeling experiments performed using the simulator. Finally, we present an automated parameter tuning framework that utilizes the simulation environment and evolutionary algorithms to tune SNNs.","title":"GPGPU accelerated simulation and parameter tuning for neuromorphic applications","type":"publications"},{"categories":null,"content":"","date":1385856000,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1385856000,"objectID":"e469311b24ad43d62ae7a26f056d1942","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2013-categorization-mnist-stdp/","publishdate":"2013-12-01T00:00:00Z","relpermalink":"/publications/2013-categorization-mnist-stdp/","section":"publications","summary":"We present a large-scale model of a hierarchical spiking neural network (SNN) that integrates a low-level memory encoding mechanism with a higher-level decision process to perform a visual classification task in real-time. The model consists of Izhikevich neurons and conductance-based synapses for realistic approximation of neuronal dynamics, a spike-timing-dependent plasticity (STDP) synaptic learning rule with additional synaptic dynamics for memory encoding, and an accumulator model for memory retrieval and categorization. The full network, which comprised 71,026 neurons and approximately 133 million synapses, ran in real-time on a single off-the-shelf graphics processing unit (GPU). The network achieved 92% correct classifications on MNIST in 100 rounds of random sub-sampling, which is comparable to other SNN approaches and provides a conservative and reliable performance metric. Additionally, the model correctly predicted reaction times from psychophysical experiments. Because of the scalability of the approach and its neurobiological fidelity, the current model can be extended to an efficient neuromorphic implementation that supports more generalized object recognition and decision-making architectures found in the brain.","title":"Categorization and decision-making in a neurobiologically plausible spiking network using a STDP-like plasticity rule","type":"publications"},{"categories":null,"content":"","date":1288569600,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":1288569600,"objectID":"1df3e4d282d890015a5345a70810e5d6","people":["Michael Beyeler"],"permalink":"https://bionicvisionlab.org/publications/2010-exploring-olfactory-networks/","publishdate":"2010-11-01T00:00:00Z","relpermalink":"/publications/2010-exploring-olfactory-networks/","section":"publications","summary":"Olfactory stimuli are represented in a high-dimensional space by neural networks of the olfactory system. While a number of studies have illustrated the importance of inhibitory networks within the olfactory bulb or the antennal lobe for the shaping and processing of olfactory information, it is not clear how exactly these inhibitory networks are organized to provide filtering and contrast enhancement capabilities. In this work the aim is to study the topology of the proposed networks by using software simulations and hardware implementation. While we can study the dependence of the activity on each parameter of the theoretical models with the simulations, it is important to understand whether the models can be used in robotic applications for real-time odor recognition. We present the results of a linear simulation, a spiking simulation with I\u0026F neurons and a real-time hardware emulation using neuromorphic VLSI chips.","title":"Exploring olfactory sensory networks: Simulations and hardware emulation","type":"publications"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"0345abb3184f3021cfe981d9c1b0ead1","people":[],"permalink":"https://bionicvisionlab.org/collaborators/boynton_geoff/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/boynton_geoff/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"e81cc6a213259bcccd0481e866fc4f73","people":[],"permalink":"https://bionicvisionlab.org/collaborators/brunton_bing/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/brunton_bing/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"d159791a8442e301af2f6ce0f7b65bb5","people":[],"permalink":"https://bionicvisionlab.org/collaborators/dagnelie_gislin/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/dagnelie_gislin/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"cbd88cd6538699a558bbbcd23229ab06","people":[],"permalink":"https://bionicvisionlab.org/collaborators/eckstein_miguel/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/eckstein_miguel/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"e0941d3038b77f624edd5c354cfe2612","people":[],"permalink":"https://bionicvisionlab.org/collaborators/fernandez_eduardo/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/fernandez_eduardo/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"bd76d6c0532e954c7a69a73fc2b83f16","people":[],"permalink":"https://bionicvisionlab.org/collaborators/fine_ione/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/fine_ione/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"124fe87861e790e184e6a15a299416f0","people":[],"permalink":"https://bionicvisionlab.org/collaborators/giesbrecht_barry/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/giesbrecht_barry/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"08cb4baf0e819a154a89d51ccf472b0a","people":[],"permalink":"https://bionicvisionlab.org/collaborators/goard_michael/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/goard_michael/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"5aa56499202dd4f4d2674829cdead929","people":[],"permalink":"https://bionicvisionlab.org/collaborators/grafton_scott/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/grafton_scott/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"4c0e7e578fe0a2de11ccdf2b92688825","people":[],"permalink":"https://bionicvisionlab.org/collaborators/hegarty_mary/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/hegarty_mary/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"00ccf161cd074a0950a464dd6943722f","people":[],"permalink":"https://bionicvisionlab.org/collaborators/hollerer_tobias/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/hollerer_tobias/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"826508bc3d1b39eb7e478070cf266785","people":[],"permalink":"https://bionicvisionlab.org/collaborators/montezuma_sandra/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/montezuma_sandra/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"6c8afc0395cca73b0f99c53bfdf0e212","people":[],"permalink":"https://bionicvisionlab.org/collaborators/niell_cris/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/niell_cris/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"82968413fb541f85c81d62175eb6ed89","people":[],"permalink":"https://bionicvisionlab.org/collaborators/open_postdoc/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/open_postdoc/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"74edf1fcd71e82ad16afe5f271aa8723","people":[],"permalink":"https://bionicvisionlab.org/collaborators/open_ra/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/open_ra/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"b194678a7a6a010af51b3a392b87fdf4","people":[],"permalink":"https://bionicvisionlab.org/collaborators/open_student/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/open_student/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"a53ebfbe9e0f7c87114a1f4ff9c06cf1","people":[],"permalink":"https://bionicvisionlab.org/collaborators/rokem_ariel/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/rokem_ariel/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"0302f9c6a7657d8d70c47c800a1047ae","people":[],"permalink":"https://bionicvisionlab.org/collaborators/second_sight/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/second_sight/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"6636f6af3ca44951f03a6496c0f51b51","people":[],"permalink":"https://bionicvisionlab.org/collaborators/smith_spencer/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/smith_spencer/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"f19bf5bee0480291e98f9538f6ec0325","people":[],"permalink":"https://bionicvisionlab.org/collaborators/weiland_jim/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/collaborators/weiland_jim/","section":"collaborators","summary":"","title":"","type":"collaborators"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"a1b2db76f8bcd92aee74d434c799bf16","people":[],"permalink":"https://bionicvisionlab.org/grants/2021-r01-mouse/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/grants/2021-r01-mouse/","section":"grants","summary":"","title":"Cortical visual processing for navigation","type":"grants"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"d1700e2bc315bd058766abe8cb79f635","people":[],"permalink":"https://bionicvisionlab.org/grants/2021-ucsb-academic-senate/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/grants/2021-ucsb-academic-senate/","section":"grants","summary":"","title":"Event-based scene understanding for bionic vision","type":"grants"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"5f9f1c6b4bbfb3b16bb7ad8059923366","people":[],"permalink":"https://bionicvisionlab.org/grants/2022-dp2-new-innovator/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/grants/2022-dp2-new-innovator/","section":"grants","summary":"","title":"Towards a Smart Bionic Eye: AI-Powered Artificial Vision for the Treatment of Incurable Blindness","type":"grants"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"449e696159cfd0650277154e94d8219e","people":[],"permalink":"https://bionicvisionlab.org/grants/2020-r00-virtual-prototyping/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/grants/2020-r00-virtual-prototyping/","section":"grants","summary":"","title":"Virtual prototyping for retinal prosthesis patients","type":"grants"},{"categories":null,"content":"","date":-62135596800,"expirydate":-62135596800,"kind":"page","lang":"en","lastmod":-62135596800,"objectID":"4e6594480155b2f52ee673351ff91384","people":[],"permalink":"https://bionicvisionlab.org/grants/2021-icb-army/","publishdate":"0001-01-01T00:00:00Z","relpermalink":"/grants/2021-icb-army/","section":"grants","summary":"","title":"Visual navigation under high-stress conditions","type":"grants"}]