<?xml version="1.0" encoding="UTF-8"?><xml><records><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cao, Runnan</style></author><author><style face="normal" font="default" size="100%">Brunner, Peter</style></author><author><style face="normal" font="default" size="100%">Brandmeir, Nicholas J</style></author><author><style face="normal" font="default" size="100%">Willie, Jon T</style></author><author><style face="normal" font="default" size="100%">Wang, Shuo</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">A human single-neuron dataset for object recognition.</style></title><secondary-title><style face="normal" font="default" size="100%">Sci Data</style></secondary-title><alt-title><style face="normal" font="default" size="100%">Sci Data</style></alt-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Amygdala</style></keyword><keyword><style  face="normal" font="default" size="100%">Epilepsy</style></keyword><keyword><style  face="normal" font="default" size="100%">Hippocampus</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Neurons</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Recognition, Psychology</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2025</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2025 Jan 15</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">12</style></volume><pages><style face="normal" font="default" size="100%">79</style></pages><language><style face="normal" font="default" 
size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Object recognition is fundamental to how we interact with and interpret the world around us. The human amygdala and hippocampus play a key role in object recognition, contributing to both the encoding and retrieval of visual information. Here, we recorded single-neuron activity from the human amygdala and hippocampus when neurosurgical epilepsy patients performed a one-back task using naturalistic object stimuli. We employed two sets of naturalistic object images from leading datasets extensively used in primate neural recordings and computer vision models: we recorded 1204 neurons using the ImageNet stimuli, which included broader object categories (10 different images per category for 50 categories), and we recorded 512 neurons using the Microsoft COCO stimuli, which featured a higher number of images per category (50 different images per category for 10 categories). Together, our extensive dataset, offering the highest spatial and temporal resolution currently available in humans, will not only facilitate a comprehensive analysis of the neural correlates of object recognition but also provide valuable opportunities for training and validating computational models.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Cao, Runnan</style></author><author><style face="normal" font="default" size="100%">Wang, Jinge</style></author><author><style face="normal" font="default" size="100%">Brunner, Peter</style></author><author><style face="normal" font="default" size="100%">Willie, Jon T</style></author><author><style face="normal" font="default" size="100%">Li, Xin</style></author><author><style face="normal" font="default" 
size="100%">Rutishauser, Ueli</style></author><author><style face="normal" font="default" size="100%">Brandmeir, Nicholas J</style></author><author><style face="normal" font="default" size="100%">Wang, Shuo</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Neural mechanisms of face familiarity and learning in the human amygdala and hippocampus.</style></title><secondary-title><style face="normal" font="default" size="100%">Cell Rep</style></secondary-title><alt-title><style face="normal" font="default" size="100%">Cell Rep</style></alt-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Amygdala</style></keyword><keyword><style  face="normal" font="default" size="100%">Facial Recognition</style></keyword><keyword><style  face="normal" font="default" size="100%">Hippocampus</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Learning</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Recognition, Psychology</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2024</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2024 Jan 23</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">43</style></volume><pages><style face="normal" font="default" size="100%">113520</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;Recognizing familiar faces and learning new faces play an important role in social cognition. However, the underlying neural computational mechanisms remain unclear. 
Here, we record from single neurons in the human amygdala and hippocampus and find a greater neuronal representational distance between pairs of familiar faces than unfamiliar faces, suggesting that neural representations for familiar faces are more distinct. Representational distance increases with exposures to the same identity, suggesting that neural face representations are sharpened with learning and familiarization. Furthermore, representational distance is positively correlated with visual dissimilarity between faces, and exposure to visually similar faces increases representational distance, thus sharpening neural representations. Finally, we construct a computational model that demonstrates an increase in the representational distance of artificial units with training. Together, our results suggest that the neuronal population geometry, quantified by the representational distance, encodes face familiarity, similarity, and learning, forming the basis of face recognition and memory.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">1</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>10</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Miller, John W</style></author><author><style face="normal" font="default" size="100%">Hermes, Dora</style></author><author><style face="normal" font="default" size="100%">Schalk, Gerwin</style></author><author><style face="normal" font="default" size="100%">Ramsey, Nick F</style></author><author><style face="normal" font="default" size="100%">Jagadeesh, Bharathi</style></author><author><style face="normal" font="default" size="100%">den Nijs, Marcel</style></author><author><style face="normal" font="default" size="100%">Ojemann, J G</style></author><author><style face="normal" font="default" size="100%">Rao, Rajesh P N</style></author></authors></contributors><titles><title><style face="normal" 
font="default" size="100%">Detection of spontaneous class-specific visual stimuli with high temporal accuracy in human electrocorticography.</style></title><secondary-title><style face="normal" font="default" size="100%">Conf Proc IEEE Eng Med Biol Soc</style></secondary-title><alt-title><style face="normal" font="default" size="100%">Conf Proc IEEE Eng Med Biol Soc</style></alt-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Algorithms</style></keyword><keyword><style  face="normal" font="default" size="100%">Electrocorticography</style></keyword><keyword><style  face="normal" font="default" size="100%">Evoked Potentials, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Male</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Automated</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Photic Stimulation</style></keyword><keyword><style  face="normal" font="default" size="100%">Reproducibility of Results</style></keyword><keyword><style  face="normal" font="default" size="100%">Sensitivity and Specificity</style></keyword><keyword><style  face="normal" font="default" size="100%">User-Computer Interface</style></keyword><keyword><style  face="normal" font="default" size="100%">Visual Cortex</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2009</style></year><pub-dates><date><style  face="normal" font="default" size="100%">2009</style></date></pub-dates></dates><volume><style face="normal" font="default" size="100%">2009</style></volume><pages><style face="normal" font="default" size="100%">6465-8</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style 
face="normal" font="default" size="100%">Most brain-computer interface classification experiments from electrical potential recordings have been focused on the identification of classes of stimuli or behavior where the timing of experimental parameters is known or pre-designated. Real world experience, however, is spontaneous, and to this end we describe an experiment predicting the occurrence, timing, and types of visual stimuli perceived by a human subject from electrocorticographic recordings. All 300 of 300 presented stimuli were correctly detected, with a temporal precision of order 20 ms. The type of stimulus (face/house) was correctly identified in 95% of these cases. There were approximately 20 false alarm events, corresponding to a late 2nd neuronal response to a previously identified event.</style></abstract></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Allison, Brendan Z.</style></author><author><style 
face="normal" font="default" size="100%">McFarland, Dennis J.</style></author><author><style face="normal" font="default" size="100%">Schalk, Gerwin</style></author><author><style face="normal" font="default" size="100%">Zheng, Shi Dong</style></author><author><style face="normal" font="default" size="100%">Moore-Jackson, Melody</style></author><author><style face="normal" font="default" size="100%">Wolpaw, Jonathan</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Towards an independent brain-computer interface using steady state visual evoked potentials.</style></title><secondary-title><style face="normal" font="default" size="100%">Clin Neurophysiol</style></secondary-title><alt-title><style face="normal" font="default" size="100%">Clin Neurophysiol</style></alt-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Adolescent</style></keyword><keyword><style  face="normal" font="default" size="100%">Adult</style></keyword><keyword><style  face="normal" font="default" size="100%">Attention</style></keyword><keyword><style  face="normal" font="default" size="100%">Brain</style></keyword><keyword><style  face="normal" font="default" size="100%">Brain Mapping</style></keyword><keyword><style  face="normal" font="default" size="100%">Dose-Response Relationship, Radiation</style></keyword><keyword><style  face="normal" font="default" size="100%">Electroencephalography</style></keyword><keyword><style  face="normal" font="default" size="100%">Evoked Potentials, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Female</style></keyword><keyword><style  face="normal" font="default" size="100%">Humans</style></keyword><keyword><style  face="normal" font="default" size="100%">Male</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Photic Stimulation</style></keyword><keyword><style  
face="normal" font="default" size="100%">Spectrum Analysis</style></keyword><keyword><style  face="normal" font="default" size="100%">User-Computer Interface</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">2008</style></year><pub-dates><date><style  face="normal" font="default" size="100%">02/2008</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" size="100%">http://www.ncbi.nlm.nih.gov/pubmed/18077208</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">119</style></volume><pages><style face="normal" font="default" size="100%">399-408</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;h4 style=&quot;font-size: 13px; margin: 0px 0.25em 0px 0px; text-transform: uppercase; float: left; font-family: arial, helvetica, clean, sans-serif; line-height: 17px;&quot;&gt;OBJECTIVE:&amp;nbsp;&lt;/h4&gt;
&lt;p style=&quot;margin: 0px 0px 0.5em; font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;Brain-computer interface (BCI) systems using steady state visual evoked potentials (SSVEPs) have allowed healthy subjects to communicate. However, these systems may not work in severely disabled users because they may depend on gaze shifting. This study evaluates the hypothesis that overlapping stimuli can evoke changes in SSVEP activity sufficient to control a BCI. This would provide evidence that SSVEP BCIs could be used without shifting gaze.&lt;/p&gt;
&lt;h4 style=&quot;font-size: 13px; margin: 0px 0.25em 0px 0px; text-transform: uppercase; float: left; font-family: arial, helvetica, clean, sans-serif; line-height: 17px;&quot;&gt;METHODS:&amp;nbsp;&lt;/h4&gt;
&lt;p style=&quot;margin: 0px 0px 0.5em; font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;Subjects viewed a display containing two images that each oscillated at a different frequency. Different conditions used overlapping or non-overlapping images to explore dependence on gaze function. Subjects were asked to direct attention to one or the other of these images during each of 12 one-minute runs.&lt;/p&gt;
&lt;h4 style=&quot;font-size: 13px; margin: 0px 0.25em 0px 0px; text-transform: uppercase; float: left; font-family: arial, helvetica, clean, sans-serif; line-height: 17px;&quot;&gt;RESULTS:&amp;nbsp;&lt;/h4&gt;
&lt;p style=&quot;margin: 0px 0px 0.5em; font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;Half of the subjects produced differences in SSVEP activity elicited by overlapping stimuli that could support BCI control. In all remaining users, differences did exist at corresponding frequencies but were not strong enough to allow effective control.&lt;/p&gt;
&lt;h4 style=&quot;font-size: 13px; margin: 0px 0.25em 0px 0px; text-transform: uppercase; float: left; font-family: arial, helvetica, clean, sans-serif; line-height: 17px;&quot;&gt;CONCLUSIONS:&amp;nbsp;&lt;/h4&gt;
&lt;p style=&quot;margin: 0px 0px 0.5em; font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;The data demonstrate that SSVEP differences sufficient for BCI control may be elicited by selective attention to one of two overlapping stimuli. Thus, some SSVEP-based BCI approaches may not depend on gaze control. The nature and extent of any BCI's dependence on muscle activity is a function of many factors, including the display, task, environment, and user.&lt;/p&gt;
&lt;h4 style=&quot;font-size: 13px; margin: 0px 0.25em 0px 0px; text-transform: uppercase; float: left; font-family: arial, helvetica, clean, sans-serif; line-height: 17px;&quot;&gt;SIGNIFICANCE:&amp;nbsp;&lt;/h4&gt;
&lt;p style=&quot;margin: 0px 0px 0.5em; font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;SSVEP BCIs might function in severely disabled users unable to reliably control gaze. Further research with these users is necessary to explore the optimal parameters of such a system and validate online performance in a home environment.&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">2</style></issue></record><record><source-app name="Biblio" version="7.x">Drupal-Biblio</source-app><ref-type>17</ref-type><contributors><authors><author><style face="normal" font="default" size="100%">Baxter, Bill</style></author><author><style face="normal" font="default" size="100%">Dow, B M</style></author></authors></contributors><titles><title><style face="normal" font="default" size="100%">Horizontal organization of orientation-sensitive cells in primate visual cortex.</style></title><secondary-title><style face="normal" font="default" size="100%">Biol Cybern</style></secondary-title><alt-title><style face="normal" font="default" size="100%">Biol Cybern</style></alt-title></titles><keywords><keyword><style  face="normal" font="default" size="100%">Animals</style></keyword><keyword><style  face="normal" font="default" size="100%">Electron Transport Complex IV</style></keyword><keyword><style  face="normal" font="default" size="100%">Form Perception</style></keyword><keyword><style  face="normal" font="default" size="100%">Models, Neurological</style></keyword><keyword><style  face="normal" font="default" size="100%">Pattern Recognition, Visual</style></keyword><keyword><style  face="normal" font="default" size="100%">Visual Cortex</style></keyword></keywords><dates><year><style  face="normal" font="default" size="100%">1989</style></year><pub-dates><date><style  face="normal" font="default" size="100%">07/1989</style></date></pub-dates></dates><urls><web-urls><url><style face="normal" font="default" 
size="100%">http://www.ncbi.nlm.nih.gov/pubmed/2548628</style></url></web-urls></urls><volume><style face="normal" font="default" size="100%">61</style></volume><pages><style face="normal" font="default" size="100%">171-82</style></pages><language><style face="normal" font="default" size="100%">eng</style></language><abstract><style face="normal" font="default" size="100%">&lt;p&gt;&lt;span style=&quot;font-family: arial, helvetica, clean, sans-serif; font-size: 13px; line-height: 17px;&quot;&gt;In the visual cortex of the monkey the horizontal organization of the preferred orientations of orientation-selective cells follows two opposing rules: (1) neighbors tend to have similar orientation preferences, and (2) many different orientations are observed in a local region. We have described a classification for orientation maps based on the types of topological singularities and the spacing of these singularities relative to the cytochrome oxidase blobs. Using the orientation drift rate as a measure we have compared simulated orientation maps to published records of horizontal electrode recordings.&lt;/span&gt;&lt;/p&gt;</style></abstract><issue><style face="normal" font="default" size="100%">3</style></issue></record></records></xml>