<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>http://debianws.lexgopc.com/wiki143/index.php?action=history&amp;feed=atom&amp;title=Affective_computing</id>
	<title>Affective computing - Revision history</title>
	<link rel="self" type="application/atom+xml" href="http://debianws.lexgopc.com/wiki143/index.php?action=history&amp;feed=atom&amp;title=Affective_computing"/>
	<link rel="alternate" type="text/html" href="http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;action=history"/>
	<updated>2026-05-04T21:03:14Z</updated>
	<subtitle>Revision history for this page on the wiki</subtitle>
	<generator>MediaWiki 1.43.1</generator>
	<entry>
		<id>http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=4583246&amp;oldid=prev</id>
		<title>imported&gt;OAbot: Open access bot: doi updated in citation with #oabot.</title>
		<link rel="alternate" type="text/html" href="http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=4583246&amp;oldid=prev"/>
		<updated>2025-12-22T03:00:19Z</updated>

		<summary type="html">&lt;p&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/OABOT&quot; class=&quot;extiw&quot; title=&quot;wikipedia:OABOT&quot;&gt;Open access bot&lt;/a&gt;: doi updated in citation with #oabot.&lt;/p&gt;
&lt;table style=&quot;background-color: #fff; color: #202122;&quot; data-mw=&quot;interface&quot;&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;tr class=&quot;diff-title&quot; lang=&quot;en&quot;&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;← Previous revision&lt;/td&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;Revision as of 03:00, 22 December 2025&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l1&quot;&gt;Line 1:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 1:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{update|date=January 2023}}&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{update|date=January 2023}}&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{short description|Area of research in computer science aiming to understand the emotional state of users}}&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{short description|Area of research in computer science aiming to understand the emotional state of users}}&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Sophia at the AI for Good Global Summit 2018 (27254369347) (cropped).jpg|thumb|Electronic devices such as robots are increasingly able to &lt;del style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;mimic &lt;/del&gt;human emotion.]]  &lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Sophia at the AI for Good Global Summit 2018 (27254369347) (cropped).jpg|thumb|Electronic devices such as robots are increasingly able to &lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;detect and respond to &lt;/ins&gt;human emotion.]]  &lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;#039;&amp;#039;&amp;#039;Affective computing&amp;#039;&amp;#039;&amp;#039; is the study and development of systems and devices that can recognize, interpret, process, and simulate human [[Affect (psychology)|affects]]. It is an interdisciplinary field spanning [[computer science]], [[psychology]], and [[cognitive science]].&amp;lt;ref name=TaoTan&amp;gt;{{cite conference |first=Jianhua |last=Tao |author2=Tieniu Tan |title=Affective Computing: A Review |book-title=Affective Computing and Intelligent Interaction |volume=[[LNCS]] 3784 |pages=981–995 |publisher=Springer |year=2005 |doi=10.1007/11573548 }}&amp;lt;/ref&amp;gt; While some core ideas in the field may be traced as far back as to early philosophical inquiries into [[Emotion#James–Lange theory|emotion]],&amp;lt;ref&amp;gt;{{cite journal |last=James |first=William |year=1884 |title=What Is Emotion |journal=Mind |volume=9 |issue=34 |pages=188–205 |doi=10.1093/mind/os-IX.34.188|url=https://zenodo.org/record/1431811 }} Cited by Tao and Tan.&amp;lt;/ref&amp;gt; the more modern branch of computer science originated with [[Rosalind Picard]]&amp;#039;s 1995 paper entitled &amp;quot;Affective Computing&amp;quot;&amp;lt;ref&amp;gt;[https://vismod.media.mit.edu/pub/tech-reports/TR-321.pdf &amp;quot;Affective Computing&amp;quot;] MIT Technical Report #321 ([http://vismod.media.mit.edu/pub/tech-reports/TR-321-ABSTRACT.html Abstract]), 1995&amp;lt;/ref&amp;gt; and her 1997 book of the same name&amp;lt;ref name=&amp;quot;Affective Computing&amp;quot;&amp;gt;{{cite book|last1=Picard|first1=Rosalind|title=Affective Computing|date=1997|publisher=MIT Press|location=Cambridge, MA|page=1}}&amp;lt;/ref&amp;gt; published by [[MIT 
Press]].&amp;lt;ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;#039;&amp;#039;&amp;#039;Affective computing&amp;#039;&amp;#039;&amp;#039; is the study and development of systems and devices that can recognize, interpret, process, and simulate human [[Affect (psychology)|affects]]. It is an interdisciplinary field spanning [[computer science]], [[psychology]], and [[cognitive science]].&amp;lt;ref name=TaoTan&amp;gt;{{cite conference |first=Jianhua |last=Tao |author2=Tieniu Tan |title=Affective Computing: A Review |book-title=Affective Computing and Intelligent Interaction |volume=[[LNCS]] 3784 |pages=981–995 |publisher=Springer |year=2005 |doi=10.1007/11573548 }}&amp;lt;/ref&amp;gt; While some core ideas in the field may be traced as far back as to early philosophical inquiries into [[Emotion#James–Lange theory|emotion]],&amp;lt;ref&amp;gt;{{cite journal |last=James |first=William |year=1884 |title=What Is Emotion |journal=Mind |volume=9 |issue=34 |pages=188–205 |doi=10.1093/mind/os-IX.34.188|url=https://zenodo.org/record/1431811 }} Cited by Tao and Tan.&amp;lt;/ref&amp;gt; the more modern branch of computer science originated with [[Rosalind Picard]]&amp;#039;s 1995 paper entitled &amp;quot;Affective Computing&amp;quot;&amp;lt;ref&amp;gt;[https://vismod.media.mit.edu/pub/tech-reports/TR-321.pdf &amp;quot;Affective Computing&amp;quot;] MIT Technical Report #321 ([http://vismod.media.mit.edu/pub/tech-reports/TR-321-ABSTRACT.html Abstract]), 1995&amp;lt;/ref&amp;gt; and her 1997 book of the same name&amp;lt;ref name=&amp;quot;Affective Computing&amp;quot;&amp;gt;{{cite book|last1=Picard|first1=Rosalind|title=Affective Computing|date=1997|publisher=MIT Press|location=Cambridge, 
MA|page=1}}&amp;lt;/ref&amp;gt; published by [[MIT Press]].&amp;lt;ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{cite web&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{cite web&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l54&quot;&gt;Line 54:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 54:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[Marvin Minsky]], one of the pioneering computer scientists in [[artificial intelligence]], relates emotions to the broader issues of machine intelligence stating in &amp;#039;&amp;#039;[[The Emotion Machine]]&amp;#039;&amp;#039; that emotion is &amp;quot;not especially different from the processes that we call &amp;#039;thinking.&amp;#039;&amp;quot;&amp;lt;ref&amp;gt;{{cite news|url=https://www.washingtonpost.com/wp-dyn/content/article/2006/12/14/AR2006121401554.html|title=Mind Over Matter|last=Restak|first=Richard|date=2006-12-17|newspaper=The Washington Post|access-date=2008-05-13}}&amp;lt;/ref&amp;gt; The innovative approach &amp;quot;digital humans&amp;quot; or [[Virtual human|virtual humans]] includes an attempt to give these programs, which simulate humans, the emotional dimension as well, including reactions in accordance with the reaction that a real person would react in a certain emotionally stimulating situation as well as facial expressions and gestures.&amp;lt;ref&amp;gt;{{Cite journal |last1=Loveys |first1=Kate |last2=Sagar |first2=Mark |last3=Broadbent |first3=Elizabeth |date=2020-07-22 |title=The Effect of Multimodal Emotional Expression on Responses to a Digital Human during a Self-Disclosure Conversation: a Computational Analysis of User Language |url=http://dx.doi.org/10.1007/s10916-020-01624-4 |journal=Journal of Medical Systems |volume=44 |issue=9 |page=143 |doi=10.1007/s10916-020-01624-4 |pmid=32700060 |s2cid=220717084 |issn=0148-5598|url-access=subscription }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: 
solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[Marvin Minsky]], one of the pioneering computer scientists in [[artificial intelligence]], relates emotions to the broader issues of machine intelligence stating in &amp;#039;&amp;#039;[[The Emotion Machine]]&amp;#039;&amp;#039; that emotion is &amp;quot;not especially different from the processes that we call &amp;#039;thinking.&amp;#039;&amp;quot;&amp;lt;ref&amp;gt;{{cite news|url=https://www.washingtonpost.com/wp-dyn/content/article/2006/12/14/AR2006121401554.html|title=Mind Over Matter|last=Restak|first=Richard|date=2006-12-17|newspaper=The Washington Post|access-date=2008-05-13}}&amp;lt;/ref&amp;gt; The innovative approach &amp;quot;digital humans&amp;quot; or [[Virtual human|virtual humans]] includes an attempt to give these programs, which simulate humans, the emotional dimension as well, including reactions in accordance with the reaction that a real person would react in a certain emotionally stimulating situation as well as facial expressions and gestures.&amp;lt;ref&amp;gt;{{Cite journal |last1=Loveys |first1=Kate |last2=Sagar |first2=Mark |last3=Broadbent |first3=Elizabeth |date=2020-07-22 |title=The Effect of Multimodal Emotional Expression on Responses to a Digital Human during a Self-Disclosure Conversation: a Computational Analysis of User Language |url=http://dx.doi.org/10.1007/s10916-020-01624-4 |journal=Journal of Medical Systems |volume=44 |issue=9 |page=143 |doi=10.1007/s10916-020-01624-4 |pmid=32700060 |s2cid=220717084 |issn=0148-5598|url-access=subscription }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotion in machines often refers to emotion in computational, often AI-based, systems. As a result, the terms &#039;emotional AI&#039; and &#039;[https://mitsloan.mit.edu/ideas-made-to-matter/emotion-ai-explained emotion AI]&#039; are being used.&amp;lt;ref&amp;gt;{{Cite journal |last=Ho |first=Manh-Tung |date=29 March 2023 |title=An analytical framework for studying attitude towards emotional AI: The three-pronged approach |journal=[[MethodsX]] |volume=10 |issue=102149 |doi=10.1016/j.mex.2023.102149 |pmid=37091958 |pmc=10113835 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotion in machines often refers to emotion in computational, often AI-based, systems. As a result, the terms &#039;emotional AI&#039; and &#039;[https://mitsloan.mit.edu/ideas-made-to-matter/emotion-ai-explained emotion AI]&#039; are being used.&amp;lt;ref&amp;gt;{{Cite journal |last=Ho |first=Manh-Tung |date=29 March 2023 |title=An analytical framework for studying attitude towards emotional AI: The three-pronged approach |journal=[[MethodsX]] |volume=10 |issue&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;=102149 |article-number&lt;/ins&gt;=102149 |doi=10.1016/j.mex.2023.102149 |pmid=37091958 |pmc=10113835 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;== Technologies ==&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;== Technologies ==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l68&quot;&gt;Line 68:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 68:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotional speech processing technologies recognize the user&amp;#039;s emotional state using computational analysis of speech features. Vocal parameters and [[prosody (linguistics)|prosodic]] features such as pitch variables and speech rate can be analyzed through pattern recognition techniques.&amp;lt;ref name=&amp;quot;Dellaert&amp;quot;&amp;gt;Dellaert, F., Polizin, t., and Waibel, A., Recognizing Emotion in Speech&amp;quot;, In Proc. Of ICSLP 1996, Philadelphia, PA, pp.1970–1973, 1996&amp;lt;/ref&amp;gt;&amp;lt;ref name=&amp;quot;Lee&amp;quot;&amp;gt;Lee, C.M.; Narayanan, S.; Pieraccini, R., Recognition of Negative Emotion in the Human Speech Signals, Workshop on Auto. Speech Recognition and Understanding, Dec 2001&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotional speech processing technologies recognize the user&amp;#039;s emotional state using computational analysis of speech features. Vocal parameters and [[prosody (linguistics)|prosodic]] features such as pitch variables and speech rate can be analyzed through pattern recognition techniques.&amp;lt;ref name=&amp;quot;Dellaert&amp;quot;&amp;gt;Dellaert, F., Polizin, t., and Waibel, A., Recognizing Emotion in Speech&amp;quot;, In Proc. 
Of ICSLP 1996, Philadelphia, PA, pp.1970–1973, 1996&amp;lt;/ref&amp;gt;&amp;lt;ref name=&amp;quot;Lee&amp;quot;&amp;gt;Lee, C.M.; Narayanan, S.; Pieraccini, R., Recognition of Negative Emotion in the Human Speech Signals, Workshop on Auto. Speech Recognition and Understanding, Dec 2001&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Speech analysis is an effective method of identifying affective state, having an average reported accuracy of 70 to 80% in research from 2003 and 2006.&amp;lt;ref&amp;gt;{{Cite journal|last1=Neiberg|first1=D|last2=Elenius|first2=K|last3=Laskowski|first3=K|date=2006|title=Emotion recognition in spontaneous speech using GMMs|url=http://www.speech.kth.se/prod/publications/files/1192.pdf|journal=Proceedings of Interspeech|doi=10.21437/Interspeech.2006-277|s2cid=5790745}}&amp;lt;/ref&amp;gt;&amp;lt;ref&amp;gt;{{Cite journal|last1=Yacoub|first1=Sherif|last2=Simske|first2=Steve|last3=Lin|first3=Xiaofan|last4=Burns|first4=John|date=2003|title=Recognition of Emotions in Interactive Voice Response Systems|journal=Proceedings of Eurospeech|pages=729–732|doi=10.21437/Eurospeech.2003-307 |citeseerx=10.1.1.420.8158|s2cid=11671944 }}&amp;lt;/ref&amp;gt; These systems tend to outperform average human accuracy (approximately 60%&amp;lt;ref name=&quot;Dellaert&quot; /&amp;gt;) but are less accurate than systems which employ other modalities for emotion detection, such as physiological states or facial expressions.&amp;lt;ref name=&quot;Hudlicka-2003-p24&quot;&amp;gt;{{harvnb|Hudlicka|2003|p=24}}&amp;lt;/ref&amp;gt; However, since many speech characteristics are independent of semantics or culture, this technique is considered to be a promising route for further research.&amp;lt;ref name=&quot;Hudlicka-2003-p25&quot;&amp;gt;{{harvnb|Hudlicka|2003|p=25}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 
0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Speech analysis is an effective method of identifying affective state, having an average reported accuracy of 70 to 80% in research from 2003 and 2006.&amp;lt;ref&amp;gt;{{Cite journal|last1=Neiberg|first1=D|last2=Elenius|first2=K|last3=Laskowski|first3=K|date=2006|title=Emotion recognition in spontaneous speech using GMMs|url=http://www.speech.kth.se/prod/publications/files/1192.pdf|journal=Proceedings of Interspeech|doi=10.21437/Interspeech.2006-277|s2cid=5790745&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;|archive-date=2020-07-16|access-date=2017-04-29|archive-url=https://web.archive.org/web/20200716151038/http://www.speech.kth.se/prod/publications/files/1192.pdf|url-status=dead&lt;/ins&gt;}}&amp;lt;/ref&amp;gt;&amp;lt;ref&amp;gt;{{Cite journal|last1=Yacoub|first1=Sherif|last2=Simske|first2=Steve|last3=Lin|first3=Xiaofan|last4=Burns|first4=John|date=2003|title=Recognition of Emotions in Interactive Voice Response Systems|journal=Proceedings of Eurospeech|pages=729–732|doi=10.21437/Eurospeech.2003-307 |citeseerx=10.1.1.420.8158|s2cid=11671944 }}&amp;lt;/ref&amp;gt; These systems tend to outperform average human accuracy (approximately 60%&amp;lt;ref name=&quot;Dellaert&quot; /&amp;gt;) but are less accurate than systems which employ other modalities for emotion detection, such as physiological states or facial expressions.&amp;lt;ref name=&quot;Hudlicka-2003-p24&quot;&amp;gt;{{harvnb|Hudlicka|2003|p=24}}&amp;lt;/ref&amp;gt; However, since many speech characteristics are independent of semantics or culture, this technique is considered to be a promising route for further research.&amp;lt;ref name=&quot;Hudlicka-2003-p25&quot;&amp;gt;{{harvnb|Hudlicka|2003|p=25}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;====Algorithms====&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;====Algorithms====&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l228&quot;&gt;Line 228:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 228:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;The corrugator supercilii muscle, also known as the &amp;#039;frowning&amp;#039; muscle, draws the brow down into a frown, and therefore is the best test for negative, unpleasant emotional response.↵The zygomaticus major muscle is responsible for pulling the corners of the mouth back when you smile, and therefore is the muscle used to test for a positive emotional response.&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;The corrugator supercilii muscle, also known as the &amp;#039;frowning&amp;#039; muscle, draws the brow down into a frown, and therefore is the best test for negative, unpleasant emotional response.↵The zygomaticus major muscle is responsible for pulling the corners of the mouth back when you smile, and therefore is the muscle used to test for a positive emotional response.&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Gsrplot.svg&lt;del style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;|500px|thumb&lt;/del&gt;|Here we can see a plot of skin resistance measured using GSR and time whilst the subject played a video game. There are several peaks that are clear in the graph, which suggests that GSR is a good method of differentiating between an aroused and a non-aroused state. For example, at the start of the game where there is usually not much exciting game play, there is a high level of resistance recorded, which suggests a low level of conductivity and therefore less arousal. This is in clear contrast with the sudden trough where the player is killed as one is usually very stressed and tense as their character is killed in the game.]]&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Gsrplot.svg|Here we can see a plot of skin resistance measured using GSR and time whilst the subject played a video game. There are several peaks that are clear in the graph, which suggests that GSR is a good method of differentiating between an aroused and a non-aroused state. For example, at the start of the game where there is usually not much exciting game play, there is a high level of resistance recorded, which suggests a low level of conductivity and therefore less arousal. 
This is in clear contrast with the sudden trough where the player is killed as one is usually very stressed and tense as their character is killed in the game.&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;|frame&lt;/ins&gt;]]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;====Galvanic skin response====&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;====Galvanic skin response====&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l245&quot;&gt;Line 245:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 245:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=====Methodology=====&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=====Methodology=====&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Approaches are based on facial color changes. Delaunay triangulation is used to create the triangular local areas. Some of these triangles which define the interior of the mouth and eyes (sclera and iris) are removed. Use the left triangular areas’ pixels to create feature vectors.&amp;lt;ref name=&quot;face&quot;/&amp;gt; It shows that converting the pixel color of the standard RGB color space to a color space such as oRGB color space&amp;lt;ref name=&quot;orgb&quot;&amp;gt;{{cite journal | last1=Bratkova | first1=Margarita | last2=Boulos | first2=Solomon | last3=Shirley | first3=Peter | title=oRGB: A Practical Opponent Color Space for Computer Graphics | journal=IEEE Computer Graphics and Applications | volume=29 | issue=1 | year=2009 | doi=10.1109/mcg.2009.13 | pages=42–55| pmid=19363957 | s2cid=16690341 }}&amp;lt;/ref&amp;gt; or LMS channels perform better when dealing with faces.&amp;lt;ref name=&quot;mec&quot;&amp;gt;Hadas Shahar, [[Hagit Hel-Or]], [http://openaccess.thecvf.com/content_ICCVW_2019/papers/CVPM/Shahar_Micro_Expression_Classification_using_Facial_Color_and_Deep_Learning_Methods_ICCVW_2019_paper.pdf Micro Expression Classification using Facial Color and Deep Learning Methods], The IEEE International Conference on Computer Vision (ICCV), 2019, pp. 0–0.&amp;lt;/ref&amp;gt; So, map the above vector onto the better color space and decompose into red-green and yellow-blue channels. Then use deep learning methods to find equivalent emotions.&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Approaches are based on facial color changes. Delaunay triangulation is used to create the triangular local areas. Some of these triangles which define the interior of the mouth and eyes (sclera and iris) are removed. Use the left triangular areas’ pixels to create feature vectors.&amp;lt;ref name=&quot;face&quot;/&amp;gt; It shows that converting the pixel color of the standard RGB color space to a color space such as oRGB color space&amp;lt;ref name=&quot;orgb&quot;&amp;gt;{{cite journal | last1=Bratkova | first1=Margarita | last2=Boulos | first2=Solomon | last3=Shirley | first3=Peter | title=oRGB: A Practical Opponent Color Space for Computer Graphics | journal=IEEE Computer Graphics and Applications | volume=29 | issue=1 | year=2009 | doi=10.1109/mcg.2009.13 | pages=42–55| pmid=19363957 &lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;| bibcode=2009ICGA...29a..42B &lt;/ins&gt;| s2cid=16690341 }}&amp;lt;/ref&amp;gt; or LMS channels perform better when dealing with faces.&amp;lt;ref name=&quot;mec&quot;&amp;gt;Hadas Shahar, [[Hagit Hel-Or]], [http://openaccess.thecvf.com/content_ICCVW_2019/papers/CVPM/Shahar_Micro_Expression_Classification_using_Facial_Color_and_Deep_Learning_Methods_ICCVW_2019_paper.pdf Micro Expression Classification using Facial Color and Deep Learning Methods], The IEEE International Conference on Computer Vision (ICCV), 2019, pp. 0–0.&amp;lt;/ref&amp;gt; So, map the above vector onto the better color space and decompose into red-green and yellow-blue channels. Then use deep learning methods to find equivalent emotions.&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;===Visual aesthetics===&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;===Visual aesthetics===&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l252&quot;&gt;Line 252:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 252:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Potential applications==&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Potential applications==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Education ===&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Education ===&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Affection influences learners&#039; learning state. Using affective computing technology, computers can judge the learners&#039; affection and learning state by recognizing their facial expressions. In education, the teacher can use the analysis result to understand the student&#039;s learning and accepting ability, and then formulate reasonable teaching plans. At the same time, they can pay attention to students&#039; inner feelings, which is helpful to students&#039; psychological health. Especially in distance education, due to the separation of time and space, there is no emotional incentive between teachers and students for two-way communication. Without the atmosphere brought by traditional classroom learning, students are easily bored, and affect the learning effect. Applying affective computing in distance education system can effectively improve this situation.&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Affection influences learners&#039; learning state. Using affective computing technology, computers can judge the learners&#039; affection and learning state by recognizing their facial expressions. In education, the teacher can use the analysis result to understand the student&#039;s learning and accepting ability, and then formulate reasonable teaching plans. At the same time, they can pay attention to students&#039; inner feelings, which is helpful to students&#039; psychological health. Especially in distance education, due to the separation of time and space, there is no emotional incentive between teachers and students for two-way communication. Without the atmosphere brought by traditional classroom learning, students are easily bored, and affect the learning effect. Applying affective computing in distance education system can effectively improve this situation.&amp;lt;ref&amp;gt;{{Cite journal|url=http://www.learntechlib.org/p/173785/|title = Review of affective computing in education/Learning: Trends and challenges|journal = British Journal of Educational Technology|date = November 2016|volume = 47|issue = 6|pages = 1304–1323|last1 = Wu|first1 = Chih-Hung|last2 = Huang|first2 = Yueh-Min|last3 = Hwang|first3 = Jan-Pan|doi = 10.1111/bjet.12324&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;}}&amp;lt;/ref&amp;gt;Emotional AI can provide students with learning support, which benefits students cognitive and emotional outcomes.&amp;lt;ref&amp;gt;{{Cite journal |last=Zhang |first=Heng |last2=Liu |first2=Yuhan |last3=Jiang |first3=Meilin |last4=Chen |first4=Juanjuan |last5=Wang |first5=Minhong |last6=Paas |first6=Fred |date=2025-11-15 |title=Emotional Artificial Intelligence in Education: A Systematic Review and Meta-Analysis |url=https://doi.org/10.1007/s10648-025-10086-4 |journal=Educational Psychology Review |language=en |volume=37 |issue=4 |pages=106 |doi=10.1007/s10648-025-10086-4 |issn=1573-336X|doi-access=free &lt;/ins&gt;}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;lt;ref&amp;gt;{{Cite journal|url=http://www.learntechlib.org/p/173785/|title = Review of affective computing in education/Learning: Trends and challenges|journal = British Journal of Educational Technology|date = November 2016|volume = 47|issue = 6|pages = 1304–1323|last1 = Wu|first1 = Chih-Hung|last2 = Huang|first2 = Yueh-Min|last3 = Hwang|first3 = Jan-Pan|doi = 10.1111/bjet.12324}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-side-added&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Transportation ===&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Transportation ===&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;The applications of sensory computing may contribute to improving road safety. For example, a car can monitor the emotion of all occupants and engage in additional safety measures, such as alerting other vehicles if it detects the driver to be angry.&amp;lt;ref&amp;gt;{{cite web |date=30 August 2018 |title=In-Car Facial Recognition Detects Angry Drivers To Prevent Road Rage |url=https://gizmodo.com/in-car-facial-recognition-detects-angry-drivers-to-prev-1543709793 |website=Gizmodo}}&amp;lt;/ref&amp;gt; In addition, affective computing systems for monitoring the driver&#039;s stress may allow various interventions such as driver assistance systems adjusted according to the stress level&amp;lt;ref&amp;gt;{{Cite journal |last1=Collet |first1=Christian |last2=Musicant |first2=Oren |date=2019-04-24 |title=Associating Vehicles Automation With Drivers Functional State Assessment Systems: A Challenge for Road Safety in the Future |journal=Frontiers in Human Neuroscience |volume=13 |&lt;del style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;page&lt;/del&gt;=131 |doi=10.3389/fnhum.2019.00131 |issn=1662-5161 |pmc=6503868 |pmid=31114489 |doi-access=free }}&amp;lt;/ref&amp;gt; and minimal and direct interventions to change the emotional state of the driver.&amp;lt;ref&amp;gt;{{Cite book |last1=Balters |first1=Stephanie |last2=Bernstein |first2=Madeline |last3=Paredes |first3=Pablo E. |chapter=On-road Stress Analysis for In-car Interventions During the Commute |date=2019-05-02 |title=Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems |chapter-url=https://dl.acm.org/doi/10.1145/3290607.3312824 |language=en |publisher=ACM |pages=1–6 |doi=10.1145/3290607.3312824 |isbn=978-1-4503-5971-9|s2cid=144207824 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;The applications of sensory computing may contribute to improving road safety. For example, a car can monitor the emotion of all occupants and engage in additional safety measures, such as alerting other vehicles if it detects the driver to be angry.&amp;lt;ref&amp;gt;{{cite web |date=30 August 2018 |title=In-Car Facial Recognition Detects Angry Drivers To Prevent Road Rage |url=https://gizmodo.com/in-car-facial-recognition-detects-angry-drivers-to-prev-1543709793 |website=Gizmodo}}&amp;lt;/ref&amp;gt; In addition, affective computing systems for monitoring the driver&#039;s stress may allow various interventions such as driver assistance systems adjusted according to the stress level&amp;lt;ref&amp;gt;{{Cite journal |last1=Collet |first1=Christian |last2=Musicant |first2=Oren |date=2019-04-24 |title=Associating Vehicles Automation With Drivers Functional State Assessment Systems: A Challenge for Road Safety in the Future |journal=Frontiers in Human Neuroscience |volume=13 |&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;article-number&lt;/ins&gt;=131 |doi=10.3389/fnhum.2019.00131 |issn=1662-5161 |pmc=6503868 |pmid=31114489 |doi-access=free }}&amp;lt;/ref&amp;gt; and minimal and direct interventions to change the emotional state of the driver.&amp;lt;ref&amp;gt;{{Cite book |last1=Balters |first1=Stephanie |last2=Bernstein |first2=Madeline |last3=Paredes |first3=Pablo E. |chapter=On-road Stress Analysis for In-car Interventions During the Commute |date=2019-05-02 |title=Extended Abstracts of the 2019 CHI Conference on Human Factors in Computing Systems |chapter-url=https://dl.acm.org/doi/10.1145/3290607.3312824 |language=en |publisher=ACM |pages=1–6 |doi=10.1145/3290607.3312824 |isbn=978-1-4503-5971-9|s2cid=144207824 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Healthcare ===&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;=== Healthcare ===&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>imported&gt;OAbot</name></author>
	</entry>
	<entry>
		<id>http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=1689851&amp;oldid=prev</id>
		<title>imported&gt;OAbot: Open access bot: url-access=subscription updated in citation with #oabot.</title>
		<link rel="alternate" type="text/html" href="http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=1689851&amp;oldid=prev"/>
		<updated>2025-06-30T03:36:25Z</updated>

		<summary type="html">&lt;p&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/OABOT&quot; class=&quot;extiw&quot; title=&quot;wikipedia:OABOT&quot;&gt;Open access bot&lt;/a&gt;: url-access=subscription updated in citation with #oabot.&lt;/p&gt;
&lt;table style=&quot;background-color: #fff; color: #202122;&quot; data-mw=&quot;interface&quot;&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;tr class=&quot;diff-title&quot; lang=&quot;en&quot;&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;← Previous revision&lt;/td&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;Revision as of 03:36, 30 June 2025&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l52&quot;&gt;Line 52:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 52:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Another area within affective computing is the design of computational devices proposed to exhibit either innate emotional capabilities or that are capable of convincingly simulating emotions. A more practical approach, based on current technological capabilities, is the simulation of emotions in conversational agents in order to enrich and facilitate interactivity between human and machine.&amp;lt;ref&amp;gt;{{Cite book|last=Heise|first=David|contribution=Enculturating agents with expressive role behavior|year=2004|title=Agent Culture: Human-Agent Interaction in a Mutlicultural World|editor1=Sabine Payr|pages=127–142|publisher=Lawrence Erlbaum Associates|editor2-first=Robert |editor2-last=Trappl}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Another area within affective computing is the design of computational devices proposed to exhibit either innate emotional capabilities or that are capable of convincingly simulating emotions. A more practical approach, based on current technological capabilities, is the simulation of emotions in conversational agents in order to enrich and facilitate interactivity between human and machine.&amp;lt;ref&amp;gt;{{Cite book|last=Heise|first=David|contribution=Enculturating agents with expressive role behavior|year=2004|title=Agent Culture: Human-Agent Interaction in a Mutlicultural World|editor1=Sabine Payr|pages=127–142|publisher=Lawrence Erlbaum Associates|editor2-first=Robert |editor2-last=Trappl}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[Marvin Minsky]], one of the pioneering computer scientists in [[artificial intelligence]], relates emotions to the broader issues of machine intelligence stating in &#039;&#039;[[The Emotion Machine]]&#039;&#039; that emotion is &quot;not especially different from the processes that we call &#039;thinking.&#039;&quot;&amp;lt;ref&amp;gt;{{cite news|url=https://www.washingtonpost.com/wp-dyn/content/article/2006/12/14/AR2006121401554.html|title=Mind Over Matter|last=Restak|first=Richard|date=2006-12-17|newspaper=The Washington Post|access-date=2008-05-13}}&amp;lt;/ref&amp;gt; The innovative approach &quot;digital humans&quot; or [[Virtual human|virtual humans]] includes an attempt to give these programs, which simulate humans, the emotional dimension as well, including reactions in accordance with the reaction that a real person would react in a certain emotionally stimulating situation as well as facial expressions and gestures.&amp;lt;ref&amp;gt;{{Cite journal |last1=Loveys |first1=Kate |last2=Sagar |first2=Mark |last3=Broadbent |first3=Elizabeth |date=2020-07-22 |title=The Effect of Multimodal Emotional Expression on Responses to a Digital Human during a Self-Disclosure Conversation: a Computational Analysis of User Language |url=http://dx.doi.org/10.1007/s10916-020-01624-4 |journal=Journal of Medical Systems |volume=44 |issue=9 |page=143 |doi=10.1007/s10916-020-01624-4 |pmid=32700060 |s2cid=220717084 |issn=0148-5598}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[Marvin Minsky]], one of the pioneering computer scientists in [[artificial intelligence]], relates emotions to the broader issues of machine intelligence stating in &#039;&#039;[[The Emotion Machine]]&#039;&#039; that emotion is &quot;not especially different from the processes that we call &#039;thinking.&#039;&quot;&amp;lt;ref&amp;gt;{{cite news|url=https://www.washingtonpost.com/wp-dyn/content/article/2006/12/14/AR2006121401554.html|title=Mind Over Matter|last=Restak|first=Richard|date=2006-12-17|newspaper=The Washington Post|access-date=2008-05-13}}&amp;lt;/ref&amp;gt; The innovative approach &quot;digital humans&quot; or [[Virtual human|virtual humans]] includes an attempt to give these programs, which simulate humans, the emotional dimension as well, including reactions in accordance with the reaction that a real person would react in a certain emotionally stimulating situation as well as facial expressions and gestures.&amp;lt;ref&amp;gt;{{Cite journal |last1=Loveys |first1=Kate |last2=Sagar |first2=Mark |last3=Broadbent |first3=Elizabeth |date=2020-07-22 |title=The Effect of Multimodal Emotional Expression on Responses to a Digital Human during a Self-Disclosure Conversation: a Computational Analysis of User Language |url=http://dx.doi.org/10.1007/s10916-020-01624-4 |journal=Journal of Medical Systems |volume=44 |issue=9 |page=143 |doi=10.1007/s10916-020-01624-4 |pmid=32700060 |s2cid=220717084 |issn=0148-5598&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;|url-access=subscription &lt;/ins&gt;}}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotion in machines often refers to emotion in computational, often AI-based, systems. As a result, the terms &amp;#039;emotional AI&amp;#039; and &amp;#039;[https://mitsloan.mit.edu/ideas-made-to-matter/emotion-ai-explained emotion AI]&amp;#039; are being used.&amp;lt;ref&amp;gt;{{Cite journal |last=Ho |first=Manh-Tung |date=29 March 2023 |title=An analytical framework for studying attitude towards emotional AI: The three-pronged approach |journal=[[MethodsX]] |volume=10 |issue=102149 |doi=10.1016/j.mex.2023.102149 |pmid=37091958 |pmc=10113835 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;Emotion in machines often refers to emotion in computational, often AI-based, systems. As a result, the terms &amp;#039;emotional AI&amp;#039; and &amp;#039;[https://mitsloan.mit.edu/ideas-made-to-matter/emotion-ai-explained emotion AI]&amp;#039; are being used.&amp;lt;ref&amp;gt;{{Cite journal |last=Ho |first=Manh-Tung |date=29 March 2023 |title=An analytical framework for studying attitude towards emotional AI: The three-pronged approach |journal=[[MethodsX]] |volume=10 |issue=102149 |doi=10.1016/j.mex.2023.102149 |pmid=37091958 |pmc=10113835 }}&amp;lt;/ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>imported&gt;OAbot</name></author>
	</entry>
	<entry>
		<id>http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=682933&amp;oldid=prev</id>
		<title>imported&gt;Maxeto0910: period after sentence</title>
		<link rel="alternate" type="text/html" href="http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=682933&amp;oldid=prev"/>
		<updated>2025-06-20T02:18:42Z</updated>

		<summary type="html">&lt;p&gt;period after sentence&lt;/p&gt;
&lt;table style=&quot;background-color: #fff; color: #202122;&quot; data-mw=&quot;interface&quot;&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;col class=&quot;diff-marker&quot; /&gt;
				&lt;col class=&quot;diff-content&quot; /&gt;
				&lt;tr class=&quot;diff-title&quot; lang=&quot;en&quot;&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;← Previous revision&lt;/td&gt;
				&lt;td colspan=&quot;2&quot; style=&quot;background-color: #fff; color: #202122; text-align: center;&quot;&gt;Revision as of 02:18, 20 June 2025&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l1&quot;&gt;Line 1:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 1:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{update|date=January 2023}}&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{update|date=January 2023}}&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{short description|Area of research in computer science aiming to understand the emotional state of users}}&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{short description|Area of research in computer science aiming to understand the emotional state of users}}&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Sophia at the AI for Good Global Summit 2018 (27254369347) (cropped).jpg|thumb|Electronic devices such as robots are increasingly able to mimic human emotion]]  &lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;[[File:Sophia at the AI for Good Global Summit 2018 (27254369347) (cropped).jpg|thumb|Electronic devices such as robots are increasingly able to mimic human emotion&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;.&lt;/ins&gt;]]  &lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;#039;&amp;#039;&amp;#039;Affective computing&amp;#039;&amp;#039;&amp;#039; is the study and development of systems and devices that can recognize, interpret, process, and simulate human [[Affect (psychology)|affects]]. It is an interdisciplinary field spanning [[computer science]], [[psychology]], and [[cognitive science]].&amp;lt;ref name=TaoTan&amp;gt;{{cite conference |first=Jianhua |last=Tao |author2=Tieniu Tan |title=Affective Computing: A Review |book-title=Affective Computing and Intelligent Interaction |volume=[[LNCS]] 3784 |pages=981–995 |publisher=Springer |year=2005 |doi=10.1007/11573548 }}&amp;lt;/ref&amp;gt; While some core ideas in the field may be traced as far back as to early philosophical inquiries into [[Emotion#James–Lange theory|emotion]],&amp;lt;ref&amp;gt;{{cite journal |last=James |first=William |year=1884 |title=What Is Emotion |journal=Mind |volume=9 |issue=34 |pages=188–205 |doi=10.1093/mind/os-IX.34.188|url=https://zenodo.org/record/1431811 }} Cited by Tao and Tan.&amp;lt;/ref&amp;gt; the more modern branch of computer science originated with [[Rosalind Picard]]&amp;#039;s 1995 paper entitled &amp;quot;Affective Computing&amp;quot;&amp;lt;ref&amp;gt;[https://vismod.media.mit.edu/pub/tech-reports/TR-321.pdf &amp;quot;Affective Computing&amp;quot;] MIT Technical Report #321 ([http://vismod.media.mit.edu/pub/tech-reports/TR-321-ABSTRACT.html Abstract]), 1995&amp;lt;/ref&amp;gt; and her 1997 book of the same name&amp;lt;ref name=&amp;quot;Affective Computing&amp;quot;&amp;gt;{{cite book|last1=Picard|first1=Rosalind|title=Affective Computing|date=1997|publisher=MIT Press|location=Cambridge, MA|page=1}}&amp;lt;/ref&amp;gt; published by [[MIT Press]].&amp;lt;ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;#039;&amp;#039;&amp;#039;Affective computing&amp;#039;&amp;#039;&amp;#039; is the study and development of systems and devices that can recognize, interpret, process, and simulate human [[Affect (psychology)|affects]]. It is an interdisciplinary field spanning [[computer science]], [[psychology]], and [[cognitive science]].&amp;lt;ref name=TaoTan&amp;gt;{{cite conference |first=Jianhua |last=Tao |author2=Tieniu Tan |title=Affective Computing: A Review |book-title=Affective Computing and Intelligent Interaction |volume=[[LNCS]] 3784 |pages=981–995 |publisher=Springer |year=2005 |doi=10.1007/11573548 }}&amp;lt;/ref&amp;gt; While some core ideas in the field may be traced as far back as to early philosophical inquiries into [[Emotion#James–Lange theory|emotion]],&amp;lt;ref&amp;gt;{{cite journal |last=James |first=William |year=1884 |title=What Is Emotion |journal=Mind |volume=9 |issue=34 |pages=188–205 |doi=10.1093/mind/os-IX.34.188|url=https://zenodo.org/record/1431811 }} Cited by Tao and Tan.&amp;lt;/ref&amp;gt; the more modern branch of computer science originated with [[Rosalind Picard]]&amp;#039;s 1995 paper entitled &amp;quot;Affective Computing&amp;quot;&amp;lt;ref&amp;gt;[https://vismod.media.mit.edu/pub/tech-reports/TR-321.pdf &amp;quot;Affective Computing&amp;quot;] MIT Technical Report #321 ([http://vismod.media.mit.edu/pub/tech-reports/TR-321-ABSTRACT.html Abstract]), 1995&amp;lt;/ref&amp;gt; and her 1997 book of the same name&amp;lt;ref name=&amp;quot;Affective Computing&amp;quot;&amp;gt;{{cite book|last1=Picard|first1=Rosalind|title=Affective Computing|date=1997|publisher=MIT Press|location=Cambridge, MA|page=1}}&amp;lt;/ref&amp;gt; published by [[MIT Press]].&amp;lt;ref&amp;gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{cite web&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;{{cite web&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot; id=&quot;mw-diff-left-l26&quot;&gt;Line 26:&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;Line 26:&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;|quote= Rosalind Picard, a genial MIT professor, is the field&amp;#039;s godmother; her 1997 book, &amp;#039;&amp;#039;Affective Computing&amp;#039;&amp;#039;, triggered an explosion of interest in the emotional side of computers and their users.&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;|quote= Rosalind Picard, a genial MIT professor, is the field&amp;#039;s godmother; her 1997 book, &amp;#039;&amp;#039;Affective Computing&amp;#039;&amp;#039;, triggered an explosion of interest in the emotional side of computers and their users.&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;| archive-url= https://web.archive.org/web/20080518185630/http://www.wired.com/wired/archive/11.12/love.html| archive-date= 18 May 2008 | url-status= live}}&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;| archive-url= https://web.archive.org/web/20080518185630/http://www.wired.com/wired/archive/11.12/love.html| archive-date= 18 May 2008 | url-status= live}}&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;−&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;lt;/ref&amp;gt; One of the motivations for the research is the ability to give machines [[emotional intelligence]], including to [[artificial empathy|simulate empathy]]. The machine should interpret the emotional state of humans and adapt its behavior to them, giving an appropriate response to those emotions.&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&amp;lt;/ref&amp;gt; One of the motivations for the research is the ability to give machines [[emotional intelligence]], including to [[artificial empathy|simulate empathy]]. The machine should interpret the emotional state of humans and adapt its behavior to them, giving an appropriate response to those emotions. &lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;Recent experimental research has shown that subtle affective haptic feedback can shape human reward learning and mobile interaction behavior,&amp;lt;ref&amp;gt;Hampton, W. H., &amp;amp; Hildebrand, C. (2025). &amp;quot;Haptic Rewards: How Mobile Vibrations Shape Reward Response and Consumer Choice.&amp;quot; &amp;#039;&amp;#039;Journal of Consumer Research&amp;#039;&amp;#039;. https://doi.org/10.1093/jcr/ucaf025&amp;lt;/ref&amp;gt; suggesting that affective computing systems may not only interpret emotional states but also actively modulate user actions through emotion-laden outputs.&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-side-deleted&quot;&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot; data-marker=&quot;+&quot;&gt;&lt;/td&gt;&lt;td style=&quot;color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt; &lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;br&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;== Areas ==&lt;/div&gt;&lt;/td&gt;&lt;td class=&quot;diff-marker&quot;&gt;&lt;/td&gt;&lt;td style=&quot;background-color: #f8f9fa; color: #202122; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #eaecf0; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;== Areas ==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>imported&gt;Maxeto0910</name></author>
	</entry>
	<entry>
		<id>http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=160059&amp;oldid=prev</id>
		<title>87.102.101.117: /* Healthcare */</title>
		<link rel="alternate" type="text/html" href="http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;diff=160059&amp;oldid=prev"/>
		<updated>2025-03-06T10:01:22Z</updated>

		<summary type="html">&lt;p&gt;&lt;span class=&quot;autocomment&quot;&gt;Healthcare&lt;/span&gt;&lt;/p&gt;
&lt;a href=&quot;http://debianws.lexgopc.com/wiki143/index.php?title=Affective_computing&amp;amp;diff=160059&quot;&gt;Show changes&lt;/a&gt;</summary>
		<author><name>87.102.101.117</name></author>
	</entry>
</feed>