<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/css" href="http://cslt.org/mediawiki/skins/common/feed.css?303"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="zh-cn">
		<id>http://cslt.org/mediawiki/index.php?action=history&amp;feed=atom&amp;title=ASR-nsfc-publication</id>
		<title>ASR-nsfc-publication - 版本历史</title>
		<link rel="self" type="application/atom+xml" href="http://cslt.org/mediawiki/index.php?action=history&amp;feed=atom&amp;title=ASR-nsfc-publication"/>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;action=history"/>
		<updated>2022-01-07T13:10:45Z</updated>
		<subtitle>本wiki的该页面的版本历史</subtitle>
		<generator>MediaWiki 1.23.3</generator>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37567&amp;oldid=prev</id>
		<title>2022年1月7日 (五) 13:10 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37567&amp;oldid=prev"/>
				<updated>2022-01-07T13:10:45Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2022年1月7日 (五) 13:10的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Ruiqi Liu, Jiawen Kang, Yue Fa, Hao Cui, Yunqi Cai, Ravichander Vipperla, Thomas Fang Zheng and Dong Wang. &amp;quot;CN-Celeb: multi-genre speaker recognition&amp;quot;, Speech Communication, 2022. [https://arxiv.org/pdf/2012.12468]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Ruiqi Liu, Jiawen Kang, Yue Fa, Hao Cui, Yunqi Cai, Ravichander Vipperla, Thomas Fang Zheng and Dong Wang. &amp;quot;CN-Celeb: multi-genre speaker recognition&amp;quot;, Speech Communication, 2022. [https://arxiv.org/pdf/2012.12468 &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf&lt;/ins&gt;]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37566&amp;oldid=prev</id>
		<title>2022年1月7日 (五) 13:10 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37566&amp;oldid=prev"/>
				<updated>2022-01-07T13:10:32Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2022年1月7日 (五) 13:10的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Lantian Li, Ruiqi Liu, Jiawen Kang, Yue Fa, Hao Cui, Yunqi Cai, Ravichander Vipperla, Thomas Fang Zheng and Dong Wang. &amp;quot;CN-Celeb: multi-genre speaker recognition&amp;quot;, Speech Communication, 2022. [https://arxiv.org/pdf/2012.12468]&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37494&amp;oldid=prev</id>
		<title>Cslt：/* Conference papers (EI) */</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37494&amp;oldid=prev"/>
				<updated>2021-12-29T07:51:31Z</updated>
		
		<summary type="html">&lt;p&gt;‎&lt;span dir=&quot;auto&quot;&gt;&lt;span class=&quot;autocomment&quot;&gt;Conference papers (EI)&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2021年12月29日 (三) 07:51的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第14行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第14行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Conference papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Conference papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Tiankai Zhi, Ying Shi, Wenqiang Du, Guanyu Li and Dong Wang, &amp;quot;A Free Mongolian Speech Database and Accompanied Baselines&amp;quot;, O-COCOSDA 2021.[]&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Jiao Han, Yunqi Cai, Lantian Li, Guanyu Li, Dong Wang, &amp;quot;An MAP Estimation for Between-Class Variance&amp;quot;, APSIPA 2021. []&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Di Wang, Lantian Li, Hongzhi Yu, Dong Wang,A STUDY ON DECOUPLED PROBABILISTIC LINEAR DISCRIMINANT ANALYSIS, APSIPA 2021.&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Haoran Sun, Lantian Li, Thomas Fang Zheng, Dong Wang， HOW SPEECH IS RECOGNIZED TO BE EMOTIONAL - A STUDY BASED ON INFORMATION DECOMPOSITION, APSIPA 2021.&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Lantian Li, Yang Zhang, Jiawen Kang, Thomas Fang Zheng, Dong Wang, &amp;quot;SQUEEZING VALUE OF CROSS-DOMAIN LABELS: A DECOUPLED SCORING APPROACH FOR SPEAKER VERIFICATION&amp;quot;, ICASSP 2021. [pdf]&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Ying Shi, Haolin Chen, Zhiyuan Tang, Lantian Li, Dong Wang, Jiqing Han, Can We Trust Deep Speech Prior?, SLT 2021[https://arxiv.org/abs/2011.02110 pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Ying Shi, Haolin Chen, Zhiyuan Tang, Lantian Li, Dong Wang, Jiqing Han, Can We Trust Deep Speech Prior?, SLT 2021[https://arxiv.org/abs/2011.02110 pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zheng Li, Miao Zhao, Qingyang Hong, Lin Li, Zhiyuan Tang, Dong Wang, Liming Song, Cheng Yang, &amp;quot;AP20-OLR Challenge: Three Tasks and TheirBaselines&amp;quot;, APSIPA 2020. [https://arxiv.org/pdf/2006.03473.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zheng Li, Miao Zhao, Qingyang Hong, Lin Li, Zhiyuan Tang, Dong Wang, Liming Song, Cheng Yang, &amp;quot;AP20-OLR Challenge: Three Tasks and TheirBaselines&amp;quot;, APSIPA 2020. [https://arxiv.org/pdf/2006.03473.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37493&amp;oldid=prev</id>
		<title>Cslt：/* Journal papers (SCI) */</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37493&amp;oldid=prev"/>
				<updated>2021-12-29T07:49:02Z</updated>
		
		<summary type="html">&lt;p&gt;‎&lt;span dir=&quot;auto&quot;&gt;&lt;span class=&quot;autocomment&quot;&gt;Journal papers (SCI)&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2021年12月29日 (三) 07:49的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Dong Wang etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing, 2021 [https://arxiv.org/pdf/2012.12471.&lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf &lt;/ins&gt;pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37492&amp;oldid=prev</id>
		<title>Cslt：/* Journal papers (SCI) */</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37492&amp;oldid=prev"/>
				<updated>2021-12-29T07:48:49Z</updated>
		
		<summary type="html">&lt;p&gt;‎&lt;span dir=&quot;auto&quot;&gt;&lt;span class=&quot;autocomment&quot;&gt;Journal papers (SCI)&lt;/span&gt;&lt;/span&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2021年12月29日 (三) 07:48的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing [https://arxiv.org/pdf/2012.12471.pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;Dong Wang &lt;/ins&gt;etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing&lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;, 2021 &lt;/ins&gt;[https://arxiv.org/pdf/2012.12471.pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Gulnur Arkin, Askar Hamdulla and Mijit Ablimit , Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Gulnur Arkin, Askar Hamdulla and Mijit Ablimit , Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing 2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing 2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing 2017. [http://ieeexplore.ieee.org/document/8070977 online] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing 2017. [http://ieeexplore.ieee.org/document/8070977 online]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37491&amp;oldid=prev</id>
		<title>2021年12月29日 (三) 07:48 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=37491&amp;oldid=prev"/>
				<updated>2021-12-29T07:48:27Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2021年12月29日 (三) 07:48的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第1行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (SCI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot;&gt;&amp;#160;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;&lt;ins style=&quot;font-weight: bold; text-decoration: none;&quot;&gt;# Lantian Li, etc., A Principle Solution for Enroll-Test Mismatch, IEEE Transaction on Audio, Speech and Language Processing [https://arxiv.org/pdf/2012.12471.pdf]&lt;/ins&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35812&amp;oldid=prev</id>
		<title>2020年12月19日 (六) 02:01 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35812&amp;oldid=prev"/>
				<updated>2020-12-19T02:01:57Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2020年12月19日 (六) 02:01的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第3行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第3行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Gulnur Arkin, Askar Hamdulla and Mijit Ablimit , Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Gulnur Arkin, Askar Hamdulla and Mijit Ablimit , Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE &lt;del class=&quot;diffchange diffchange-inline&quot;&gt;TASLP &lt;/del&gt;2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;Transactions on Audio, Speech and Language Processing &lt;/ins&gt;2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE &lt;del class=&quot;diffchange diffchange-inline&quot;&gt;TASLP &lt;/del&gt;2017. [http://ieeexplore.ieee.org/document/8070977 online] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;Transactions on Audio, Speech and Language Processing &lt;/ins&gt;2017. [http://ieeexplore.ieee.org/document/8070977 online] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Journal papers (EI)==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35811&amp;oldid=prev</id>
		<title>2020年12月19日 (六) 01:56 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35811&amp;oldid=prev"/>
				<updated>2020-12-19T01:56:42Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2020年12月19日 (六) 01:56的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第2行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第2行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yunqi Cai, Lantian Li, Andrew Abel, Xiaoyan Zhu, Dong Wang, &amp;quot;Deep Normalization for Speaker Vectors&amp;quot;, IEEE Transactions on Audio, Speech and Language Processing, 2020. [https://arxiv.org/pdf/2004.04095.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, &amp;quot;A Simulation Study on Optimal Scores for Speaker Recognition&amp;quot;, EURASIP Journal on Audio, Speech, and Music Processing, 2020. [http://wangd.cslt.org/public/pdf/nl-eurosip.pdf pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;Gulnur Arkin, Askar Hamdulla and Mijit Ablimit , &lt;/ins&gt;Analysis of phonemes and tones confusion rules obtained by ASR，Wireless Networks，2020.[https://link.springer.com/article/10.1007%2Fs11276-019-02220-2 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE TASLP 2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Lantian Li, Dong Wang,&amp;#160; Ravichander Vipperla, &amp;quot;Collaborative Joint Training With Multitask Recurrent Model for Speech and Speaker Recognition&amp;quot;, IEEE TASLP 2018, vol 25, no.3. [http://ieeexplore.ieee.org/document/7782371 online]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE TASLP 2017. [http://ieeexplore.ieee.org/document/8070977 online] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang,Dong Wang,Yixiang Chen,Lantian Li,Andrew Abel, &amp;quot;Phonetic Temporal Neural Model for Language Identification&amp;quot;, IEEE TASLP 2017. [http://ieeexplore.ieee.org/document/8070977 online] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35810&amp;oldid=prev</id>
		<title>2020年12月19日 (六) 01:55 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35810&amp;oldid=prev"/>
				<updated>2020-12-19T01:55:49Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2020年12月19日 (六) 01:55的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第47行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第47行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, Thomas Fang Zheng, Zhiyuan Tang, Ying Shi, Lantian Li, Shiyue Zhang Hongzhi Yu, Guanyu Li, Shipeng Xu, Askar Hummdulla, Mijit Ablimit, Gulnigar Mahmut, M2ASR: AMBITIONS AND FIRST YEAR PROGRESS, O-COCOSDA 2017. [[媒体文件:M2asr.pdf|pdf]]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Dong Wang, Thomas Fang Zheng, Zhiyuan Tang, Ying Shi, Lantian Li, Shiyue Zhang Hongzhi Yu, Guanyu Li, Shipeng Xu, Askar Hummdulla, Mijit Ablimit, Gulnigar Mahmut, M2ASR: AMBITIONS AND FIRST YEAR PROGRESS, O-COCOSDA 2017. [[媒体文件:M2asr.pdf|pdf]]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yang Feng, Shiyue Zhang, Andy Zhang, Dong Wang and Andrew Abel, Memory-augmented Neural Machine Translation, EMNLP 2017 [http://wangd.cslt.org/public/pdf/memory-augmented-nmt.pdf pdf] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yang Feng, Shiyue Zhang, Andy Zhang, Dong Wang and Andrew Abel, Memory-augmented Neural Machine Translation, EMNLP 2017 [http://wangd.cslt.org/public/pdf/memory-augmented-nmt.pdf pdf] &amp;#160;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Yixiang Chen, Dong Wang, Thomas Fang Zheng, A Study on Replay Attack and Anti-Spoofing for Automatic Speaker Verification, Interspeech 2017 [https://arxiv.org/pdf/1705.03670.pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Yixiang Chen, Dong Wang, Thomas Fang Zheng, A Study on Replay Attack and Anti-Spoofing for Automatic Speaker Verification, Interspeech 2017 [https://arxiv.org/pdf/1705.03670.&lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf &lt;/ins&gt;pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Yixiang Chen, Ying Shi, Zhiyuan Tang, Dong Wang, &amp;quot;Deep Speaker Feature Learning for Text-independent Speaker Verification&amp;quot;, Interspeech 2017[https://arxiv.org/pdf/1705.03670.pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li, Yixiang Chen, Ying Shi, Zhiyuan Tang, Dong Wang, &amp;quot;Deep Speaker Feature Learning for Text-independent Speaker Verification&amp;quot;, Interspeech 2017[https://arxiv.org/pdf/1705.03670.&lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf &lt;/ins&gt;pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Jiyuan Zhang, Yang Feng, Dong Wang, Yang Wang, Andrw Abel, Shiyue Zhang, Andi Zhangi, &amp;quot;Flexible and Creative Chinese Poetry Generation Using Neural Memory&amp;quot;, ACL 2017 [https://arxiv.org/abs/1705.03773 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Jiyuan Zhang, Yang Feng, Dong Wang, Yang Wang, Andrw Abel, Shiyue Zhang, Andi Zhangi, &amp;quot;Flexible and Creative Chinese Poetry Generation Using Neural Memory&amp;quot;, ACL 2017 [https://arxiv.org/abs/1705.03773 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Ying Shi, Dong Wang, Yang Feng, and Shiyue Zhang, &amp;quot;Memory Visualization for Gated Recurrent Neural Networks in Speech Recognition&amp;quot;, ICASSP 2017.[https://arxiv.org/abs/1609.08789 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Zhiyuan Tang, Ying Shi, Dong Wang, Yang Feng, and Shiyue Zhang, &amp;quot;Memory Visualization for Gated Recurrent Neural Networks in Speech Recognition&amp;quot;, ICASSP 2017.[https://arxiv.org/abs/1609.08789 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第62行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第62行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Miao Zhang, Yixiang Chen, Lantian Li and Dong Wang, Speaker Recognition with Cough, Laugh and “Wei”, APSIPA 2017, link: [https://arxiv.org/abs/1706.07860 arXiv]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Miao Zhang, Yixiang Chen, Lantian Li and Dong Wang, Speaker Recognition with Cough, Laugh and “Wei”, APSIPA 2017, link: [https://arxiv.org/abs/1706.07860 arXiv]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Rehmutulla Memet; Mewlude Nijat; Gulnigar Mahmut; Askar Hamdulla, A rule and statistical modeling based stem extraction method for Kazakh words，Proceedings of the 2017 International Conference on Asian Language Processing, IALP 2017, v 2018-January, p 231-234, July 2, 2017 [https://ieeexplore.ieee.org/abstract/document/8300586 link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Rehmutulla Memet; Mewlude Nijat; Gulnigar Mahmut; Askar Hamdulla, A rule and statistical modeling based stem extraction method for Kazakh words，Proceedings of the 2017 International Conference on Asian Language Processing, IALP 2017, v 2018-January, p 231-234, July 2, 2017 [https://ieeexplore.ieee.org/abstract/document/8300586 link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Shipeng Xu, Hongzhi Yu, Thomas Fang Zheng, Guanyu Li, Gegeentana. “Language Resource Construction for Mongolian”, Proceedings of APSIPA Annual Summit and Conference(APSIPA), 2017 [https://ieeexplore.ieee.org/abstract/document/8282132]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Shipeng Xu, Hongzhi Yu, Thomas Fang Zheng, Guanyu Li, Gegeentana. “Language Resource Construction for Mongolian”, Proceedings of APSIPA Annual Summit and Conference(APSIPA), 2017 [https://ieeexplore.ieee.org/abstract/document/8282132 &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf&lt;/ins&gt;]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Other papers==&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;==Other papers==&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# 沙尔旦尔·帕尔哈提,米吉提·阿不里米提,艾斯卡尔·艾木都拉. 基于稳健词素序列和LSTM的维吾尔语短文本分类[J]. 中文信息学报,2020,34(01):63-70. [http://jcip.cipsc.org.cn/CN/article/downloadArticleFile.do?attachType=PDF&amp;amp;id=2894 &lt;del class=&quot;diffchange diffchange-inline&quot;&gt;pdf&lt;/del&gt;]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# 沙尔旦尔·帕尔哈提,米吉提·阿不里米提,艾斯卡尔·艾木都拉. 基于稳健词素序列和LSTM的维吾尔语短文本分类[J]. 中文信息学报,2020,34(01):63-70. [http://jcip.cipsc.org.cn/CN/article/downloadArticleFile.do?attachType=PDF&amp;amp;id=2894 &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;link&lt;/ins&gt;]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# 沙尔旦尔·帕尔哈提,米吉提·阿不里米提,艾斯卡尔·艾木都拉. 词干单元和卷积神经网络的哈萨克短文本分类[J]. 小型微型计算机系统,2020,41(08):1627-1633. [http://gb.oversea.cnki.net/KCMS/detail/detail.aspx?filename=XXWX202008014&amp;amp;dbcode=CJFD&amp;amp;dbname=CJFDTEMP]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# 沙尔旦尔·帕尔哈提,米吉提·阿不里米提,艾斯卡尔·艾木都拉. 词干单元和卷积神经网络的哈萨克短文本分类[J]. 小型微型计算机系统,2020,41(08):1627-1633. [http://gb.oversea.cnki.net/KCMS/detail/detail.aspx?filename=XXWX202008014&amp;amp;dbcode=CJFD&amp;amp;dbname=CJFDTEMP &lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;link&lt;/ins&gt;]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	<entry>
		<id>http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35809&amp;oldid=prev</id>
		<title>2020年12月19日 (六) 01:54 Cslt</title>
		<link rel="alternate" type="text/html" href="http://cslt.org/mediawiki/index.php?title=ASR-nsfc-publication&amp;diff=35809&amp;oldid=prev"/>
				<updated>2020-12-19T01:54:56Z</updated>
		
		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;table class='diff diff-contentalign-left'&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;col class='diff-marker' /&gt;
				&lt;col class='diff-content' /&gt;
				&lt;tr style='vertical-align: top;'&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;←上一版本&lt;/td&gt;
				&lt;td colspan='2' style=&quot;background-color: white; color:black; text-align: center;&quot;&gt;2020年12月19日 (六) 01:54的版本&lt;/td&gt;
				&lt;/tr&gt;&lt;tr&gt;&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第23行：&lt;/td&gt;
&lt;td colspan=&quot;2&quot; class=&quot;diff-lineno&quot;&gt;第23行：&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yang Zhang and Lantian Li and Dong Wang, &amp;quot;VAE-based regularization for deep speaker embedding&amp;quot;, Interspeech 2019 [https://arxiv.org/abs/1904.03617 pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Yang Zhang and Lantian Li and Dong Wang, &amp;quot;VAE-based regularization for deep speaker embedding&amp;quot;, Interspeech 2019 [https://arxiv.org/abs/1904.03617 pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li,Zhiyuan Tang,Ying Shi,Dong Wang, &amp;quot;Gaussian-Constrained Training for Speaker Verification&amp;quot;, ICASSP 2019[https://arxiv.org/abs/1811.03258 pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Lantian Li,Zhiyuan Tang,Ying Shi,Dong Wang, &amp;quot;Gaussian-Constrained Training for Speaker Verification&amp;quot;, ICASSP 2019[https://arxiv.org/abs/1811.03258 pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;−&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #ffe49c; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Sardar Parhat, Gao Ting, Mijit Ablimit, Askar Hamdulla, A morpheme sequence and convolutional neural network based Kazakh text classification，2019 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference, APSIPA ASC 2019, p 1903-1906, November 2019 [http://www.apsipa.org/proceedings/2019/pdfs/319.pdf]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;+&lt;/td&gt;&lt;td style=&quot;color:black; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #a3d3ff; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Sardar Parhat, Gao Ting, Mijit Ablimit, Askar Hamdulla, A morpheme sequence and convolutional neural network based Kazakh text classification，2019 Asia-Pacific Signal and Information Processing Association Annual Summit and Conference, APSIPA ASC 2019, p 1903-1906, November 2019 [http://www.apsipa.org/proceedings/2019/pdfs/319.&lt;ins class=&quot;diffchange diffchange-inline&quot;&gt;pdf &lt;/ins&gt;pdf]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Arkin G, Alijan G, Hamdulla A, A Comparative Analysis of Acoustic Characteristics between Kazak Uyghur Mandarin Learners and Standard Mandarin Speakers，Proceedings of the 2019 International Conference on Asian Language Processing, IALP 2019, p 474-479, November 2019 [https://ieeexplore.ieee.org/abstract/document/9037703/ link]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Arkin G, Alijan G, Hamdulla A, A Comparative Analysis of Acoustic Characteristics between Kazak Uyghur Mandarin Learners and Standard Mandarin Speakers，Proceedings of the 2019 International Conference on Asian Language Processing, IALP 2019, p 474-479, November 2019 [https://ieeexplore.ieee.org/abstract/document/9037703/ link]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;tr&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Statistical Analysis of Syllable Duration of Uyghur Language，Proceedings of the 2019 International Conference on Asian Language Processing, IALP 2019, p 468-473, November 2019 [https://ieeexplore.ieee.org/abstract/document/9037656]&lt;/div&gt;&lt;/td&gt;&lt;td class='diff-marker'&gt;&amp;#160;&lt;/td&gt;&lt;td style=&quot;background-color: #f9f9f9; color: #333333; font-size: 88%; border-style: solid; border-width: 1px 1px 1px 4px; border-radius: 0.33em; border-color: #e6e6e6; vertical-align: top; white-space: pre-wrap;&quot;&gt;&lt;div&gt;# Statistical Analysis of Syllable Duration of Uyghur Language，Proceedings of the 2019 International Conference on Asian Language Processing, IALP 2019, p 468-473, November 2019 [https://ieeexplore.ieee.org/abstract/document/9037656]&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;
&lt;/table&gt;</summary>
		<author><name>Cslt</name></author>	</entry>

	</feed>