@inproceedings{wu-etal-2012-factored-recurrent,
title = "Factored recurrent neural network language model in {TED} lecture transcription",
author = "Wu, Youzheng and
Yamamoto, Hitoshi and
Lu, Xugang and
Matsuda, Shigeki and
Hori, Chiori and
Kashioka, Hideki",
booktitle = "Proceedings of the 9th International Workshop on Spoken Language Translation: Papers",
month = dec # " 6-7",
year = "2012",
address = "Hong Kong, Table of contents",
url = "https://aclanthology.org/2012.iwslt-papers.11/",
pages = "222--228",
abstract = "In this study, we extend recurrent neural network-based language models (RNNLMs) by explicitly integrating morphological and syntactic factors (or features). Our proposed RNNLM is called a factored RNNLM that is expected to enhance RNNLMs. A number of experiments are carried out on top of state-of-the-art LVCSR system that show the factored RNNLM improves the performance measured by perplexity and word error rate. In the IWSLT TED test data sets, absolute word error rate reductions over RNNLM and n-gram LM are 0.4{\ensuremath{\sim}}0.8 points."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wu-etal-2012-factored-recurrent">
<titleInfo>
<title>Factored recurrent neural network language model in TED lecture transcription</title>
</titleInfo>
<name type="personal">
<namePart type="given">Youzheng</namePart>
<namePart type="family">Wu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hitoshi</namePart>
<namePart type="family">Yamamoto</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xugang</namePart>
<namePart type="family">Lu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shigeki</namePart>
<namePart type="family">Matsuda</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chiori</namePart>
<namePart type="family">Hori</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hideki</namePart>
<namePart type="family">Kashioka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
    <dateIssued>December 6-7, 2012</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 9th International Workshop on Spoken Language Translation: Papers</title>
</titleInfo>
<originInfo>
<place>
<placeTerm type="text">Hong Kong, Table of contents</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>In this study, we extend recurrent neural network-based language models (RNNLMs) by explicitly integrating morphological and syntactic factors (or features). Our proposed RNNLM is called a factored RNNLM that is expected to enhance RNNLMs. A number of experiments are carried out on top of state-of-the-art LVCSR system that show the factored RNNLM improves the performance measured by perplexity and word error rate. In the IWSLT TED test data sets, absolute word error rate reductions over RNNLM and n-gram LM are 0.4∼0.8 points.</abstract>
<identifier type="citekey">wu-etal-2012-factored-recurrent</identifier>
<location>
<url>https://aclanthology.org/2012.iwslt-papers.11/</url>
</location>
<part>
        <date>December 6-7, 2012</date>
<extent unit="page">
<start>222</start>
<end>228</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Factored recurrent neural network language model in TED lecture transcription
%A Wu, Youzheng
%A Yamamoto, Hitoshi
%A Lu, Xugang
%A Matsuda, Shigeki
%A Hori, Chiori
%A Kashioka, Hideki
%S Proceedings of the 9th International Workshop on Spoken Language Translation: Papers
%D 2012
%8 December 6-7
%C Hong Kong
%F wu-etal-2012-factored-recurrent
%X In this study, we extend recurrent neural network-based language models (RNNLMs) by explicitly integrating morphological and syntactic factors (or features). Our proposed RNNLM is called a factored RNNLM that is expected to enhance RNNLMs. A number of experiments are carried out on top of state-of-the-art LVCSR system that show the factored RNNLM improves the performance measured by perplexity and word error rate. In the IWSLT TED test data sets, absolute word error rate reductions over RNNLM and n-gram LM are 0.4∼0.8 points.
%U https://aclanthology.org/2012.iwslt-papers.11/
%P 222-228
Markdown (Informal)
[Factored recurrent neural network language model in TED lecture transcription](https://aclanthology.org/2012.iwslt-papers.11/) (Wu et al., IWSLT 2012)
ACL
Youzheng Wu, Hitoshi Yamamoto, Xugang Lu, Shigeki Matsuda, Chiori Hori, and Hideki Kashioka. 2012. [Factored recurrent neural network language model in TED lecture transcription](https://aclanthology.org/2012.iwslt-papers.11/). In *Proceedings of the 9th International Workshop on Spoken Language Translation: Papers*, pages 222-228, Hong Kong.
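The abstract describes extending an RNNLM by feeding it morphological and syntactic factors alongside each word. Below is a minimal sketch, not the authors' code, of that core idea: each time step's input is the concatenation of a word embedding with embeddings of auxiliary factors (a part-of-speech tag and a stem are used here purely as illustrative assumptions, as are all layer sizes and names).

```python
# Minimal sketch of a factored RNN language model: the RNN input at each step
# is [word embedding ; POS-tag embedding ; stem embedding]. The specific
# factors, dimensions, and class/variable names are illustrative assumptions,
# not taken from the paper.
import torch
import torch.nn as nn

class FactoredRNNLM(nn.Module):
    def __init__(self, vocab_size, pos_size, stem_size,
                 word_dim=128, factor_dim=32, hidden_dim=256):
        super().__init__()
        self.word_emb = nn.Embedding(vocab_size, word_dim)
        self.pos_emb = nn.Embedding(pos_size, factor_dim)    # syntactic factor
        self.stem_emb = nn.Embedding(stem_size, factor_dim)  # morphological factor
        self.rnn = nn.RNN(word_dim + 2 * factor_dim, hidden_dim, batch_first=True)
        self.out = nn.Linear(hidden_dim, vocab_size)          # next-word scores

    def forward(self, words, pos_tags, stems, hidden=None):
        # Concatenate word and factor embeddings at every time step.
        x = torch.cat([self.word_emb(words),
                       self.pos_emb(pos_tags),
                       self.stem_emb(stems)], dim=-1)
        h, hidden = self.rnn(x, hidden)
        return self.out(h), hidden

# Toy usage: a batch of 2 sequences of length 5 with made-up factor inventories.
model = FactoredRNNLM(vocab_size=1000, pos_size=50, stem_size=800)
words = torch.randint(0, 1000, (2, 5))
pos_tags = torch.randint(0, 50, (2, 5))
stems = torch.randint(0, 800, (2, 5))
logits, _ = model(words, pos_tags, stems)   # (2, 5, 1000) next-word logits
```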