gptkbp:instanceOf
|
large language model
|
gptkbp:architecture
|
gptkb:transformer
|
gptkbp:arXivID
|
2010.11934
|
gptkbp:author
|
gptkb:Adam_Roberts
gptkb:Colin_Raffel
gptkb:Katherine_Lee
gptkb:Michael_Matena
gptkb:Noam_Shazeer
gptkb:Peter_J._Liu
gptkb:Sharan_Narang
gptkb:Wei_Li
gptkb:Yanqi_Zhou
|
gptkbp:availableOn
|
gptkb:Hugging_Face
|
gptkbp:basedOn
|
gptkb:T5
|
gptkbp:citation
|
1000+
|
gptkbp:developedBy
|
gptkb:Google_Research
|
gptkbp:encoderDecoder
|
true
|
gptkbp:fineTunedWith
|
true
|
gptkbp:github
|
https://github.com/google-research/text-to-text-transfer-transformer
|
https://www.w3.org/2000/01/rdf-schema#label
|
mT5
|
gptkbp:input
|
gptkb:text
|
gptkbp:inputSequenceLength
|
512
|
gptkbp:language
|
101
|
gptkbp:license
|
Apache 2.0
|
gptkbp:hasTask
|
text-to-text
|
gptkbp:multilingual
|
true
|
gptkbp:notablePublication
|
gptkb:mT5:_A_Massively_Multilingual_Pre-trained_Text-to-Text_Transformer
|
gptkbp:openSource
|
true
|
gptkbp:output
|
gptkb:text
|
gptkbp:outputSequenceLength
|
512
|
gptkbp:pretrainingObjective
|
span corruption
|
gptkbp:relatedTo
|
gptkb:T5
gptkb:ByT5
|
gptkbp:releaseYear
|
2020
|
gptkbp:size
|
gptkb:Base
gptkb:XXL
Small
Large
XL
|
gptkbp:supportsLanguage
|
multilingual
|
gptkbp:tokenizerType
|
gptkb:SentencePiece
|
gptkbp:trainer
|
gptkb:Common_Crawl
gptkb:mC4
|
gptkbp:type
|
self-attention
|
gptkbp:usedFor
|
translation
question answering
summarization
text generation
text classification
|
gptkbp:bfsParent
|
gptkb:mBART
gptkb:MC4
gptkb:Text-To-Text_Transfer_Transformer
gptkb:T5_(Text-To-Text_Transfer_Transformer)
|
gptkbp:bfsLayer
|
7
|