```python
from transformers import TrOCRProcessor

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-small-printed")
```

TrOCRProcessorは、特徴抽出器とトークナイザをラップしただけです。任意の特徴抽出器とトークナイザを使えます。google/vit-base-patch16-224-in21k や cl-tohoku/bert-base-japanese など。

事前学習モデルを選択

事前学習モデルが9種類ありますが、すべて10epochsほど試し、一番よかったもので学習させるのがよいかもです。

```python
from transformers import VisionEncoderDecoderModel
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
```
![TrOCRでファインチューニング - Qiita](https://cdn-ak-scissors.b.st-hatena.com/image/square/1b16a2ba4b440ed67bdb227b6964eb6a56d29371/height=288;version=1;width=512/https%3A%2F%2Fqiita-user-contents.imgix.net%2Fhttps%253A%252F%252Fcdn.qiita.com%252Fassets%252Fpublic%252Farticle-ogp-background-412672c5f0600ab9a64263b751f1bc81.png%3Fixlib%3Drb-4.0.0%26w%3D1200%26mark64%3DaHR0cHM6Ly9xaWl0YS11c2VyLWNvbnRlbnRzLmltZ2l4Lm5ldC9-dGV4dD9peGxpYj1yYi00LjAuMCZ3PTk3MiZoPTM3OCZ0eHQ9VHJPQ1IlRTMlODElQTclRTMlODMlOTUlRTMlODIlQTElRTMlODIlQTQlRTMlODMlQjMlRTMlODMlODElRTMlODMlQTUlRTMlODMlQkMlRTMlODMlOEIlRTMlODMlQjMlRTMlODIlQjAmdHh0LWNvbG9yPSUyMzIxMjEyMSZ0eHQtZm9udD1IaXJhZ2lubyUyMFNhbnMlMjBXNiZ0eHQtc2l6ZT01NiZ0eHQtYWxpZ249bGVmdCUyQ3RvcCZzPWRhYzY0NmExMzI5NTkzNjBlNTNiMDU1ZjAyMWI1MDcy%26mark-x%3D142%26mark-y%3D57%26blend64%3DaHR0cHM6Ly9xaWl0YS11c2VyLWNvbnRlbnRzLmltZ2l4Lm5ldC9-dGV4dD9peGxpYj1yYi00LjAuMCZoPTc2Jnc9NzcwJnR4dD0lNDByZWx1JnR4dC1jb2xvcj0lMjMyMTIxMjEmdHh0LWZvbnQ9SGlyYWdpbm8lMjBTYW5zJTIwVzYmdHh0LXNpemU9MzYmdHh0LWFsaWduPWxlZnQlMkN0b3Amcz02ZmVkOGEwNzhkYTI3ZDEyY2RhOGM1ZDA2OTJhNzQ2Yw%26blend-x%3D142%26blend-y%3D486%26blend-mode%3Dnormal%26s%3D705adb17cf5fce1428e2107d97f988d6)