#include <iostream>
using namespace std;

int main() {
    // your code goes here
    return 0;
}
from transformers import GPT2LMHeadModel, GPT2Tokenizer

# Load the pretrained GPT-2 tokenizer and language model from the Hugging Face hub
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Prompt to continue (Japanese for "The weather today is ...")
input_text = "今日は天気が"
inputs = tokenizer(input_text, return_tensors="pt")

# Greedily generate up to 50 tokens and decode the result back to text
outputs = model.generate(**inputs, max_length=50)
print(tokenizer.decode(outputs[0]))
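As a possible follow-up, here is a minimal sketch that continues the snippet above (it reuses the same tokenizer, model, and inputs objects): generate() also accepts standard sampling arguments such as do_sample, top_p, and temperature, which typically give less repetitive continuations than the greedy call shown above. The parameter values below are illustrative, not taken from the original.

# Sampling-based generation (sketch; run after the snippet above).
# do_sample / top_p / temperature are standard transformers generate() arguments;
# the specific values here are only examples.
sampled = model.generate(
    **inputs,
    max_length=50,
    do_sample=True,
    top_p=0.95,
    temperature=0.8,
    pad_token_id=tokenizer.eos_token_id,  # avoids the missing-pad-token warning
)
print(tokenizer.decode(sampled[0], skip_special_tokens=True))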