Loading libraries and setting up the TPU

!pip install transformers

!pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl

import torch_xla
import torch_xla.core.xla_model as xm
import transformers
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
import pandas as pd
import numpy as np
import urllib.request
import os
from tqdm import tqdm
import tensorflow as tf
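
Before moving on, it can help to confirm that the notebook actually sees a TPU. A minimal check, assuming the torch_xla install above succeeded:

print(xm.xla_device())  # should print an XLA device such as xla:1 on a TPU runtime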

Loading the data

urllib.request.urlretrieve("https://raw.githubusercontent.com/e9t/nsmc/master/ratings_train.txt", filename="ratings_train.txt")
urllib.request.urlretrieve("https://raw.githubusercontent.com/e9t/nsmc/master/ratings_test.txt", filename="ratings_test.txt")

train_data = pd.read_table('ratings_train.txt')
test_data = pd.read_table('ratings_test.txt')

# drop rows with missing reviews, then confirm no NaN values remain
train_data = train_data.dropna(how='any')
train_data = train_data.reset_index(drop=True)
print(train_data.isnull().values.any())

test_data = test_data.dropna(how='any')
test_data = test_data.reset_index(drop=True)
print(test_data.isnull().values.any())
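
The NSMC files are tab-separated with id, document, and label columns (label 1 is a positive review, 0 a negative one). A quick look at the cleaned data and its size, for example:

print(train_data.head())                   # id / document / label columns
print(train_data['label'].value_counts())  # the two classes are roughly balanced
print(len(train_data), len(test_data))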

Data preprocessing

tokenizer = BertTokenizer.from_pretrained("klue/bert-base")

def convert_examples_to_features(examples,labels,max_seq_len,tokenizer):

  input_ids = []
  attention_masks = []
  token_type_ids = []
  data_labels = []

  for example, label in tqdm(zip(examples,labels),total=len(examples)):

    # encode adds [CLS]/[SEP] and pads to max_seq_len; truncate longer reviews to avoid length errors
    input_id = tokenizer.encode(example, max_length=max_seq_len, padding='max_length', truncation=True)
    padding_count = input_id.count(tokenizer.pad_token_id)
    # attention mask: 1 for real tokens, 0 for padding
    attention_mask = [1] * (max_seq_len - padding_count) + [0] * padding_count
    # single-sentence input, so all segment ids are 0
    token_type_id = [0] * max_seq_len

    assert len(input_id) == max_seq_len, "Error with input length {} vs {}".format(len(input_id), max_seq_len)
    assert len(attention_mask) == max_seq_len, "Error with attention mask length {} vs {}".format(len(attention_mask), max_seq_len)
    assert len(token_type_id) == max_seq_len, "Error with token type length {} vs {}".format(len(token_type_id), max_seq_len)

    input_ids.append(input_id)
    attention_masks.append(attention_mask)
    token_type_ids.append(token_type_id)
    data_labels.append(label)

  input_ids = torch.tensor(input_ids)
  attention_masks = torch.tensor(attention_masks)
  token_type_ids = torch.tensor(token_type_ids)

  data_labels = torch.tensor(data_labels)

  return (input_ids, attention_masks, token_type_ids), data_labels

max_seq_len = 128

train_X,train_y = convert_examples_to_features(train_data["document"],train_data["label"],max_seq_len = max_seq_len, tokenizer = tokenizer)
test_X, test_y = convert_examples_to_features(test_data["document"],test_data["label"],max_seq_len = max_seq_len,tokenizer=tokenizer)
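
Every returned tensor should share the shape (number of reviews, max_seq_len), and the labels should have one entry per review; a quick check:

print(train_X[0].shape, train_X[1].shape, train_X[2].shape, train_y.shape)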

Building and training the model

model = BertForSequenceClassification.from_pretrained("klue/bert-base")
device = xm.xla_device()

# replace the default classification head with a single-output sigmoid head,
# so the model emits a probability for the positive class (paired with BCELoss below)
model.classifier = nn.Sequential(
    nn.Linear(768, 1, bias=True),
    nn.Sigmoid()
)
model = model.to(device)

optimizer = optim.Adam(model.parameters(), lr=5e-5)
# BCELoss (rather than BCEWithLogitsLoss) because the new head already applies a sigmoid
criterion = nn.BCELoss()

input_ids_train = train_X[0]
attention_masks_train = train_X[1]
token_type_ids_train = train_X[2]

input_ids_test = test_X[0]
attention_masks_test = test_X[1]
token_type_ids_test = test_X[2]

train_tensor = TensorDataset(input_ids_train,attention_masks_train,token_type_ids_train,train_y)
test_tensor = TensorDataset(input_ids_test,attention_masks_test,token_type_ids_test,test_y)

train_loader = DataLoader(train_tensor,batch_size=128,shuffle=True,drop_last=True)
test_loader = DataLoader(test_tensor,batch_size=128,shuffle=False)

model.train()
for epoch in range(2):
  avg_loss = 0
  for batch in train_loader:

    input_id = batch[0].to(device)
    attention_mask = batch[1].to(device)
    token_type_id = batch[2].to(device)
    label = batch[3].type(torch.FloatTensor).to(device)

    # the custom head applies a sigmoid, so logits[0] holds probabilities in [0, 1]
    logits = model(input_id, attention_mask, token_type_id)
    loss = criterion(logits[0].view(-1), label)

    optimizer.zero_grad()
    loss.backward()
    # on TPU, xm.optimizer_step applies the update; barrier=True forces the XLA graph to execute each step
    xm.optimizer_step(optimizer, barrier=True)

    avg_loss += loss.item() / len(train_loader)

  print("epoch: {}, loss: {}".format(epoch + 1, avg_loss))

Evaluation

def test():
  model.eval()
  corrected = 0
  for batch in test_loader:

    with torch.no_grad():
      input_id = batch[0].to(device)
      attention_mask = batch[1].to(device)
      token_type_id = batch[2].to(device)
      label = batch[3].to(device)

      # threshold the sigmoid output at 0.5 and count correct predictions
      logits = model(input_id, attention_mask, token_type_id)
      corrected += ((logits[0].cpu() >= 0.5).float().view(-1) == label.cpu()).sum().item()

  print("Test accuracy: {:.2f}".format(corrected / len(test_loader.dataset) * 100))

test()

def sentiment_predict(new_sentence):

  model.eval()

  # encode a single review exactly as the training data was encoded
  input_id = tokenizer.encode(new_sentence, max_length=max_seq_len, padding='max_length', truncation=True)

  padding_count = input_id.count(tokenizer.pad_token_id)
  attention_mask = [1] * (max_seq_len - padding_count) + [0] * padding_count
  token_type_id = [0] * max_seq_len

  # add a batch dimension of size 1
  input_id = torch.tensor(input_id).unsqueeze(0)
  attention_mask = torch.tensor(attention_mask).unsqueeze(0)
  token_type_id = torch.tensor(token_type_id).unsqueeze(0)

  input_id = input_id.to(device)
  attention_mask = attention_mask.to(device)
  token_type_id = token_type_id.to(device)
  with torch.no_grad():
    logits = model(input_id,attention_mask,token_type_id)
  score = logits[0].cpu().item()

  if score > 0.5:
    print("Positive review with {:.2f}% probability.\n".format(score * 100))

  else:
    print("Negative review with {:.2f}% probability.\n".format((1 - score) * 100))

sentiment_predict("이 영화 존잼입니다 대박") # 예시
