@@ -8,9 +8,7 @@ import random
 import numpy as np
 import time
 import logging
-import progressbar
 
-import logging
 logging.getLogger('transformers.generation_utils').disabled = True
 
 def eval_model(args, model, data, cuda_available, device):
@@ -19,10 +17,7 @@ def eval_model(args, model, data, cuda_available, device):
     val_loss, token_sum = 0., 0.
     model.eval()
     with torch.no_grad():
-        p = progressbar.ProgressBar(eval_step)
-        p.start()
         for idx in range(eval_step):
-            p.update(idx)
             batch_input_tensor, batch_labels, _ = \
             data.get_next_validation_batch(batch_size=dataset_batch_size, mode='test')
             if cuda_available:
@@ -33,7 +28,6 @@ def eval_model(args, model, data, cuda_available, device):
             one_val_token_sum = torch.sum(one_val_token_sum)
             val_loss += one_val_loss.item()
             token_sum += one_val_token_sum.item()
-        p.finish()
     model.train()
     val_loss = val_loss / token_sum
     return val_loss
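For context, a minimal sketch of how eval_model reads once the progressbar calls above are dropped, assembled from the context lines of this diff. The dataset_batch_size / eval_step setup, the .cuda(device) transfers, and the forward pass returning one_val_loss / one_val_token_sum fall between the hunks, so those lines are assumptions here rather than part of the patch.

import torch

def eval_model(args, model, data, cuda_available, device):
    # Assumed setup (not shown in the hunks): batch size and number of eval steps.
    dataset_batch_size = args.batch_size_per_gpu * args.number_of_gpu
    eval_step = int(data.test_num / dataset_batch_size) + 1

    val_loss, token_sum = 0., 0.
    model.eval()
    with torch.no_grad():
        for idx in range(eval_step):
            batch_input_tensor, batch_labels, _ = \
            data.get_next_validation_batch(batch_size=dataset_batch_size, mode='test')
            if cuda_available:
                # Assumed device transfer, hidden between hunks in the diff.
                batch_input_tensor = batch_input_tensor.cuda(device)
                batch_labels = batch_labels.cuda(device)
            # Assumed forward pass returning per-batch loss and token counts.
            one_val_loss, one_val_token_sum = model(batch_input_tensor, batch_labels)
            one_val_loss = torch.sum(one_val_loss)
            one_val_token_sum = torch.sum(one_val_token_sum)
            val_loss += one_val_loss.item()
            token_sum += one_val_token_sum.item()
    model.train()
    # Report average per-token validation loss.
    val_loss = val_loss / token_sum
    return val_loss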