diff --git a/caption.py b/caption.py
index 8ca071905..eee8f328f 100644
--- a/caption.py
+++ b/caption.py
@@ -104,7 +104,7 @@ def caption_image_beam_search(encoder, decoder, image_path, word_map, beam_size=
         top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)

         # Convert unrolled indices to actual indices of scores
-        prev_word_inds = top_k_words / vocab_size  # (s)
+        prev_word_inds = torch.div(top_k_words, vocab_size, rounding_mode='floor')  # (s)
         next_word_inds = top_k_words % vocab_size  # (s)

         # Add new words to sequences, alphas
@@ -167,7 +167,7 @@ def visualize_att(image_path, seq, alphas, rev_word_map, smooth=True):
     for t in range(len(words)):
         if t > 50:
             break
-        plt.subplot(np.ceil(len(words) / 5.), 5, t + 1)
+        plt.subplot(int(np.ceil(len(words) / 5.)), 5, t + 1)

         plt.text(0, 1, '%s' % (words[t]), color='black', backgroundcolor='white', fontsize=12)

         plt.imshow(image)
diff --git a/eval.py b/eval.py
index 3e9359a09..52801f2b0 100644
--- a/eval.py
+++ b/eval.py
@@ -121,7 +121,7 @@ def evaluate(beam_size):
             top_k_scores, top_k_words = scores.view(-1).topk(k, 0, True, True)  # (s)

             # Convert unrolled indices to actual indices of scores
-            prev_word_inds = top_k_words / vocab_size  # (s)
+            prev_word_inds = torch.div(top_k_words, vocab_size, rounding_mode='floor')  # (s)
             next_word_inds = top_k_words % vocab_size  # (s)

             # Add new words to sequences